xref: /freebsd/sys/netinet/sctputil.c (revision 0e1497aefd602cea581d2380d22e67dfdcac6b4e)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 #include <netinet/sctp_bsd_addr.h>
52 
53 
54 #ifndef KTR_SCTP
55 #define KTR_SCTP KTR_SUBSYS
56 #endif
57 
58 void
59 sctp_sblog(struct sockbuf *sb,
60     struct sctp_tcb *stcb, int from, int incr)
61 {
62 	struct sctp_cwnd_log sctp_clog;
63 
64 	sctp_clog.x.sb.stcb = stcb;
65 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
66 	if (stcb)
67 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
68 	else
69 		sctp_clog.x.sb.stcb_sbcc = 0;
70 	sctp_clog.x.sb.incr = incr;
71 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
72 	    SCTP_LOG_EVENT_SB,
73 	    from,
74 	    sctp_clog.x.misc.log1,
75 	    sctp_clog.x.misc.log2,
76 	    sctp_clog.x.misc.log3,
77 	    sctp_clog.x.misc.log4);
78 }
79 
80 void
81 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
82 {
83 	struct sctp_cwnd_log sctp_clog;
84 
85 	sctp_clog.x.close.inp = (void *)inp;
86 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
87 	if (stcb) {
88 		sctp_clog.x.close.stcb = (void *)stcb;
89 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
90 	} else {
91 		sctp_clog.x.close.stcb = 0;
92 		sctp_clog.x.close.state = 0;
93 	}
94 	sctp_clog.x.close.loc = loc;
95 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
96 	    SCTP_LOG_EVENT_CLOSE,
97 	    0,
98 	    sctp_clog.x.misc.log1,
99 	    sctp_clog.x.misc.log2,
100 	    sctp_clog.x.misc.log3,
101 	    sctp_clog.x.misc.log4);
102 }
103 
104 
/*
 * Trace an RTO/RTT measurement event for the given destination address.
 * "from" identifies the call site.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Zero first: only the two rto fields below are filled in. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->prev_rtt;
	/* Emit as four 32-bit words via the misc union overlay. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
122 
/*
 * Trace a stream-delivery event from explicit TSN/SSN/stream values
 * (the "alt" variant used when no sctp_queued_to_read is at hand).
 * The e_tsn/e_sseq pair is unused here and logged as zero.
 */
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
143 
/*
 * Trace a Nagle-algorithm decision (send now vs. hold back) together
 * with the flight/queue counters the decision was based on.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
162 
163 
/*
 * Trace SACK processing: previous and new cumulative ack, the TSN being
 * worked on, and the gap/duplicate report counts from the SACK chunk.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
182 
/*
 * Trace the receive mapping array state: base TSN of the map, the
 * cumulative ack point, and the highest TSN seen.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Zero first: only the three map fields below are filled in. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
200 
/*
 * Trace a fast-retransmit decision: the largest TSN in the SACK, the
 * largest newly acked TSN, and the TSN under consideration.
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Zero first: only the three fr fields below are filled in. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
220 
221 
222 void
223 sctp_log_mb(struct mbuf *m, int from)
224 {
225 	struct sctp_cwnd_log sctp_clog;
226 
227 	sctp_clog.x.mb.mp = m;
228 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
229 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
230 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
231 	if (SCTP_BUF_IS_EXTENDED(m)) {
232 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
233 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
234 	} else {
235 		sctp_clog.x.mb.ext = 0;
236 		sctp_clog.x.mb.refcnt = 0;
237 	}
238 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
239 	    SCTP_LOG_EVENT_MBUF,
240 	    from,
241 	    sctp_clog.x.misc.log1,
242 	    sctp_clog.x.misc.log2,
243 	    sctp_clog.x.misc.log3,
244 	    sctp_clog.x.misc.log4);
245 }
246 
247 
248 void
249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250     int from)
251 {
252 	struct sctp_cwnd_log sctp_clog;
253 
254 	if (control == NULL) {
255 		SCTP_PRINTF("Gak log of NULL?\n");
256 		return;
257 	}
258 	sctp_clog.x.strlog.stcb = control->stcb;
259 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
260 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
261 	sctp_clog.x.strlog.strm = control->sinfo_stream;
262 	if (poschk != NULL) {
263 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
264 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
265 	} else {
266 		sctp_clog.x.strlog.e_tsn = 0;
267 		sctp_clog.x.strlog.e_sseq = 0;
268 	}
269 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
270 	    SCTP_LOG_EVENT_STRM,
271 	    from,
272 	    sctp_clog.x.misc.log1,
273 	    sctp_clog.x.misc.log2,
274 	    sctp_clog.x.misc.log3,
275 	    sctp_clog.x.misc.log4);
276 
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 
313 }
314 
/*
 * Snapshot which SCTP locks the current thread holds (tcb, inp, create,
 * global info, socket buffers) and emit it to the KTR trace.  Used to
 * debug lock-ordering problems; inp and/or stcb may be NULL.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		/* No stcb: ownership cannot be determined. */
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both test
		 * so_rcv.sb_mtx here — presumably because the socket lock
		 * aliases the receive-buffer mutex; confirm against
		 * SOCK_MTX() for this FreeBSD version.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
359 
/*
 * Trace a max-burst limit event for "net" (error code and applied burst
 * value), reusing the cwnd log record layout.
 * NOTE(review): net is dereferenced without a NULL check — presumably
 * all callers pass a valid net; confirm before adding new call sites.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* Queue depths are clamped to fit the 8-bit log fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
387 
/*
 * Trace a peer-rwnd accounting event (send size and overhead charged
 * against the window).  new_rwnd is not used by this variant.
 */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
405 
/*
 * Trace a peer-rwnd recomputation: the inputs (old rwnd, flight size,
 * overhead) and the newly computed a_rwnd value.
 */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
423 
/*
 * Trace mbuf-count accounting: total queued bytes, the byte-size change
 * being booked, total queued mbuf count, and the mbuf-count change.
 */
void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
442 
/*
 * Trace four caller-supplied 32-bit values directly — no log-record
 * staging needed.  Generic catch-all used throughout the stack.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
451 
452 void
453 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
454 {
455 	struct sctp_cwnd_log sctp_clog;
456 
457 	sctp_clog.x.wake.stcb = (void *)stcb;
458 	sctp_clog.x.wake.wake_cnt = wake_cnt;
459 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
460 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
461 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
462 
463 	if (stcb->asoc.stream_queue_cnt < 0xff)
464 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
465 	else
466 		sctp_clog.x.wake.stream_qcnt = 0xff;
467 
468 	if (stcb->asoc.chunks_on_out_queue < 0xff)
469 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
470 	else
471 		sctp_clog.x.wake.chunks_on_oque = 0xff;
472 
473 	sctp_clog.x.wake.sctpflags = 0;
474 	/* set in the defered mode stuff */
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
476 		sctp_clog.x.wake.sctpflags |= 1;
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
478 		sctp_clog.x.wake.sctpflags |= 2;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
480 		sctp_clog.x.wake.sctpflags |= 4;
481 	/* what about the sb */
482 	if (stcb->sctp_socket) {
483 		struct socket *so = stcb->sctp_socket;
484 
485 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
486 	} else {
487 		sctp_clog.x.wake.sbflags = 0xff;
488 	}
489 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
490 	    SCTP_LOG_EVENT_WAKE,
491 	    from,
492 	    sctp_clog.x.misc.log1,
493 	    sctp_clog.x.misc.log2,
494 	    sctp_clog.x.misc.log3,
495 	    sctp_clog.x.misc.log4);
496 
497 }
498 
/*
 * Trace a sender-blocking event: queue/flight state at the moment a
 * send had to block (or unblock), plus the length being sent.
 */
void
sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
	/* Flight size logged in KiB to fit the 16-bit field. */
	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
520 
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: always succeeds without
 * copying any data — the log is retrieved out of band via ktrdump.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
527 
528 #ifdef SCTP_AUDITING_ENABLED
/* Circular audit trace: per entry, [0] = event code, [1] = detail byte. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
531 
532 static
533 void
534 sctp_print_audit_report(void)
535 {
536 	int i;
537 	int cnt;
538 
539 	cnt = 0;
540 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
541 		if ((sctp_audit_data[i][0] == 0xe0) &&
542 		    (sctp_audit_data[i][1] == 0x01)) {
543 			cnt = 0;
544 			SCTP_PRINTF("\n");
545 		} else if (sctp_audit_data[i][0] == 0xf0) {
546 			cnt = 0;
547 			SCTP_PRINTF("\n");
548 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			SCTP_PRINTF("\n");
551 			cnt = 0;
552 		}
553 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
554 		    (uint32_t) sctp_audit_data[i][1]);
555 		cnt++;
556 		if ((cnt % 14) == 0)
557 			SCTP_PRINTF("\n");
558 	}
559 	for (i = 0; i < sctp_audit_indx; i++) {
560 		if ((sctp_audit_data[i][0] == 0xe0) &&
561 		    (sctp_audit_data[i][1] == 0x01)) {
562 			cnt = 0;
563 			SCTP_PRINTF("\n");
564 		} else if (sctp_audit_data[i][0] == 0xf0) {
565 			cnt = 0;
566 			SCTP_PRINTF("\n");
567 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			SCTP_PRINTF("\n");
570 			cnt = 0;
571 		}
572 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
573 		    (uint32_t) sctp_audit_data[i][1]);
574 		cnt++;
575 		if ((cnt % 14) == 0)
576 			SCTP_PRINTF("\n");
577 	}
578 	SCTP_PRINTF("\n");
579 }
580 
/*
 * Consistency audit of an association's retransmission accounting.
 * Records trace entries in sctp_audit_data, compares the counters cached
 * in the asoc (sent_queue_retran_cnt, total_flight, total_flight_count,
 * per-net flight_size) against values recomputed from the sent queue,
 * and — when a mismatch is found — prints the discrepancy, CORRECTS the
 * cached counter in place, and dumps the audit report.
 * "from" tags the call site; inp/stcb may be NULL (logged and ignored).
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA: audit entry marker, detail = caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: called without an endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: called without an association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the cached retransmit count before checking it. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recompute from the sent queue: chunks marked RESEND count toward
	 * resend_cnt; chunks still outstanding (sent < RESEND) contribute
	 * their booked size to the flight totals.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-net flight sizes must sum to total_flight. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sum disagrees with total_flight. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			/* Recompute this net's flight from the sent queue. */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* Something was wrong: dump the full audit trail. */
		sctp_print_audit_report();
	}
}
710 
711 void
712 sctp_audit_log(uint8_t ev, uint8_t fd)
713 {
714 
715 	sctp_audit_data[sctp_audit_indx][0] = ev;
716 	sctp_audit_data[sctp_audit_indx][1] = fd;
717 	sctp_audit_indx++;
718 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
719 		sctp_audit_indx = 0;
720 	}
721 }
722 
723 #endif
724 
725 /*
726  * sctp_stop_timers_for_shutdown() should be called
727  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
728  * state to make sure that all timers are stopped.
729  */
/*
 * Stop every timer that must not fire once shutdown is underway: the
 * association-level heartbeat, delayed-SACK, stream-reset, ASCONF,
 * autoclose and delayed-event timers, plus the per-destination fast
 * retransmit and path-MTU timers.  Return values are ignored — a timer
 * that was not running is fine.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
749 
750 /*
751  * a list of sizes based on typical mtu's, used only if next hop size not
752  * returned.
753  */
/*
 * Table of common link MTUs (SLIP, Ethernet, FDDI, ATM, ...).  Must
 * remain sorted in strictly ascending order: sctp_get_prev_mtu() and
 * sctp_get_next_mtu() binary-step through it relying on that order.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
774 
775 /*
776  * Return the largest MTU smaller than val. If there is no
777  * entry, just return val.
778  */
779 uint32_t
780 sctp_get_prev_mtu(uint32_t val)
781 {
782 	uint32_t i;
783 
784 	if (val <= sctp_mtu_sizes[0]) {
785 		return (val);
786 	}
787 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
788 		if (val <= sctp_mtu_sizes[i]) {
789 			break;
790 		}
791 	}
792 	return (sctp_mtu_sizes[i - 1]);
793 }
794 
795 /*
796  * Return the smallest MTU larger than val. If there is no
797  * entry, just return val.
798  */
799 uint32_t
800 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
801 {
802 	/* select another MTU that is just bigger than this one */
803 	uint32_t i;
804 
805 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
806 		if (val < sctp_mtu_sizes[i]) {
807 			return (sctp_mtu_sizes[i]);
808 		}
809 	}
810 	return (val);
811 }
812 
/*
 * Refill the endpoint's random_store by HMACing the endpoint's random
 * seed with a monotonically increasing counter, then reset the read
 * offset to the start of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
831 
/*
 * Draw a 32-bit value from the endpoint's pre-hashed random store,
 * claiming a 4-byte slot lock-free via compare-and-swap on store_at.
 * When the store wraps, it is refilled from the HMAC generator.  If
 * initial_sequence_debug is non-zero, deterministic sequential values
 * are returned instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the last partial word of the signature buffer. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* CAS claims [store_at, store_at+4) for this thread; retry on race. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads the slot through a uint32_t* cast —
	 * presumably random_store is suitably aligned; confirm, as this
	 * would be a misaligned/strict-aliasing access otherwise.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
869 
870 uint32_t
871 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
872 {
873 	uint32_t x, not_done;
874 	struct timeval now;
875 
876 	(void)SCTP_GETTIME_TIMEVAL(&now);
877 	not_done = 1;
878 	while (not_done) {
879 		x = sctp_select_initial_TSN(&inp->sctp_ep);
880 		if (x == 0) {
881 			/* we never use 0 */
882 			continue;
883 		}
884 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
885 			not_done = 0;
886 		}
887 	}
888 	return (x);
889 }
890 
891 int
892 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
893     uint32_t override_tag, uint32_t vrf_id)
894 {
895 	struct sctp_association *asoc;
896 
897 	/*
898 	 * Anything set to zero is taken care of by the allocation routine's
899 	 * bzero
900 	 */
901 
902 	/*
903 	 * Up front select what scoping to apply on addresses I tell my peer
904 	 * Not sure what to do with these right now, we will need to come up
905 	 * with a way to set them. We may need to pass them through from the
906 	 * caller in the sctp_aloc_assoc() function.
907 	 */
908 	int i;
909 
910 	asoc = &stcb->asoc;
911 	/* init all variables to a known value. */
912 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
913 	asoc->max_burst = m->sctp_ep.max_burst;
914 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
915 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
916 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
917 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
918 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
919 	asoc->sctp_frag_point = m->sctp_frag_point;
920 #ifdef INET
921 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
922 #else
923 	asoc->default_tos = 0;
924 #endif
925 
926 #ifdef INET6
927 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
928 #else
929 	asoc->default_flowlabel = 0;
930 #endif
931 	asoc->sb_send_resv = 0;
932 	if (override_tag) {
933 		asoc->my_vtag = override_tag;
934 	} else {
935 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
936 	}
937 	/* Get the nonce tags */
938 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
939 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
940 	asoc->vrf_id = vrf_id;
941 
942 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
943 		asoc->hb_is_disabled = 1;
944 	else
945 		asoc->hb_is_disabled = 0;
946 
947 #ifdef SCTP_ASOCLOG_OF_TSNS
948 	asoc->tsn_in_at = 0;
949 	asoc->tsn_out_at = 0;
950 	asoc->tsn_in_wrapped = 0;
951 	asoc->tsn_out_wrapped = 0;
952 	asoc->cumack_log_at = 0;
953 	asoc->cumack_log_atsnt = 0;
954 #endif
955 #ifdef SCTP_FS_SPEC_LOG
956 	asoc->fs_index = 0;
957 #endif
958 	asoc->refcnt = 0;
959 	asoc->assoc_up_sent = 0;
960 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
961 	    sctp_select_initial_TSN(&m->sctp_ep);
962 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
963 	/* we are optimisitic here */
964 	asoc->peer_supports_pktdrop = 1;
965 	asoc->peer_supports_nat = 0;
966 	asoc->sent_queue_retran_cnt = 0;
967 
968 	/* for CMT */
969 	asoc->last_net_cmt_send_started = NULL;
970 
971 	/* This will need to be adjusted */
972 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
973 	asoc->last_acked_seq = asoc->init_seq_number - 1;
974 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
975 	asoc->asconf_seq_in = asoc->last_acked_seq;
976 
977 	/* here we are different, we hold the next one we expect */
978 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
979 
980 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
981 	asoc->initial_rto = m->sctp_ep.initial_rto;
982 
983 	asoc->max_init_times = m->sctp_ep.max_init_times;
984 	asoc->max_send_times = m->sctp_ep.max_send_times;
985 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
986 	asoc->free_chunk_cnt = 0;
987 
988 	asoc->iam_blocking = 0;
989 	/* ECN Nonce initialization */
990 	asoc->context = m->sctp_context;
991 	asoc->def_send = m->def_send;
992 	asoc->ecn_nonce_allowed = 0;
993 	asoc->receiver_nonce_sum = 1;
994 	asoc->nonce_sum_expect_base = 1;
995 	asoc->nonce_sum_check = 1;
996 	asoc->nonce_resync_tsn = 0;
997 	asoc->nonce_wait_for_ecne = 0;
998 	asoc->nonce_wait_tsn = 0;
999 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1000 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1001 	asoc->pr_sctp_cnt = 0;
1002 	asoc->total_output_queue_size = 0;
1003 
1004 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1005 		struct in6pcb *inp6;
1006 
1007 		/* Its a V6 socket */
1008 		inp6 = (struct in6pcb *)m;
1009 		asoc->ipv6_addr_legal = 1;
1010 		/* Now look at the binding flag to see if V4 will be legal */
1011 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1012 			asoc->ipv4_addr_legal = 1;
1013 		} else {
1014 			/* V4 addresses are NOT legal on the association */
1015 			asoc->ipv4_addr_legal = 0;
1016 		}
1017 	} else {
1018 		/* Its a V4 socket, no - V6 */
1019 		asoc->ipv4_addr_legal = 1;
1020 		asoc->ipv6_addr_legal = 0;
1021 	}
1022 
1023 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1024 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1025 
1026 	asoc->smallest_mtu = m->sctp_frag_point;
1027 	asoc->minrto = m->sctp_ep.sctp_minrto;
1028 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1029 
1030 	asoc->locked_on_sending = NULL;
1031 	asoc->stream_locked_on = 0;
1032 	asoc->ecn_echo_cnt_onq = 0;
1033 	asoc->stream_locked = 0;
1034 
1035 	asoc->send_sack = 1;
1036 
1037 	LIST_INIT(&asoc->sctp_restricted_addrs);
1038 
1039 	TAILQ_INIT(&asoc->nets);
1040 	TAILQ_INIT(&asoc->pending_reply_queue);
1041 	TAILQ_INIT(&asoc->asconf_ack_sent);
1042 	/* Setup to fill the hb random cache at first HB */
1043 	asoc->hb_random_idx = 4;
1044 
1045 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1046 
1047 	/*
1048 	 * JRS - Pick the default congestion control module based on the
1049 	 * sysctl.
1050 	 */
1051 	switch (m->sctp_ep.sctp_default_cc_module) {
1052 		/* JRS - Standard TCP congestion control */
1053 	case SCTP_CC_RFC2581:
1054 		{
1055 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1056 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1064 			break;
1065 		}
1066 		/* JRS - High Speed TCP congestion control (Floyd) */
1067 	case SCTP_CC_HSTCP:
1068 		{
1069 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1070 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1078 			break;
1079 		}
1080 		/* JRS - HTCP congestion control */
1081 	case SCTP_CC_HTCP:
1082 		{
1083 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1084 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1092 			break;
1093 		}
1094 		/* JRS - By default, use RFC2581 */
1095 	default:
1096 		{
1097 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1098 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1100 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1101 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1102 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1103 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1104 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1105 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1106 			break;
1107 		}
1108 	}
1109 
1110 	/*
1111 	 * Now the stream parameters, here we allocate space for all streams
1112 	 * that we request by default.
1113 	 */
1114 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1115 	    m->sctp_ep.pre_open_stream_count;
1116 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1117 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1118 	    SCTP_M_STRMO);
1119 	if (asoc->strmout == NULL) {
1120 		/* big trouble no memory */
1121 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1122 		return (ENOMEM);
1123 	}
1124 	for (i = 0; i < asoc->streamoutcnt; i++) {
1125 		/*
1126 		 * inbound side must be set to 0xffff, also NOTE when we get
1127 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1128 		 * count (streamoutcnt) but first check if we sent to any of
1129 		 * the upper streams that were dropped (if some were). Those
1130 		 * that were dropped must be notified to the upper layer as
1131 		 * failed to send.
1132 		 */
1133 		asoc->strmout[i].next_sequence_sent = 0x0;
1134 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1135 		asoc->strmout[i].stream_no = i;
1136 		asoc->strmout[i].last_msg_incomplete = 0;
1137 		asoc->strmout[i].next_spoke.tqe_next = 0;
1138 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1139 	}
1140 	/* Now the mapping array */
1141 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1142 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1143 	    SCTP_M_MAP);
1144 	if (asoc->mapping_array == NULL) {
1145 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147 		return (ENOMEM);
1148 	}
1149 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1150 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1151 	    SCTP_M_MAP);
1152 	if (asoc->nr_mapping_array == NULL) {
1153 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1155 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1156 		return (ENOMEM);
1157 	}
1158 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1159 
1160 	/* Now the init of the other outqueues */
1161 	TAILQ_INIT(&asoc->free_chunks);
1162 	TAILQ_INIT(&asoc->out_wheel);
1163 	TAILQ_INIT(&asoc->control_send_queue);
1164 	TAILQ_INIT(&asoc->asconf_send_queue);
1165 	TAILQ_INIT(&asoc->send_queue);
1166 	TAILQ_INIT(&asoc->sent_queue);
1167 	TAILQ_INIT(&asoc->reasmqueue);
1168 	TAILQ_INIT(&asoc->resetHead);
1169 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1170 	TAILQ_INIT(&asoc->asconf_queue);
1171 	/* authentication fields */
1172 	asoc->authinfo.random = NULL;
1173 	asoc->authinfo.active_keyid = 0;
1174 	asoc->authinfo.assoc_key = NULL;
1175 	asoc->authinfo.assoc_keyid = 0;
1176 	asoc->authinfo.recv_key = NULL;
1177 	asoc->authinfo.recv_keyid = 0;
1178 	LIST_INIT(&asoc->shared_keys);
1179 	asoc->marked_retrans = 0;
1180 	asoc->timoinit = 0;
1181 	asoc->timodata = 0;
1182 	asoc->timosack = 0;
1183 	asoc->timoshutdown = 0;
1184 	asoc->timoheartbeat = 0;
1185 	asoc->timocookie = 0;
1186 	asoc->timoshutdownack = 0;
1187 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188 	asoc->discontinuity_time = asoc->start_time;
1189 	/*
1190 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1191 	 * freed later when the association is freed.
1192 	 */
1193 	return (0);
1194 }
1195 
1196 void
1197 sctp_print_mapping_array(struct sctp_association *asoc)
1198 {
1199 	unsigned int i, limit;
1200 
1201 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1202 	    asoc->mapping_array_size,
1203 	    asoc->mapping_array_base_tsn,
1204 	    asoc->cumulative_tsn,
1205 	    asoc->highest_tsn_inside_map,
1206 	    asoc->highest_tsn_inside_nr_map);
1207 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1208 		if (asoc->mapping_array[limit - 1]) {
1209 			break;
1210 		}
1211 	}
1212 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1213 	for (i = 0; i < limit; i++) {
1214 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1215 	}
1216 	if (limit % 16)
1217 		printf("\n");
1218 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1219 		if (asoc->nr_mapping_array[limit - 1]) {
1220 			break;
1221 		}
1222 	}
1223 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1224 	for (i = 0; i < limit; i++) {
1225 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1226 	}
1227 	if (limit % 16)
1228 		printf("\n");
1229 }
1230 
1231 int
1232 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1233 {
1234 	/* mapping array needs to grow */
1235 	uint8_t *new_array1, *new_array2;
1236 	uint32_t new_size;
1237 
1238 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1239 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1240 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1241 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1242 		/* can't get more, forget it */
1243 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1244 		if (new_array1) {
1245 			SCTP_FREE(new_array1, SCTP_M_MAP);
1246 		}
1247 		if (new_array2) {
1248 			SCTP_FREE(new_array2, SCTP_M_MAP);
1249 		}
1250 		return (-1);
1251 	}
1252 	memset(new_array1, 0, new_size);
1253 	memset(new_array2, 0, new_size);
1254 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1255 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1256 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1257 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1258 	asoc->mapping_array = new_array1;
1259 	asoc->nr_mapping_array = new_array2;
1260 	asoc->mapping_array_size = new_size;
1261 	return (0);
1262 }
1263 
1264 
/*
 * Core of the PCB/association iterator: walk every endpoint (or just
 * it->inp when SCTP_ITERATOR_DO_SINGLE_INP is set) whose flags/features
 * match the iterator's filters, and for each matching association in the
 * requested state invoke it->function_assoc.  Optional per-endpoint
 * callbacks (function_inp / function_inp_end) run before/after each
 * endpoint's associations, and function_atend runs once when the walk
 * completes.  The iterator structure itself is freed here on completion.
 *
 * Locking: runs with the INP-INFO read lock and the ITERATOR lock held;
 * takes the INP read lock per endpoint and the TCB lock per association.
 * After SCTP_ITERATOR_MAX_AT_ONCE associations it briefly drops all
 * global locks to let other threads make progress, re-checking the
 * control flags (MUST_EXIT / STOP_CUR_IT / STOP_CUR_INP) on re-entry.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* First endpoint arrives already read-locked from above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* Hold a refcount so the TCB survives the unlock window. */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire per-object locks and drop the temp refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1416 
/*
 * Drain the queue of pending iterators (sctp_it_ctl.iteratorhead) and run
 * each one via sctp_iterator_work(), which frees the iterator when done.
 * The WQ lock is dropped around each run and re-taken afterwards; cur_it
 * tracks the iterator currently in flight.  Exits early if the
 * SCTP_ITERATOR_MUST_EXIT control flag is raised while unlocked.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it = NULL;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
	while (it) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		/* Drop the WQ lock while the (possibly long) walk runs. */
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);

		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			sctp_it_ctl.cur_it = NULL;
			break;
		}
		/* sa_ignore FREED_MEMORY */
		sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1445 
1446 
1447 static void
1448 sctp_handle_addr_wq(void)
1449 {
1450 	/* deal with the ADDR wq from the rtsock calls */
1451 	struct sctp_laddr *wi;
1452 	struct sctp_asconf_iterator *asc;
1453 
1454 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1455 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1456 	if (asc == NULL) {
1457 		/* Try later, no memory */
1458 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1459 		    (struct sctp_inpcb *)NULL,
1460 		    (struct sctp_tcb *)NULL,
1461 		    (struct sctp_nets *)NULL);
1462 		return;
1463 	}
1464 	LIST_INIT(&asc->list_of_work);
1465 	asc->cnt = 0;
1466 
1467 	SCTP_WQ_ADDR_LOCK();
1468 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1469 	while (wi != NULL) {
1470 		LIST_REMOVE(wi, sctp_nxt_addr);
1471 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1472 		asc->cnt++;
1473 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1474 	}
1475 	SCTP_WQ_ADDR_UNLOCK();
1476 
1477 	if (asc->cnt == 0) {
1478 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1479 	} else {
1480 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1481 		    sctp_asconf_iterator_stcb,
1482 		    NULL,	/* No ep end for boundall */
1483 		    SCTP_PCB_FLAGS_BOUNDALL,
1484 		    SCTP_PCB_ANY_FEATURES,
1485 		    SCTP_ASOC_ANY_STATE,
1486 		    (void *)asc, 0,
1487 		    sctp_asconf_iterator_end, NULL, 0);
1488 	}
1489 }
1490 
/*
 * NOTE(review): file-scope scratch variables written by the
 * SCTP_TIMER_TYPE_SEND branch of sctp_timeout_handler() below (T3-rxt
 * return code and the previous overall error count).  They look like
 * debug aids; they are written without synchronization, so concurrent
 * timer handlers can clobber each other's values.  Presumably nothing
 * outside this file reads them — confirm before narrowing their scope
 * (making them static or local).
 */
int retcode = 0;
int cur_oerr = 0;
1493 
/*
 * Central callout handler for every SCTP timer type.  The opaque argument
 * is the struct sctp_timer embedded in the endpoint/association/net that
 * armed it; from it we recover the inp/stcb/net triple and dispatch on
 * tmr->type.
 *
 * The long prologue is defensive: it validates the timer (self pointer,
 * type), takes a reference on the endpoint and association, re-checks that
 * neither has been freed or marked about-to-be-freed, and verifies the
 * callout is still active and not rescheduled — each bailout point
 * releases exactly what was taken so far.  tmr->stopped_from records how
 * far we got, for post-mortem debugging.
 *
 * Per-type handlers that return non-zero have torn down the association
 * themselves ("no need to unlock on tcb its gone"), so those paths jump
 * straight to out_decr without touching stcb again.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer is allowed to run without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* Stash the type now; tmr may be freed by some handlers below. */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* Socket is gone and this type needs one; bail. */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association already torn down. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* ASOCKILL must run even on a dying association. */
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Shutdown took too long; abort the association. */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1959 
/*
 * Arm the timer of the given type.  Depending on t_type the timer lives in
 * the endpoint (inp), the association (stcb) or the destination (net); the
 * arguments required by each type are validated per-case and the call
 * silently returns when one is missing.  If the underlying OS timer is
 * already pending it is left running unchanged (no restart).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/*
	 * Per timer type: pick the sctp_timer structure to arm and compute
	 * the timeout, which must end up in ticks in to_ticks.
	 */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			/* RTO == 0 means no RTT measurement yet; fall back
			 * to the association's initial RTO. */
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * NOTE(review): this deliberately discards
				 * the caller's 'net' and invokes the HB
				 * handler with a NULL net so it picks an
				 * unconfirmed destination itself — confirm
				 * against sctp_heartbeat_timer().
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			/* refresh the 4-byte random pool when exhausted */
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 255 ms; RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				/* skip the HB delay while any reachable,
				 * in-scope address is still unconfirmed */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* re-uses the stream-reset timer slot */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* clamp to the configured early-FR floor */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* note: this type requires net to be NULL */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	/* a case above must have produced both a timer and a timeout */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* record context for the timeout handler */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2314 
/*
 * Stop (disarm) the timer of the given type.  The per-type cases below
 * only locate the sctp_timer structure; the actual cancellation is done
 * once at the bottom.  'from' records the call site for debugging
 * (stored in tmr->stopped_from).  Shared timer slots (e.g. strreset /
 * asockill, signature_change / inpkill) are protected by the tmr->type
 * check: if the slot currently runs a different timer type, nothing is
 * stopped.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* shares the stream-reset timer slot */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* keep the outstanding SEND-timer count consistent */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* clearing tmr->self marks the timer as cancelled for the handler */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2478 
2479 uint32_t
2480 sctp_calculate_len(struct mbuf *m)
2481 {
2482 	uint32_t tlen = 0;
2483 	struct mbuf *at;
2484 
2485 	at = m;
2486 	while (at) {
2487 		tlen += SCTP_BUF_LEN(at);
2488 		at = SCTP_BUF_NEXT(at);
2489 	}
2490 	return (tlen);
2491 }
2492 
2493 void
2494 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2495     struct sctp_association *asoc, uint32_t mtu)
2496 {
2497 	/*
2498 	 * Reset the P-MTU size on this association, this involves changing
2499 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2500 	 * allow the DF flag to be cleared.
2501 	 */
2502 	struct sctp_tmit_chunk *chk;
2503 	unsigned int eff_mtu, ovh;
2504 
2505 	asoc->smallest_mtu = mtu;
2506 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2507 		ovh = SCTP_MIN_OVERHEAD;
2508 	} else {
2509 		ovh = SCTP_MIN_V4_OVERHEAD;
2510 	}
2511 	eff_mtu = mtu - ovh;
2512 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2513 		if (chk->send_size > eff_mtu) {
2514 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2515 		}
2516 	}
2517 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2518 		if (chk->send_size > eff_mtu) {
2519 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2520 		}
2521 	}
2522 }
2523 
2524 
2525 /*
2526  * given an association and starting time of the current RTT period return
2527  * RTO in number of msecs net should point to the current network
2528  */
/*
 * Compute a new RTO (in ms) for 'net' from the elapsed time since *told,
 * updating the smoothed RTT (lastsa) and RTT variance (lastsv) with Van
 * Jacobson's integer algorithm.  'safe' controls whether *told is copied
 * to a local before use (alignment-safe path for sparc64).  The result is
 * clamped to [asoc.minrto, asoc.maxrto].
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value (calc_time ends up in milliseconds) */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			/* NOTE(review): unreachable — the >, <, == cases
			 * above are exhaustive; kept for safety. */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (integer-scaled form) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* latch satellite-network detection once on a large first RTO */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2654 
2655 /*
2656  * return a pointer to a contiguous piece of data from the given mbuf chain
2657  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2658  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2659  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2660  */
2661 caddr_t
2662 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2663 {
2664 	uint32_t count;
2665 	uint8_t *ptr;
2666 
2667 	ptr = in_ptr;
2668 	if ((off < 0) || (len <= 0))
2669 		return (NULL);
2670 
2671 	/* find the desired start location */
2672 	while ((m != NULL) && (off > 0)) {
2673 		if (off < SCTP_BUF_LEN(m))
2674 			break;
2675 		off -= SCTP_BUF_LEN(m);
2676 		m = SCTP_BUF_NEXT(m);
2677 	}
2678 	if (m == NULL)
2679 		return (NULL);
2680 
2681 	/* is the current mbuf large enough (eg. contiguous)? */
2682 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2683 		return (mtod(m, caddr_t)+off);
2684 	} else {
2685 		/* else, it spans more than one mbuf, so save a temp copy... */
2686 		while ((m != NULL) && (len > 0)) {
2687 			count = min(SCTP_BUF_LEN(m) - off, len);
2688 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2689 			len -= count;
2690 			ptr += count;
2691 			off = 0;
2692 			m = SCTP_BUF_NEXT(m);
2693 		}
2694 		if ((m == NULL) && (len > 0))
2695 			return (NULL);
2696 		else
2697 			return ((caddr_t)in_ptr);
2698 	}
2699 }
2700 
2701 
2702 
2703 struct sctp_paramhdr *
2704 sctp_get_next_param(struct mbuf *m,
2705     int offset,
2706     struct sctp_paramhdr *pull,
2707     int pull_limit)
2708 {
2709 	/* This just provides a typed signature to Peter's Pull routine */
2710 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2711 	    (uint8_t *) pull));
2712 }
2713 
2714 
2715 int
2716 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2717 {
2718 	/*
2719 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2720 	 * padlen is > 3 this routine will fail.
2721 	 */
2722 	uint8_t *dp;
2723 	int i;
2724 
2725 	if (padlen > 3) {
2726 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2727 		return (ENOBUFS);
2728 	}
2729 	if (padlen <= M_TRAILINGSPACE(m)) {
2730 		/*
2731 		 * The easy way. We hope the majority of the time we hit
2732 		 * here :)
2733 		 */
2734 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2735 		SCTP_BUF_LEN(m) += padlen;
2736 	} else {
2737 		/* Hard way we must grow the mbuf */
2738 		struct mbuf *tmp;
2739 
2740 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2741 		if (tmp == NULL) {
2742 			/* Out of space GAK! we are in big trouble. */
2743 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2744 			return (ENOSPC);
2745 		}
2746 		/* setup and insert in middle */
2747 		SCTP_BUF_LEN(tmp) = padlen;
2748 		SCTP_BUF_NEXT(tmp) = NULL;
2749 		SCTP_BUF_NEXT(m) = tmp;
2750 		dp = mtod(tmp, uint8_t *);
2751 	}
2752 	/* zero out the pad */
2753 	for (i = 0; i < padlen; i++) {
2754 		*dp = 0;
2755 		dp++;
2756 	}
2757 	return (0);
2758 }
2759 
2760 int
2761 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2762 {
2763 	/* find the last mbuf in chain and pad it */
2764 	struct mbuf *m_at;
2765 
2766 	m_at = m;
2767 	if (last_mbuf) {
2768 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2769 	} else {
2770 		while (m_at) {
2771 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2772 				return (sctp_add_pad_tombuf(m_at, padval));
2773 			}
2774 			m_at = SCTP_BUF_NEXT(m_at);
2775 		}
2776 	}
2777 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2778 	return (EFAULT);
2779 }
2780 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event', e.g. SCTP_COMM_UP /
 * SCTP_COMM_LOST) to the application via the socket's read queue.  For
 * one-to-one style sockets an ABORT additionally sets so_error and wakes
 * any sleepers.  'so_locked' tells the Apple/lock-testing builds whether
 * the socket lock is already held by the caller.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Take the socket lock: bump the refcount so the
			 * tcb survives dropping its lock, then reacquire
			 * in the lock order socket -> tcb.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket went away while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* build the sctp_assoc_change notification in the mbuf */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same lock-order dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2898 
2899 static void
2900 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2901     struct sockaddr *sa, uint32_t error)
2902 {
2903 	struct mbuf *m_notify;
2904 	struct sctp_paddr_change *spc;
2905 	struct sctp_queued_to_read *control;
2906 
2907 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2908 		/* event not enabled */
2909 		return;
2910 	}
2911 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2912 	if (m_notify == NULL)
2913 		return;
2914 	SCTP_BUF_LEN(m_notify) = 0;
2915 	spc = mtod(m_notify, struct sctp_paddr_change *);
2916 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2917 	spc->spc_flags = 0;
2918 	spc->spc_length = sizeof(struct sctp_paddr_change);
2919 	switch (sa->sa_family) {
2920 	case AF_INET:
2921 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2922 		break;
2923 #ifdef INET6
2924 	case AF_INET6:
2925 		{
2926 			struct sockaddr_in6 *sin6;
2927 
2928 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2929 
2930 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2931 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2932 				if (sin6->sin6_scope_id == 0) {
2933 					/* recover scope_id for user */
2934 					(void)sa6_recoverscope(sin6);
2935 				} else {
2936 					/* clear embedded scope_id for user */
2937 					in6_clearscope(&sin6->sin6_addr);
2938 				}
2939 			}
2940 			break;
2941 		}
2942 #endif
2943 	default:
2944 		/* TSNH */
2945 		break;
2946 	}
2947 	spc->spc_state = state;
2948 	spc->spc_error = error;
2949 	spc->spc_assoc_id = sctp_get_associd(stcb);
2950 
2951 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2952 	SCTP_BUF_NEXT(m_notify) = NULL;
2953 
2954 	/* append to socket */
2955 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2956 	    0, 0, 0, 0, 0, 0,
2957 	    m_notify);
2958 	if (control == NULL) {
2959 		/* no memory */
2960 		sctp_m_freem(m_notify);
2961 		return;
2962 	}
2963 	control->length = SCTP_BUF_LEN(m_notify);
2964 	control->spec_flags = M_NOTIFICATION;
2965 	/* not that we need this */
2966 	control->tail_mbuf = m_notify;
2967 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2968 	    control,
2969 	    &stcb->sctp_socket->so_rcv, 1,
2970 	    SCTP_READ_LOCK_NOT_HELD,
2971 	    SCTP_SO_NOT_LOCKED);
2972 }
2973 
2974 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk taken off the
 * sent/send queue.  The chunk's data mbuf chain is stolen (chk->data is
 * set to NULL) and chained behind the notification header, so the caller
 * must not free it afterwards.  'error' selects between SCTP_DATA_UNSENT
 * and SCTP_DATA_SENT in the reported flags.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length is the notification header plus the user data;
	 * the SCTP data-chunk header is subtracted since it is trimmed off
	 * below.  NOTE(review): if chk->send_size < sizeof(struct
	 * sctp_data_chunk) the trim below is skipped but the subtraction
	 * still happens, under-counting ssf_length -- confirm callers
	 * never pass such a chunk.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* chain the user's data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3056 
3057 
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue entry that
 * never made it into a chunk.  The entry's data mbuf chain is stolen
 * (sp->data is set to NULL) and chained behind the notification header,
 * so the caller must not free it afterwards.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* reported length is the notification header plus the user data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of this message already left in a fragment */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user's data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3130 
3131 
3132 
3133 static void
3134 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3135     uint32_t error)
3136 {
3137 	struct mbuf *m_notify;
3138 	struct sctp_adaptation_event *sai;
3139 	struct sctp_queued_to_read *control;
3140 
3141 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3142 		/* event not enabled */
3143 		return;
3144 	}
3145 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3146 	if (m_notify == NULL)
3147 		/* no space left */
3148 		return;
3149 	SCTP_BUF_LEN(m_notify) = 0;
3150 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3151 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3152 	sai->sai_flags = 0;
3153 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3154 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3155 	sai->sai_assoc_id = sctp_get_associd(stcb);
3156 
3157 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3158 	SCTP_BUF_NEXT(m_notify) = NULL;
3159 
3160 	/* append to socket */
3161 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3162 	    0, 0, 0, 0, 0, 0,
3163 	    m_notify);
3164 	if (control == NULL) {
3165 		/* no memory */
3166 		sctp_m_freem(m_notify);
3167 		return;
3168 	}
3169 	control->length = SCTP_BUF_LEN(m_notify);
3170 	control->spec_flags = M_NOTIFICATION;
3171 	/* not that we need this */
3172 	control->tail_mbuf = m_notify;
3173 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3174 	    control,
3175 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3176 }
3177 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT.  Unlike the other notify
 * routines this one inserts the read-queue entry by hand (instead of
 * sctp_add_to_readq()) so the event can be placed directly after the
 * partially-delivered message (asoc.control_pdapi).  'val' packs the
 * stream number in the high 16 bits and the sequence number in the
 * low 16 bits.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* stream in the high half of val, sequence in the low half */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	/*
	 * NOTE(review): this store is dead -- length is reset to 0 a few
	 * lines below and re-accumulated via atomic_add_int() after the
	 * socket-buffer accounting.
	 */
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against the receive socket buffer */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* hold a ref while dropping the TCB lock to take the socket lock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked; bail out */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		/* wake any reader blocked on the socket */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3271 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification.  For TCP-model (and
 * connected one-to-many) sockets the socket is additionally marked
 * unable to send, so the owner notices the shutdown even without
 * subscribing to the event.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/* hold a ref while dropping the TCB lock to take the socket lock */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket closed while unlocked; bail out */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3339 
3340 static void
3341 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3342     int so_locked
3343 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3344     SCTP_UNUSED
3345 #endif
3346 )
3347 {
3348 	struct mbuf *m_notify;
3349 	struct sctp_sender_dry_event *event;
3350 	struct sctp_queued_to_read *control;
3351 
3352 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3353 		/* event not enabled */
3354 		return;
3355 	}
3356 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3357 	if (m_notify == NULL) {
3358 		/* no space left */
3359 		return;
3360 	}
3361 	SCTP_BUF_LEN(m_notify) = 0;
3362 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3363 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3364 	event->sender_dry_flags = 0;
3365 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3366 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3367 
3368 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3369 	SCTP_BUF_NEXT(m_notify) = NULL;
3370 
3371 	/* append to socket */
3372 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3373 	    0, 0, 0, 0, 0, 0, m_notify);
3374 	if (control == NULL) {
3375 		/* no memory */
3376 		sctp_m_freem(m_notify);
3377 		return;
3378 	}
3379 	control->length = SCTP_BUF_LEN(m_notify);
3380 	control->spec_flags = M_NOTIFICATION;
3381 	/* not that we need this */
3382 	control->tail_mbuf = m_notify;
3383 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3384 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3385 }
3386 
3387 
3388 static void
3389 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3390 {
3391 	struct mbuf *m_notify;
3392 	struct sctp_queued_to_read *control;
3393 	struct sctp_stream_reset_event *strreset;
3394 	int len;
3395 
3396 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3397 		/* event not enabled */
3398 		return;
3399 	}
3400 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3401 	if (m_notify == NULL)
3402 		/* no space left */
3403 		return;
3404 	SCTP_BUF_LEN(m_notify) = 0;
3405 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3406 	if (len > M_TRAILINGSPACE(m_notify)) {
3407 		/* never enough room */
3408 		sctp_m_freem(m_notify);
3409 		return;
3410 	}
3411 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3412 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3413 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3414 	strreset->strreset_length = len;
3415 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3416 	strreset->strreset_list[0] = number_entries;
3417 
3418 	SCTP_BUF_LEN(m_notify) = len;
3419 	SCTP_BUF_NEXT(m_notify) = NULL;
3420 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3421 		/* no space */
3422 		sctp_m_freem(m_notify);
3423 		return;
3424 	}
3425 	/* append to socket */
3426 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3427 	    0, 0, 0, 0, 0, 0,
3428 	    m_notify);
3429 	if (control == NULL) {
3430 		/* no memory */
3431 		sctp_m_freem(m_notify);
3432 		return;
3433 	}
3434 	control->spec_flags = M_NOTIFICATION;
3435 	control->length = SCTP_BUF_LEN(m_notify);
3436 	/* not that we need this */
3437 	control->tail_mbuf = m_notify;
3438 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3439 	    control,
3440 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3441 }
3442 
3443 
3444 static void
3445 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3446     int number_entries, uint16_t * list, int flag)
3447 {
3448 	struct mbuf *m_notify;
3449 	struct sctp_queued_to_read *control;
3450 	struct sctp_stream_reset_event *strreset;
3451 	int len;
3452 
3453 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3454 		/* event not enabled */
3455 		return;
3456 	}
3457 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3458 	if (m_notify == NULL)
3459 		/* no space left */
3460 		return;
3461 	SCTP_BUF_LEN(m_notify) = 0;
3462 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3463 	if (len > M_TRAILINGSPACE(m_notify)) {
3464 		/* never enough room */
3465 		sctp_m_freem(m_notify);
3466 		return;
3467 	}
3468 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3469 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3470 	if (number_entries == 0) {
3471 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3472 	} else {
3473 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3474 	}
3475 	strreset->strreset_length = len;
3476 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3477 	if (number_entries) {
3478 		int i;
3479 
3480 		for (i = 0; i < number_entries; i++) {
3481 			strreset->strreset_list[i] = ntohs(list[i]);
3482 		}
3483 	}
3484 	SCTP_BUF_LEN(m_notify) = len;
3485 	SCTP_BUF_NEXT(m_notify) = NULL;
3486 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3487 		/* no space */
3488 		sctp_m_freem(m_notify);
3489 		return;
3490 	}
3491 	/* append to socket */
3492 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3493 	    0, 0, 0, 0, 0, 0,
3494 	    m_notify);
3495 	if (control == NULL) {
3496 		/* no memory */
3497 		sctp_m_freem(m_notify);
3498 		return;
3499 	}
3500 	control->spec_flags = M_NOTIFICATION;
3501 	control->length = SCTP_BUF_LEN(m_notify);
3502 	/* not that we need this */
3503 	control->tail_mbuf = m_notify;
3504 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3505 	    control,
3506 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3507 }
3508 
3509 
3510 void
3511 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3512     uint32_t error, void *data, int so_locked
3513 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3514     SCTP_UNUSED
3515 #endif
3516 )
3517 {
3518 	if ((stcb == NULL) ||
3519 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3520 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3521 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3522 		/* If the socket is gone we are out of here */
3523 		return;
3524 	}
3525 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3526 		return;
3527 	}
3528 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3529 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3530 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3531 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3532 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3533 			/* Don't report these in front states */
3534 			return;
3535 		}
3536 	}
3537 	switch (notification) {
3538 	case SCTP_NOTIFY_ASSOC_UP:
3539 		if (stcb->asoc.assoc_up_sent == 0) {
3540 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3541 			stcb->asoc.assoc_up_sent = 1;
3542 		}
3543 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3544 			sctp_notify_adaptation_layer(stcb, error);
3545 		}
3546 		if (stcb->asoc.peer_supports_auth == 0) {
3547 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3548 			    NULL, so_locked);
3549 		}
3550 		break;
3551 	case SCTP_NOTIFY_ASSOC_DOWN:
3552 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3553 		break;
3554 	case SCTP_NOTIFY_INTERFACE_DOWN:
3555 		{
3556 			struct sctp_nets *net;
3557 
3558 			net = (struct sctp_nets *)data;
3559 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3560 			    (struct sockaddr *)&net->ro._l_addr, error);
3561 			break;
3562 		}
3563 	case SCTP_NOTIFY_INTERFACE_UP:
3564 		{
3565 			struct sctp_nets *net;
3566 
3567 			net = (struct sctp_nets *)data;
3568 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3569 			    (struct sockaddr *)&net->ro._l_addr, error);
3570 			break;
3571 		}
3572 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3573 		{
3574 			struct sctp_nets *net;
3575 
3576 			net = (struct sctp_nets *)data;
3577 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3578 			    (struct sockaddr *)&net->ro._l_addr, error);
3579 			break;
3580 		}
3581 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3582 		sctp_notify_send_failed2(stcb, error,
3583 		    (struct sctp_stream_queue_pending *)data, so_locked);
3584 		break;
3585 	case SCTP_NOTIFY_DG_FAIL:
3586 		sctp_notify_send_failed(stcb, error,
3587 		    (struct sctp_tmit_chunk *)data, so_locked);
3588 		break;
3589 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3590 		{
3591 			uint32_t val;
3592 
3593 			val = *((uint32_t *) data);
3594 
3595 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3596 			break;
3597 		}
3598 	case SCTP_NOTIFY_STRDATA_ERR:
3599 		break;
3600 	case SCTP_NOTIFY_ASSOC_ABORTED:
3601 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3602 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3603 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3604 		} else {
3605 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3606 		}
3607 		break;
3608 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3609 		break;
3610 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3611 		break;
3612 	case SCTP_NOTIFY_ASSOC_RESTART:
3613 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3614 		if (stcb->asoc.peer_supports_auth == 0) {
3615 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3616 			    NULL, so_locked);
3617 		}
3618 		break;
3619 	case SCTP_NOTIFY_HB_RESP:
3620 		break;
3621 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3622 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3623 		break;
3624 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3625 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3626 		break;
3627 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3628 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3629 		break;
3630 
3631 	case SCTP_NOTIFY_STR_RESET_SEND:
3632 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3633 		break;
3634 	case SCTP_NOTIFY_STR_RESET_RECV:
3635 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3636 		break;
3637 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3638 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3639 		break;
3640 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3641 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3642 		break;
3643 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3644 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3645 		    error);
3646 		break;
3647 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3648 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3649 		    error);
3650 		break;
3651 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3652 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3653 		    error);
3654 		break;
3655 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3656 		break;
3657 	case SCTP_NOTIFY_ASCONF_FAILED:
3658 		break;
3659 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3660 		sctp_notify_shutdown_event(stcb);
3661 		break;
3662 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3663 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3664 		    (uint16_t) (uintptr_t) data,
3665 		    so_locked);
3666 		break;
3667 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3668 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3669 		    (uint16_t) (uintptr_t) data,
3670 		    so_locked);
3671 		break;
3672 	case SCTP_NOTIFY_NO_PEER_AUTH:
3673 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3674 		    (uint16_t) (uintptr_t) data,
3675 		    so_locked);
3676 		break;
3677 	case SCTP_NOTIFY_SENDER_DRY:
3678 		sctp_notify_sender_dry_event(stcb, so_locked);
3679 		break;
3680 	default:
3681 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3682 		    __FUNCTION__, notification, notification);
3683 		break;
3684 	}			/* end switch */
3685 }
3686 
3687 void
3688 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3689 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3690     SCTP_UNUSED
3691 #endif
3692 )
3693 {
3694 	struct sctp_association *asoc;
3695 	struct sctp_stream_out *outs;
3696 	struct sctp_tmit_chunk *chk;
3697 	struct sctp_stream_queue_pending *sp;
3698 	int i;
3699 
3700 	asoc = &stcb->asoc;
3701 
3702 	if (stcb == NULL) {
3703 		return;
3704 	}
3705 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3706 		/* already being freed */
3707 		return;
3708 	}
3709 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3710 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3711 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3712 		return;
3713 	}
3714 	/* now through all the gunk freeing chunks */
3715 	if (holds_lock == 0) {
3716 		SCTP_TCB_SEND_LOCK(stcb);
3717 	}
3718 	/* sent queue SHOULD be empty */
3719 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3720 		chk = TAILQ_FIRST(&asoc->sent_queue);
3721 		while (chk) {
3722 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3723 			asoc->sent_queue_cnt--;
3724 			if (chk->data != NULL) {
3725 				sctp_free_bufspace(stcb, asoc, chk, 1);
3726 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3727 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3728 				if (chk->data) {
3729 					sctp_m_freem(chk->data);
3730 					chk->data = NULL;
3731 				}
3732 			}
3733 			sctp_free_a_chunk(stcb, chk);
3734 			/* sa_ignore FREED_MEMORY */
3735 			chk = TAILQ_FIRST(&asoc->sent_queue);
3736 		}
3737 	}
3738 	/* pending send queue SHOULD be empty */
3739 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3740 		chk = TAILQ_FIRST(&asoc->send_queue);
3741 		while (chk) {
3742 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3743 			asoc->send_queue_cnt--;
3744 			if (chk->data != NULL) {
3745 				sctp_free_bufspace(stcb, asoc, chk, 1);
3746 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3747 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3748 				if (chk->data) {
3749 					sctp_m_freem(chk->data);
3750 					chk->data = NULL;
3751 				}
3752 			}
3753 			sctp_free_a_chunk(stcb, chk);
3754 			/* sa_ignore FREED_MEMORY */
3755 			chk = TAILQ_FIRST(&asoc->send_queue);
3756 		}
3757 	}
3758 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3759 		/* For each stream */
3760 		outs = &stcb->asoc.strmout[i];
3761 		/* clean up any sends there */
3762 		stcb->asoc.locked_on_sending = NULL;
3763 		sp = TAILQ_FIRST(&outs->outqueue);
3764 		while (sp) {
3765 			stcb->asoc.stream_queue_cnt--;
3766 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3767 			sctp_free_spbufspace(stcb, asoc, sp);
3768 			if (sp->data) {
3769 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3770 				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3771 				if (sp->data) {
3772 					sctp_m_freem(sp->data);
3773 					sp->data = NULL;
3774 				}
3775 			}
3776 			if (sp->net) {
3777 				sctp_free_remote_addr(sp->net);
3778 				sp->net = NULL;
3779 			}
3780 			/* Free the chunk */
3781 			sctp_free_a_strmoq(stcb, sp);
3782 			/* sa_ignore FREED_MEMORY */
3783 			sp = TAILQ_FIRST(&outs->outqueue);
3784 		}
3785 	}
3786 
3787 	if (holds_lock == 0) {
3788 		SCTP_TCB_SEND_UNLOCK(stcb);
3789 	}
3790 }
3791 
3792 void
3793 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3794 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3795     SCTP_UNUSED
3796 #endif
3797 )
3798 {
3799 
3800 	if (stcb == NULL) {
3801 		return;
3802 	}
3803 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3804 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3805 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3806 		return;
3807 	}
3808 	/* Tell them we lost the asoc */
3809 	sctp_report_all_outbound(stcb, 1, so_locked);
3810 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3811 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3812 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3813 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3814 	}
3815 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3816 }
3817 
/*
 * Abort an association in response to an inbound packet: send an ABORT
 * chunk back (echoing the peer's vtag when a TCB exists), notify the
 * ULP, and free the TCB.  'stcb' may be NULL (e.g. out-of-the-blue
 * packets), in which case only the ABORT is sent.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* hold a ref while dropping the TCB lock to take the socket lock */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* an established association is no longer established */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3861 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN
 * ring-buffer logs.  Each log is a circular array of SCTP_TSN_LOG_SIZE
 * entries; when the wrapped flag is set the entries from the current
 * index to the end predate the entries from 0 to the current index.
 * NOTE(review): the whole body is compiled out unless NOSIY_PRINTS is
 * defined -- the macro name looks like a typo of "NOISY_PRINTS", but
 * renaming it would change which builds get this output; confirm
 * before touching it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* older (pre-wrap) entries first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* then the most recent entries */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* older (pre-wrap) entries first */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	/* then the most recent entries */
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3925 
3926 void
3927 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3928     int error, struct mbuf *op_err,
3929     int so_locked
3930 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3931     SCTP_UNUSED
3932 #endif
3933 )
3934 {
3935 	uint32_t vtag;
3936 
3937 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3938 	struct socket *so;
3939 
3940 #endif
3941 
3942 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3943 	so = SCTP_INP_SO(inp);
3944 #endif
3945 	if (stcb == NULL) {
3946 		/* Got to have a TCB */
3947 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3948 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3949 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3950 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3951 			}
3952 		}
3953 		return;
3954 	} else {
3955 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3956 	}
3957 	vtag = stcb->asoc.peer_vtag;
3958 	/* notify the ulp */
3959 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3960 		sctp_abort_notification(stcb, error, so_locked);
3961 	/* notify the peer */
3962 #if defined(SCTP_PANIC_ON_ABORT)
3963 	panic("aborting an association");
3964 #endif
3965 	sctp_send_abort_tcb(stcb, op_err, so_locked);
3966 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3967 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3968 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3969 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3970 	}
3971 	/* now free the asoc */
3972 #ifdef SCTP_ASOCLOG_OF_TSNS
3973 	sctp_print_out_track_log(stcb);
3974 #endif
3975 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3976 	if (!so_locked) {
3977 		atomic_add_int(&stcb->asoc.refcnt, 1);
3978 		SCTP_TCB_UNLOCK(stcb);
3979 		SCTP_SOCKET_LOCK(so, 1);
3980 		SCTP_TCB_LOCK(stcb);
3981 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3982 	}
3983 #endif
3984 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3985 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3986 	if (!so_locked) {
3987 		SCTP_SOCKET_UNLOCK(so, 1);
3988 	}
3989 #endif
3990 }
3991 
3992 void
3993 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3994     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3995 {
3996 	struct sctp_chunkhdr *ch, chunk_buf;
3997 	unsigned int chk_length;
3998 
3999 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4000 	/* Generate a TO address for future reference */
4001 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4002 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
4003 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4004 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4005 		}
4006 	}
4007 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4008 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4009 	while (ch != NULL) {
4010 		chk_length = ntohs(ch->chunk_length);
4011 		if (chk_length < sizeof(*ch)) {
4012 			/* break to abort land */
4013 			break;
4014 		}
4015 		switch (ch->chunk_type) {
4016 		case SCTP_COOKIE_ECHO:
4017 			/* We hit here only if the assoc is being freed */
4018 			return;
4019 		case SCTP_PACKET_DROPPED:
4020 			/* we don't respond to pkt-dropped */
4021 			return;
4022 		case SCTP_ABORT_ASSOCIATION:
4023 			/* we don't respond with an ABORT to an ABORT */
4024 			return;
4025 		case SCTP_SHUTDOWN_COMPLETE:
4026 			/*
4027 			 * we ignore it since we are not waiting for it and
4028 			 * peer is gone
4029 			 */
4030 			return;
4031 		case SCTP_SHUTDOWN_ACK:
4032 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4033 			return;
4034 		default:
4035 			break;
4036 		}
4037 		offset += SCTP_SIZE32(chk_length);
4038 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4039 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4040 	}
4041 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4042 }
4043 
4044 /*
4045  * check the inbound datagram to make sure there is not an abort inside it,
4046  * if there is return 1, else return 0.
4047  */
4048 int
4049 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4050 {
4051 	struct sctp_chunkhdr *ch;
4052 	struct sctp_init_chunk *init_chk, chunk_buf;
4053 	int offset;
4054 	unsigned int chk_length;
4055 
4056 	offset = iphlen + sizeof(struct sctphdr);
4057 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4058 	    (uint8_t *) & chunk_buf);
4059 	while (ch != NULL) {
4060 		chk_length = ntohs(ch->chunk_length);
4061 		if (chk_length < sizeof(*ch)) {
4062 			/* packet is probably corrupt */
4063 			break;
4064 		}
4065 		/* we seem to be ok, is it an abort? */
4066 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4067 			/* yep, tell them */
4068 			return (1);
4069 		}
4070 		if (ch->chunk_type == SCTP_INITIATION) {
4071 			/* need to update the Vtag */
4072 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4073 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4074 			if (init_chk != NULL) {
4075 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4076 			}
4077 		}
4078 		/* Nope, move to the next chunk */
4079 		offset += SCTP_SIZE32(chk_length);
4080 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4081 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4082 	}
4083 	return (0);
4084 }
4085 
4086 /*
4087  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4088  * set (i.e. it's 0) so, create this function to compare link local scopes
4089  */
4090 #ifdef INET6
4091 uint32_t
4092 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4093 {
4094 	struct sockaddr_in6 a, b;
4095 
4096 	/* save copies */
4097 	a = *addr1;
4098 	b = *addr2;
4099 
4100 	if (a.sin6_scope_id == 0)
4101 		if (sa6_recoverscope(&a)) {
4102 			/* can't get scope, so can't match */
4103 			return (0);
4104 		}
4105 	if (b.sin6_scope_id == 0)
4106 		if (sa6_recoverscope(&b)) {
4107 			/* can't get scope, so can't match */
4108 			return (0);
4109 		}
4110 	if (a.sin6_scope_id != b.sin6_scope_id)
4111 		return (0);
4112 
4113 	return (1);
4114 }
4115 
4116 /*
4117  * returns a sockaddr_in6 with embedded scope recovered and removed
4118  */
4119 struct sockaddr_in6 *
4120 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4121 {
4122 	/* check and strip embedded scope junk */
4123 	if (addr->sin6_family == AF_INET6) {
4124 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4125 			if (addr->sin6_scope_id == 0) {
4126 				*store = *addr;
4127 				if (!sa6_recoverscope(store)) {
4128 					/* use the recovered scope */
4129 					addr = store;
4130 				}
4131 			} else {
4132 				/* else, return the original "to" addr */
4133 				in6_clearscope(&addr->sin6_addr);
4134 			}
4135 		}
4136 	}
4137 	return (addr);
4138 }
4139 
4140 #endif
4141 
4142 /*
4143  * are the two addresses the same?  currently a "scopeless" check returns: 1
4144  * if same, 0 if not
4145  */
/*
 * Scopeless address equality check: returns 1 when sa1 and sa2 are the
 * same address, 0 otherwise.  Only AF_INET and (when compiled in)
 * AF_INET6 are supported; anything else compares unequal.
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* both pointers must be valid and agree on the family */
	if ((sa1 == NULL) || (sa2 == NULL) ||
	    (sa1->sa_family != sa2->sa_family)) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6 addresses */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
	case AF_INET:
		/* IPv4 addresses */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
	default:
		/* we don't do these... */
		return (0);
	}
}
4185 
4186 void
4187 sctp_print_address(struct sockaddr *sa)
4188 {
4189 #ifdef INET6
4190 	char ip6buf[INET6_ADDRSTRLEN];
4191 
4192 	ip6buf[0] = 0;
4193 #endif
4194 
4195 	switch (sa->sa_family) {
4196 #ifdef INET6
4197 	case AF_INET6:
4198 		{
4199 			struct sockaddr_in6 *sin6;
4200 
4201 			sin6 = (struct sockaddr_in6 *)sa;
4202 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4203 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4204 			    ntohs(sin6->sin6_port),
4205 			    sin6->sin6_scope_id);
4206 			break;
4207 		}
4208 #endif
4209 	case AF_INET:
4210 		{
4211 			struct sockaddr_in *sin;
4212 			unsigned char *p;
4213 
4214 			sin = (struct sockaddr_in *)sa;
4215 			p = (unsigned char *)&sin->sin_addr;
4216 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4217 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4218 			break;
4219 		}
4220 	default:
4221 		SCTP_PRINTF("?\n");
4222 		break;
4223 	}
4224 }
4225 
4226 void
4227 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4228 {
4229 	switch (iph->ip_v) {
4230 	case IPVERSION:
4231 		{
4232 			struct sockaddr_in lsa, fsa;
4233 
4234 			bzero(&lsa, sizeof(lsa));
4235 			lsa.sin_len = sizeof(lsa);
4236 			lsa.sin_family = AF_INET;
4237 			lsa.sin_addr = iph->ip_src;
4238 			lsa.sin_port = sh->src_port;
4239 			bzero(&fsa, sizeof(fsa));
4240 			fsa.sin_len = sizeof(fsa);
4241 			fsa.sin_family = AF_INET;
4242 			fsa.sin_addr = iph->ip_dst;
4243 			fsa.sin_port = sh->dest_port;
4244 			SCTP_PRINTF("src: ");
4245 			sctp_print_address((struct sockaddr *)&lsa);
4246 			SCTP_PRINTF("dest: ");
4247 			sctp_print_address((struct sockaddr *)&fsa);
4248 			break;
4249 		}
4250 #ifdef INET6
4251 	case IPV6_VERSION >> 4:
4252 		{
4253 			struct ip6_hdr *ip6;
4254 			struct sockaddr_in6 lsa6, fsa6;
4255 
4256 			ip6 = (struct ip6_hdr *)iph;
4257 			bzero(&lsa6, sizeof(lsa6));
4258 			lsa6.sin6_len = sizeof(lsa6);
4259 			lsa6.sin6_family = AF_INET6;
4260 			lsa6.sin6_addr = ip6->ip6_src;
4261 			lsa6.sin6_port = sh->src_port;
4262 			bzero(&fsa6, sizeof(fsa6));
4263 			fsa6.sin6_len = sizeof(fsa6);
4264 			fsa6.sin6_family = AF_INET6;
4265 			fsa6.sin6_addr = ip6->ip6_dst;
4266 			fsa6.sin6_port = sh->dest_port;
4267 			SCTP_PRINTF("src: ");
4268 			sctp_print_address((struct sockaddr *)&lsa6);
4269 			SCTP_PRINTF("dest: ");
4270 			sctp_print_address((struct sockaddr *)&fsa6);
4271 			break;
4272 		}
4273 #endif
4274 	default:
4275 		/* TSNH */
4276 		break;
4277 	}
4278 }
4279 
/*
 * Move every queued-to-read control belonging to stcb from old_inp's
 * read queue to new_inp's (used when an association's socket changes,
 * e.g. peeloff/accept).  The mbuf bytes are un-accounted from the old
 * socket's receive buffer and re-accounted to the new one.  If the old
 * socket's sb lock cannot be obtained, the data is left where it is.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off everything destined for our target stcb */
	while (control) {
		/* save the next pointer before unlinking control */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* un-account each mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* account each mbuf to the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4361 
/*
 * Queue a filled-in control structure onto inp's read queue, charging
 * each mbuf of control->data to the socket buffer sb, then wake any
 * reader.  Zero-length mbufs are unlinked and freed while walking the
 * chain; if everything collapses away the control itself is freed and
 * nothing is queued.  "end" marks the message complete (end_added).
 * The INP read lock is taken here unless inp_read_lock_held says the
 * caller already holds it.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader side is gone: drop the data instead of queueing it */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* count user data (not notifications) toward the receive stats */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: free empty mbufs, account the rest to sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake up any reader on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* hold a ref across the unlock/relock dance */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4483 
4484 
/*
 * Append mbuf chain m to an existing control structure (partial-delivery
 * API in progress, or appending on the reassembly queue).  Empty mbufs
 * are freed as the chain is walked; the remaining bytes are added to
 * control->length and, when sb is non-NULL, accounted to the socket
 * buffer.  "end" marks the message complete; ctls_cumack becomes the
 * control's sinfo_tsn/sinfo_cumtsn.  Returns 0 on success, -1 when the
 * control is missing/complete or the chain is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader side gone; silently succeed without queueing */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* walk the chain: free empty mbufs, tally and account the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up any reader on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the unlock/relock dance */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4630 
4631 
4632 
4633 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4634  *************ALTERNATE ROUTING CODE
4635  */
4636 
4637 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4638  *************ALTERNATE ROUTING CODE
4639  */
4640 
4641 struct mbuf *
4642 sctp_generate_invmanparam(int err)
4643 {
4644 	/* Return a MBUF with a invalid mandatory parameter */
4645 	struct mbuf *m;
4646 
4647 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4648 	if (m) {
4649 		struct sctp_paramhdr *ph;
4650 
4651 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4652 		ph = mtod(m, struct sctp_paramhdr *);
4653 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4654 		ph->param_type = htons(err);
4655 	}
4656 	return (m);
4657 }
4658 
4659 #ifdef SCTP_MBCNT_LOGGING
4660 void
4661 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4662     struct sctp_tmit_chunk *tp1, int chk_cnt)
4663 {
4664 	if (tp1->data == NULL) {
4665 		return;
4666 	}
4667 	asoc->chunks_on_out_queue -= chk_cnt;
4668 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4669 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4670 		    asoc->total_output_queue_size,
4671 		    tp1->book_size,
4672 		    0,
4673 		    tp1->mbcnt);
4674 	}
4675 	if (asoc->total_output_queue_size >= tp1->book_size) {
4676 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4677 	} else {
4678 		asoc->total_output_queue_size = 0;
4679 	}
4680 
4681 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4682 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4683 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4684 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4685 		} else {
4686 			stcb->sctp_socket->so_snd.sb_cc = 0;
4687 
4688 		}
4689 	}
4690 }
4691 
4692 #endif
4693 
4694 int
4695 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4696     int reason, int so_locked
4697 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4698     SCTP_UNUSED
4699 #endif
4700 )
4701 {
4702 	struct sctp_stream_out *strq;
4703 	struct sctp_tmit_chunk *chk = NULL;
4704 	struct sctp_stream_queue_pending *sp;
4705 	uint16_t stream = 0, seq = 0;
4706 	uint8_t foundeom = 0;
4707 	int ret_sz = 0;
4708 	int notdone;
4709 	int do_wakeup_routine = 0;
4710 
4711 	stream = tp1->rec.data.stream_number;
4712 	seq = tp1->rec.data.stream_seq;
4713 	do {
4714 		ret_sz += tp1->book_size;
4715 		if (tp1->data != NULL) {
4716 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4717 				sctp_flight_size_decrease(tp1);
4718 				sctp_total_flight_decrease(stcb, tp1);
4719 			}
4720 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4721 			stcb->asoc.peers_rwnd += tp1->send_size;
4722 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4723 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4724 			if (tp1->data) {
4725 				sctp_m_freem(tp1->data);
4726 				tp1->data = NULL;
4727 			}
4728 			do_wakeup_routine = 1;
4729 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4730 				stcb->asoc.sent_queue_cnt_removeable--;
4731 			}
4732 		}
4733 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4734 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4735 		    SCTP_DATA_NOT_FRAG) {
4736 			/* not frag'ed we ae done   */
4737 			notdone = 0;
4738 			foundeom = 1;
4739 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4740 			/* end of frag, we are done */
4741 			notdone = 0;
4742 			foundeom = 1;
4743 		} else {
4744 			/*
4745 			 * Its a begin or middle piece, we must mark all of
4746 			 * it
4747 			 */
4748 			notdone = 1;
4749 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4750 		}
4751 	} while (tp1 && notdone);
4752 	if (foundeom == 0) {
4753 		/*
4754 		 * The multi-part message was scattered across the send and
4755 		 * sent queue.
4756 		 */
4757 next_on_sent:
4758 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4759 		/*
4760 		 * recurse throught the send_queue too, starting at the
4761 		 * beginning.
4762 		 */
4763 		if ((tp1) &&
4764 		    (tp1->rec.data.stream_number == stream) &&
4765 		    (tp1->rec.data.stream_seq == seq)) {
4766 			/*
4767 			 * save to chk in case we have some on stream out
4768 			 * queue. If so and we have an un-transmitted one we
4769 			 * don't have to fudge the TSN.
4770 			 */
4771 			chk = tp1;
4772 			ret_sz += tp1->book_size;
4773 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4774 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4775 			if (tp1->data) {
4776 				sctp_m_freem(tp1->data);
4777 				tp1->data = NULL;
4778 			}
4779 			/* No flight involved here book the size to 0 */
4780 			tp1->book_size = 0;
4781 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4782 				foundeom = 1;
4783 			}
4784 			do_wakeup_routine = 1;
4785 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4786 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4787 			/*
4788 			 * on to the sent queue so we can wait for it to be
4789 			 * passed by.
4790 			 */
4791 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4792 			    sctp_next);
4793 			stcb->asoc.send_queue_cnt--;
4794 			stcb->asoc.sent_queue_cnt++;
4795 			goto next_on_sent;
4796 		}
4797 	}
4798 	if (foundeom == 0) {
4799 		/*
4800 		 * Still no eom found. That means there is stuff left on the
4801 		 * stream out queue.. yuck.
4802 		 */
4803 		strq = &stcb->asoc.strmout[stream];
4804 		SCTP_TCB_SEND_LOCK(stcb);
4805 		sp = TAILQ_FIRST(&strq->outqueue);
4806 		while (sp->strseq <= seq) {
4807 			/* Check if its our SEQ */
4808 			if (sp->strseq == seq) {
4809 				sp->discard_rest = 1;
4810 				/*
4811 				 * We may need to put a chunk on the queue
4812 				 * that holds the TSN that would have been
4813 				 * sent with the LAST bit.
4814 				 */
4815 				if (chk == NULL) {
4816 					/* Yep, we have to */
4817 					sctp_alloc_a_chunk(stcb, chk);
4818 					if (chk == NULL) {
4819 						/*
4820 						 * we are hosed. All we can
4821 						 * do is nothing.. which
4822 						 * will cause an abort if
4823 						 * the peer is paying
4824 						 * attention.
4825 						 */
4826 						goto oh_well;
4827 					}
4828 					memset(chk, 0, sizeof(*chk));
4829 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4830 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4831 					chk->asoc = &stcb->asoc;
4832 					chk->rec.data.stream_seq = sp->strseq;
4833 					chk->rec.data.stream_number = sp->stream;
4834 					chk->rec.data.payloadtype = sp->ppid;
4835 					chk->rec.data.context = sp->context;
4836 					chk->flags = sp->act_flags;
4837 					if (sp->net)
4838 						chk->whoTo = sp->net;
4839 					else
4840 						chk->whoTo = stcb->asoc.primary_destination;
4841 					atomic_add_int(&chk->whoTo->ref_count, 1);
4842 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4843 					stcb->asoc.pr_sctp_cnt++;
4844 					chk->pr_sctp_on = 1;
4845 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4846 					stcb->asoc.sent_queue_cnt++;
4847 					stcb->asoc.pr_sctp_cnt++;
4848 				} else {
4849 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4850 				}
4851 		oh_well:
4852 				if (sp->data) {
4853 					/*
4854 					 * Pull any data to free up the SB
4855 					 * and allow sender to "add more"
4856 					 * whilc we will throw away :-)
4857 					 */
4858 					sctp_free_spbufspace(stcb, &stcb->asoc,
4859 					    sp);
4860 					ret_sz += sp->length;
4861 					do_wakeup_routine = 1;
4862 					sp->some_taken = 1;
4863 					sctp_m_freem(sp->data);
4864 					sp->length = 0;
4865 					sp->data = NULL;
4866 					sp->tail_mbuf = NULL;
4867 				}
4868 				break;
4869 			} else {
4870 				/* Next one please */
4871 				sp = TAILQ_NEXT(sp, next);
4872 			}
4873 		}		/* End while */
4874 		SCTP_TCB_SEND_UNLOCK(stcb);
4875 	}
4876 	if (do_wakeup_routine) {
4877 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4878 		struct socket *so;
4879 
4880 		so = SCTP_INP_SO(stcb->sctp_ep);
4881 		if (!so_locked) {
4882 			atomic_add_int(&stcb->asoc.refcnt, 1);
4883 			SCTP_TCB_UNLOCK(stcb);
4884 			SCTP_SOCKET_LOCK(so, 1);
4885 			SCTP_TCB_LOCK(stcb);
4886 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4887 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4888 				/* assoc was freed while we were unlocked */
4889 				SCTP_SOCKET_UNLOCK(so, 1);
4890 				return (ret_sz);
4891 			}
4892 		}
4893 #endif
4894 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4895 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4896 		if (!so_locked) {
4897 			SCTP_SOCKET_UNLOCK(so, 1);
4898 		}
4899 #endif
4900 	}
4901 	return (ret_sz);
4902 }
4903 
4904 /*
4905  * checks to see if the given address, sa, is one that is currently known by
4906  * the kernel note: can't distinguish the same address on multiple interfaces
4907  * and doesn't handle multiple addresses with different zone/scope id's note:
4908  * ifa_ifwithaddr() compares the entire sockaddr struct
4909  */
4910 struct sctp_ifa *
4911 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4912     int holds_lock)
4913 {
4914 	struct sctp_laddr *laddr;
4915 
4916 	if (holds_lock == 0) {
4917 		SCTP_INP_RLOCK(inp);
4918 	}
4919 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4920 		if (laddr->ifa == NULL)
4921 			continue;
4922 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4923 			continue;
4924 		if (addr->sa_family == AF_INET) {
4925 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4926 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4927 				/* found him. */
4928 				if (holds_lock == 0) {
4929 					SCTP_INP_RUNLOCK(inp);
4930 				}
4931 				return (laddr->ifa);
4932 				break;
4933 			}
4934 		}
4935 #ifdef INET6
4936 		if (addr->sa_family == AF_INET6) {
4937 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4938 			    &laddr->ifa->address.sin6)) {
4939 				/* found him. */
4940 				if (holds_lock == 0) {
4941 					SCTP_INP_RUNLOCK(inp);
4942 				}
4943 				return (laddr->ifa);
4944 				break;
4945 			}
4946 		}
4947 #endif
4948 	}
4949 	if (holds_lock == 0) {
4950 		SCTP_INP_RUNLOCK(inp);
4951 	}
4952 	return (NULL);
4953 }
4954 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Reduce a socket address to a 32-bit hash value by folding the
	 * upper 16 bits into the lower 16.  Families other than
	 * AF_INET/AF_INET6 hash to 0.
	 */
	switch (addr->sa_family) {
	case AF_INET:
		{
			uint32_t v4;

			v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			return (v4 ^ (v4 >> 16));
		}
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t word[4];
			uint32_t folded;

			sin6 = (struct sockaddr_in6 *)addr;
			/*
			 * Same four 32-bit words that s6_addr32 exposes,
			 * read via memcpy.
			 */
			memcpy(word, &sin6->sin6_addr, sizeof(word));
			folded = word[0] + word[1] + word[2] + word[3];
			return (folded ^ (folded >> 16));
		}
	default:
		return (0);
	}
}
4977 
4978 struct sctp_ifa *
4979 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4980 {
4981 	struct sctp_ifa *sctp_ifap;
4982 	struct sctp_vrf *vrf;
4983 	struct sctp_ifalist *hash_head;
4984 	uint32_t hash_of_addr;
4985 
4986 	if (holds_lock == 0)
4987 		SCTP_IPI_ADDR_RLOCK();
4988 
4989 	vrf = sctp_find_vrf(vrf_id);
4990 	if (vrf == NULL) {
4991 stage_right:
4992 		if (holds_lock == 0)
4993 			SCTP_IPI_ADDR_RUNLOCK();
4994 		return (NULL);
4995 	}
4996 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4997 
4998 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4999 	if (hash_head == NULL) {
5000 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5001 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5002 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5003 		sctp_print_address(addr);
5004 		SCTP_PRINTF("No such bucket for address\n");
5005 		if (holds_lock == 0)
5006 			SCTP_IPI_ADDR_RUNLOCK();
5007 
5008 		return (NULL);
5009 	}
5010 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5011 		if (sctp_ifap == NULL) {
5012 #ifdef INVARIANTS
5013 			panic("Huh LIST_FOREACH corrupt");
5014 			goto stage_right;
5015 #else
5016 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5017 			goto stage_right;
5018 #endif
5019 		}
5020 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5021 			continue;
5022 		if (addr->sa_family == AF_INET) {
5023 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5024 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5025 				/* found him. */
5026 				if (holds_lock == 0)
5027 					SCTP_IPI_ADDR_RUNLOCK();
5028 				return (sctp_ifap);
5029 				break;
5030 			}
5031 		}
5032 #ifdef INET6
5033 		if (addr->sa_family == AF_INET6) {
5034 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5035 			    &sctp_ifap->address.sin6)) {
5036 				/* found him. */
5037 				if (holds_lock == 0)
5038 					SCTP_IPI_ADDR_RUNLOCK();
5039 				return (sctp_ifap);
5040 				break;
5041 			}
5042 		}
5043 #endif
5044 	}
5045 	if (holds_lock == 0)
5046 		SCTP_IPI_ADDR_RUNLOCK();
5047 	return (NULL);
5048 }
5049 
/*
 * Called after the reader has pulled *freed_so_far bytes off the receive
 * queue for stcb.  If the receive window has grown by at least rwnd_req
 * since the last report to the peer, send a window-update SACK; otherwise
 * just accumulate the freed count on the tcb for next time.
 *
 * hold_rlock is non-zero when the caller holds the endpoint's read-queue
 * lock; it is dropped around the SACK/output work and re-taken before
 * returning.  *freed_so_far is zeroed once consumed.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot vanish under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: if we are freeing/shutting down, no update. */
		goto no_lock;
	}
	/* Also pin the endpoint while we work. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough to be worth telling the peer. */
		if (hold_rlock) {
			/* Drop the read lock while we send (re-taken at out:). */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: freeing may have raced us. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* r_unlocked implies so != NULL; restore the caller's read lock. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5131 
5132 int
5133 sctp_sorecvmsg(struct socket *so,
5134     struct uio *uio,
5135     struct mbuf **mp,
5136     struct sockaddr *from,
5137     int fromlen,
5138     int *msg_flags,
5139     struct sctp_sndrcvinfo *sinfo,
5140     int filling_sinfo)
5141 {
5142 	/*
5143 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5144 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5145 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5146 	 * On the way out we may send out any combination of:
5147 	 * MSG_NOTIFICATION MSG_EOR
5148 	 *
5149 	 */
5150 	struct sctp_inpcb *inp = NULL;
5151 	int my_len = 0;
5152 	int cp_len = 0, error = 0;
5153 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5154 	struct mbuf *m = NULL;
5155 	struct sctp_tcb *stcb = NULL;
5156 	int wakeup_read_socket = 0;
5157 	int freecnt_applied = 0;
5158 	int out_flags = 0, in_flags = 0;
5159 	int block_allowed = 1;
5160 	uint32_t freed_so_far = 0;
5161 	uint32_t copied_so_far = 0;
5162 	int in_eeor_mode = 0;
5163 	int no_rcv_needed = 0;
5164 	uint32_t rwnd_req = 0;
5165 	int hold_sblock = 0;
5166 	int hold_rlock = 0;
5167 	int slen = 0;
5168 	uint32_t held_length = 0;
5169 	int sockbuf_lock = 0;
5170 
5171 	if (uio == NULL) {
5172 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5173 		return (EINVAL);
5174 	}
5175 	if (msg_flags) {
5176 		in_flags = *msg_flags;
5177 		if (in_flags & MSG_PEEK)
5178 			SCTP_STAT_INCR(sctps_read_peeks);
5179 	} else {
5180 		in_flags = 0;
5181 	}
5182 	slen = uio->uio_resid;
5183 
5184 	/* Pull in and set up our int flags */
5185 	if (in_flags & MSG_OOB) {
5186 		/* Out of band's NOT supported */
5187 		return (EOPNOTSUPP);
5188 	}
5189 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5190 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5191 		return (EINVAL);
5192 	}
5193 	if ((in_flags & (MSG_DONTWAIT
5194 	    | MSG_NBIO
5195 	    )) ||
5196 	    SCTP_SO_IS_NBIO(so)) {
5197 		block_allowed = 0;
5198 	}
5199 	/* setup the endpoint */
5200 	inp = (struct sctp_inpcb *)so->so_pcb;
5201 	if (inp == NULL) {
5202 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5203 		return (EFAULT);
5204 	}
5205 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5206 	/* Must be at least a MTU's worth */
5207 	if (rwnd_req < SCTP_MIN_RWND)
5208 		rwnd_req = SCTP_MIN_RWND;
5209 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5210 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5211 		sctp_misc_ints(SCTP_SORECV_ENTER,
5212 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5213 	}
5214 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5215 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5216 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5217 	}
5218 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5219 	sockbuf_lock = 1;
5220 	if (error) {
5221 		goto release_unlocked;
5222 	}
5223 restart:
5224 
5225 
5226 restart_nosblocks:
5227 	if (hold_sblock == 0) {
5228 		SOCKBUF_LOCK(&so->so_rcv);
5229 		hold_sblock = 1;
5230 	}
5231 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5232 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5233 		goto out;
5234 	}
5235 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5236 		if (so->so_error) {
5237 			error = so->so_error;
5238 			if ((in_flags & MSG_PEEK) == 0)
5239 				so->so_error = 0;
5240 			goto out;
5241 		} else {
5242 			if (so->so_rcv.sb_cc == 0) {
5243 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5244 				/* indicate EOF */
5245 				error = 0;
5246 				goto out;
5247 			}
5248 		}
5249 	}
5250 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5251 		/* we need to wait for data */
5252 		if ((so->so_rcv.sb_cc == 0) &&
5253 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5254 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5255 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5256 				/*
5257 				 * For active open side clear flags for
5258 				 * re-use passive open is blocked by
5259 				 * connect.
5260 				 */
5261 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5262 					/*
5263 					 * You were aborted, passive side
5264 					 * always hits here
5265 					 */
5266 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5267 					error = ECONNRESET;
5268 					/*
5269 					 * You get this once if you are
5270 					 * active open side
5271 					 */
5272 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5273 						/*
5274 						 * Remove flag if on the
5275 						 * active open side
5276 						 */
5277 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5278 					}
5279 				}
5280 				so->so_state &= ~(SS_ISCONNECTING |
5281 				    SS_ISDISCONNECTING |
5282 				    SS_ISCONFIRMING |
5283 				    SS_ISCONNECTED);
5284 				if (error == 0) {
5285 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5286 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5287 						error = ENOTCONN;
5288 					} else {
5289 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5290 					}
5291 				}
5292 				goto out;
5293 			}
5294 		}
5295 		error = sbwait(&so->so_rcv);
5296 		if (error) {
5297 			goto out;
5298 		}
5299 		held_length = 0;
5300 		goto restart_nosblocks;
5301 	} else if (so->so_rcv.sb_cc == 0) {
5302 		if (so->so_error) {
5303 			error = so->so_error;
5304 			if ((in_flags & MSG_PEEK) == 0)
5305 				so->so_error = 0;
5306 		} else {
5307 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5308 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5309 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5310 					/*
5311 					 * For active open side clear flags
5312 					 * for re-use passive open is
5313 					 * blocked by connect.
5314 					 */
5315 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5316 						/*
5317 						 * You were aborted, passive
5318 						 * side always hits here
5319 						 */
5320 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5321 						error = ECONNRESET;
5322 						/*
5323 						 * You get this once if you
5324 						 * are active open side
5325 						 */
5326 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5327 							/*
5328 							 * Remove flag if on
5329 							 * the active open
5330 							 * side
5331 							 */
5332 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5333 						}
5334 					}
5335 					so->so_state &= ~(SS_ISCONNECTING |
5336 					    SS_ISDISCONNECTING |
5337 					    SS_ISCONFIRMING |
5338 					    SS_ISCONNECTED);
5339 					if (error == 0) {
5340 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5341 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5342 							error = ENOTCONN;
5343 						} else {
5344 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5345 						}
5346 					}
5347 					goto out;
5348 				}
5349 			}
5350 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5351 			error = EWOULDBLOCK;
5352 		}
5353 		goto out;
5354 	}
5355 	if (hold_sblock == 1) {
5356 		SOCKBUF_UNLOCK(&so->so_rcv);
5357 		hold_sblock = 0;
5358 	}
5359 	/* we possibly have data we can read */
5360 	/* sa_ignore FREED_MEMORY */
5361 	control = TAILQ_FIRST(&inp->read_queue);
5362 	if (control == NULL) {
5363 		/*
5364 		 * This could be happening since the appender did the
5365 		 * increment but as not yet did the tailq insert onto the
5366 		 * read_queue
5367 		 */
5368 		if (hold_rlock == 0) {
5369 			SCTP_INP_READ_LOCK(inp);
5370 			hold_rlock = 1;
5371 		}
5372 		control = TAILQ_FIRST(&inp->read_queue);
5373 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5374 #ifdef INVARIANTS
5375 			panic("Huh, its non zero and nothing on control?");
5376 #endif
5377 			so->so_rcv.sb_cc = 0;
5378 		}
5379 		SCTP_INP_READ_UNLOCK(inp);
5380 		hold_rlock = 0;
5381 		goto restart;
5382 	}
5383 	if ((control->length == 0) &&
5384 	    (control->do_not_ref_stcb)) {
5385 		/*
5386 		 * Clean up code for freeing assoc that left behind a
5387 		 * pdapi.. maybe a peer in EEOR that just closed after
5388 		 * sending and never indicated a EOR.
5389 		 */
5390 		if (hold_rlock == 0) {
5391 			hold_rlock = 1;
5392 			SCTP_INP_READ_LOCK(inp);
5393 		}
5394 		control->held_length = 0;
5395 		if (control->data) {
5396 			/* Hmm there is data here .. fix */
5397 			struct mbuf *m_tmp;
5398 			int cnt = 0;
5399 
5400 			m_tmp = control->data;
5401 			while (m_tmp) {
5402 				cnt += SCTP_BUF_LEN(m_tmp);
5403 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5404 					control->tail_mbuf = m_tmp;
5405 					control->end_added = 1;
5406 				}
5407 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5408 			}
5409 			control->length = cnt;
5410 		} else {
5411 			/* remove it */
5412 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5414 			sctp_free_remote_addr(control->whoFrom);
5415 			sctp_free_a_readq(stcb, control);
5416 		}
5417 		if (hold_rlock) {
5418 			hold_rlock = 0;
5419 			SCTP_INP_READ_UNLOCK(inp);
5420 		}
5421 		goto restart;
5422 	}
5423 	if ((control->length == 0) &&
5424 	    (control->end_added == 1)) {
5425 		/*
5426 		 * Do we also need to check for (control->pdapi_aborted ==
5427 		 * 1)?
5428 		 */
5429 		if (hold_rlock == 0) {
5430 			hold_rlock = 1;
5431 			SCTP_INP_READ_LOCK(inp);
5432 		}
5433 		TAILQ_REMOVE(&inp->read_queue, control, next);
5434 		if (control->data) {
5435 #ifdef INVARIANTS
5436 			panic("control->data not null but control->length == 0");
5437 #else
5438 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5439 			sctp_m_freem(control->data);
5440 			control->data = NULL;
5441 #endif
5442 		}
5443 		if (control->aux_data) {
5444 			sctp_m_free(control->aux_data);
5445 			control->aux_data = NULL;
5446 		}
5447 		sctp_free_remote_addr(control->whoFrom);
5448 		sctp_free_a_readq(stcb, control);
5449 		if (hold_rlock) {
5450 			hold_rlock = 0;
5451 			SCTP_INP_READ_UNLOCK(inp);
5452 		}
5453 		goto restart;
5454 	}
5455 	if (control->length == 0) {
5456 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5457 		    (filling_sinfo)) {
5458 			/* find a more suitable one then this */
5459 			ctl = TAILQ_NEXT(control, next);
5460 			while (ctl) {
5461 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5462 				    (ctl->some_taken ||
5463 				    (ctl->spec_flags & M_NOTIFICATION) ||
5464 				    ((ctl->do_not_ref_stcb == 0) &&
5465 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5466 				    ) {
5467 					/*-
5468 					 * If we have a different TCB next, and there is data
5469 					 * present. If we have already taken some (pdapi), OR we can
5470 					 * ref the tcb and no delivery as started on this stream, we
5471 					 * take it. Note we allow a notification on a different
5472 					 * assoc to be delivered..
5473 					 */
5474 					control = ctl;
5475 					goto found_one;
5476 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5477 					    (ctl->length) &&
5478 					    ((ctl->some_taken) ||
5479 					    ((ctl->do_not_ref_stcb == 0) &&
5480 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5481 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5482 					/*-
5483 					 * If we have the same tcb, and there is data present, and we
5484 					 * have the strm interleave feature present. Then if we have
5485 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5486 					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
5488 					 * be delivered.
5489 					 */
5490 					control = ctl;
5491 					goto found_one;
5492 				}
5493 				ctl = TAILQ_NEXT(ctl, next);
5494 			}
5495 		}
5496 		/*
5497 		 * if we reach here, not suitable replacement is available
5498 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5499 		 * into the our held count, and its time to sleep again.
5500 		 */
5501 		held_length = so->so_rcv.sb_cc;
5502 		control->held_length = so->so_rcv.sb_cc;
5503 		goto restart;
5504 	}
5505 	/* Clear the held length since there is something to read */
5506 	control->held_length = 0;
5507 	if (hold_rlock) {
5508 		SCTP_INP_READ_UNLOCK(inp);
5509 		hold_rlock = 0;
5510 	}
5511 found_one:
5512 	/*
5513 	 * If we reach here, control has a some data for us to read off.
5514 	 * Note that stcb COULD be NULL.
5515 	 */
5516 	control->some_taken++;
5517 	if (hold_sblock) {
5518 		SOCKBUF_UNLOCK(&so->so_rcv);
5519 		hold_sblock = 0;
5520 	}
5521 	stcb = control->stcb;
5522 	if (stcb) {
5523 		if ((control->do_not_ref_stcb == 0) &&
5524 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5525 			if (freecnt_applied == 0)
5526 				stcb = NULL;
5527 		} else if (control->do_not_ref_stcb == 0) {
5528 			/* you can't free it on me please */
5529 			/*
5530 			 * The lock on the socket buffer protects us so the
5531 			 * free code will stop. But since we used the
5532 			 * socketbuf lock and the sender uses the tcb_lock
5533 			 * to increment, we need to use the atomic add to
5534 			 * the refcnt
5535 			 */
5536 			if (freecnt_applied) {
5537 #ifdef INVARIANTS
5538 				panic("refcnt already incremented");
5539 #else
5540 				printf("refcnt already incremented?\n");
5541 #endif
5542 			} else {
5543 				atomic_add_int(&stcb->asoc.refcnt, 1);
5544 				freecnt_applied = 1;
5545 			}
5546 			/*
5547 			 * Setup to remember how much we have not yet told
5548 			 * the peer our rwnd has opened up. Note we grab the
5549 			 * value from the tcb from last time. Note too that
5550 			 * sack sending clears this when a sack is sent,
5551 			 * which is fine. Once we hit the rwnd_req, we then
5552 			 * will go to the sctp_user_rcvd() that will not
5553 			 * lock until it KNOWs it MUST send a WUP-SACK.
5554 			 */
5555 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5556 			stcb->freed_by_sorcv_sincelast = 0;
5557 		}
5558 	}
5559 	if (stcb &&
5560 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5561 	    control->do_not_ref_stcb == 0) {
5562 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5563 	}
5564 	/* First lets get off the sinfo and sockaddr info */
5565 	if ((sinfo) && filling_sinfo) {
5566 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5567 		nxt = TAILQ_NEXT(control, next);
5568 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5569 			struct sctp_extrcvinfo *s_extra;
5570 
5571 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5572 			if ((nxt) &&
5573 			    (nxt->length)) {
5574 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5575 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5576 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5577 				}
5578 				if (nxt->spec_flags & M_NOTIFICATION) {
5579 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5580 				}
5581 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5582 				s_extra->sreinfo_next_length = nxt->length;
5583 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5584 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5585 				if (nxt->tail_mbuf != NULL) {
5586 					if (nxt->end_added) {
5587 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5588 					}
5589 				}
5590 			} else {
5591 				/*
5592 				 * we explicitly 0 this, since the memcpy
5593 				 * got some other things beyond the older
5594 				 * sinfo_ that is on the control's structure
5595 				 * :-D
5596 				 */
5597 				nxt = NULL;
5598 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5599 				s_extra->sreinfo_next_aid = 0;
5600 				s_extra->sreinfo_next_length = 0;
5601 				s_extra->sreinfo_next_ppid = 0;
5602 				s_extra->sreinfo_next_stream = 0;
5603 			}
5604 		}
5605 		/*
5606 		 * update off the real current cum-ack, if we have an stcb.
5607 		 */
5608 		if ((control->do_not_ref_stcb == 0) && stcb)
5609 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5610 		/*
5611 		 * mask off the high bits, we keep the actual chunk bits in
5612 		 * there.
5613 		 */
5614 		sinfo->sinfo_flags &= 0x00ff;
5615 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5616 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5617 		}
5618 	}
5619 #ifdef SCTP_ASOCLOG_OF_TSNS
5620 	{
5621 		int index, newindex;
5622 		struct sctp_pcbtsn_rlog *entry;
5623 
5624 		do {
5625 			index = inp->readlog_index;
5626 			newindex = index + 1;
5627 			if (newindex >= SCTP_READ_LOG_SIZE) {
5628 				newindex = 0;
5629 			}
5630 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5631 		entry = &inp->readlog[index];
5632 		entry->vtag = control->sinfo_assoc_id;
5633 		entry->strm = control->sinfo_stream;
5634 		entry->seq = control->sinfo_ssn;
5635 		entry->sz = control->length;
5636 		entry->flgs = control->sinfo_flags;
5637 	}
5638 #endif
5639 	if (fromlen && from) {
5640 		struct sockaddr *to;
5641 
5642 #ifdef INET
5643 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5644 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5645 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5646 #else
5647 		/* No AF_INET use AF_INET6 */
5648 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5649 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5650 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5651 #endif
5652 
5653 		to = from;
5654 #if defined(INET) && defined(INET6)
5655 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5656 		    (to->sa_family == AF_INET) &&
5657 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5658 			struct sockaddr_in *sin;
5659 			struct sockaddr_in6 sin6;
5660 
5661 			sin = (struct sockaddr_in *)to;
5662 			bzero(&sin6, sizeof(sin6));
5663 			sin6.sin6_family = AF_INET6;
5664 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5665 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5666 			bcopy(&sin->sin_addr,
5667 			    &sin6.sin6_addr.s6_addr32[3],
5668 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5669 			sin6.sin6_port = sin->sin_port;
5670 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5671 		}
5672 #endif
5673 #if defined(INET6)
5674 		{
5675 			struct sockaddr_in6 lsa6, *to6;
5676 
5677 			to6 = (struct sockaddr_in6 *)to;
5678 			sctp_recover_scope_mac(to6, (&lsa6));
5679 		}
5680 #endif
5681 	}
5682 	/* now copy out what data we can */
5683 	if (mp == NULL) {
5684 		/* copy out each mbuf in the chain up to length */
5685 get_more_data:
5686 		m = control->data;
5687 		while (m) {
5688 			/* Move out all we can */
5689 			cp_len = (int)uio->uio_resid;
5690 			my_len = (int)SCTP_BUF_LEN(m);
5691 			if (cp_len > my_len) {
5692 				/* not enough in this buf */
5693 				cp_len = my_len;
5694 			}
5695 			if (hold_rlock) {
5696 				SCTP_INP_READ_UNLOCK(inp);
5697 				hold_rlock = 0;
5698 			}
5699 			if (cp_len > 0)
5700 				error = uiomove(mtod(m, char *), cp_len, uio);
5701 			/* re-read */
5702 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5703 				goto release;
5704 			}
5705 			if ((control->do_not_ref_stcb == 0) && stcb &&
5706 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5707 				no_rcv_needed = 1;
5708 			}
5709 			if (error) {
5710 				/* error we are out of here */
5711 				goto release;
5712 			}
5713 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5714 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5715 			    ((control->end_added == 0) ||
5716 			    (control->end_added &&
5717 			    (TAILQ_NEXT(control, next) == NULL)))
5718 			    ) {
5719 				SCTP_INP_READ_LOCK(inp);
5720 				hold_rlock = 1;
5721 			}
5722 			if (cp_len == SCTP_BUF_LEN(m)) {
5723 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5724 				    (control->end_added)) {
5725 					out_flags |= MSG_EOR;
5726 					if ((control->do_not_ref_stcb == 0) &&
5727 					    (control->stcb != NULL) &&
5728 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5729 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5730 				}
5731 				if (control->spec_flags & M_NOTIFICATION) {
5732 					out_flags |= MSG_NOTIFICATION;
5733 				}
5734 				/* we ate up the mbuf */
5735 				if (in_flags & MSG_PEEK) {
5736 					/* just looking */
5737 					m = SCTP_BUF_NEXT(m);
5738 					copied_so_far += cp_len;
5739 				} else {
5740 					/* dispose of the mbuf */
5741 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5742 						sctp_sblog(&so->so_rcv,
5743 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5744 					}
5745 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5746 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5747 						sctp_sblog(&so->so_rcv,
5748 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5749 					}
5750 					copied_so_far += cp_len;
5751 					freed_so_far += cp_len;
5752 					freed_so_far += MSIZE;
5753 					atomic_subtract_int(&control->length, cp_len);
5754 					control->data = sctp_m_free(m);
5755 					m = control->data;
5756 					/*
5757 					 * been through it all, must hold sb
5758 					 * lock ok to null tail
5759 					 */
5760 					if (control->data == NULL) {
5761 #ifdef INVARIANTS
5762 						if ((control->end_added == 0) ||
5763 						    (TAILQ_NEXT(control, next) == NULL)) {
5764 							/*
5765 							 * If the end is not
5766 							 * added, OR the
5767 							 * next is NOT null
5768 							 * we MUST have the
5769 							 * lock.
5770 							 */
5771 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5772 								panic("Hmm we don't own the lock?");
5773 							}
5774 						}
5775 #endif
5776 						control->tail_mbuf = NULL;
5777 #ifdef INVARIANTS
5778 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5779 							panic("end_added, nothing left and no MSG_EOR");
5780 						}
5781 #endif
5782 					}
5783 				}
5784 			} else {
5785 				/* Do we need to trim the mbuf? */
5786 				if (control->spec_flags & M_NOTIFICATION) {
5787 					out_flags |= MSG_NOTIFICATION;
5788 				}
5789 				if ((in_flags & MSG_PEEK) == 0) {
5790 					SCTP_BUF_RESV_UF(m, cp_len);
5791 					SCTP_BUF_LEN(m) -= cp_len;
5792 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5793 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5794 					}
5795 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5796 					if ((control->do_not_ref_stcb == 0) &&
5797 					    stcb) {
5798 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5799 					}
5800 					copied_so_far += cp_len;
5801 					freed_so_far += cp_len;
5802 					freed_so_far += MSIZE;
5803 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5804 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5805 						    SCTP_LOG_SBRESULT, 0);
5806 					}
5807 					atomic_subtract_int(&control->length, cp_len);
5808 				} else {
5809 					copied_so_far += cp_len;
5810 				}
5811 			}
5812 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5813 				break;
5814 			}
5815 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5816 			    (control->do_not_ref_stcb == 0) &&
5817 			    (freed_so_far >= rwnd_req)) {
5818 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5819 			}
5820 		}		/* end while(m) */
5821 		/*
5822 		 * At this point we have looked at it all and we either have
5823 		 * a MSG_EOR/or read all the user wants... <OR>
5824 		 * control->length == 0.
5825 		 */
5826 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5827 			/* we are done with this control */
5828 			if (control->length == 0) {
5829 				if (control->data) {
5830 #ifdef INVARIANTS
5831 					panic("control->data not null at read eor?");
5832 #else
5833 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5834 					sctp_m_freem(control->data);
5835 					control->data = NULL;
5836 #endif
5837 				}
5838 		done_with_control:
5839 				if (TAILQ_NEXT(control, next) == NULL) {
5840 					/*
5841 					 * If we don't have a next we need a
5842 					 * lock, if there is a next
5843 					 * interrupt is filling ahead of us
5844 					 * and we don't need a lock to
5845 					 * remove this guy (which is the
5846 					 * head of the queue).
5847 					 */
5848 					if (hold_rlock == 0) {
5849 						SCTP_INP_READ_LOCK(inp);
5850 						hold_rlock = 1;
5851 					}
5852 				}
5853 				TAILQ_REMOVE(&inp->read_queue, control, next);
5854 				/* Add back any hiddend data */
5855 				if (control->held_length) {
5856 					held_length = 0;
5857 					control->held_length = 0;
5858 					wakeup_read_socket = 1;
5859 				}
5860 				if (control->aux_data) {
5861 					sctp_m_free(control->aux_data);
5862 					control->aux_data = NULL;
5863 				}
5864 				no_rcv_needed = control->do_not_ref_stcb;
5865 				sctp_free_remote_addr(control->whoFrom);
5866 				control->data = NULL;
5867 				sctp_free_a_readq(stcb, control);
5868 				control = NULL;
5869 				if ((freed_so_far >= rwnd_req) &&
5870 				    (no_rcv_needed == 0))
5871 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5872 
5873 			} else {
5874 				/*
5875 				 * The user did not read all of this
5876 				 * message, turn off the returned MSG_EOR
5877 				 * since we are leaving more behind on the
5878 				 * control to read.
5879 				 */
5880 #ifdef INVARIANTS
5881 				if (control->end_added &&
5882 				    (control->data == NULL) &&
5883 				    (control->tail_mbuf == NULL)) {
5884 					panic("Gak, control->length is corrupt?");
5885 				}
5886 #endif
5887 				no_rcv_needed = control->do_not_ref_stcb;
5888 				out_flags &= ~MSG_EOR;
5889 			}
5890 		}
5891 		if (out_flags & MSG_EOR) {
5892 			goto release;
5893 		}
5894 		if ((uio->uio_resid == 0) ||
5895 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5896 		    ) {
5897 			goto release;
5898 		}
5899 		/*
5900 		 * If I hit here the receiver wants more and this message is
5901 		 * NOT done (pd-api). So two questions. Can we block? if not
5902 		 * we are done. Did the user NOT set MSG_WAITALL?
5903 		 */
5904 		if (block_allowed == 0) {
5905 			goto release;
5906 		}
5907 		/*
5908 		 * We need to wait for more data a few things: - We don't
5909 		 * sbunlock() so we don't get someone else reading. - We
5910 		 * must be sure to account for the case where what is added
5911 		 * is NOT to our control when we wakeup.
5912 		 */
5913 
5914 		/*
5915 		 * Do we need to tell the transport a rwnd update might be
5916 		 * needed before we go to sleep?
5917 		 */
5918 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5919 		    ((freed_so_far >= rwnd_req) &&
5920 		    (control->do_not_ref_stcb == 0) &&
5921 		    (no_rcv_needed == 0))) {
5922 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5923 		}
5924 wait_some_more:
5925 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5926 			goto release;
5927 		}
5928 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5929 			goto release;
5930 
5931 		if (hold_rlock == 1) {
5932 			SCTP_INP_READ_UNLOCK(inp);
5933 			hold_rlock = 0;
5934 		}
5935 		if (hold_sblock == 0) {
5936 			SOCKBUF_LOCK(&so->so_rcv);
5937 			hold_sblock = 1;
5938 		}
5939 		if ((copied_so_far) && (control->length == 0) &&
5940 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5941 			goto release;
5942 		}
5943 		if (so->so_rcv.sb_cc <= control->held_length) {
5944 			error = sbwait(&so->so_rcv);
5945 			if (error) {
5946 				goto release;
5947 			}
5948 			control->held_length = 0;
5949 		}
5950 		if (hold_sblock) {
5951 			SOCKBUF_UNLOCK(&so->so_rcv);
5952 			hold_sblock = 0;
5953 		}
5954 		if (control->length == 0) {
5955 			/* still nothing here */
5956 			if (control->end_added == 1) {
5957 				/* he aborted, or is done i.e.did a shutdown */
5958 				out_flags |= MSG_EOR;
5959 				if (control->pdapi_aborted) {
5960 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5961 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5962 
5963 					out_flags |= MSG_TRUNC;
5964 				} else {
5965 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5966 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5967 				}
5968 				goto done_with_control;
5969 			}
5970 			if (so->so_rcv.sb_cc > held_length) {
5971 				control->held_length = so->so_rcv.sb_cc;
5972 				held_length = 0;
5973 			}
5974 			goto wait_some_more;
5975 		} else if (control->data == NULL) {
5976 			/*
5977 			 * we must re-sync since data is probably being
5978 			 * added
5979 			 */
5980 			SCTP_INP_READ_LOCK(inp);
5981 			if ((control->length > 0) && (control->data == NULL)) {
5982 				/*
5983 				 * big trouble.. we have the lock and its
5984 				 * corrupt?
5985 				 */
5986 #ifdef INVARIANTS
5987 				panic("Impossible data==NULL length !=0");
5988 #endif
5989 				out_flags |= MSG_EOR;
5990 				out_flags |= MSG_TRUNC;
5991 				control->length = 0;
5992 				SCTP_INP_READ_UNLOCK(inp);
5993 				goto done_with_control;
5994 			}
5995 			SCTP_INP_READ_UNLOCK(inp);
5996 			/* We will fall around to get more data */
5997 		}
5998 		goto get_more_data;
5999 	} else {
6000 		/*-
6001 		 * Give caller back the mbuf chain,
6002 		 * store in uio_resid the length
6003 		 */
6004 		wakeup_read_socket = 0;
6005 		if ((control->end_added == 0) ||
6006 		    (TAILQ_NEXT(control, next) == NULL)) {
6007 			/* Need to get rlock */
6008 			if (hold_rlock == 0) {
6009 				SCTP_INP_READ_LOCK(inp);
6010 				hold_rlock = 1;
6011 			}
6012 		}
6013 		if (control->end_added) {
6014 			out_flags |= MSG_EOR;
6015 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6016 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6017 		}
6018 		if (control->spec_flags & M_NOTIFICATION) {
6019 			out_flags |= MSG_NOTIFICATION;
6020 		}
6021 		uio->uio_resid = control->length;
6022 		*mp = control->data;
6023 		m = control->data;
6024 		while (m) {
6025 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6026 				sctp_sblog(&so->so_rcv,
6027 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6028 			}
6029 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6030 			freed_so_far += SCTP_BUF_LEN(m);
6031 			freed_so_far += MSIZE;
6032 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6033 				sctp_sblog(&so->so_rcv,
6034 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6035 			}
6036 			m = SCTP_BUF_NEXT(m);
6037 		}
6038 		control->data = control->tail_mbuf = NULL;
6039 		control->length = 0;
6040 		if (out_flags & MSG_EOR) {
6041 			/* Done with this control */
6042 			goto done_with_control;
6043 		}
6044 	}
6045 release:
6046 	if (hold_rlock == 1) {
6047 		SCTP_INP_READ_UNLOCK(inp);
6048 		hold_rlock = 0;
6049 	}
6050 	if (hold_sblock == 1) {
6051 		SOCKBUF_UNLOCK(&so->so_rcv);
6052 		hold_sblock = 0;
6053 	}
6054 	sbunlock(&so->so_rcv);
6055 	sockbuf_lock = 0;
6056 
6057 release_unlocked:
6058 	if (hold_sblock) {
6059 		SOCKBUF_UNLOCK(&so->so_rcv);
6060 		hold_sblock = 0;
6061 	}
6062 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6063 		if ((freed_so_far >= rwnd_req) &&
6064 		    (control && (control->do_not_ref_stcb == 0)) &&
6065 		    (no_rcv_needed == 0))
6066 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6067 	}
6068 out:
6069 	if (msg_flags) {
6070 		*msg_flags = out_flags;
6071 	}
6072 	if (((out_flags & MSG_EOR) == 0) &&
6073 	    ((in_flags & MSG_PEEK) == 0) &&
6074 	    (sinfo) &&
6075 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6076 		struct sctp_extrcvinfo *s_extra;
6077 
6078 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6079 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6080 	}
6081 	if (hold_rlock == 1) {
6082 		SCTP_INP_READ_UNLOCK(inp);
6083 		hold_rlock = 0;
6084 	}
6085 	if (hold_sblock) {
6086 		SOCKBUF_UNLOCK(&so->so_rcv);
6087 		hold_sblock = 0;
6088 	}
6089 	if (sockbuf_lock) {
6090 		sbunlock(&so->so_rcv);
6091 	}
6092 	if (freecnt_applied) {
6093 		/*
6094 		 * The lock on the socket buffer protects us so the free
6095 		 * code will stop. But since we used the socketbuf lock and
6096 		 * the sender uses the tcb_lock to increment, we need to use
6097 		 * the atomic add to the refcnt.
6098 		 */
6099 		if (stcb == NULL) {
6100 #ifdef INVARIANTS
6101 			panic("stcb for refcnt has gone NULL?");
6102 			goto stage_left;
6103 #else
6104 			goto stage_left;
6105 #endif
6106 		}
6107 		atomic_add_int(&stcb->asoc.refcnt, -1);
6108 		freecnt_applied = 0;
6109 		/* Save the value back for next time */
6110 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6111 	}
6112 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6113 		if (stcb) {
6114 			sctp_misc_ints(SCTP_SORECV_DONE,
6115 			    freed_so_far,
6116 			    ((uio) ? (slen - uio->uio_resid) : slen),
6117 			    stcb->asoc.my_rwnd,
6118 			    so->so_rcv.sb_cc);
6119 		} else {
6120 			sctp_misc_ints(SCTP_SORECV_DONE,
6121 			    freed_so_far,
6122 			    ((uio) ? (slen - uio->uio_resid) : slen),
6123 			    0,
6124 			    so->so_rcv.sb_cc);
6125 		}
6126 	}
6127 stage_left:
6128 	if (wakeup_read_socket) {
6129 		sctp_sorwakeup(inp, so);
6130 	}
6131 	return (error);
6132 }
6133 
6134 
6135 #ifdef SCTP_MBUF_LOGGING
6136 struct mbuf *
6137 sctp_m_free(struct mbuf *m)
6138 {
6139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6140 		if (SCTP_BUF_IS_EXTENDED(m)) {
6141 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6142 		}
6143 	}
6144 	return (m_free(m));
6145 }
6146 
6147 void
6148 sctp_m_freem(struct mbuf *mb)
6149 {
6150 	while (mb != NULL)
6151 		mb = sctp_m_free(mb);
6152 }
6153 
6154 #endif
6155 
6156 int
6157 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6158 {
6159 	/*
6160 	 * Given a local address. For all associations that holds the
6161 	 * address, request a peer-set-primary.
6162 	 */
6163 	struct sctp_ifa *ifa;
6164 	struct sctp_laddr *wi;
6165 
6166 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6167 	if (ifa == NULL) {
6168 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6169 		return (EADDRNOTAVAIL);
6170 	}
6171 	/*
6172 	 * Now that we have the ifa we must awaken the iterator with this
6173 	 * message.
6174 	 */
6175 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6176 	if (wi == NULL) {
6177 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6178 		return (ENOMEM);
6179 	}
6180 	/* Now incr the count and int wi structure */
6181 	SCTP_INCR_LADDR_COUNT();
6182 	bzero(wi, sizeof(*wi));
6183 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6184 	wi->ifa = ifa;
6185 	wi->action = SCTP_SET_PRIM_ADDR;
6186 	atomic_add_int(&ifa->refcount, 1);
6187 
6188 	/* Now add it to the work queue */
6189 	SCTP_WQ_ADDR_LOCK();
6190 	/*
6191 	 * Should this really be a tailq? As it is we will process the
6192 	 * newest first :-0
6193 	 */
6194 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6195 	SCTP_WQ_ADDR_UNLOCK();
6196 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6197 	    (struct sctp_inpcb *)NULL,
6198 	    (struct sctp_tcb *)NULL,
6199 	    (struct sctp_nets *)NULL);
6200 	return (0);
6201 }
6202 
6203 
6204 int
6205 sctp_soreceive(struct socket *so,
6206     struct sockaddr **psa,
6207     struct uio *uio,
6208     struct mbuf **mp0,
6209     struct mbuf **controlp,
6210     int *flagsp)
6211 {
6212 	int error, fromlen;
6213 	uint8_t sockbuf[256];
6214 	struct sockaddr *from;
6215 	struct sctp_extrcvinfo sinfo;
6216 	int filling_sinfo = 1;
6217 	struct sctp_inpcb *inp;
6218 
6219 	inp = (struct sctp_inpcb *)so->so_pcb;
6220 	/* pickup the assoc we are reading from */
6221 	if (inp == NULL) {
6222 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6223 		return (EINVAL);
6224 	}
6225 	if ((sctp_is_feature_off(inp,
6226 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6227 	    (controlp == NULL)) {
6228 		/* user does not want the sndrcv ctl */
6229 		filling_sinfo = 0;
6230 	}
6231 	if (psa) {
6232 		from = (struct sockaddr *)sockbuf;
6233 		fromlen = sizeof(sockbuf);
6234 		from->sa_len = 0;
6235 	} else {
6236 		from = NULL;
6237 		fromlen = 0;
6238 	}
6239 
6240 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6241 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6242 	if ((controlp) && (filling_sinfo)) {
6243 		/* copy back the sinfo in a CMSG format */
6244 		if (filling_sinfo)
6245 			*controlp = sctp_build_ctl_nchunk(inp,
6246 			    (struct sctp_sndrcvinfo *)&sinfo);
6247 		else
6248 			*controlp = NULL;
6249 	}
6250 	if (psa) {
6251 		/* copy back the address info */
6252 		if (from && from->sa_len) {
6253 			*psa = sodupsockaddr(from, M_NOWAIT);
6254 		} else {
6255 			*psa = NULL;
6256 		}
6257 	}
6258 	return (error);
6259 }
6260 
6261 
6262 int
6263 sctp_l_soreceive(struct socket *so,
6264     struct sockaddr **name,
6265     struct uio *uio,
6266     char **controlp,
6267     int *controllen,
6268     int *flag)
6269 {
6270 	int error, fromlen;
6271 	uint8_t sockbuf[256];
6272 	struct sockaddr *from;
6273 	struct sctp_extrcvinfo sinfo;
6274 	int filling_sinfo = 1;
6275 	struct sctp_inpcb *inp;
6276 
6277 	inp = (struct sctp_inpcb *)so->so_pcb;
6278 	/* pickup the assoc we are reading from */
6279 	if (inp == NULL) {
6280 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6281 		return (EINVAL);
6282 	}
6283 	if ((sctp_is_feature_off(inp,
6284 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6285 	    (controlp == NULL)) {
6286 		/* user does not want the sndrcv ctl */
6287 		filling_sinfo = 0;
6288 	}
6289 	if (name) {
6290 		from = (struct sockaddr *)sockbuf;
6291 		fromlen = sizeof(sockbuf);
6292 		from->sa_len = 0;
6293 	} else {
6294 		from = NULL;
6295 		fromlen = 0;
6296 	}
6297 
6298 	error = sctp_sorecvmsg(so, uio,
6299 	    (struct mbuf **)NULL,
6300 	    from, fromlen, flag,
6301 	    (struct sctp_sndrcvinfo *)&sinfo,
6302 	    filling_sinfo);
6303 	if ((controlp) && (filling_sinfo)) {
6304 		/*
6305 		 * copy back the sinfo in a CMSG format note that the caller
6306 		 * has reponsibility for freeing the memory.
6307 		 */
6308 		if (filling_sinfo)
6309 			*controlp = sctp_build_ctl_cchunk(inp,
6310 			    controllen,
6311 			    (struct sctp_sndrcvinfo *)&sinfo);
6312 	}
6313 	if (name) {
6314 		/* copy back the address info */
6315 		if (from && from->sa_len) {
6316 			*name = sodupsockaddr(from, M_WAIT);
6317 		} else {
6318 			*name = NULL;
6319 		}
6320 	}
6321 	return (error);
6322 }
6323 
6324 
6325 
6326 
6327 
6328 
6329 
6330 int
6331 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6332     int totaddr, int *error)
6333 {
6334 	int added = 0;
6335 	int i;
6336 	struct sctp_inpcb *inp;
6337 	struct sockaddr *sa;
6338 	size_t incr = 0;
6339 
6340 	sa = addr;
6341 	inp = stcb->sctp_ep;
6342 	*error = 0;
6343 	for (i = 0; i < totaddr; i++) {
6344 		if (sa->sa_family == AF_INET) {
6345 			incr = sizeof(struct sockaddr_in);
6346 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6347 				/* assoc gone no un-lock */
6348 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6349 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6350 				*error = ENOBUFS;
6351 				goto out_now;
6352 			}
6353 			added++;
6354 		} else if (sa->sa_family == AF_INET6) {
6355 			incr = sizeof(struct sockaddr_in6);
6356 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6357 				/* assoc gone no un-lock */
6358 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6359 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6360 				*error = ENOBUFS;
6361 				goto out_now;
6362 			}
6363 			added++;
6364 		}
6365 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6366 	}
6367 out_now:
6368 	return (added);
6369 }
6370 
6371 struct sctp_tcb *
6372 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6373     int *totaddr, int *num_v4, int *num_v6, int *error,
6374     int limit, int *bad_addr)
6375 {
6376 	struct sockaddr *sa;
6377 	struct sctp_tcb *stcb = NULL;
6378 	size_t incr, at, i;
6379 
6380 	at = incr = 0;
6381 	sa = addr;
6382 	*error = *num_v6 = *num_v4 = 0;
6383 	/* account and validate addresses */
6384 	for (i = 0; i < (size_t)*totaddr; i++) {
6385 		if (sa->sa_family == AF_INET) {
6386 			(*num_v4) += 1;
6387 			incr = sizeof(struct sockaddr_in);
6388 			if (sa->sa_len != incr) {
6389 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6390 				*error = EINVAL;
6391 				*bad_addr = 1;
6392 				return (NULL);
6393 			}
6394 		} else if (sa->sa_family == AF_INET6) {
6395 			struct sockaddr_in6 *sin6;
6396 
6397 			sin6 = (struct sockaddr_in6 *)sa;
6398 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6399 				/* Must be non-mapped for connectx */
6400 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6401 				*error = EINVAL;
6402 				*bad_addr = 1;
6403 				return (NULL);
6404 			}
6405 			(*num_v6) += 1;
6406 			incr = sizeof(struct sockaddr_in6);
6407 			if (sa->sa_len != incr) {
6408 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6409 				*error = EINVAL;
6410 				*bad_addr = 1;
6411 				return (NULL);
6412 			}
6413 		} else {
6414 			*totaddr = i;
6415 			/* we are done */
6416 			break;
6417 		}
6418 		SCTP_INP_INCR_REF(inp);
6419 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6420 		if (stcb != NULL) {
6421 			/* Already have or am bring up an association */
6422 			return (stcb);
6423 		} else {
6424 			SCTP_INP_DECR_REF(inp);
6425 		}
6426 		if ((at + incr) > (size_t)limit) {
6427 			*totaddr = i;
6428 			break;
6429 		}
6430 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6431 	}
6432 	return ((struct sctp_tcb *)NULL);
6433 }
6434 
6435 /*
6436  * sctp_bindx(ADD) for one address.
6437  * assumes all arguments are valid/checked by caller.
6438  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Add one address to a subset-bound endpoint: validate the
	 * address family against the socket type, then either perform
	 * the initial bind (endpoint still unbound) or add the address
	 * to the endpoint's address list.  Errors are reported via
	 * *error; 0 means success.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped address to plain IPv4;
			 * addr_touse now points at the on-stack "sin",
			 * which is only valid within this function.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* First address on an unbound endpoint: do a real bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Check whether some other endpoint already owns addr:port. */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free; add it with the port zeroed. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6560 
6561 /*
6562  * sctp_bindx(DELETE) for one address.
6563  * assumes all arguments are valid/checked by caller.
6564  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Delete one address from a subset-bound endpoint.  Mirrors
	 * sctp_bindx_add_address() for validation; errors are reported
	 * via *error.  NOTE(review): unlike the ADD path, no port
	 * validation is performed here -- confirm this is intentional.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped address to plain IPv4;
			 * addr_touse now points at the on-stack "sin",
			 * which is only valid within this function.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6643 
6644 /*
6645  * returns the valid local address count for an assoc, taking into account
6646  * all scoping rules
6647  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, private-v4, link-
	 * and site-local v6) and the endpoint's v4/v6 legality.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* A v6 socket allows v4 too unless it is v6-only. */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	/* Read lock held across the whole address walk. */
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link-local
									 * address:
									 * scope id
									 * cannot be
									 * recovered
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6779 
6780 #if defined(SCTP_LOCAL_TRACE_BUF)
6781 
6782 void
6783 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6784 {
6785 	uint32_t saveindex, newindex;
6786 
6787 	do {
6788 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6789 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6790 			newindex = 1;
6791 		} else {
6792 			newindex = saveindex + 1;
6793 		}
6794 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6795 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6796 		saveindex = 0;
6797 	}
6798 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6799 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6800 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6801 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6802 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6803 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6804 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6805 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6806 }
6807 
6808 #endif
6809 /* We will need to add support
6810  * to bind the ports and such here
6811  * so we can do UDP tunneling. In
6812  * the mean-time, we return error
6813  */
6814 #include <netinet/udp.h>
6815 #include <netinet/udp_var.h>
6816 #include <sys/proc.h>
6817 #ifdef INET6
6818 #include <netinet6/sctp6_var.h>
6819 #endif
6820 
6821 static void
6822 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6823 {
6824 	struct ip *iph;
6825 	struct mbuf *sp, *last;
6826 	struct udphdr *uhdr;
6827 	uint16_t port = 0, len;
6828 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6829 
6830 	/*
6831 	 * Split out the mbuf chain. Leave the IP header in m, place the
6832 	 * rest in the sp.
6833 	 */
6834 	if ((m->m_flags & M_PKTHDR) == 0) {
6835 		/* Can't handle one that is not a pkt hdr */
6836 		goto out;
6837 	}
6838 	/* pull the src port */
6839 	iph = mtod(m, struct ip *);
6840 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6841 
6842 	port = uhdr->uh_sport;
6843 	sp = m_split(m, off, M_DONTWAIT);
6844 	if (sp == NULL) {
6845 		/* Gak, drop packet, we can't do a split */
6846 		goto out;
6847 	}
6848 	if (sp->m_pkthdr.len < header_size) {
6849 		/* Gak, packet can't have an SCTP header in it - to small */
6850 		m_freem(sp);
6851 		goto out;
6852 	}
6853 	/* ok now pull up the UDP header and SCTP header together */
6854 	sp = m_pullup(sp, header_size);
6855 	if (sp == NULL) {
6856 		/* Gak pullup failed */
6857 		goto out;
6858 	}
6859 	/* trim out the UDP header */
6860 	m_adj(sp, sizeof(struct udphdr));
6861 
6862 	/* Now reconstruct the mbuf chain */
6863 	/* 1) find last one */
6864 	last = m;
6865 	while (last->m_next != NULL) {
6866 		last = last->m_next;
6867 	}
6868 	last->m_next = sp;
6869 	m->m_pkthdr.len += sp->m_pkthdr.len;
6870 	last = m;
6871 	while (last != NULL) {
6872 		last = last->m_next;
6873 	}
6874 	/* Now its ready for sctp_input or sctp6_input */
6875 	iph = mtod(m, struct ip *);
6876 	switch (iph->ip_v) {
6877 	case IPVERSION:
6878 		{
6879 			/* its IPv4 */
6880 			len = SCTP_GET_IPV4_LENGTH(iph);
6881 			len -= sizeof(struct udphdr);
6882 			SCTP_GET_IPV4_LENGTH(iph) = len;
6883 			sctp_input_with_port(m, off, port);
6884 			break;
6885 		}
6886 #ifdef INET6
6887 	case IPV6_VERSION >> 4:
6888 		{
6889 			/* its IPv6 - NOT supported */
6890 			goto out;
6891 			break;
6892 
6893 		}
6894 #endif
6895 	default:
6896 		{
6897 			m_freem(m);
6898 			break;
6899 		}
6900 	}
6901 	return;
6902 out:
6903 	m_freem(m);
6904 }
6905 
6906 void
6907 sctp_over_udp_stop(void)
6908 {
6909 	struct socket *sop;
6910 
6911 	/*
6912 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6913 	 * for writting!
6914 	 */
6915 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6916 		/* Nothing to do */
6917 		return;
6918 	}
6919 	sop = SCTP_BASE_INFO(udp_tun_socket);
6920 	soclose(sop);
6921 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6922 }
6923 int
6924 sctp_over_udp_start(void)
6925 {
6926 	uint16_t port;
6927 	int ret;
6928 	struct sockaddr_in sin;
6929 	struct socket *sop = NULL;
6930 	struct thread *th;
6931 	struct ucred *cred;
6932 
6933 	/*
6934 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6935 	 * for writting!
6936 	 */
6937 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6938 	if (port == 0) {
6939 		/* Must have a port set */
6940 		return (EINVAL);
6941 	}
6942 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6943 		/* Already running -- must stop first */
6944 		return (EALREADY);
6945 	}
6946 	th = curthread;
6947 	cred = th->td_ucred;
6948 	if ((ret = socreate(PF_INET, &sop,
6949 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6950 		return (ret);
6951 	}
6952 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6953 	/* call the special UDP hook */
6954 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6955 	if (ret) {
6956 		goto exit_stage_left;
6957 	}
6958 	/* Ok we have a socket, bind it to the port */
6959 	memset(&sin, 0, sizeof(sin));
6960 	sin.sin_len = sizeof(sin);
6961 	sin.sin_family = AF_INET;
6962 	sin.sin_port = htons(port);
6963 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6964 	if (ret) {
6965 		/* Close up we cant get the port */
6966 exit_stage_left:
6967 		sctp_over_udp_stop();
6968 		return (ret);
6969 	}
6970 	/*
6971 	 * Ok we should now get UDP packets directly to our input routine
6972 	 * sctp_recv_upd_tunneled_packet().
6973 	 */
6974 	return (0);
6975 }
6976