xref: /freebsd/sys/netinet/sctputil.c (revision 7aa383846770374466b1dcb2cefd71bde9acf463)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 #include <netinet/sctp_bsd_addr.h>
52 
53 #define NUMBER_OF_MTU_SIZES 18
54 
55 
56 #ifndef KTR_SCTP
57 #define KTR_SCTP KTR_SUBSYS
58 #endif
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 
221 }
222 
223 
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252     int from)
253 {
254 	struct sctp_cwnd_log sctp_clog;
255 
256 	if (control == NULL) {
257 		SCTP_PRINTF("Gak log of NULL?\n");
258 		return;
259 	}
260 	sctp_clog.x.strlog.stcb = control->stcb;
261 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 	sctp_clog.x.strlog.strm = control->sinfo_stream;
264 	if (poschk != NULL) {
265 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 	} else {
268 		sctp_clog.x.strlog.e_tsn = 0;
269 		sctp_clog.x.strlog.e_sseq = 0;
270 	}
271 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 	    SCTP_LOG_EVENT_STRM,
273 	    from,
274 	    sctp_clog.x.misc.log1,
275 	    sctp_clog.x.misc.log2,
276 	    sctp_clog.x.misc.log3,
277 	    sctp_clog.x.misc.log4);
278 
279 }
280 
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 	struct sctp_cwnd_log sctp_clog;
285 
286 	sctp_clog.x.cwnd.net = net;
287 	if (stcb->asoc.send_queue_cnt > 255)
288 		sctp_clog.x.cwnd.cnt_in_send = 255;
289 	else
290 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 	if (stcb->asoc.stream_queue_cnt > 255)
292 		sctp_clog.x.cwnd.cnt_in_str = 255;
293 	else
294 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295 
296 	if (net) {
297 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 		sctp_clog.x.cwnd.inflight = net->flight_size;
299 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 	}
303 	if (SCTP_CWNDLOG_PRESEND == from) {
304 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 	}
306 	sctp_clog.x.cwnd.cwnd_augment = augment;
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	    SCTP_LOG_EVENT_CWND,
309 	    from,
310 	    sctp_clog.x.misc.log1,
311 	    sctp_clog.x.misc.log2,
312 	    sctp_clog.x.misc.log3,
313 	    sctp_clog.x.misc.log4);
314 
315 }
316 
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 	struct sctp_cwnd_log sctp_clog;
321 
322 	memset(&sctp_clog, 0, sizeof(sctp_clog));
323 	if (inp) {
324 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325 
326 	} else {
327 		sctp_clog.x.lock.sock = (void *)NULL;
328 	}
329 	sctp_clog.x.lock.inp = (void *)inp;
330 	if (stcb) {
331 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 	} else {
333 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 	}
335 	if (inp) {
336 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 	} else {
339 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 	if (inp->sctp_socket) {
344 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 	} else {
348 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 	    SCTP_LOG_LOCK_EVENT,
354 	    from,
355 	    sctp_clog.x.misc.log1,
356 	    sctp_clog.x.misc.log2,
357 	    sctp_clog.x.misc.log3,
358 	    sctp_clog.x.misc.log4);
359 
360 }
361 
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 	struct sctp_cwnd_log sctp_clog;
366 
367 	memset(&sctp_clog, 0, sizeof(sctp_clog));
368 	sctp_clog.x.cwnd.net = net;
369 	sctp_clog.x.cwnd.cwnd_new_value = error;
370 	sctp_clog.x.cwnd.inflight = net->flight_size;
371 	sctp_clog.x.cwnd.cwnd_augment = burst;
372 	if (stcb->asoc.send_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_send = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 	if (stcb->asoc.stream_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_str = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 	    SCTP_LOG_EVENT_MAXBURST,
382 	    from,
383 	    sctp_clog.x.misc.log1,
384 	    sctp_clog.x.misc.log2,
385 	    sctp_clog.x.misc.log3,
386 	    sctp_clog.x.misc.log4);
387 
388 }
389 
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 	struct sctp_cwnd_log sctp_clog;
394 
395 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 	sctp_clog.x.rwnd.send_size = snd_size;
397 	sctp_clog.x.rwnd.overhead = overhead;
398 	sctp_clog.x.rwnd.new_rwnd = 0;
399 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 	    SCTP_LOG_EVENT_RWND,
401 	    from,
402 	    sctp_clog.x.misc.log1,
403 	    sctp_clog.x.misc.log2,
404 	    sctp_clog.x.misc.log3,
405 	    sctp_clog.x.misc.log4);
406 }
407 
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 	struct sctp_cwnd_log sctp_clog;
412 
413 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 	sctp_clog.x.rwnd.send_size = flight_size;
415 	sctp_clog.x.rwnd.overhead = overhead;
416 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_RWND,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 }
425 
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 	struct sctp_cwnd_log sctp_clog;
430 
431 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 	sctp_clog.x.mbcnt.size_change = book;
433 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 	    SCTP_LOG_EVENT_MBCNT,
437 	    from,
438 	    sctp_clog.x.misc.log1,
439 	    sctp_clog.x.misc.log2,
440 	    sctp_clog.x.misc.log3,
441 	    sctp_clog.x.misc.log4);
442 
443 }
444 
445 void
446 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
447 {
448 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
449 	    SCTP_LOG_MISC_EVENT,
450 	    from,
451 	    a, b, c, d);
452 }
453 
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 	struct sctp_cwnd_log sctp_clog;
458 
459 	sctp_clog.x.wake.stcb = (void *)stcb;
460 	sctp_clog.x.wake.wake_cnt = wake_cnt;
461 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464 
465 	if (stcb->asoc.stream_queue_cnt < 0xff)
466 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 	else
468 		sctp_clog.x.wake.stream_qcnt = 0xff;
469 
470 	if (stcb->asoc.chunks_on_out_queue < 0xff)
471 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 	else
473 		sctp_clog.x.wake.chunks_on_oque = 0xff;
474 
475 	sctp_clog.x.wake.sctpflags = 0;
476 	/* set in the defered mode stuff */
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 		sctp_clog.x.wake.sctpflags |= 1;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 		sctp_clog.x.wake.sctpflags |= 2;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 		sctp_clog.x.wake.sctpflags |= 4;
483 	/* what about the sb */
484 	if (stcb->sctp_socket) {
485 		struct socket *so = stcb->sctp_socket;
486 
487 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 	} else {
489 		sctp_clog.x.wake.sbflags = 0xff;
490 	}
491 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 	    SCTP_LOG_EVENT_WAKE,
493 	    from,
494 	    sctp_clog.x.misc.log1,
495 	    sctp_clog.x.misc.log2,
496 	    sctp_clog.x.misc.log3,
497 	    sctp_clog.x.misc.log4);
498 
499 }
500 
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 	sctp_clog.x.blk.sndlen = sendlen;
513 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 	    SCTP_LOG_EVENT_BLOCK,
515 	    from,
516 	    sctp_clog.x.misc.log1,
517 	    sctp_clog.x.misc.log2,
518 	    sctp_clog.x.misc.log3,
519 	    sctp_clog.x.misc.log4);
520 
521 }
522 
523 int
524 sctp_fill_stat_log(void *optval, size_t *optsize)
525 {
526 	/* May need to fix this if ktrdump does not work */
527 	return (0);
528 }
529 
530 #ifdef SCTP_AUDITING_ENABLED
531 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
532 static int sctp_audit_indx = 0;
533 
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 	int i;
539 	int cnt;
540 
541 	cnt = 0;
542 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 		if ((sctp_audit_data[i][0] == 0xe0) &&
544 		    (sctp_audit_data[i][1] == 0x01)) {
545 			cnt = 0;
546 			SCTP_PRINTF("\n");
547 		} else if (sctp_audit_data[i][0] == 0xf0) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			SCTP_PRINTF("\n");
553 			cnt = 0;
554 		}
555 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 		    (uint32_t) sctp_audit_data[i][1]);
557 		cnt++;
558 		if ((cnt % 14) == 0)
559 			SCTP_PRINTF("\n");
560 	}
561 	for (i = 0; i < sctp_audit_indx; i++) {
562 		if ((sctp_audit_data[i][0] == 0xe0) &&
563 		    (sctp_audit_data[i][1] == 0x01)) {
564 			cnt = 0;
565 			SCTP_PRINTF("\n");
566 		} else if (sctp_audit_data[i][0] == 0xf0) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			SCTP_PRINTF("\n");
572 			cnt = 0;
573 		}
574 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 		    (uint32_t) sctp_audit_data[i][1]);
576 		cnt++;
577 		if ((cnt % 14) == 0)
578 			SCTP_PRINTF("\n");
579 	}
580 	SCTP_PRINTF("\n");
581 }
582 
583 void
584 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
585     struct sctp_nets *net)
586 {
587 	int resend_cnt, tot_out, rep, tot_book_cnt;
588 	struct sctp_nets *lnet;
589 	struct sctp_tmit_chunk *chk;
590 
591 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
592 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
593 	sctp_audit_indx++;
594 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
595 		sctp_audit_indx = 0;
596 	}
597 	if (inp == NULL) {
598 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
599 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
600 		sctp_audit_indx++;
601 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 			sctp_audit_indx = 0;
603 		}
604 		return;
605 	}
606 	if (stcb == NULL) {
607 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
608 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
609 		sctp_audit_indx++;
610 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
611 			sctp_audit_indx = 0;
612 		}
613 		return;
614 	}
615 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
616 	sctp_audit_data[sctp_audit_indx][1] =
617 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
618 	sctp_audit_indx++;
619 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
620 		sctp_audit_indx = 0;
621 	}
622 	rep = 0;
623 	tot_book_cnt = 0;
624 	resend_cnt = tot_out = 0;
625 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
626 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
627 			resend_cnt++;
628 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
629 			tot_out += chk->book_size;
630 			tot_book_cnt++;
631 		}
632 	}
633 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
634 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
635 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
636 		sctp_audit_indx++;
637 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
638 			sctp_audit_indx = 0;
639 		}
640 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
641 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
642 		rep = 1;
643 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
644 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
645 		sctp_audit_data[sctp_audit_indx][1] =
646 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
647 		sctp_audit_indx++;
648 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
649 			sctp_audit_indx = 0;
650 		}
651 	}
652 	if (tot_out != stcb->asoc.total_flight) {
653 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
654 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
655 		sctp_audit_indx++;
656 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
657 			sctp_audit_indx = 0;
658 		}
659 		rep = 1;
660 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
661 		    (int)stcb->asoc.total_flight);
662 		stcb->asoc.total_flight = tot_out;
663 	}
664 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
665 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
666 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
667 		sctp_audit_indx++;
668 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
669 			sctp_audit_indx = 0;
670 		}
671 		rep = 1;
672 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
673 
674 		stcb->asoc.total_flight_count = tot_book_cnt;
675 	}
676 	tot_out = 0;
677 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
678 		tot_out += lnet->flight_size;
679 	}
680 	if (tot_out != stcb->asoc.total_flight) {
681 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
682 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
683 		sctp_audit_indx++;
684 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
685 			sctp_audit_indx = 0;
686 		}
687 		rep = 1;
688 		SCTP_PRINTF("real flight:%d net total was %d\n",
689 		    stcb->asoc.total_flight, tot_out);
690 		/* now corrective action */
691 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
692 
693 			tot_out = 0;
694 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
695 				if ((chk->whoTo == lnet) &&
696 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
697 					tot_out += chk->book_size;
698 				}
699 			}
700 			if (lnet->flight_size != tot_out) {
701 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
702 				    lnet, lnet->flight_size,
703 				    tot_out);
704 				lnet->flight_size = tot_out;
705 			}
706 		}
707 	}
708 	if (rep) {
709 		sctp_print_audit_report();
710 	}
711 }
712 
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716 
717 	sctp_audit_data[sctp_audit_indx][0] = ev;
718 	sctp_audit_data[sctp_audit_indx][1] = fd;
719 	sctp_audit_indx++;
720 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 		sctp_audit_indx = 0;
722 	}
723 }
724 
725 #endif
726 
727 /*
728  * a list of sizes based on typical mtu's, used only if next hop size not
729  * returned.
730  */
731 static int sctp_mtu_sizes[] = {
732 	68,
733 	296,
734 	508,
735 	512,
736 	544,
737 	576,
738 	1006,
739 	1492,
740 	1500,
741 	1536,
742 	2002,
743 	2048,
744 	4352,
745 	4464,
746 	8166,
747 	17914,
748 	32000,
749 	65535
750 };
751 
752 void
753 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
754 {
755 	struct sctp_association *asoc;
756 	struct sctp_nets *net;
757 
758 	asoc = &stcb->asoc;
759 
760 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
762 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
763 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
764 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
765 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
766 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
767 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
768 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
769 	}
770 }
771 
772 int
773 find_next_best_mtu(int totsz)
774 {
775 	int i, perfer;
776 
777 	/*
778 	 * if we are in here we must find the next best fit based on the
779 	 * size of the dg that failed to be sent.
780 	 */
781 	perfer = 0;
782 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
783 		if (totsz < sctp_mtu_sizes[i]) {
784 			perfer = i - 1;
785 			if (perfer < 0)
786 				perfer = 0;
787 			break;
788 		}
789 	}
790 	return (sctp_mtu_sizes[perfer]);
791 }
792 
793 void
794 sctp_fill_random_store(struct sctp_pcb *m)
795 {
796 	/*
797 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
798 	 * our counter. The result becomes our good random numbers and we
799 	 * then setup to give these out. Note that we do no locking to
800 	 * protect this. This is ok, since if competing folks call this we
801 	 * will get more gobbled gook in the random store which is what we
802 	 * want. There is a danger that two guys will use the same random
803 	 * numbers, but thats ok too since that is random as well :->
804 	 */
805 	m->store_at = 0;
806 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
807 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
808 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
809 	m->random_counter++;
810 }
811 
812 uint32_t
813 sctp_select_initial_TSN(struct sctp_pcb *inp)
814 {
815 	/*
816 	 * A true implementation should use random selection process to get
817 	 * the initial stream sequence number, using RFC1750 as a good
818 	 * guideline
819 	 */
820 	uint32_t x, *xp;
821 	uint8_t *p;
822 	int store_at, new_store;
823 
824 	if (inp->initial_sequence_debug != 0) {
825 		uint32_t ret;
826 
827 		ret = inp->initial_sequence_debug;
828 		inp->initial_sequence_debug++;
829 		return (ret);
830 	}
831 retry:
832 	store_at = inp->store_at;
833 	new_store = store_at + sizeof(uint32_t);
834 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
835 		new_store = 0;
836 	}
837 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
838 		goto retry;
839 	}
840 	if (new_store == 0) {
841 		/* Refill the random store */
842 		sctp_fill_random_store(inp);
843 	}
844 	p = &inp->random_store[store_at];
845 	xp = (uint32_t *) p;
846 	x = *xp;
847 	return (x);
848 }
849 
850 uint32_t
851 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
852 {
853 	uint32_t x, not_done;
854 	struct timeval now;
855 
856 	(void)SCTP_GETTIME_TIMEVAL(&now);
857 	not_done = 1;
858 	while (not_done) {
859 		x = sctp_select_initial_TSN(&inp->sctp_ep);
860 		if (x == 0) {
861 			/* we never use 0 */
862 			continue;
863 		}
864 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
865 			not_done = 0;
866 		}
867 	}
868 	return (x);
869 }
870 
871 int
872 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
873     uint32_t override_tag, uint32_t vrf_id)
874 {
875 	struct sctp_association *asoc;
876 
877 	/*
878 	 * Anything set to zero is taken care of by the allocation routine's
879 	 * bzero
880 	 */
881 
882 	/*
883 	 * Up front select what scoping to apply on addresses I tell my peer
884 	 * Not sure what to do with these right now, we will need to come up
885 	 * with a way to set them. We may need to pass them through from the
886 	 * caller in the sctp_aloc_assoc() function.
887 	 */
888 	int i;
889 
890 	asoc = &stcb->asoc;
891 	/* init all variables to a known value. */
892 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
893 	asoc->max_burst = m->sctp_ep.max_burst;
894 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
895 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
896 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
897 	/* EY Init nr_sack variable */
898 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
899 	/* JRS 5/21/07 - Init CMT PF variables */
900 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
901 	asoc->sctp_frag_point = m->sctp_frag_point;
902 #ifdef INET
903 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
904 #else
905 	asoc->default_tos = 0;
906 #endif
907 
908 #ifdef INET6
909 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
910 #else
911 	asoc->default_flowlabel = 0;
912 #endif
913 	asoc->sb_send_resv = 0;
914 	if (override_tag) {
915 		asoc->my_vtag = override_tag;
916 	} else {
917 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
918 	}
919 	/* Get the nonce tags */
920 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
921 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
922 	asoc->vrf_id = vrf_id;
923 
924 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
925 		asoc->hb_is_disabled = 1;
926 	else
927 		asoc->hb_is_disabled = 0;
928 
929 #ifdef SCTP_ASOCLOG_OF_TSNS
930 	asoc->tsn_in_at = 0;
931 	asoc->tsn_out_at = 0;
932 	asoc->tsn_in_wrapped = 0;
933 	asoc->tsn_out_wrapped = 0;
934 	asoc->cumack_log_at = 0;
935 	asoc->cumack_log_atsnt = 0;
936 #endif
937 #ifdef SCTP_FS_SPEC_LOG
938 	asoc->fs_index = 0;
939 #endif
940 	asoc->refcnt = 0;
941 	asoc->assoc_up_sent = 0;
942 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
943 	    sctp_select_initial_TSN(&m->sctp_ep);
944 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
945 	/* we are optimisitic here */
946 	asoc->peer_supports_pktdrop = 1;
947 	asoc->peer_supports_nat = 0;
948 	asoc->sent_queue_retran_cnt = 0;
949 
950 	/* for CMT */
951 	asoc->last_net_cmt_send_started = NULL;
952 
953 	/* This will need to be adjusted */
954 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
955 	asoc->last_acked_seq = asoc->init_seq_number - 1;
956 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
957 	asoc->asconf_seq_in = asoc->last_acked_seq;
958 
959 	/* here we are different, we hold the next one we expect */
960 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
961 
962 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
963 	asoc->initial_rto = m->sctp_ep.initial_rto;
964 
965 	asoc->max_init_times = m->sctp_ep.max_init_times;
966 	asoc->max_send_times = m->sctp_ep.max_send_times;
967 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
968 	asoc->free_chunk_cnt = 0;
969 
970 	asoc->iam_blocking = 0;
971 	/* ECN Nonce initialization */
972 	asoc->context = m->sctp_context;
973 	asoc->def_send = m->def_send;
974 	asoc->ecn_nonce_allowed = 0;
975 	asoc->receiver_nonce_sum = 1;
976 	asoc->nonce_sum_expect_base = 1;
977 	asoc->nonce_sum_check = 1;
978 	asoc->nonce_resync_tsn = 0;
979 	asoc->nonce_wait_for_ecne = 0;
980 	asoc->nonce_wait_tsn = 0;
981 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
982 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
983 	asoc->pr_sctp_cnt = 0;
984 	asoc->total_output_queue_size = 0;
985 
986 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
987 		struct in6pcb *inp6;
988 
989 		/* Its a V6 socket */
990 		inp6 = (struct in6pcb *)m;
991 		asoc->ipv6_addr_legal = 1;
992 		/* Now look at the binding flag to see if V4 will be legal */
993 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
994 			asoc->ipv4_addr_legal = 1;
995 		} else {
996 			/* V4 addresses are NOT legal on the association */
997 			asoc->ipv4_addr_legal = 0;
998 		}
999 	} else {
1000 		/* Its a V4 socket, no - V6 */
1001 		asoc->ipv4_addr_legal = 1;
1002 		asoc->ipv6_addr_legal = 0;
1003 	}
1004 
1005 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1006 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1007 
1008 	asoc->smallest_mtu = m->sctp_frag_point;
1009 #ifdef SCTP_PRINT_FOR_B_AND_M
1010 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1011 	    asoc->smallest_mtu);
1012 #endif
1013 	asoc->minrto = m->sctp_ep.sctp_minrto;
1014 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1015 
1016 	asoc->locked_on_sending = NULL;
1017 	asoc->stream_locked_on = 0;
1018 	asoc->ecn_echo_cnt_onq = 0;
1019 	asoc->stream_locked = 0;
1020 
1021 	asoc->send_sack = 1;
1022 
1023 	LIST_INIT(&asoc->sctp_restricted_addrs);
1024 
1025 	TAILQ_INIT(&asoc->nets);
1026 	TAILQ_INIT(&asoc->pending_reply_queue);
1027 	TAILQ_INIT(&asoc->asconf_ack_sent);
1028 	/* Setup to fill the hb random cache at first HB */
1029 	asoc->hb_random_idx = 4;
1030 
1031 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1032 
1033 	/*
1034 	 * JRS - Pick the default congestion control module based on the
1035 	 * sysctl.
1036 	 */
1037 	switch (m->sctp_ep.sctp_default_cc_module) {
1038 		/* JRS - Standard TCP congestion control */
1039 	case SCTP_CC_RFC2581:
1040 		{
1041 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1042 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1043 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1044 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1045 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1046 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1047 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1050 			break;
1051 		}
1052 		/* JRS - High Speed TCP congestion control (Floyd) */
1053 	case SCTP_CC_HSTCP:
1054 		{
1055 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1056 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1064 			break;
1065 		}
1066 		/* JRS - HTCP congestion control */
1067 	case SCTP_CC_HTCP:
1068 		{
1069 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1070 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1078 			break;
1079 		}
1080 		/* JRS - By default, use RFC2581 */
1081 	default:
1082 		{
1083 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1084 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1092 			break;
1093 		}
1094 	}
1095 
1096 	/*
1097 	 * Now the stream parameters, here we allocate space for all streams
1098 	 * that we request by default.
1099 	 */
1100 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1101 	    m->sctp_ep.pre_open_stream_count;
1102 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1103 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1104 	    SCTP_M_STRMO);
1105 	if (asoc->strmout == NULL) {
1106 		/* big trouble no memory */
1107 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1108 		return (ENOMEM);
1109 	}
1110 	for (i = 0; i < asoc->streamoutcnt; i++) {
1111 		/*
1112 		 * inbound side must be set to 0xffff, also NOTE when we get
1113 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1114 		 * count (streamoutcnt) but first check if we sent to any of
1115 		 * the upper streams that were dropped (if some were). Those
1116 		 * that were dropped must be notified to the upper layer as
1117 		 * failed to send.
1118 		 */
1119 		asoc->strmout[i].next_sequence_sent = 0x0;
1120 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1121 		asoc->strmout[i].stream_no = i;
1122 		asoc->strmout[i].last_msg_incomplete = 0;
1123 		asoc->strmout[i].next_spoke.tqe_next = 0;
1124 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1125 	}
1126 	/* Now the mapping array */
1127 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1128 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1129 	    SCTP_M_MAP);
1130 	if (asoc->mapping_array == NULL) {
1131 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1132 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1133 		return (ENOMEM);
1134 	}
1135 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1136 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1137 	    SCTP_M_MAP);
1138 	if (asoc->nr_mapping_array == NULL) {
1139 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1140 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1141 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1142 		return (ENOMEM);
1143 	}
1144 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1145 
1146 	/* Now the init of the other outqueues */
1147 	TAILQ_INIT(&asoc->free_chunks);
1148 	TAILQ_INIT(&asoc->out_wheel);
1149 	TAILQ_INIT(&asoc->control_send_queue);
1150 	TAILQ_INIT(&asoc->asconf_send_queue);
1151 	TAILQ_INIT(&asoc->send_queue);
1152 	TAILQ_INIT(&asoc->sent_queue);
1153 	TAILQ_INIT(&asoc->reasmqueue);
1154 	TAILQ_INIT(&asoc->resetHead);
1155 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1156 	TAILQ_INIT(&asoc->asconf_queue);
1157 	/* authentication fields */
1158 	asoc->authinfo.random = NULL;
1159 	asoc->authinfo.active_keyid = 0;
1160 	asoc->authinfo.assoc_key = NULL;
1161 	asoc->authinfo.assoc_keyid = 0;
1162 	asoc->authinfo.recv_key = NULL;
1163 	asoc->authinfo.recv_keyid = 0;
1164 	LIST_INIT(&asoc->shared_keys);
1165 	asoc->marked_retrans = 0;
1166 	asoc->timoinit = 0;
1167 	asoc->timodata = 0;
1168 	asoc->timosack = 0;
1169 	asoc->timoshutdown = 0;
1170 	asoc->timoheartbeat = 0;
1171 	asoc->timocookie = 0;
1172 	asoc->timoshutdownack = 0;
1173 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1174 	asoc->discontinuity_time = asoc->start_time;
1175 	/*
1176 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1177 	 * freed later when the association is freed.
1178 	 */
1179 	return (0);
1180 }
1181 
1182 void
1183 sctp_print_mapping_array(struct sctp_association *asoc)
1184 {
1185 	unsigned int i, limit;
1186 
1187 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1188 	    asoc->mapping_array_size,
1189 	    asoc->mapping_array_base_tsn,
1190 	    asoc->cumulative_tsn,
1191 	    asoc->highest_tsn_inside_map,
1192 	    asoc->highest_tsn_inside_nr_map);
1193 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1194 		if (asoc->mapping_array[limit - 1]) {
1195 			break;
1196 		}
1197 	}
1198 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1199 	for (i = 0; i < limit; i++) {
1200 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1201 		if (((i + 1) % 16) == 0)
1202 			printf("\n");
1203 	}
1204 	if (limit % 16)
1205 		printf("\n");
1206 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1207 		if (asoc->nr_mapping_array[limit - 1]) {
1208 			break;
1209 		}
1210 	}
1211 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1212 	for (i = 0; i < limit; i++) {
1213 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1214 	}
1215 	if (limit % 16)
1216 		printf("\n");
1217 }
1218 
1219 int
1220 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1221 {
1222 	/* mapping array needs to grow */
1223 	uint8_t *new_array1, *new_array2;
1224 	uint32_t new_size;
1225 
1226 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1227 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1228 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1229 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1230 		/* can't get more, forget it */
1231 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1232 		if (new_array1) {
1233 			SCTP_FREE(new_array1, SCTP_M_MAP);
1234 		}
1235 		if (new_array2) {
1236 			SCTP_FREE(new_array2, SCTP_M_MAP);
1237 		}
1238 		return (-1);
1239 	}
1240 	memset(new_array1, 0, new_size);
1241 	memset(new_array2, 0, new_size);
1242 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1243 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1244 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1245 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1246 	asoc->mapping_array = new_array1;
1247 	asoc->nr_mapping_array = new_array2;
1248 	asoc->mapping_array_size = new_size;
1249 	return (0);
1250 }
1251 
1252 
1253 static void
1254 sctp_iterator_work(struct sctp_iterator *it)
1255 {
1256 	int iteration_count = 0;
1257 	int inp_skip = 0;
1258 	int first_in = 1;
1259 	struct sctp_inpcb *tinp;
1260 
1261 	SCTP_INP_INFO_RLOCK();
1262 	SCTP_ITERATOR_LOCK();
1263 	if (it->inp) {
1264 		SCTP_INP_RLOCK(it->inp);
1265 		SCTP_INP_DECR_REF(it->inp);
1266 	}
1267 	if (it->inp == NULL) {
1268 		/* iterator is complete */
1269 done_with_iterator:
1270 		SCTP_ITERATOR_UNLOCK();
1271 		SCTP_INP_INFO_RUNLOCK();
1272 		if (it->function_atend != NULL) {
1273 			(*it->function_atend) (it->pointer, it->val);
1274 		}
1275 		SCTP_FREE(it, SCTP_M_ITER);
1276 		return;
1277 	}
1278 select_a_new_ep:
1279 	if (first_in) {
1280 		first_in = 0;
1281 	} else {
1282 		SCTP_INP_RLOCK(it->inp);
1283 	}
1284 	while (((it->pcb_flags) &&
1285 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1286 	    ((it->pcb_features) &&
1287 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1288 		/* endpoint flags or features don't match, so keep looking */
1289 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1290 			SCTP_INP_RUNLOCK(it->inp);
1291 			goto done_with_iterator;
1292 		}
1293 		tinp = it->inp;
1294 		it->inp = LIST_NEXT(it->inp, sctp_list);
1295 		SCTP_INP_RUNLOCK(tinp);
1296 		if (it->inp == NULL) {
1297 			goto done_with_iterator;
1298 		}
1299 		SCTP_INP_RLOCK(it->inp);
1300 	}
1301 	/* now go through each assoc which is in the desired state */
1302 	if (it->done_current_ep == 0) {
1303 		if (it->function_inp != NULL)
1304 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1305 		it->done_current_ep = 1;
1306 	}
1307 	if (it->stcb == NULL) {
1308 		/* run the per instance function */
1309 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1310 	}
1311 	if ((inp_skip) || it->stcb == NULL) {
1312 		if (it->function_inp_end != NULL) {
1313 			inp_skip = (*it->function_inp_end) (it->inp,
1314 			    it->pointer,
1315 			    it->val);
1316 		}
1317 		SCTP_INP_RUNLOCK(it->inp);
1318 		goto no_stcb;
1319 	}
1320 	while (it->stcb) {
1321 		SCTP_TCB_LOCK(it->stcb);
1322 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1323 			/* not in the right state... keep looking */
1324 			SCTP_TCB_UNLOCK(it->stcb);
1325 			goto next_assoc;
1326 		}
1327 		/* see if we have limited out the iterator loop */
1328 		iteration_count++;
1329 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1330 			/* Pause to let others grab the lock */
1331 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1332 			SCTP_TCB_UNLOCK(it->stcb);
1333 			SCTP_INP_INCR_REF(it->inp);
1334 			SCTP_INP_RUNLOCK(it->inp);
1335 			SCTP_ITERATOR_UNLOCK();
1336 			SCTP_INP_INFO_RUNLOCK();
1337 			SCTP_INP_INFO_RLOCK();
1338 			SCTP_ITERATOR_LOCK();
1339 			if (sctp_it_ctl.iterator_flags) {
1340 				/* We won't be staying here */
1341 				SCTP_INP_DECR_REF(it->inp);
1342 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1343 				if (sctp_it_ctl.iterator_flags &
1344 				    SCTP_ITERATOR_MUST_EXIT) {
1345 					goto done_with_iterator;
1346 				}
1347 				if (sctp_it_ctl.iterator_flags &
1348 				    SCTP_ITERATOR_STOP_CUR_IT) {
1349 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1350 					goto done_with_iterator;
1351 				}
1352 				if (sctp_it_ctl.iterator_flags &
1353 				    SCTP_ITERATOR_STOP_CUR_INP) {
1354 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1355 					goto no_stcb;
1356 				}
1357 				/* If we reach here huh? */
1358 				printf("Unknown it ctl flag %x\n",
1359 				    sctp_it_ctl.iterator_flags);
1360 				sctp_it_ctl.iterator_flags = 0;
1361 			}
1362 			SCTP_INP_RLOCK(it->inp);
1363 			SCTP_INP_DECR_REF(it->inp);
1364 			SCTP_TCB_LOCK(it->stcb);
1365 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1366 			iteration_count = 0;
1367 		}
1368 		/* run function on this one */
1369 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1370 
1371 		/*
1372 		 * we lie here, it really needs to have its own type but
1373 		 * first I must verify that this won't effect things :-0
1374 		 */
1375 		if (it->no_chunk_output == 0)
1376 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1377 
1378 		SCTP_TCB_UNLOCK(it->stcb);
1379 next_assoc:
1380 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1381 		if (it->stcb == NULL) {
1382 			/* Run last function */
1383 			if (it->function_inp_end != NULL) {
1384 				inp_skip = (*it->function_inp_end) (it->inp,
1385 				    it->pointer,
1386 				    it->val);
1387 			}
1388 		}
1389 	}
1390 	SCTP_INP_RUNLOCK(it->inp);
1391 no_stcb:
1392 	/* done with all assocs on this endpoint, move on to next endpoint */
1393 	it->done_current_ep = 0;
1394 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1395 		it->inp = NULL;
1396 	} else {
1397 		it->inp = LIST_NEXT(it->inp, sctp_list);
1398 	}
1399 	if (it->inp == NULL) {
1400 		goto done_with_iterator;
1401 	}
1402 	goto select_a_new_ep;
1403 }
1404 
1405 void
1406 sctp_iterator_worker(void)
1407 {
1408 	struct sctp_iterator *it = NULL;
1409 
1410 	/* This function is called with the WQ lock in place */
1411 
1412 	sctp_it_ctl.iterator_running = 1;
1413 	sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1414 	while (it) {
1415 		/* now lets work on this one */
1416 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1417 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1418 		CURVNET_SET(it->vn);
1419 		sctp_iterator_work(it);
1420 
1421 		CURVNET_RESTORE();
1422 		SCTP_IPI_ITERATOR_WQ_LOCK();
1423 		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1424 			sctp_it_ctl.cur_it = NULL;
1425 			break;
1426 		}
1427 		/* sa_ignore FREED_MEMORY */
1428 		sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1429 	}
1430 	sctp_it_ctl.iterator_running = 0;
1431 	return;
1432 }
1433 
1434 
1435 static void
1436 sctp_handle_addr_wq(void)
1437 {
1438 	/* deal with the ADDR wq from the rtsock calls */
1439 	struct sctp_laddr *wi;
1440 	struct sctp_asconf_iterator *asc;
1441 
1442 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1443 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1444 	if (asc == NULL) {
1445 		/* Try later, no memory */
1446 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1447 		    (struct sctp_inpcb *)NULL,
1448 		    (struct sctp_tcb *)NULL,
1449 		    (struct sctp_nets *)NULL);
1450 		return;
1451 	}
1452 	LIST_INIT(&asc->list_of_work);
1453 	asc->cnt = 0;
1454 
1455 	SCTP_WQ_ADDR_LOCK();
1456 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1457 	while (wi != NULL) {
1458 		LIST_REMOVE(wi, sctp_nxt_addr);
1459 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1460 		asc->cnt++;
1461 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1462 	}
1463 	SCTP_WQ_ADDR_UNLOCK();
1464 
1465 	if (asc->cnt == 0) {
1466 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1467 	} else {
1468 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1469 		    sctp_asconf_iterator_stcb,
1470 		    NULL,	/* No ep end for boundall */
1471 		    SCTP_PCB_FLAGS_BOUNDALL,
1472 		    SCTP_PCB_ANY_FEATURES,
1473 		    SCTP_ASOC_ANY_STATE,
1474 		    (void *)asc, 0,
1475 		    sctp_asconf_iterator_end, NULL, 0);
1476 	}
1477 }
1478 
1479 int retcode = 0;
1480 int cur_oerr = 0;
1481 
1482 void
1483 sctp_timeout_handler(void *t)
1484 {
1485 	struct sctp_inpcb *inp;
1486 	struct sctp_tcb *stcb;
1487 	struct sctp_nets *net;
1488 	struct sctp_timer *tmr;
1489 
1490 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1491 	struct socket *so;
1492 
1493 #endif
1494 	int did_output, type;
1495 
1496 	tmr = (struct sctp_timer *)t;
1497 	inp = (struct sctp_inpcb *)tmr->ep;
1498 	stcb = (struct sctp_tcb *)tmr->tcb;
1499 	net = (struct sctp_nets *)tmr->net;
1500 	CURVNET_SET((struct vnet *)tmr->vnet);
1501 	did_output = 1;
1502 
1503 #ifdef SCTP_AUDITING_ENABLED
1504 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1505 	sctp_auditing(3, inp, stcb, net);
1506 #endif
1507 
1508 	/* sanity checks... */
1509 	if (tmr->self != (void *)tmr) {
1510 		/*
1511 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1512 		 * tmr);
1513 		 */
1514 		CURVNET_RESTORE();
1515 		return;
1516 	}
1517 	tmr->stopped_from = 0xa001;
1518 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1519 		/*
1520 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1521 		 * tmr->type);
1522 		 */
1523 		CURVNET_RESTORE();
1524 		return;
1525 	}
1526 	tmr->stopped_from = 0xa002;
1527 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1528 		CURVNET_RESTORE();
1529 		return;
1530 	}
1531 	/* if this is an iterator timeout, get the struct and clear inp */
1532 	tmr->stopped_from = 0xa003;
1533 	type = tmr->type;
1534 	if (inp) {
1535 		SCTP_INP_INCR_REF(inp);
1536 		if ((inp->sctp_socket == 0) &&
1537 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1538 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1539 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1540 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1541 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1542 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1543 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1544 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1545 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1546 		    ) {
1547 			SCTP_INP_DECR_REF(inp);
1548 			CURVNET_RESTORE();
1549 			return;
1550 		}
1551 	}
1552 	tmr->stopped_from = 0xa004;
1553 	if (stcb) {
1554 		atomic_add_int(&stcb->asoc.refcnt, 1);
1555 		if (stcb->asoc.state == 0) {
1556 			atomic_add_int(&stcb->asoc.refcnt, -1);
1557 			if (inp) {
1558 				SCTP_INP_DECR_REF(inp);
1559 			}
1560 			CURVNET_RESTORE();
1561 			return;
1562 		}
1563 	}
1564 	tmr->stopped_from = 0xa005;
1565 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1566 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1567 		if (inp) {
1568 			SCTP_INP_DECR_REF(inp);
1569 		}
1570 		if (stcb) {
1571 			atomic_add_int(&stcb->asoc.refcnt, -1);
1572 		}
1573 		CURVNET_RESTORE();
1574 		return;
1575 	}
1576 	tmr->stopped_from = 0xa006;
1577 
1578 	if (stcb) {
1579 		SCTP_TCB_LOCK(stcb);
1580 		atomic_add_int(&stcb->asoc.refcnt, -1);
1581 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1582 		    ((stcb->asoc.state == 0) ||
1583 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1584 			SCTP_TCB_UNLOCK(stcb);
1585 			if (inp) {
1586 				SCTP_INP_DECR_REF(inp);
1587 			}
1588 			CURVNET_RESTORE();
1589 			return;
1590 		}
1591 	}
1592 	/* record in stopped what t-o occured */
1593 	tmr->stopped_from = tmr->type;
1594 
1595 	/* mark as being serviced now */
1596 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1597 		/*
1598 		 * Callout has been rescheduled.
1599 		 */
1600 		goto get_out;
1601 	}
1602 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1603 		/*
1604 		 * Not active, so no action.
1605 		 */
1606 		goto get_out;
1607 	}
1608 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1609 
1610 	/* call the handler for the appropriate timer type */
1611 	switch (tmr->type) {
1612 	case SCTP_TIMER_TYPE_ZERO_COPY:
1613 		if (inp == NULL) {
1614 			break;
1615 		}
1616 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1617 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1618 		}
1619 		break;
1620 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1621 		if (inp == NULL) {
1622 			break;
1623 		}
1624 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1625 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1626 		}
1627 		break;
1628 	case SCTP_TIMER_TYPE_ADDR_WQ:
1629 		sctp_handle_addr_wq();
1630 		break;
1631 	case SCTP_TIMER_TYPE_SEND:
1632 		if ((stcb == NULL) || (inp == NULL)) {
1633 			break;
1634 		}
1635 		SCTP_STAT_INCR(sctps_timodata);
1636 		stcb->asoc.timodata++;
1637 		stcb->asoc.num_send_timers_up--;
1638 		if (stcb->asoc.num_send_timers_up < 0) {
1639 			stcb->asoc.num_send_timers_up = 0;
1640 		}
1641 		SCTP_TCB_LOCK_ASSERT(stcb);
1642 		cur_oerr = stcb->asoc.overall_error_count;
1643 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1644 		if (retcode) {
1645 			/* no need to unlock on tcb its gone */
1646 
1647 			goto out_decr;
1648 		}
1649 		SCTP_TCB_LOCK_ASSERT(stcb);
1650 #ifdef SCTP_AUDITING_ENABLED
1651 		sctp_auditing(4, inp, stcb, net);
1652 #endif
1653 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1654 		if ((stcb->asoc.num_send_timers_up == 0) &&
1655 		    (stcb->asoc.sent_queue_cnt > 0)
1656 		    ) {
1657 			struct sctp_tmit_chunk *chk;
1658 
1659 			/*
1660 			 * safeguard. If there on some on the sent queue
1661 			 * somewhere but no timers running something is
1662 			 * wrong... so we start a timer on the first chunk
1663 			 * on the send queue on whatever net it is sent to.
1664 			 */
1665 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1666 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1667 			    chk->whoTo);
1668 		}
1669 		break;
1670 	case SCTP_TIMER_TYPE_INIT:
1671 		if ((stcb == NULL) || (inp == NULL)) {
1672 			break;
1673 		}
1674 		SCTP_STAT_INCR(sctps_timoinit);
1675 		stcb->asoc.timoinit++;
1676 		if (sctp_t1init_timer(inp, stcb, net)) {
1677 			/* no need to unlock on tcb its gone */
1678 			goto out_decr;
1679 		}
1680 		/* We do output but not here */
1681 		did_output = 0;
1682 		break;
1683 	case SCTP_TIMER_TYPE_RECV:
1684 		if ((stcb == NULL) || (inp == NULL)) {
1685 			break;
1686 		} {
1687 			SCTP_STAT_INCR(sctps_timosack);
1688 			stcb->asoc.timosack++;
1689 			sctp_send_sack(stcb);
1690 		}
1691 #ifdef SCTP_AUDITING_ENABLED
1692 		sctp_auditing(4, inp, stcb, net);
1693 #endif
1694 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1695 		break;
1696 	case SCTP_TIMER_TYPE_SHUTDOWN:
1697 		if ((stcb == NULL) || (inp == NULL)) {
1698 			break;
1699 		}
1700 		if (sctp_shutdown_timer(inp, stcb, net)) {
1701 			/* no need to unlock on tcb its gone */
1702 			goto out_decr;
1703 		}
1704 		SCTP_STAT_INCR(sctps_timoshutdown);
1705 		stcb->asoc.timoshutdown++;
1706 #ifdef SCTP_AUDITING_ENABLED
1707 		sctp_auditing(4, inp, stcb, net);
1708 #endif
1709 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1710 		break;
1711 	case SCTP_TIMER_TYPE_HEARTBEAT:
1712 		{
1713 			struct sctp_nets *lnet;
1714 			int cnt_of_unconf = 0;
1715 
1716 			if ((stcb == NULL) || (inp == NULL)) {
1717 				break;
1718 			}
1719 			SCTP_STAT_INCR(sctps_timoheartbeat);
1720 			stcb->asoc.timoheartbeat++;
1721 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1722 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1723 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1724 					cnt_of_unconf++;
1725 				}
1726 			}
1727 			if (cnt_of_unconf == 0) {
1728 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1729 				    cnt_of_unconf)) {
1730 					/* no need to unlock on tcb its gone */
1731 					goto out_decr;
1732 				}
1733 			}
1734 #ifdef SCTP_AUDITING_ENABLED
1735 			sctp_auditing(4, inp, stcb, lnet);
1736 #endif
1737 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1738 			    stcb->sctp_ep, stcb, lnet);
1739 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1740 		}
1741 		break;
1742 	case SCTP_TIMER_TYPE_COOKIE:
1743 		if ((stcb == NULL) || (inp == NULL)) {
1744 			break;
1745 		}
1746 		if (sctp_cookie_timer(inp, stcb, net)) {
1747 			/* no need to unlock on tcb its gone */
1748 			goto out_decr;
1749 		}
1750 		SCTP_STAT_INCR(sctps_timocookie);
1751 		stcb->asoc.timocookie++;
1752 #ifdef SCTP_AUDITING_ENABLED
1753 		sctp_auditing(4, inp, stcb, net);
1754 #endif
1755 		/*
1756 		 * We consider T3 and Cookie timer pretty much the same with
1757 		 * respect to where from in chunk_output.
1758 		 */
1759 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1760 		break;
1761 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1762 		{
1763 			struct timeval tv;
1764 			int i, secret;
1765 
1766 			if (inp == NULL) {
1767 				break;
1768 			}
1769 			SCTP_STAT_INCR(sctps_timosecret);
1770 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1771 			SCTP_INP_WLOCK(inp);
1772 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1773 			inp->sctp_ep.last_secret_number =
1774 			    inp->sctp_ep.current_secret_number;
1775 			inp->sctp_ep.current_secret_number++;
1776 			if (inp->sctp_ep.current_secret_number >=
1777 			    SCTP_HOW_MANY_SECRETS) {
1778 				inp->sctp_ep.current_secret_number = 0;
1779 			}
1780 			secret = (int)inp->sctp_ep.current_secret_number;
1781 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1782 				inp->sctp_ep.secret_key[secret][i] =
1783 				    sctp_select_initial_TSN(&inp->sctp_ep);
1784 			}
1785 			SCTP_INP_WUNLOCK(inp);
1786 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1787 		}
1788 		did_output = 0;
1789 		break;
1790 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1791 		if ((stcb == NULL) || (inp == NULL)) {
1792 			break;
1793 		}
1794 		SCTP_STAT_INCR(sctps_timopathmtu);
1795 		sctp_pathmtu_timer(inp, stcb, net);
1796 		did_output = 0;
1797 		break;
1798 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1799 		if ((stcb == NULL) || (inp == NULL)) {
1800 			break;
1801 		}
1802 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1803 			/* no need to unlock on tcb its gone */
1804 			goto out_decr;
1805 		}
1806 		SCTP_STAT_INCR(sctps_timoshutdownack);
1807 		stcb->asoc.timoshutdownack++;
1808 #ifdef SCTP_AUDITING_ENABLED
1809 		sctp_auditing(4, inp, stcb, net);
1810 #endif
1811 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1812 		break;
1813 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1814 		if ((stcb == NULL) || (inp == NULL)) {
1815 			break;
1816 		}
1817 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1818 		sctp_abort_an_association(inp, stcb,
1819 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1820 		/* no need to unlock on tcb its gone */
1821 		goto out_decr;
1822 
1823 	case SCTP_TIMER_TYPE_STRRESET:
1824 		if ((stcb == NULL) || (inp == NULL)) {
1825 			break;
1826 		}
1827 		if (sctp_strreset_timer(inp, stcb, net)) {
1828 			/* no need to unlock on tcb its gone */
1829 			goto out_decr;
1830 		}
1831 		SCTP_STAT_INCR(sctps_timostrmrst);
1832 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1833 		break;
1834 	case SCTP_TIMER_TYPE_EARLYFR:
1835 		/* Need to do FR of things for net */
1836 		if ((stcb == NULL) || (inp == NULL)) {
1837 			break;
1838 		}
1839 		SCTP_STAT_INCR(sctps_timoearlyfr);
1840 		sctp_early_fr_timer(inp, stcb, net);
1841 		break;
1842 	case SCTP_TIMER_TYPE_ASCONF:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		if (sctp_asconf_timer(inp, stcb, net)) {
1847 			/* no need to unlock on tcb its gone */
1848 			goto out_decr;
1849 		}
1850 		SCTP_STAT_INCR(sctps_timoasconf);
1851 #ifdef SCTP_AUDITING_ENABLED
1852 		sctp_auditing(4, inp, stcb, net);
1853 #endif
1854 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1855 		break;
1856 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		sctp_delete_prim_timer(inp, stcb, net);
1861 		SCTP_STAT_INCR(sctps_timodelprim);
1862 		break;
1863 
1864 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1865 		if ((stcb == NULL) || (inp == NULL)) {
1866 			break;
1867 		}
1868 		SCTP_STAT_INCR(sctps_timoautoclose);
1869 		sctp_autoclose_timer(inp, stcb, net);
1870 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1871 		did_output = 0;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ASOCKILL:
1874 		if ((stcb == NULL) || (inp == NULL)) {
1875 			break;
1876 		}
1877 		SCTP_STAT_INCR(sctps_timoassockill);
1878 		/* Can we free it yet? */
1879 		SCTP_INP_DECR_REF(inp);
1880 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1881 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1882 		so = SCTP_INP_SO(inp);
1883 		atomic_add_int(&stcb->asoc.refcnt, 1);
1884 		SCTP_TCB_UNLOCK(stcb);
1885 		SCTP_SOCKET_LOCK(so, 1);
1886 		SCTP_TCB_LOCK(stcb);
1887 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1888 #endif
1889 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1890 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1891 		SCTP_SOCKET_UNLOCK(so, 1);
1892 #endif
1893 		/*
1894 		 * free asoc, always unlocks (or destroy's) so prevent
1895 		 * duplicate unlock or unlock of a free mtx :-0
1896 		 */
1897 		stcb = NULL;
1898 		goto out_no_decr;
1899 	case SCTP_TIMER_TYPE_INPKILL:
1900 		SCTP_STAT_INCR(sctps_timoinpkill);
1901 		if (inp == NULL) {
1902 			break;
1903 		}
1904 		/*
1905 		 * special case, take away our increment since WE are the
1906 		 * killer
1907 		 */
1908 		SCTP_INP_DECR_REF(inp);
1909 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1910 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1911 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1912 		inp = NULL;
1913 		goto out_no_decr;
1914 	default:
1915 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1916 		    tmr->type);
1917 		break;
1918 	};
1919 #ifdef SCTP_AUDITING_ENABLED
1920 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1921 	if (inp)
1922 		sctp_auditing(5, inp, stcb, net);
1923 #endif
1924 	if ((did_output) && stcb) {
1925 		/*
1926 		 * Now we need to clean up the control chunk chain if an
1927 		 * ECNE is on it. It must be marked as UNSENT again so next
1928 		 * call will continue to send it until such time that we get
1929 		 * a CWR, to remove it. It is, however, less likely that we
1930 		 * will find a ecn echo on the chain though.
1931 		 */
1932 		sctp_fix_ecn_echo(&stcb->asoc);
1933 	}
1934 get_out:
1935 	if (stcb) {
1936 		SCTP_TCB_UNLOCK(stcb);
1937 	}
1938 out_decr:
1939 	if (inp) {
1940 		SCTP_INP_DECR_REF(inp);
1941 	}
1942 out_no_decr:
1943 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1944 	    type);
1945 	CURVNET_RESTORE();
1946 }
1947 
1948 void
1949 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1950     struct sctp_nets *net)
1951 {
1952 	int to_ticks;
1953 	struct sctp_timer *tmr;
1954 
1955 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1956 		return;
1957 
1958 	to_ticks = 0;
1959 
1960 	tmr = NULL;
1961 	if (stcb) {
1962 		SCTP_TCB_LOCK_ASSERT(stcb);
1963 	}
1964 	switch (t_type) {
1965 	case SCTP_TIMER_TYPE_ZERO_COPY:
1966 		tmr = &inp->sctp_ep.zero_copy_timer;
1967 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968 		break;
1969 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ADDR_WQ:
1974 		/* Only 1 tick away :-) */
1975 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977 		break;
1978 	case SCTP_TIMER_TYPE_SEND:
1979 		/* Here we use the RTO timer */
1980 		{
1981 			int rto_val;
1982 
1983 			if ((stcb == NULL) || (net == NULL)) {
1984 				return;
1985 			}
1986 			tmr = &net->rxt_timer;
1987 			if (net->RTO == 0) {
1988 				rto_val = stcb->asoc.initial_rto;
1989 			} else {
1990 				rto_val = net->RTO;
1991 			}
1992 			to_ticks = MSEC_TO_TICKS(rto_val);
1993 		}
1994 		break;
1995 	case SCTP_TIMER_TYPE_INIT:
1996 		/*
1997 		 * Here we use the INIT timer default usually about 1
1998 		 * minute.
1999 		 */
2000 		if ((stcb == NULL) || (net == NULL)) {
2001 			return;
2002 		}
2003 		tmr = &net->rxt_timer;
2004 		if (net->RTO == 0) {
2005 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 		} else {
2007 			to_ticks = MSEC_TO_TICKS(net->RTO);
2008 		}
2009 		break;
2010 	case SCTP_TIMER_TYPE_RECV:
2011 		/*
2012 		 * Here we use the Delayed-Ack timer value from the inp
2013 		 * ususually about 200ms.
2014 		 */
2015 		if (stcb == NULL) {
2016 			return;
2017 		}
2018 		tmr = &stcb->asoc.dack_timer;
2019 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020 		break;
2021 	case SCTP_TIMER_TYPE_SHUTDOWN:
2022 		/* Here we use the RTO of the destination. */
2023 		if ((stcb == NULL) || (net == NULL)) {
2024 			return;
2025 		}
2026 		if (net->RTO == 0) {
2027 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028 		} else {
2029 			to_ticks = MSEC_TO_TICKS(net->RTO);
2030 		}
2031 		tmr = &net->rxt_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_HEARTBEAT:
2034 		/*
2035 		 * the net is used here so that we can add in the RTO. Even
2036 		 * though we use a different timer. We also add the HB timer
2037 		 * PLUS a random jitter.
2038 		 */
2039 		if ((inp == NULL) || (stcb == NULL)) {
2040 			return;
2041 		} else {
2042 			uint32_t rndval;
2043 			uint8_t this_random;
2044 			int cnt_of_unconf = 0;
2045 			struct sctp_nets *lnet;
2046 
2047 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2048 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2049 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2050 					cnt_of_unconf++;
2051 				}
2052 			}
2053 			if (cnt_of_unconf) {
2054 				net = lnet = NULL;
2055 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2056 			}
2057 			if (stcb->asoc.hb_random_idx > 3) {
2058 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2059 				memcpy(stcb->asoc.hb_random_values, &rndval,
2060 				    sizeof(stcb->asoc.hb_random_values));
2061 				stcb->asoc.hb_random_idx = 0;
2062 			}
2063 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2064 			stcb->asoc.hb_random_idx++;
2065 			stcb->asoc.hb_ect_randombit = 0;
2066 			/*
2067 			 * this_random will be 0 - 256 ms RTO is in ms.
2068 			 */
2069 			if ((stcb->asoc.hb_is_disabled) &&
2070 			    (cnt_of_unconf == 0)) {
2071 				return;
2072 			}
2073 			if (net) {
2074 				int delay;
2075 
2076 				delay = stcb->asoc.heart_beat_delay;
2077 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2078 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2079 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2080 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2081 						delay = 0;
2082 					}
2083 				}
2084 				if (net->RTO == 0) {
2085 					/* Never been checked */
2086 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2087 				} else {
2088 					/* set rto_val to the ms */
2089 					to_ticks = delay + net->RTO + this_random;
2090 				}
2091 			} else {
2092 				if (cnt_of_unconf) {
2093 					to_ticks = this_random + stcb->asoc.initial_rto;
2094 				} else {
2095 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2096 				}
2097 			}
2098 			/*
2099 			 * Now we must convert the to_ticks that are now in
2100 			 * ms to ticks.
2101 			 */
2102 			to_ticks = MSEC_TO_TICKS(to_ticks);
2103 			tmr = &stcb->asoc.hb_timer;
2104 		}
2105 		break;
2106 	case SCTP_TIMER_TYPE_COOKIE:
2107 		/*
2108 		 * Here we can use the RTO timer from the network since one
2109 		 * RTT was compelete. If a retran happened then we will be
2110 		 * using the RTO initial value.
2111 		 */
2112 		if ((stcb == NULL) || (net == NULL)) {
2113 			return;
2114 		}
2115 		if (net->RTO == 0) {
2116 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2117 		} else {
2118 			to_ticks = MSEC_TO_TICKS(net->RTO);
2119 		}
2120 		tmr = &net->rxt_timer;
2121 		break;
2122 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2123 		/*
2124 		 * nothing needed but the endpoint here ususually about 60
2125 		 * minutes.
2126 		 */
2127 		if (inp == NULL) {
2128 			return;
2129 		}
2130 		tmr = &inp->sctp_ep.signature_change;
2131 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2132 		break;
2133 	case SCTP_TIMER_TYPE_ASOCKILL:
2134 		if (stcb == NULL) {
2135 			return;
2136 		}
2137 		tmr = &stcb->asoc.strreset_timer;
2138 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2139 		break;
2140 	case SCTP_TIMER_TYPE_INPKILL:
2141 		/*
2142 		 * The inp is setup to die. We re-use the signature_chage
2143 		 * timer since that has stopped and we are in the GONE
2144 		 * state.
2145 		 */
2146 		if (inp == NULL) {
2147 			return;
2148 		}
2149 		tmr = &inp->sctp_ep.signature_change;
2150 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2151 		break;
2152 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2153 		/*
2154 		 * Here we use the value found in the EP for PMTU ususually
2155 		 * about 10 minutes.
2156 		 */
2157 		if ((stcb == NULL) || (inp == NULL)) {
2158 			return;
2159 		}
2160 		if (net == NULL) {
2161 			return;
2162 		}
2163 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2164 		tmr = &net->pmtu_timer;
2165 		break;
2166 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2167 		/* Here we use the RTO of the destination */
2168 		if ((stcb == NULL) || (net == NULL)) {
2169 			return;
2170 		}
2171 		if (net->RTO == 0) {
2172 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2173 		} else {
2174 			to_ticks = MSEC_TO_TICKS(net->RTO);
2175 		}
2176 		tmr = &net->rxt_timer;
2177 		break;
2178 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2179 		/*
2180 		 * Here we use the endpoints shutdown guard timer usually
2181 		 * about 3 minutes.
2182 		 */
2183 		if ((inp == NULL) || (stcb == NULL)) {
2184 			return;
2185 		}
2186 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2187 		tmr = &stcb->asoc.shut_guard_timer;
2188 		break;
2189 	case SCTP_TIMER_TYPE_STRRESET:
2190 		/*
2191 		 * Here the timer comes from the stcb but its value is from
2192 		 * the net's RTO.
2193 		 */
2194 		if ((stcb == NULL) || (net == NULL)) {
2195 			return;
2196 		}
2197 		if (net->RTO == 0) {
2198 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2199 		} else {
2200 			to_ticks = MSEC_TO_TICKS(net->RTO);
2201 		}
2202 		tmr = &stcb->asoc.strreset_timer;
2203 		break;
2204 
2205 	case SCTP_TIMER_TYPE_EARLYFR:
2206 		{
2207 			unsigned int msec;
2208 
2209 			if ((stcb == NULL) || (net == NULL)) {
2210 				return;
2211 			}
2212 			if (net->flight_size > net->cwnd) {
2213 				/* no need to start */
2214 				return;
2215 			}
2216 			SCTP_STAT_INCR(sctps_earlyfrstart);
2217 			if (net->lastsa == 0) {
2218 				/* Hmm no rtt estimate yet? */
2219 				msec = stcb->asoc.initial_rto >> 2;
2220 			} else {
2221 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2222 			}
2223 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2224 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2225 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2226 					msec = SCTP_MINFR_MSEC_FLOOR;
2227 				}
2228 			}
2229 			to_ticks = MSEC_TO_TICKS(msec);
2230 			tmr = &net->fr_timer;
2231 		}
2232 		break;
2233 	case SCTP_TIMER_TYPE_ASCONF:
2234 		/*
2235 		 * Here the timer comes from the stcb but its value is from
2236 		 * the net's RTO.
2237 		 */
2238 		if ((stcb == NULL) || (net == NULL)) {
2239 			return;
2240 		}
2241 		if (net->RTO == 0) {
2242 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2243 		} else {
2244 			to_ticks = MSEC_TO_TICKS(net->RTO);
2245 		}
2246 		tmr = &stcb->asoc.asconf_timer;
2247 		break;
2248 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2249 		if ((stcb == NULL) || (net != NULL)) {
2250 			return;
2251 		}
2252 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2253 		tmr = &stcb->asoc.delete_prim_timer;
2254 		break;
2255 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2256 		if (stcb == NULL) {
2257 			return;
2258 		}
2259 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2260 			/*
2261 			 * Really an error since stcb is NOT set to
2262 			 * autoclose
2263 			 */
2264 			return;
2265 		}
2266 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2267 		tmr = &stcb->asoc.autoclose_timer;
2268 		break;
2269 	default:
2270 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2271 		    __FUNCTION__, t_type);
2272 		return;
2273 		break;
2274 	};
2275 	if ((to_ticks <= 0) || (tmr == NULL)) {
2276 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2277 		    __FUNCTION__, t_type, to_ticks, tmr);
2278 		return;
2279 	}
2280 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2281 		/*
2282 		 * we do NOT allow you to have it already running. if it is
2283 		 * we leave the current one up unchanged
2284 		 */
2285 		return;
2286 	}
2287 	/* At this point we can proceed */
2288 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2289 		stcb->asoc.num_send_timers_up++;
2290 	}
2291 	tmr->stopped_from = 0;
2292 	tmr->type = t_type;
2293 	tmr->ep = (void *)inp;
2294 	tmr->tcb = (void *)stcb;
2295 	tmr->net = (void *)net;
2296 	tmr->self = (void *)tmr;
2297 	tmr->vnet = (void *)curvnet;
2298 	tmr->ticks = sctp_get_tick_count();
2299 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2300 	return;
2301 }
2302 
2303 void
2304 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2305     struct sctp_nets *net, uint32_t from)
2306 {
2307 	struct sctp_timer *tmr;
2308 
2309 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2310 	    (inp == NULL))
2311 		return;
2312 
2313 	tmr = NULL;
2314 	if (stcb) {
2315 		SCTP_TCB_LOCK_ASSERT(stcb);
2316 	}
2317 	switch (t_type) {
2318 	case SCTP_TIMER_TYPE_ZERO_COPY:
2319 		tmr = &inp->sctp_ep.zero_copy_timer;
2320 		break;
2321 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2322 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2323 		break;
2324 	case SCTP_TIMER_TYPE_ADDR_WQ:
2325 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2326 		break;
2327 	case SCTP_TIMER_TYPE_EARLYFR:
2328 		if ((stcb == NULL) || (net == NULL)) {
2329 			return;
2330 		}
2331 		tmr = &net->fr_timer;
2332 		SCTP_STAT_INCR(sctps_earlyfrstop);
2333 		break;
2334 	case SCTP_TIMER_TYPE_SEND:
2335 		if ((stcb == NULL) || (net == NULL)) {
2336 			return;
2337 		}
2338 		tmr = &net->rxt_timer;
2339 		break;
2340 	case SCTP_TIMER_TYPE_INIT:
2341 		if ((stcb == NULL) || (net == NULL)) {
2342 			return;
2343 		}
2344 		tmr = &net->rxt_timer;
2345 		break;
2346 	case SCTP_TIMER_TYPE_RECV:
2347 		if (stcb == NULL) {
2348 			return;
2349 		}
2350 		tmr = &stcb->asoc.dack_timer;
2351 		break;
2352 	case SCTP_TIMER_TYPE_SHUTDOWN:
2353 		if ((stcb == NULL) || (net == NULL)) {
2354 			return;
2355 		}
2356 		tmr = &net->rxt_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_HEARTBEAT:
2359 		if (stcb == NULL) {
2360 			return;
2361 		}
2362 		tmr = &stcb->asoc.hb_timer;
2363 		break;
2364 	case SCTP_TIMER_TYPE_COOKIE:
2365 		if ((stcb == NULL) || (net == NULL)) {
2366 			return;
2367 		}
2368 		tmr = &net->rxt_timer;
2369 		break;
2370 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2371 		/* nothing needed but the endpoint here */
2372 		tmr = &inp->sctp_ep.signature_change;
2373 		/*
2374 		 * We re-use the newcookie timer for the INP kill timer. We
2375 		 * must assure that we do not kill it by accident.
2376 		 */
2377 		break;
2378 	case SCTP_TIMER_TYPE_ASOCKILL:
2379 		/*
2380 		 * Stop the asoc kill timer.
2381 		 */
2382 		if (stcb == NULL) {
2383 			return;
2384 		}
2385 		tmr = &stcb->asoc.strreset_timer;
2386 		break;
2387 
2388 	case SCTP_TIMER_TYPE_INPKILL:
2389 		/*
2390 		 * The inp is setup to die. We re-use the signature_chage
2391 		 * timer since that has stopped and we are in the GONE
2392 		 * state.
2393 		 */
2394 		tmr = &inp->sctp_ep.signature_change;
2395 		break;
2396 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2397 		if ((stcb == NULL) || (net == NULL)) {
2398 			return;
2399 		}
2400 		tmr = &net->pmtu_timer;
2401 		break;
2402 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2403 		if ((stcb == NULL) || (net == NULL)) {
2404 			return;
2405 		}
2406 		tmr = &net->rxt_timer;
2407 		break;
2408 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2409 		if (stcb == NULL) {
2410 			return;
2411 		}
2412 		tmr = &stcb->asoc.shut_guard_timer;
2413 		break;
2414 	case SCTP_TIMER_TYPE_STRRESET:
2415 		if (stcb == NULL) {
2416 			return;
2417 		}
2418 		tmr = &stcb->asoc.strreset_timer;
2419 		break;
2420 	case SCTP_TIMER_TYPE_ASCONF:
2421 		if (stcb == NULL) {
2422 			return;
2423 		}
2424 		tmr = &stcb->asoc.asconf_timer;
2425 		break;
2426 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2427 		if (stcb == NULL) {
2428 			return;
2429 		}
2430 		tmr = &stcb->asoc.delete_prim_timer;
2431 		break;
2432 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2433 		if (stcb == NULL) {
2434 			return;
2435 		}
2436 		tmr = &stcb->asoc.autoclose_timer;
2437 		break;
2438 	default:
2439 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2440 		    __FUNCTION__, t_type);
2441 		break;
2442 	};
2443 	if (tmr == NULL) {
2444 		return;
2445 	}
2446 	if ((tmr->type != t_type) && tmr->type) {
2447 		/*
2448 		 * Ok we have a timer that is under joint use. Cookie timer
2449 		 * per chance with the SEND timer. We therefore are NOT
2450 		 * running the timer that the caller wants stopped.  So just
2451 		 * return.
2452 		 */
2453 		return;
2454 	}
2455 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2456 		stcb->asoc.num_send_timers_up--;
2457 		if (stcb->asoc.num_send_timers_up < 0) {
2458 			stcb->asoc.num_send_timers_up = 0;
2459 		}
2460 	}
2461 	tmr->self = NULL;
2462 	tmr->stopped_from = from;
2463 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2464 	return;
2465 }
2466 
2467 uint32_t
2468 sctp_calculate_len(struct mbuf *m)
2469 {
2470 	uint32_t tlen = 0;
2471 	struct mbuf *at;
2472 
2473 	at = m;
2474 	while (at) {
2475 		tlen += SCTP_BUF_LEN(at);
2476 		at = SCTP_BUF_NEXT(at);
2477 	}
2478 	return (tlen);
2479 }
2480 
2481 void
2482 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2483     struct sctp_association *asoc, uint32_t mtu)
2484 {
2485 	/*
2486 	 * Reset the P-MTU size on this association, this involves changing
2487 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2488 	 * allow the DF flag to be cleared.
2489 	 */
2490 	struct sctp_tmit_chunk *chk;
2491 	unsigned int eff_mtu, ovh;
2492 
2493 #ifdef SCTP_PRINT_FOR_B_AND_M
2494 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2495 	    inp, asoc, mtu);
2496 #endif
2497 	asoc->smallest_mtu = mtu;
2498 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2499 		ovh = SCTP_MIN_OVERHEAD;
2500 	} else {
2501 		ovh = SCTP_MIN_V4_OVERHEAD;
2502 	}
2503 	eff_mtu = mtu - ovh;
2504 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2505 
2506 		if (chk->send_size > eff_mtu) {
2507 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2508 		}
2509 	}
2510 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2511 		if (chk->send_size > eff_mtu) {
2512 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2513 		}
2514 	}
2515 }
2516 
2517 
2518 /*
2519  * given an association and starting time of the current RTT period return
2520  * RTO in number of msecs net should point to the current network
2521  */
2522 uint32_t
2523 sctp_calculate_rto(struct sctp_tcb *stcb,
2524     struct sctp_association *asoc,
2525     struct sctp_nets *net,
2526     struct timeval *told,
2527     int safe)
2528 {
2529 	/*-
2530 	 * given an association and the starting time of the current RTT
2531 	 * period (in value1/value2) return RTO in number of msecs.
2532 	 */
2533 	int calc_time = 0;
2534 	int o_calctime;
2535 	uint32_t new_rto = 0;
2536 	int first_measure = 0;
2537 	struct timeval now, then, *old;
2538 
2539 	/* Copy it out for sparc64 */
2540 	if (safe == sctp_align_unsafe_makecopy) {
2541 		old = &then;
2542 		memcpy(&then, told, sizeof(struct timeval));
2543 	} else if (safe == sctp_align_safe_nocopy) {
2544 		old = told;
2545 	} else {
2546 		/* error */
2547 		SCTP_PRINTF("Huh, bad rto calc call\n");
2548 		return (0);
2549 	}
2550 	/************************/
2551 	/* 1. calculate new RTT */
2552 	/************************/
2553 	/* get the current time */
2554 	(void)SCTP_GETTIME_TIMEVAL(&now);
2555 	/* compute the RTT value */
2556 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2557 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2558 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2559 			calc_time += (((u_long)now.tv_usec -
2560 			    (u_long)old->tv_usec) / 1000);
2561 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2562 			/* Borrow 1,000ms from current calculation */
2563 			calc_time -= 1000;
2564 			/* Add in the slop over */
2565 			calc_time += ((int)now.tv_usec / 1000);
2566 			/* Add in the pre-second ms's */
2567 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2568 		}
2569 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2570 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2571 			calc_time = ((u_long)now.tv_usec -
2572 			    (u_long)old->tv_usec) / 1000;
2573 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2574 			/* impossible .. garbage in nothing out */
2575 			goto calc_rto;
2576 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2577 			/*
2578 			 * We have to have 1 usec :-D this must be the
2579 			 * loopback.
2580 			 */
2581 			calc_time = 1;
2582 		} else {
2583 			/* impossible .. garbage in nothing out */
2584 			goto calc_rto;
2585 		}
2586 	} else {
2587 		/* Clock wrapped? */
2588 		goto calc_rto;
2589 	}
2590 	/***************************/
2591 	/* 2. update RTTVAR & SRTT */
2592 	/***************************/
2593 	net->rtt = o_calctime = calc_time;
2594 	/* this is Van Jacobson's integer version */
2595 	if (net->RTO_measured) {
2596 		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
2597 								 * shift=3 */
2598 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2599 			rto_logging(net, SCTP_LOG_RTTVAR);
2600 		}
2601 		net->prev_rtt = o_calctime;
2602 		net->lastsa += calc_time;	/* add 7/8th into sa when
2603 						 * shift=3 */
2604 		if (calc_time < 0) {
2605 			calc_time = -calc_time;
2606 		}
2607 		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
2608 									 * VAR shift=2 */
2609 		net->lastsv += calc_time;
2610 		if (net->lastsv == 0) {
2611 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2612 		}
2613 	} else {
2614 		/* First RTO measurment */
2615 		net->RTO_measured = 1;
2616 		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
2617 								 * shift=3 */
2618 		net->lastsv = calc_time;
2619 		if (net->lastsv == 0) {
2620 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2621 		}
2622 		first_measure = 1;
2623 		net->prev_rtt = o_calctime;
2624 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2625 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2626 		}
2627 	}
2628 calc_rto:
2629 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2630 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2631 	    (stcb->asoc.sat_network_lockout == 0)) {
2632 		stcb->asoc.sat_network = 1;
2633 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2634 		stcb->asoc.sat_network = 0;
2635 		stcb->asoc.sat_network_lockout = 1;
2636 	}
2637 	/* bound it, per C6/C7 in Section 5.3.1 */
2638 	if (new_rto < stcb->asoc.minrto) {
2639 		new_rto = stcb->asoc.minrto;
2640 	}
2641 	if (new_rto > stcb->asoc.maxrto) {
2642 		new_rto = stcb->asoc.maxrto;
2643 	}
2644 	/* we are now returning the RTO */
2645 	return (new_rto);
2646 }
2647 
2648 /*
2649  * return a pointer to a contiguous piece of data from the given mbuf chain
2650  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2651  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2652  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2653  */
2654 caddr_t
2655 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2656 {
2657 	uint32_t count;
2658 	uint8_t *ptr;
2659 
2660 	ptr = in_ptr;
2661 	if ((off < 0) || (len <= 0))
2662 		return (NULL);
2663 
2664 	/* find the desired start location */
2665 	while ((m != NULL) && (off > 0)) {
2666 		if (off < SCTP_BUF_LEN(m))
2667 			break;
2668 		off -= SCTP_BUF_LEN(m);
2669 		m = SCTP_BUF_NEXT(m);
2670 	}
2671 	if (m == NULL)
2672 		return (NULL);
2673 
2674 	/* is the current mbuf large enough (eg. contiguous)? */
2675 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2676 		return (mtod(m, caddr_t)+off);
2677 	} else {
2678 		/* else, it spans more than one mbuf, so save a temp copy... */
2679 		while ((m != NULL) && (len > 0)) {
2680 			count = min(SCTP_BUF_LEN(m) - off, len);
2681 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2682 			len -= count;
2683 			ptr += count;
2684 			off = 0;
2685 			m = SCTP_BUF_NEXT(m);
2686 		}
2687 		if ((m == NULL) && (len > 0))
2688 			return (NULL);
2689 		else
2690 			return ((caddr_t)in_ptr);
2691 	}
2692 }
2693 
2694 
2695 
2696 struct sctp_paramhdr *
2697 sctp_get_next_param(struct mbuf *m,
2698     int offset,
2699     struct sctp_paramhdr *pull,
2700     int pull_limit)
2701 {
2702 	/* This just provides a typed signature to Peter's Pull routine */
2703 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2704 	    (uint8_t *) pull));
2705 }
2706 
2707 
2708 int
2709 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2710 {
2711 	/*
2712 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2713 	 * padlen is > 3 this routine will fail.
2714 	 */
2715 	uint8_t *dp;
2716 	int i;
2717 
2718 	if (padlen > 3) {
2719 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2720 		return (ENOBUFS);
2721 	}
2722 	if (padlen <= M_TRAILINGSPACE(m)) {
2723 		/*
2724 		 * The easy way. We hope the majority of the time we hit
2725 		 * here :)
2726 		 */
2727 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2728 		SCTP_BUF_LEN(m) += padlen;
2729 	} else {
2730 		/* Hard way we must grow the mbuf */
2731 		struct mbuf *tmp;
2732 
2733 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2734 		if (tmp == NULL) {
2735 			/* Out of space GAK! we are in big trouble. */
2736 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2737 			return (ENOSPC);
2738 		}
2739 		/* setup and insert in middle */
2740 		SCTP_BUF_LEN(tmp) = padlen;
2741 		SCTP_BUF_NEXT(tmp) = NULL;
2742 		SCTP_BUF_NEXT(m) = tmp;
2743 		dp = mtod(tmp, uint8_t *);
2744 	}
2745 	/* zero out the pad */
2746 	for (i = 0; i < padlen; i++) {
2747 		*dp = 0;
2748 		dp++;
2749 	}
2750 	return (0);
2751 }
2752 
2753 int
2754 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2755 {
2756 	/* find the last mbuf in chain and pad it */
2757 	struct mbuf *m_at;
2758 
2759 	m_at = m;
2760 	if (last_mbuf) {
2761 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2762 	} else {
2763 		while (m_at) {
2764 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2765 				return (sctp_add_pad_tombuf(m_at, padval));
2766 			}
2767 			m_at = SCTP_BUF_NEXT(m_at);
2768 		}
2769 	}
2770 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2771 	return (EFAULT);
2772 }
2773 
2774 static void
2775 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
2776     uint32_t error, void *data, int so_locked
2777 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2778     SCTP_UNUSED
2779 #endif
2780 )
2781 {
2782 	struct mbuf *m_notify;
2783 	struct sctp_assoc_change *sac;
2784 	struct sctp_queued_to_read *control;
2785 
2786 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2787 	struct socket *so;
2788 
2789 #endif
2790 
2791 	/*
2792 	 * For TCP model AND UDP connected sockets we will send an error up
2793 	 * when an ABORT comes in.
2794 	 */
2795 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2796 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2797 	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
2798 		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2799 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2800 			stcb->sctp_socket->so_error = ECONNREFUSED;
2801 		} else {
2802 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2803 			stcb->sctp_socket->so_error = ECONNRESET;
2804 		}
2805 		/* Wake ANY sleepers */
2806 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2807 		so = SCTP_INP_SO(stcb->sctp_ep);
2808 		if (!so_locked) {
2809 			atomic_add_int(&stcb->asoc.refcnt, 1);
2810 			SCTP_TCB_UNLOCK(stcb);
2811 			SCTP_SOCKET_LOCK(so, 1);
2812 			SCTP_TCB_LOCK(stcb);
2813 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2814 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2815 				SCTP_SOCKET_UNLOCK(so, 1);
2816 				return;
2817 			}
2818 		}
2819 #endif
2820 		sorwakeup(stcb->sctp_socket);
2821 		sowwakeup(stcb->sctp_socket);
2822 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2823 		if (!so_locked) {
2824 			SCTP_SOCKET_UNLOCK(so, 1);
2825 		}
2826 #endif
2827 	}
2828 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2829 		/* event not enabled */
2830 		return;
2831 	}
2832 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
2833 	if (m_notify == NULL)
2834 		/* no space left */
2835 		return;
2836 	SCTP_BUF_LEN(m_notify) = 0;
2837 
2838 	sac = mtod(m_notify, struct sctp_assoc_change *);
2839 	sac->sac_type = SCTP_ASSOC_CHANGE;
2840 	sac->sac_flags = 0;
2841 	sac->sac_length = sizeof(struct sctp_assoc_change);
2842 	sac->sac_state = event;
2843 	sac->sac_error = error;
2844 	/* XXX verify these stream counts */
2845 	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2846 	sac->sac_inbound_streams = stcb->asoc.streamincnt;
2847 	sac->sac_assoc_id = sctp_get_associd(stcb);
2848 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
2849 	SCTP_BUF_NEXT(m_notify) = NULL;
2850 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2851 	    0, 0, 0, 0, 0, 0,
2852 	    m_notify);
2853 	if (control == NULL) {
2854 		/* no memory */
2855 		sctp_m_freem(m_notify);
2856 		return;
2857 	}
2858 	control->length = SCTP_BUF_LEN(m_notify);
2859 	/* not that we need this */
2860 	control->tail_mbuf = m_notify;
2861 	control->spec_flags = M_NOTIFICATION;
2862 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2863 	    control,
2864 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2865 	    so_locked);
2866 	if (event == SCTP_COMM_LOST) {
2867 		/* Wake up any sleeper */
2868 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2869 		so = SCTP_INP_SO(stcb->sctp_ep);
2870 		if (!so_locked) {
2871 			atomic_add_int(&stcb->asoc.refcnt, 1);
2872 			SCTP_TCB_UNLOCK(stcb);
2873 			SCTP_SOCKET_LOCK(so, 1);
2874 			SCTP_TCB_LOCK(stcb);
2875 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2876 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2877 				SCTP_SOCKET_UNLOCK(so, 1);
2878 				return;
2879 			}
2880 		}
2881 #endif
2882 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2883 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2884 		if (!so_locked) {
2885 			SCTP_SOCKET_UNLOCK(so, 1);
2886 		}
2887 #endif
2888 	}
2889 }
2890 
2891 static void
2892 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2893     struct sockaddr *sa, uint32_t error)
2894 {
2895 	struct mbuf *m_notify;
2896 	struct sctp_paddr_change *spc;
2897 	struct sctp_queued_to_read *control;
2898 
2899 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2900 		/* event not enabled */
2901 		return;
2902 	}
2903 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2904 	if (m_notify == NULL)
2905 		return;
2906 	SCTP_BUF_LEN(m_notify) = 0;
2907 	spc = mtod(m_notify, struct sctp_paddr_change *);
2908 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2909 	spc->spc_flags = 0;
2910 	spc->spc_length = sizeof(struct sctp_paddr_change);
2911 	switch (sa->sa_family) {
2912 	case AF_INET:
2913 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2914 		break;
2915 #ifdef INET6
2916 	case AF_INET6:
2917 		{
2918 			struct sockaddr_in6 *sin6;
2919 
2920 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2921 
2922 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2923 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2924 				if (sin6->sin6_scope_id == 0) {
2925 					/* recover scope_id for user */
2926 					(void)sa6_recoverscope(sin6);
2927 				} else {
2928 					/* clear embedded scope_id for user */
2929 					in6_clearscope(&sin6->sin6_addr);
2930 				}
2931 			}
2932 			break;
2933 		}
2934 #endif
2935 	default:
2936 		/* TSNH */
2937 		break;
2938 	}
2939 	spc->spc_state = state;
2940 	spc->spc_error = error;
2941 	spc->spc_assoc_id = sctp_get_associd(stcb);
2942 
2943 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2944 	SCTP_BUF_NEXT(m_notify) = NULL;
2945 
2946 	/* append to socket */
2947 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2948 	    0, 0, 0, 0, 0, 0,
2949 	    m_notify);
2950 	if (control == NULL) {
2951 		/* no memory */
2952 		sctp_m_freem(m_notify);
2953 		return;
2954 	}
2955 	control->length = SCTP_BUF_LEN(m_notify);
2956 	control->spec_flags = M_NOTIFICATION;
2957 	/* not that we need this */
2958 	control->tail_mbuf = m_notify;
2959 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2960 	    control,
2961 	    &stcb->sctp_socket->so_rcv, 1,
2962 	    SCTP_READ_LOCK_NOT_HELD,
2963 	    SCTP_SO_NOT_LOCKED);
2964 }
2965 
2966 
2967 static void
2968 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
2969     struct sctp_tmit_chunk *chk, int so_locked
2970 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2971     SCTP_UNUSED
2972 #endif
2973 )
2974 {
2975 	struct mbuf *m_notify;
2976 	struct sctp_send_failed *ssf;
2977 	struct sctp_queued_to_read *control;
2978 	int length;
2979 
2980 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
2981 		/* event not enabled */
2982 		return;
2983 	}
2984 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2985 	if (m_notify == NULL)
2986 		/* no space left */
2987 		return;
2988 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2989 	length -= sizeof(struct sctp_data_chunk);
2990 	SCTP_BUF_LEN(m_notify) = 0;
2991 	ssf = mtod(m_notify, struct sctp_send_failed *);
2992 	ssf->ssf_type = SCTP_SEND_FAILED;
2993 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2994 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2995 	else
2996 		ssf->ssf_flags = SCTP_DATA_SENT;
2997 	ssf->ssf_length = length;
2998 	ssf->ssf_error = error;
2999 	/* not exactly what the user sent in, but should be close :) */
3000 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3001 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3002 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
3003 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3004 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3005 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
3006 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3007 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3008 
3009 	if (chk->data) {
3010 		/*
3011 		 * trim off the sctp chunk header(it should be there)
3012 		 */
3013 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3014 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3015 			sctp_mbuf_crush(chk->data);
3016 			chk->send_size -= sizeof(struct sctp_data_chunk);
3017 		}
3018 	}
3019 	SCTP_BUF_NEXT(m_notify) = chk->data;
3020 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3021 	/* Steal off the mbuf */
3022 	chk->data = NULL;
3023 	/*
3024 	 * For this case, we check the actual socket buffer, since the assoc
3025 	 * is going away we don't want to overfill the socket buffer for a
3026 	 * non-reader
3027 	 */
3028 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3029 		sctp_m_freem(m_notify);
3030 		return;
3031 	}
3032 	/* append to socket */
3033 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3034 	    0, 0, 0, 0, 0, 0,
3035 	    m_notify);
3036 	if (control == NULL) {
3037 		/* no memory */
3038 		sctp_m_freem(m_notify);
3039 		return;
3040 	}
3041 	control->spec_flags = M_NOTIFICATION;
3042 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3043 	    control,
3044 	    &stcb->sctp_socket->so_rcv, 1,
3045 	    SCTP_READ_LOCK_NOT_HELD,
3046 	    so_locked);
3047 }
3048 
3049 
3050 static void
3051 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3052     struct sctp_stream_queue_pending *sp, int so_locked
3053 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3054     SCTP_UNUSED
3055 #endif
3056 )
3057 {
3058 	struct mbuf *m_notify;
3059 	struct sctp_send_failed *ssf;
3060 	struct sctp_queued_to_read *control;
3061 	int length;
3062 
3063 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
3064 		/* event not enabled */
3065 		return;
3066 	}
3067 	length = sizeof(struct sctp_send_failed) + sp->length;
3068 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3069 	if (m_notify == NULL)
3070 		/* no space left */
3071 		return;
3072 	SCTP_BUF_LEN(m_notify) = 0;
3073 	ssf = mtod(m_notify, struct sctp_send_failed *);
3074 	ssf->ssf_type = SCTP_SEND_FAILED;
3075 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3076 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3077 	else
3078 		ssf->ssf_flags = SCTP_DATA_SENT;
3079 	ssf->ssf_length = length;
3080 	ssf->ssf_error = error;
3081 	/* not exactly what the user sent in, but should be close :) */
3082 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3083 	ssf->ssf_info.sinfo_stream = sp->stream;
3084 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3085 	if (sp->some_taken) {
3086 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3087 	} else {
3088 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3089 	}
3090 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3091 	ssf->ssf_info.sinfo_context = sp->context;
3092 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3093 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3094 	SCTP_BUF_NEXT(m_notify) = sp->data;
3095 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3096 
3097 	/* Steal off the mbuf */
3098 	sp->data = NULL;
3099 	/*
3100 	 * For this case, we check the actual socket buffer, since the assoc
3101 	 * is going away we don't want to overfill the socket buffer for a
3102 	 * non-reader
3103 	 */
3104 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3105 		sctp_m_freem(m_notify);
3106 		return;
3107 	}
3108 	/* append to socket */
3109 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3110 	    0, 0, 0, 0, 0, 0,
3111 	    m_notify);
3112 	if (control == NULL) {
3113 		/* no memory */
3114 		sctp_m_freem(m_notify);
3115 		return;
3116 	}
3117 	control->spec_flags = M_NOTIFICATION;
3118 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3119 	    control,
3120 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3121 }
3122 
3123 
3124 
3125 static void
3126 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3127     uint32_t error)
3128 {
3129 	struct mbuf *m_notify;
3130 	struct sctp_adaptation_event *sai;
3131 	struct sctp_queued_to_read *control;
3132 
3133 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3134 		/* event not enabled */
3135 		return;
3136 	}
3137 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3138 	if (m_notify == NULL)
3139 		/* no space left */
3140 		return;
3141 	SCTP_BUF_LEN(m_notify) = 0;
3142 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3143 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3144 	sai->sai_flags = 0;
3145 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3146 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3147 	sai->sai_assoc_id = sctp_get_associd(stcb);
3148 
3149 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3150 	SCTP_BUF_NEXT(m_notify) = NULL;
3151 
3152 	/* append to socket */
3153 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3154 	    0, 0, 0, 0, 0, 0,
3155 	    m_notify);
3156 	if (control == NULL) {
3157 		/* no memory */
3158 		sctp_m_freem(m_notify);
3159 		return;
3160 	}
3161 	control->length = SCTP_BUF_LEN(m_notify);
3162 	control->spec_flags = M_NOTIFICATION;
3163 	/* not that we need this */
3164 	control->tail_mbuf = m_notify;
3165 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3166 	    control,
3167 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3168 }
3169 
3170 /* This always must be called with the read-queue LOCKED in the INP */
3171 static void
3172 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3173     uint32_t val, int so_locked
3174 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3175     SCTP_UNUSED
3176 #endif
3177 )
3178 {
3179 	struct mbuf *m_notify;
3180 	struct sctp_pdapi_event *pdapi;
3181 	struct sctp_queued_to_read *control;
3182 	struct sockbuf *sb;
3183 
3184 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3185 		/* event not enabled */
3186 		return;
3187 	}
3188 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3189 		return;
3190 	}
3191 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3192 	if (m_notify == NULL)
3193 		/* no space left */
3194 		return;
3195 	SCTP_BUF_LEN(m_notify) = 0;
3196 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3197 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3198 	pdapi->pdapi_flags = 0;
3199 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3200 	pdapi->pdapi_indication = error;
3201 	pdapi->pdapi_stream = (val >> 16);
3202 	pdapi->pdapi_seq = (val & 0x0000ffff);
3203 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3204 
3205 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3206 	SCTP_BUF_NEXT(m_notify) = NULL;
3207 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3208 	    0, 0, 0, 0, 0, 0,
3209 	    m_notify);
3210 	if (control == NULL) {
3211 		/* no memory */
3212 		sctp_m_freem(m_notify);
3213 		return;
3214 	}
3215 	control->spec_flags = M_NOTIFICATION;
3216 	control->length = SCTP_BUF_LEN(m_notify);
3217 	/* not that we need this */
3218 	control->tail_mbuf = m_notify;
3219 	control->held_length = 0;
3220 	control->length = 0;
3221 	sb = &stcb->sctp_socket->so_rcv;
3222 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3223 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3224 	}
3225 	sctp_sballoc(stcb, sb, m_notify);
3226 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3227 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3228 	}
3229 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3230 	control->end_added = 1;
3231 	if (stcb->asoc.control_pdapi)
3232 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3233 	else {
3234 		/* we really should not see this case */
3235 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3236 	}
3237 	if (stcb->sctp_ep && stcb->sctp_socket) {
3238 		/* This should always be the case */
3239 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3240 		struct socket *so;
3241 
3242 		so = SCTP_INP_SO(stcb->sctp_ep);
3243 		if (!so_locked) {
3244 			atomic_add_int(&stcb->asoc.refcnt, 1);
3245 			SCTP_TCB_UNLOCK(stcb);
3246 			SCTP_SOCKET_LOCK(so, 1);
3247 			SCTP_TCB_LOCK(stcb);
3248 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3249 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3250 				SCTP_SOCKET_UNLOCK(so, 1);
3251 				return;
3252 			}
3253 		}
3254 #endif
3255 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3256 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3257 		if (!so_locked) {
3258 			SCTP_SOCKET_UNLOCK(so, 1);
3259 		}
3260 #endif
3261 	}
3262 }
3263 
3264 static void
3265 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3266 {
3267 	struct mbuf *m_notify;
3268 	struct sctp_shutdown_event *sse;
3269 	struct sctp_queued_to_read *control;
3270 
3271 	/*
3272 	 * For TCP model AND UDP connected sockets we will send an error up
3273 	 * when an SHUTDOWN completes
3274 	 */
3275 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3276 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3277 		/* mark socket closed for read/write and wakeup! */
3278 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3279 		struct socket *so;
3280 
3281 		so = SCTP_INP_SO(stcb->sctp_ep);
3282 		atomic_add_int(&stcb->asoc.refcnt, 1);
3283 		SCTP_TCB_UNLOCK(stcb);
3284 		SCTP_SOCKET_LOCK(so, 1);
3285 		SCTP_TCB_LOCK(stcb);
3286 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3287 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3288 			SCTP_SOCKET_UNLOCK(so, 1);
3289 			return;
3290 		}
3291 #endif
3292 		socantsendmore(stcb->sctp_socket);
3293 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3294 		SCTP_SOCKET_UNLOCK(so, 1);
3295 #endif
3296 	}
3297 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3298 		/* event not enabled */
3299 		return;
3300 	}
3301 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
3302 	if (m_notify == NULL)
3303 		/* no space left */
3304 		return;
3305 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3306 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3307 	sse->sse_flags = 0;
3308 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3309 	sse->sse_assoc_id = sctp_get_associd(stcb);
3310 
3311 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3312 	SCTP_BUF_NEXT(m_notify) = NULL;
3313 
3314 	/* append to socket */
3315 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3316 	    0, 0, 0, 0, 0, 0,
3317 	    m_notify);
3318 	if (control == NULL) {
3319 		/* no memory */
3320 		sctp_m_freem(m_notify);
3321 		return;
3322 	}
3323 	control->spec_flags = M_NOTIFICATION;
3324 	control->length = SCTP_BUF_LEN(m_notify);
3325 	/* not that we need this */
3326 	control->tail_mbuf = m_notify;
3327 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3328 	    control,
3329 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3330 }
3331 
3332 static void
3333 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3334     int so_locked
3335 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3336     SCTP_UNUSED
3337 #endif
3338 )
3339 {
3340 	struct mbuf *m_notify;
3341 	struct sctp_sender_dry_event *event;
3342 	struct sctp_queued_to_read *control;
3343 
3344 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3345 		/* event not enabled */
3346 		return;
3347 	}
3348 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3349 	if (m_notify == NULL) {
3350 		/* no space left */
3351 		return;
3352 	}
3353 	SCTP_BUF_LEN(m_notify) = 0;
3354 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3355 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3356 	event->sender_dry_flags = 0;
3357 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3358 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3359 
3360 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3361 	SCTP_BUF_NEXT(m_notify) = NULL;
3362 
3363 	/* append to socket */
3364 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3365 	    0, 0, 0, 0, 0, 0, m_notify);
3366 	if (control == NULL) {
3367 		/* no memory */
3368 		sctp_m_freem(m_notify);
3369 		return;
3370 	}
3371 	control->length = SCTP_BUF_LEN(m_notify);
3372 	control->spec_flags = M_NOTIFICATION;
3373 	/* not that we need this */
3374 	control->tail_mbuf = m_notify;
3375 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3376 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3377 }
3378 
3379 
3380 static void
3381 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3382 {
3383 	struct mbuf *m_notify;
3384 	struct sctp_queued_to_read *control;
3385 	struct sctp_stream_reset_event *strreset;
3386 	int len;
3387 
3388 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3389 		/* event not enabled */
3390 		return;
3391 	}
3392 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3393 	if (m_notify == NULL)
3394 		/* no space left */
3395 		return;
3396 	SCTP_BUF_LEN(m_notify) = 0;
3397 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3398 	if (len > M_TRAILINGSPACE(m_notify)) {
3399 		/* never enough room */
3400 		sctp_m_freem(m_notify);
3401 		return;
3402 	}
3403 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3404 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3405 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3406 	strreset->strreset_length = len;
3407 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3408 	strreset->strreset_list[0] = number_entries;
3409 
3410 	SCTP_BUF_LEN(m_notify) = len;
3411 	SCTP_BUF_NEXT(m_notify) = NULL;
3412 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3413 		/* no space */
3414 		sctp_m_freem(m_notify);
3415 		return;
3416 	}
3417 	/* append to socket */
3418 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3419 	    0, 0, 0, 0, 0, 0,
3420 	    m_notify);
3421 	if (control == NULL) {
3422 		/* no memory */
3423 		sctp_m_freem(m_notify);
3424 		return;
3425 	}
3426 	control->spec_flags = M_NOTIFICATION;
3427 	control->length = SCTP_BUF_LEN(m_notify);
3428 	/* not that we need this */
3429 	control->tail_mbuf = m_notify;
3430 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3431 	    control,
3432 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3433 }
3434 
3435 
3436 static void
3437 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3438     int number_entries, uint16_t * list, int flag)
3439 {
3440 	struct mbuf *m_notify;
3441 	struct sctp_queued_to_read *control;
3442 	struct sctp_stream_reset_event *strreset;
3443 	int len;
3444 
3445 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3446 		/* event not enabled */
3447 		return;
3448 	}
3449 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3450 	if (m_notify == NULL)
3451 		/* no space left */
3452 		return;
3453 	SCTP_BUF_LEN(m_notify) = 0;
3454 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3455 	if (len > M_TRAILINGSPACE(m_notify)) {
3456 		/* never enough room */
3457 		sctp_m_freem(m_notify);
3458 		return;
3459 	}
3460 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3461 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3462 	if (number_entries == 0) {
3463 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3464 	} else {
3465 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3466 	}
3467 	strreset->strreset_length = len;
3468 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3469 	if (number_entries) {
3470 		int i;
3471 
3472 		for (i = 0; i < number_entries; i++) {
3473 			strreset->strreset_list[i] = ntohs(list[i]);
3474 		}
3475 	}
3476 	SCTP_BUF_LEN(m_notify) = len;
3477 	SCTP_BUF_NEXT(m_notify) = NULL;
3478 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3479 		/* no space */
3480 		sctp_m_freem(m_notify);
3481 		return;
3482 	}
3483 	/* append to socket */
3484 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3485 	    0, 0, 0, 0, 0, 0,
3486 	    m_notify);
3487 	if (control == NULL) {
3488 		/* no memory */
3489 		sctp_m_freem(m_notify);
3490 		return;
3491 	}
3492 	control->spec_flags = M_NOTIFICATION;
3493 	control->length = SCTP_BUF_LEN(m_notify);
3494 	/* not that we need this */
3495 	control->tail_mbuf = m_notify;
3496 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3497 	    control,
3498 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3499 }
3500 
3501 
3502 void
3503 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3504     uint32_t error, void *data, int so_locked
3505 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3506     SCTP_UNUSED
3507 #endif
3508 )
3509 {
3510 	if ((stcb == NULL) ||
3511 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3512 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3513 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3514 		/* If the socket is gone we are out of here */
3515 		return;
3516 	}
3517 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3518 		return;
3519 	}
3520 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3521 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3522 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3523 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3524 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3525 			/* Don't report these in front states */
3526 			return;
3527 		}
3528 	}
3529 	switch (notification) {
3530 	case SCTP_NOTIFY_ASSOC_UP:
3531 		if (stcb->asoc.assoc_up_sent == 0) {
3532 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3533 			stcb->asoc.assoc_up_sent = 1;
3534 		}
3535 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3536 			sctp_notify_adaptation_layer(stcb, error);
3537 		}
3538 		if (stcb->asoc.peer_supports_auth == 0) {
3539 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3540 			    NULL, so_locked);
3541 		}
3542 		break;
3543 	case SCTP_NOTIFY_ASSOC_DOWN:
3544 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3545 		break;
3546 	case SCTP_NOTIFY_INTERFACE_DOWN:
3547 		{
3548 			struct sctp_nets *net;
3549 
3550 			net = (struct sctp_nets *)data;
3551 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3552 			    (struct sockaddr *)&net->ro._l_addr, error);
3553 			break;
3554 		}
3555 	case SCTP_NOTIFY_INTERFACE_UP:
3556 		{
3557 			struct sctp_nets *net;
3558 
3559 			net = (struct sctp_nets *)data;
3560 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3561 			    (struct sockaddr *)&net->ro._l_addr, error);
3562 			break;
3563 		}
3564 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3565 		{
3566 			struct sctp_nets *net;
3567 
3568 			net = (struct sctp_nets *)data;
3569 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3570 			    (struct sockaddr *)&net->ro._l_addr, error);
3571 			break;
3572 		}
3573 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3574 		sctp_notify_send_failed2(stcb, error,
3575 		    (struct sctp_stream_queue_pending *)data, so_locked);
3576 		break;
3577 	case SCTP_NOTIFY_DG_FAIL:
3578 		sctp_notify_send_failed(stcb, error,
3579 		    (struct sctp_tmit_chunk *)data, so_locked);
3580 		break;
3581 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3582 		{
3583 			uint32_t val;
3584 
3585 			val = *((uint32_t *) data);
3586 
3587 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3588 			break;
3589 		}
3590 	case SCTP_NOTIFY_STRDATA_ERR:
3591 		break;
3592 	case SCTP_NOTIFY_ASSOC_ABORTED:
3593 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3594 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3595 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3596 		} else {
3597 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3598 		}
3599 		break;
3600 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3601 		break;
3602 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3603 		break;
3604 	case SCTP_NOTIFY_ASSOC_RESTART:
3605 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3606 		if (stcb->asoc.peer_supports_auth == 0) {
3607 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3608 			    NULL, so_locked);
3609 		}
3610 		break;
3611 	case SCTP_NOTIFY_HB_RESP:
3612 		break;
3613 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3614 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3615 		break;
3616 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3617 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3618 		break;
3619 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3620 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3621 		break;
3622 
3623 	case SCTP_NOTIFY_STR_RESET_SEND:
3624 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3625 		break;
3626 	case SCTP_NOTIFY_STR_RESET_RECV:
3627 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3628 		break;
3629 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3630 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3631 		break;
3632 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3633 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3634 		break;
3635 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3636 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3637 		    error);
3638 		break;
3639 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3640 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3641 		    error);
3642 		break;
3643 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3644 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3645 		    error);
3646 		break;
3647 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3648 		break;
3649 	case SCTP_NOTIFY_ASCONF_FAILED:
3650 		break;
3651 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3652 		sctp_notify_shutdown_event(stcb);
3653 		break;
3654 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3655 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3656 		    (uint16_t) (uintptr_t) data,
3657 		    so_locked);
3658 		break;
3659 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3660 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3661 		    (uint16_t) (uintptr_t) data,
3662 		    so_locked);
3663 		break;
3664 	case SCTP_NOTIFY_NO_PEER_AUTH:
3665 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3666 		    (uint16_t) (uintptr_t) data,
3667 		    so_locked);
3668 		break;
3669 	case SCTP_NOTIFY_SENDER_DRY:
3670 		sctp_notify_sender_dry_event(stcb, so_locked);
3671 		break;
3672 	default:
3673 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3674 		    __FUNCTION__, notification, notification);
3675 		break;
3676 	}			/* end switch */
3677 }
3678 
3679 void
3680 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3681 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3682     SCTP_UNUSED
3683 #endif
3684 )
3685 {
3686 	struct sctp_association *asoc;
3687 	struct sctp_stream_out *outs;
3688 	struct sctp_tmit_chunk *chk;
3689 	struct sctp_stream_queue_pending *sp;
3690 	int i;
3691 
3692 	asoc = &stcb->asoc;
3693 
3694 	if (stcb == NULL) {
3695 		return;
3696 	}
3697 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3698 		/* already being freed */
3699 		return;
3700 	}
3701 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3702 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3703 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3704 		return;
3705 	}
3706 	/* now through all the gunk freeing chunks */
3707 	if (holds_lock == 0) {
3708 		SCTP_TCB_SEND_LOCK(stcb);
3709 	}
3710 	/* sent queue SHOULD be empty */
3711 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3712 		chk = TAILQ_FIRST(&asoc->sent_queue);
3713 		while (chk) {
3714 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3715 			asoc->sent_queue_cnt--;
3716 			if (chk->data != NULL) {
3717 				sctp_free_bufspace(stcb, asoc, chk, 1);
3718 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3719 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3720 				if (chk->data) {
3721 					sctp_m_freem(chk->data);
3722 					chk->data = NULL;
3723 				}
3724 			}
3725 			sctp_free_a_chunk(stcb, chk);
3726 			/* sa_ignore FREED_MEMORY */
3727 			chk = TAILQ_FIRST(&asoc->sent_queue);
3728 		}
3729 	}
3730 	/* pending send queue SHOULD be empty */
3731 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3732 		chk = TAILQ_FIRST(&asoc->send_queue);
3733 		while (chk) {
3734 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3735 			asoc->send_queue_cnt--;
3736 			if (chk->data != NULL) {
3737 				sctp_free_bufspace(stcb, asoc, chk, 1);
3738 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3739 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3740 				if (chk->data) {
3741 					sctp_m_freem(chk->data);
3742 					chk->data = NULL;
3743 				}
3744 			}
3745 			sctp_free_a_chunk(stcb, chk);
3746 			/* sa_ignore FREED_MEMORY */
3747 			chk = TAILQ_FIRST(&asoc->send_queue);
3748 		}
3749 	}
3750 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3751 		/* For each stream */
3752 		outs = &stcb->asoc.strmout[i];
3753 		/* clean up any sends there */
3754 		stcb->asoc.locked_on_sending = NULL;
3755 		sp = TAILQ_FIRST(&outs->outqueue);
3756 		while (sp) {
3757 			stcb->asoc.stream_queue_cnt--;
3758 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3759 			sctp_free_spbufspace(stcb, asoc, sp);
3760 			if (sp->data) {
3761 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3762 				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3763 				if (sp->data) {
3764 					sctp_m_freem(sp->data);
3765 					sp->data = NULL;
3766 				}
3767 			}
3768 			if (sp->net)
3769 				sctp_free_remote_addr(sp->net);
3770 			sp->net = NULL;
3771 			/* Free the chunk */
3772 			sctp_free_a_strmoq(stcb, sp);
3773 			/* sa_ignore FREED_MEMORY */
3774 			sp = TAILQ_FIRST(&outs->outqueue);
3775 		}
3776 	}
3777 
3778 	if (holds_lock == 0) {
3779 		SCTP_TCB_SEND_UNLOCK(stcb);
3780 	}
3781 }
3782 
3783 void
3784 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3785 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3786     SCTP_UNUSED
3787 #endif
3788 )
3789 {
3790 
3791 	if (stcb == NULL) {
3792 		return;
3793 	}
3794 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3795 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3796 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3797 		return;
3798 	}
3799 	/* Tell them we lost the asoc */
3800 	sctp_report_all_outbound(stcb, 1, so_locked);
3801 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3802 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3803 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3804 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3805 	}
3806 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3807 }
3808 
3809 void
3810 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3811     struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
3812     uint32_t vrf_id, uint16_t port)
3813 {
3814 	uint32_t vtag;
3815 
3816 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3817 	struct socket *so;
3818 
3819 #endif
3820 
3821 	vtag = 0;
3822 	if (stcb != NULL) {
3823 		/* We have a TCB to abort, send notification too */
3824 		vtag = stcb->asoc.peer_vtag;
3825 		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
3826 		/* get the assoc vrf id and table id */
3827 		vrf_id = stcb->asoc.vrf_id;
3828 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3829 	}
3830 	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
3831 	if (stcb != NULL) {
3832 		/* Ok, now lets free it */
3833 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3834 		so = SCTP_INP_SO(inp);
3835 		atomic_add_int(&stcb->asoc.refcnt, 1);
3836 		SCTP_TCB_UNLOCK(stcb);
3837 		SCTP_SOCKET_LOCK(so, 1);
3838 		SCTP_TCB_LOCK(stcb);
3839 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3840 #endif
3841 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3842 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3843 		SCTP_SOCKET_UNLOCK(so, 1);
3844 #endif
3845 	}
3846 }
3847 
3848 #ifdef SCTP_ASOCLOG_OF_TSNS
3849 void
3850 sctp_print_out_track_log(struct sctp_tcb *stcb)
3851 {
3852 #ifdef NOSIY_PRINTS
3853 	int i;
3854 
3855 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3856 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3857 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3858 		SCTP_PRINTF("None rcvd\n");
3859 		goto none_in;
3860 	}
3861 	if (stcb->asoc.tsn_in_wrapped) {
3862 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3863 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3864 			    stcb->asoc.in_tsnlog[i].tsn,
3865 			    stcb->asoc.in_tsnlog[i].strm,
3866 			    stcb->asoc.in_tsnlog[i].seq,
3867 			    stcb->asoc.in_tsnlog[i].flgs,
3868 			    stcb->asoc.in_tsnlog[i].sz);
3869 		}
3870 	}
3871 	if (stcb->asoc.tsn_in_at) {
3872 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
3873 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3874 			    stcb->asoc.in_tsnlog[i].tsn,
3875 			    stcb->asoc.in_tsnlog[i].strm,
3876 			    stcb->asoc.in_tsnlog[i].seq,
3877 			    stcb->asoc.in_tsnlog[i].flgs,
3878 			    stcb->asoc.in_tsnlog[i].sz);
3879 		}
3880 	}
3881 none_in:
3882 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
3883 	if ((stcb->asoc.tsn_out_at == 0) &&
3884 	    (stcb->asoc.tsn_out_wrapped == 0)) {
3885 		SCTP_PRINTF("None sent\n");
3886 	}
3887 	if (stcb->asoc.tsn_out_wrapped) {
3888 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
3889 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3890 			    stcb->asoc.out_tsnlog[i].tsn,
3891 			    stcb->asoc.out_tsnlog[i].strm,
3892 			    stcb->asoc.out_tsnlog[i].seq,
3893 			    stcb->asoc.out_tsnlog[i].flgs,
3894 			    stcb->asoc.out_tsnlog[i].sz);
3895 		}
3896 	}
3897 	if (stcb->asoc.tsn_out_at) {
3898 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
3899 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3900 			    stcb->asoc.out_tsnlog[i].tsn,
3901 			    stcb->asoc.out_tsnlog[i].strm,
3902 			    stcb->asoc.out_tsnlog[i].seq,
3903 			    stcb->asoc.out_tsnlog[i].flgs,
3904 			    stcb->asoc.out_tsnlog[i].sz);
3905 		}
3906 	}
3907 #endif
3908 }
3909 
3910 #endif
3911 
3912 void
3913 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3914     int error, struct mbuf *op_err,
3915     int so_locked
3916 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3917     SCTP_UNUSED
3918 #endif
3919 )
3920 {
3921 	uint32_t vtag;
3922 
3923 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3924 	struct socket *so;
3925 
3926 #endif
3927 
3928 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3929 	so = SCTP_INP_SO(inp);
3930 #endif
3931 	if (stcb == NULL) {
3932 		/* Got to have a TCB */
3933 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3934 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3935 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3936 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3937 			}
3938 		}
3939 		return;
3940 	} else {
3941 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3942 	}
3943 	vtag = stcb->asoc.peer_vtag;
3944 	/* notify the ulp */
3945 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3946 		sctp_abort_notification(stcb, error, so_locked);
3947 	/* notify the peer */
3948 #if defined(SCTP_PANIC_ON_ABORT)
3949 	panic("aborting an association");
3950 #endif
3951 	sctp_send_abort_tcb(stcb, op_err, so_locked);
3952 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3953 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3954 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3955 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3956 	}
3957 	/* now free the asoc */
3958 #ifdef SCTP_ASOCLOG_OF_TSNS
3959 	sctp_print_out_track_log(stcb);
3960 #endif
3961 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3962 	if (!so_locked) {
3963 		atomic_add_int(&stcb->asoc.refcnt, 1);
3964 		SCTP_TCB_UNLOCK(stcb);
3965 		SCTP_SOCKET_LOCK(so, 1);
3966 		SCTP_TCB_LOCK(stcb);
3967 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3968 	}
3969 #endif
3970 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3971 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3972 	if (!so_locked) {
3973 		SCTP_SOCKET_UNLOCK(so, 1);
3974 	}
3975 #endif
3976 }
3977 
3978 void
3979 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3980     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3981 {
3982 	struct sctp_chunkhdr *ch, chunk_buf;
3983 	unsigned int chk_length;
3984 
3985 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3986 	/* Generate a TO address for future reference */
3987 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3988 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3989 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3990 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3991 		}
3992 	}
3993 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3994 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3995 	while (ch != NULL) {
3996 		chk_length = ntohs(ch->chunk_length);
3997 		if (chk_length < sizeof(*ch)) {
3998 			/* break to abort land */
3999 			break;
4000 		}
4001 		switch (ch->chunk_type) {
4002 		case SCTP_COOKIE_ECHO:
4003 			/* We hit here only if the assoc is being freed */
4004 			return;
4005 		case SCTP_PACKET_DROPPED:
4006 			/* we don't respond to pkt-dropped */
4007 			return;
4008 		case SCTP_ABORT_ASSOCIATION:
4009 			/* we don't respond with an ABORT to an ABORT */
4010 			return;
4011 		case SCTP_SHUTDOWN_COMPLETE:
4012 			/*
4013 			 * we ignore it since we are not waiting for it and
4014 			 * peer is gone
4015 			 */
4016 			return;
4017 		case SCTP_SHUTDOWN_ACK:
4018 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4019 			return;
4020 		default:
4021 			break;
4022 		}
4023 		offset += SCTP_SIZE32(chk_length);
4024 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4025 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4026 	}
4027 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4028 }
4029 
4030 /*
4031  * check the inbound datagram to make sure there is not an abort inside it,
4032  * if there is return 1, else return 0.
4033  */
4034 int
4035 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4036 {
4037 	struct sctp_chunkhdr *ch;
4038 	struct sctp_init_chunk *init_chk, chunk_buf;
4039 	int offset;
4040 	unsigned int chk_length;
4041 
4042 	offset = iphlen + sizeof(struct sctphdr);
4043 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4044 	    (uint8_t *) & chunk_buf);
4045 	while (ch != NULL) {
4046 		chk_length = ntohs(ch->chunk_length);
4047 		if (chk_length < sizeof(*ch)) {
4048 			/* packet is probably corrupt */
4049 			break;
4050 		}
4051 		/* we seem to be ok, is it an abort? */
4052 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4053 			/* yep, tell them */
4054 			return (1);
4055 		}
4056 		if (ch->chunk_type == SCTP_INITIATION) {
4057 			/* need to update the Vtag */
4058 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4059 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4060 			if (init_chk != NULL) {
4061 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4062 			}
4063 		}
4064 		/* Nope, move to the next chunk */
4065 		offset += SCTP_SIZE32(chk_length);
4066 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4067 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4068 	}
4069 	return (0);
4070 }
4071 
4072 /*
4073  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4074  * set (i.e. it's 0) so, create this function to compare link local scopes
4075  */
4076 #ifdef INET6
4077 uint32_t
4078 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4079 {
4080 	struct sockaddr_in6 a, b;
4081 
4082 	/* save copies */
4083 	a = *addr1;
4084 	b = *addr2;
4085 
4086 	if (a.sin6_scope_id == 0)
4087 		if (sa6_recoverscope(&a)) {
4088 			/* can't get scope, so can't match */
4089 			return (0);
4090 		}
4091 	if (b.sin6_scope_id == 0)
4092 		if (sa6_recoverscope(&b)) {
4093 			/* can't get scope, so can't match */
4094 			return (0);
4095 		}
4096 	if (a.sin6_scope_id != b.sin6_scope_id)
4097 		return (0);
4098 
4099 	return (1);
4100 }
4101 
4102 /*
4103  * returns a sockaddr_in6 with embedded scope recovered and removed
4104  */
4105 struct sockaddr_in6 *
4106 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4107 {
4108 	/* check and strip embedded scope junk */
4109 	if (addr->sin6_family == AF_INET6) {
4110 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4111 			if (addr->sin6_scope_id == 0) {
4112 				*store = *addr;
4113 				if (!sa6_recoverscope(store)) {
4114 					/* use the recovered scope */
4115 					addr = store;
4116 				}
4117 			} else {
4118 				/* else, return the original "to" addr */
4119 				in6_clearscope(&addr->sin6_addr);
4120 			}
4121 		}
4122 	}
4123 	return (addr);
4124 }
4125 
4126 #endif
4127 
4128 /*
4129  * are the two addresses the same?  currently a "scopeless" check returns: 1
4130  * if same, 0 if not
4131  */
4132 int
4133 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4134 {
4135 
4136 	/* must be valid */
4137 	if (sa1 == NULL || sa2 == NULL)
4138 		return (0);
4139 
4140 	/* must be the same family */
4141 	if (sa1->sa_family != sa2->sa_family)
4142 		return (0);
4143 
4144 	switch (sa1->sa_family) {
4145 #ifdef INET6
4146 	case AF_INET6:
4147 		{
4148 			/* IPv6 addresses */
4149 			struct sockaddr_in6 *sin6_1, *sin6_2;
4150 
4151 			sin6_1 = (struct sockaddr_in6 *)sa1;
4152 			sin6_2 = (struct sockaddr_in6 *)sa2;
4153 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4154 			    sin6_2));
4155 		}
4156 #endif
4157 	case AF_INET:
4158 		{
4159 			/* IPv4 addresses */
4160 			struct sockaddr_in *sin_1, *sin_2;
4161 
4162 			sin_1 = (struct sockaddr_in *)sa1;
4163 			sin_2 = (struct sockaddr_in *)sa2;
4164 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4165 		}
4166 	default:
4167 		/* we don't do these... */
4168 		return (0);
4169 	}
4170 }
4171 
4172 void
4173 sctp_print_address(struct sockaddr *sa)
4174 {
4175 #ifdef INET6
4176 	char ip6buf[INET6_ADDRSTRLEN];
4177 
4178 	ip6buf[0] = 0;
4179 #endif
4180 
4181 	switch (sa->sa_family) {
4182 #ifdef INET6
4183 	case AF_INET6:
4184 		{
4185 			struct sockaddr_in6 *sin6;
4186 
4187 			sin6 = (struct sockaddr_in6 *)sa;
4188 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4189 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4190 			    ntohs(sin6->sin6_port),
4191 			    sin6->sin6_scope_id);
4192 			break;
4193 		}
4194 #endif
4195 	case AF_INET:
4196 		{
4197 			struct sockaddr_in *sin;
4198 			unsigned char *p;
4199 
4200 			sin = (struct sockaddr_in *)sa;
4201 			p = (unsigned char *)&sin->sin_addr;
4202 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4203 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4204 			break;
4205 		}
4206 	default:
4207 		SCTP_PRINTF("?\n");
4208 		break;
4209 	}
4210 }
4211 
4212 void
4213 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4214 {
4215 	switch (iph->ip_v) {
4216 	case IPVERSION:
4217 		{
4218 			struct sockaddr_in lsa, fsa;
4219 
4220 			bzero(&lsa, sizeof(lsa));
4221 			lsa.sin_len = sizeof(lsa);
4222 			lsa.sin_family = AF_INET;
4223 			lsa.sin_addr = iph->ip_src;
4224 			lsa.sin_port = sh->src_port;
4225 			bzero(&fsa, sizeof(fsa));
4226 			fsa.sin_len = sizeof(fsa);
4227 			fsa.sin_family = AF_INET;
4228 			fsa.sin_addr = iph->ip_dst;
4229 			fsa.sin_port = sh->dest_port;
4230 			SCTP_PRINTF("src: ");
4231 			sctp_print_address((struct sockaddr *)&lsa);
4232 			SCTP_PRINTF("dest: ");
4233 			sctp_print_address((struct sockaddr *)&fsa);
4234 			break;
4235 		}
4236 #ifdef INET6
4237 	case IPV6_VERSION >> 4:
4238 		{
4239 			struct ip6_hdr *ip6;
4240 			struct sockaddr_in6 lsa6, fsa6;
4241 
4242 			ip6 = (struct ip6_hdr *)iph;
4243 			bzero(&lsa6, sizeof(lsa6));
4244 			lsa6.sin6_len = sizeof(lsa6);
4245 			lsa6.sin6_family = AF_INET6;
4246 			lsa6.sin6_addr = ip6->ip6_src;
4247 			lsa6.sin6_port = sh->src_port;
4248 			bzero(&fsa6, sizeof(fsa6));
4249 			fsa6.sin6_len = sizeof(fsa6);
4250 			fsa6.sin6_family = AF_INET6;
4251 			fsa6.sin6_addr = ip6->ip6_dst;
4252 			fsa6.sin6_port = sh->dest_port;
4253 			SCTP_PRINTF("src: ");
4254 			sctp_print_address((struct sockaddr *)&lsa6);
4255 			SCTP_PRINTF("dest: ");
4256 			sctp_print_address((struct sockaddr *)&fsa6);
4257 			break;
4258 		}
4259 #endif
4260 	default:
4261 		/* TSNH */
4262 		break;
4263 	}
4264 }
4265 
4266 void
4267 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4268     struct sctp_inpcb *new_inp,
4269     struct sctp_tcb *stcb,
4270     int waitflags)
4271 {
4272 	/*
4273 	 * go through our old INP and pull off any control structures that
4274 	 * belong to stcb and move then to the new inp.
4275 	 */
4276 	struct socket *old_so, *new_so;
4277 	struct sctp_queued_to_read *control, *nctl;
4278 	struct sctp_readhead tmp_queue;
4279 	struct mbuf *m;
4280 	int error = 0;
4281 
4282 	old_so = old_inp->sctp_socket;
4283 	new_so = new_inp->sctp_socket;
4284 	TAILQ_INIT(&tmp_queue);
4285 	error = sblock(&old_so->so_rcv, waitflags);
4286 	if (error) {
4287 		/*
4288 		 * Gak, can't get sblock, we have a problem. data will be
4289 		 * left stranded.. and we don't dare look at it since the
4290 		 * other thread may be reading something. Oh well, its a
4291 		 * screwed up app that does a peeloff OR a accept while
4292 		 * reading from the main socket... actually its only the
4293 		 * peeloff() case, since I think read will fail on a
4294 		 * listening socket..
4295 		 */
4296 		return;
4297 	}
4298 	/* lock the socket buffers */
4299 	SCTP_INP_READ_LOCK(old_inp);
4300 	control = TAILQ_FIRST(&old_inp->read_queue);
4301 	/* Pull off all for out target stcb */
4302 	while (control) {
4303 		nctl = TAILQ_NEXT(control, next);
4304 		if (control->stcb == stcb) {
4305 			/* remove it we want it */
4306 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4307 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4308 			m = control->data;
4309 			while (m) {
4310 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4311 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4312 				}
4313 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4314 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4315 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4316 				}
4317 				m = SCTP_BUF_NEXT(m);
4318 			}
4319 		}
4320 		control = nctl;
4321 	}
4322 	SCTP_INP_READ_UNLOCK(old_inp);
4323 	/* Remove the sb-lock on the old socket */
4324 
4325 	sbunlock(&old_so->so_rcv);
4326 	/* Now we move them over to the new socket buffer */
4327 	control = TAILQ_FIRST(&tmp_queue);
4328 	SCTP_INP_READ_LOCK(new_inp);
4329 	while (control) {
4330 		nctl = TAILQ_NEXT(control, next);
4331 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4332 		m = control->data;
4333 		while (m) {
4334 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4335 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4336 			}
4337 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4338 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4339 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4340 			}
4341 			m = SCTP_BUF_NEXT(m);
4342 		}
4343 		control = nctl;
4344 	}
4345 	SCTP_INP_READ_UNLOCK(new_inp);
4346 }
4347 
4348 void
4349 sctp_add_to_readq(struct sctp_inpcb *inp,
4350     struct sctp_tcb *stcb,
4351     struct sctp_queued_to_read *control,
4352     struct sockbuf *sb,
4353     int end,
4354     int inp_read_lock_held,
4355     int so_locked
4356 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4357     SCTP_UNUSED
4358 #endif
4359 )
4360 {
4361 	/*
4362 	 * Here we must place the control on the end of the socket read
4363 	 * queue AND increment sb_cc so that select will work properly on
4364 	 * read.
4365 	 */
4366 	struct mbuf *m, *prev = NULL;
4367 
4368 	if (inp == NULL) {
4369 		/* Gak, TSNH!! */
4370 #ifdef INVARIANTS
4371 		panic("Gak, inp NULL on add_to_readq");
4372 #endif
4373 		return;
4374 	}
4375 	if (inp_read_lock_held == 0)
4376 		SCTP_INP_READ_LOCK(inp);
4377 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4378 		sctp_free_remote_addr(control->whoFrom);
4379 		if (control->data) {
4380 			sctp_m_freem(control->data);
4381 			control->data = NULL;
4382 		}
4383 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4384 		if (inp_read_lock_held == 0)
4385 			SCTP_INP_READ_UNLOCK(inp);
4386 		return;
4387 	}
4388 	if (!(control->spec_flags & M_NOTIFICATION)) {
4389 		atomic_add_int(&inp->total_recvs, 1);
4390 		if (!control->do_not_ref_stcb) {
4391 			atomic_add_int(&stcb->total_recvs, 1);
4392 		}
4393 	}
4394 	m = control->data;
4395 	control->held_length = 0;
4396 	control->length = 0;
4397 	while (m) {
4398 		if (SCTP_BUF_LEN(m) == 0) {
4399 			/* Skip mbufs with NO length */
4400 			if (prev == NULL) {
4401 				/* First one */
4402 				control->data = sctp_m_free(m);
4403 				m = control->data;
4404 			} else {
4405 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4406 				m = SCTP_BUF_NEXT(prev);
4407 			}
4408 			if (m == NULL) {
4409 				control->tail_mbuf = prev;
4410 			}
4411 			continue;
4412 		}
4413 		prev = m;
4414 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4415 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4416 		}
4417 		sctp_sballoc(stcb, sb, m);
4418 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4419 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4420 		}
4421 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4422 		m = SCTP_BUF_NEXT(m);
4423 	}
4424 	if (prev != NULL) {
4425 		control->tail_mbuf = prev;
4426 	} else {
4427 		/* Everything got collapsed out?? */
4428 		sctp_free_remote_addr(control->whoFrom);
4429 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4430 		if (inp_read_lock_held == 0)
4431 			SCTP_INP_READ_UNLOCK(inp);
4432 		return;
4433 	}
4434 	if (end) {
4435 		control->end_added = 1;
4436 	}
4437 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4438 	if (inp_read_lock_held == 0)
4439 		SCTP_INP_READ_UNLOCK(inp);
4440 	if (inp && inp->sctp_socket) {
4441 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4442 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4443 		} else {
4444 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4445 			struct socket *so;
4446 
4447 			so = SCTP_INP_SO(inp);
4448 			if (!so_locked) {
4449 				atomic_add_int(&stcb->asoc.refcnt, 1);
4450 				SCTP_TCB_UNLOCK(stcb);
4451 				SCTP_SOCKET_LOCK(so, 1);
4452 				SCTP_TCB_LOCK(stcb);
4453 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4454 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4455 					SCTP_SOCKET_UNLOCK(so, 1);
4456 					return;
4457 				}
4458 			}
4459 #endif
4460 			sctp_sorwakeup(inp, inp->sctp_socket);
4461 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4462 			if (!so_locked) {
4463 				SCTP_SOCKET_UNLOCK(so, 1);
4464 			}
4465 #endif
4466 		}
4467 	}
4468 }
4469 
4470 
4471 int
4472 sctp_append_to_readq(struct sctp_inpcb *inp,
4473     struct sctp_tcb *stcb,
4474     struct sctp_queued_to_read *control,
4475     struct mbuf *m,
4476     int end,
4477     int ctls_cumack,
4478     struct sockbuf *sb)
4479 {
4480 	/*
4481 	 * A partial delivery API event is underway. OR we are appending on
4482 	 * the reassembly queue.
4483 	 *
4484 	 * If PDAPI this means we need to add m to the end of the data.
4485 	 * Increase the length in the control AND increment the sb_cc.
4486 	 * Otherwise sb is NULL and all we need to do is put it at the end
4487 	 * of the mbuf chain.
4488 	 */
4489 	int len = 0;
4490 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4491 
4492 	if (inp) {
4493 		SCTP_INP_READ_LOCK(inp);
4494 	}
4495 	if (control == NULL) {
4496 get_out:
4497 		if (inp) {
4498 			SCTP_INP_READ_UNLOCK(inp);
4499 		}
4500 		return (-1);
4501 	}
4502 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4503 		SCTP_INP_READ_UNLOCK(inp);
4504 		return 0;
4505 	}
4506 	if (control->end_added) {
4507 		/* huh this one is complete? */
4508 		goto get_out;
4509 	}
4510 	mm = m;
4511 	if (mm == NULL) {
4512 		goto get_out;
4513 	}
4514 	while (mm) {
4515 		if (SCTP_BUF_LEN(mm) == 0) {
4516 			/* Skip mbufs with NO lenght */
4517 			if (prev == NULL) {
4518 				/* First one */
4519 				m = sctp_m_free(mm);
4520 				mm = m;
4521 			} else {
4522 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4523 				mm = SCTP_BUF_NEXT(prev);
4524 			}
4525 			continue;
4526 		}
4527 		prev = mm;
4528 		len += SCTP_BUF_LEN(mm);
4529 		if (sb) {
4530 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4531 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4532 			}
4533 			sctp_sballoc(stcb, sb, mm);
4534 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4535 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4536 			}
4537 		}
4538 		mm = SCTP_BUF_NEXT(mm);
4539 	}
4540 	if (prev) {
4541 		tail = prev;
4542 	} else {
4543 		/* Really there should always be a prev */
4544 		if (m == NULL) {
4545 			/* Huh nothing left? */
4546 #ifdef INVARIANTS
4547 			panic("Nothing left to add?");
4548 #else
4549 			goto get_out;
4550 #endif
4551 		}
4552 		tail = m;
4553 	}
4554 	if (control->tail_mbuf) {
4555 		/* append */
4556 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4557 		control->tail_mbuf = tail;
4558 	} else {
4559 		/* nothing there */
4560 #ifdef INVARIANTS
4561 		if (control->data != NULL) {
4562 			panic("This should NOT happen");
4563 		}
4564 #endif
4565 		control->data = m;
4566 		control->tail_mbuf = tail;
4567 	}
4568 	atomic_add_int(&control->length, len);
4569 	if (end) {
4570 		/* message is complete */
4571 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4572 			stcb->asoc.control_pdapi = NULL;
4573 		}
4574 		control->held_length = 0;
4575 		control->end_added = 1;
4576 	}
4577 	if (stcb == NULL) {
4578 		control->do_not_ref_stcb = 1;
4579 	}
4580 	/*
4581 	 * When we are appending in partial delivery, the cum-ack is used
4582 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4583 	 * is populated in the outbound sinfo structure from the true cumack
4584 	 * if the association exists...
4585 	 */
4586 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4587 	if (inp) {
4588 		SCTP_INP_READ_UNLOCK(inp);
4589 	}
4590 	if (inp && inp->sctp_socket) {
4591 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4592 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4593 		} else {
4594 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4595 			struct socket *so;
4596 
4597 			so = SCTP_INP_SO(inp);
4598 			atomic_add_int(&stcb->asoc.refcnt, 1);
4599 			SCTP_TCB_UNLOCK(stcb);
4600 			SCTP_SOCKET_LOCK(so, 1);
4601 			SCTP_TCB_LOCK(stcb);
4602 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4603 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4604 				SCTP_SOCKET_UNLOCK(so, 1);
4605 				return (0);
4606 			}
4607 #endif
4608 			sctp_sorwakeup(inp, inp->sctp_socket);
4609 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4610 			SCTP_SOCKET_UNLOCK(so, 1);
4611 #endif
4612 		}
4613 	}
4614 	return (0);
4615 }
4616 
4617 
4618 
4619 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4620  *************ALTERNATE ROUTING CODE
4621  */
4622 
4623 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4624  *************ALTERNATE ROUTING CODE
4625  */
4626 
4627 struct mbuf *
4628 sctp_generate_invmanparam(int err)
4629 {
4630 	/* Return a MBUF with a invalid mandatory parameter */
4631 	struct mbuf *m;
4632 
4633 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4634 	if (m) {
4635 		struct sctp_paramhdr *ph;
4636 
4637 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4638 		ph = mtod(m, struct sctp_paramhdr *);
4639 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4640 		ph->param_type = htons(err);
4641 	}
4642 	return (m);
4643 }
4644 
4645 #ifdef SCTP_MBCNT_LOGGING
4646 void
4647 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4648     struct sctp_tmit_chunk *tp1, int chk_cnt)
4649 {
4650 	if (tp1->data == NULL) {
4651 		return;
4652 	}
4653 	asoc->chunks_on_out_queue -= chk_cnt;
4654 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4655 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4656 		    asoc->total_output_queue_size,
4657 		    tp1->book_size,
4658 		    0,
4659 		    tp1->mbcnt);
4660 	}
4661 	if (asoc->total_output_queue_size >= tp1->book_size) {
4662 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4663 	} else {
4664 		asoc->total_output_queue_size = 0;
4665 	}
4666 
4667 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4668 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4669 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4670 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4671 		} else {
4672 			stcb->sctp_socket->so_snd.sb_cc = 0;
4673 
4674 		}
4675 	}
4676 }
4677 
4678 #endif
4679 
4680 int
4681 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4682     int reason, int so_locked
4683 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4684     SCTP_UNUSED
4685 #endif
4686 )
4687 {
4688 	struct sctp_stream_out *strq;
4689 	struct sctp_tmit_chunk *chk = NULL;
4690 	struct sctp_stream_queue_pending *sp;
4691 	uint16_t stream = 0, seq = 0;
4692 	uint8_t foundeom = 0;
4693 	int ret_sz = 0;
4694 	int notdone;
4695 	int do_wakeup_routine = 0;
4696 
4697 	stream = tp1->rec.data.stream_number;
4698 	seq = tp1->rec.data.stream_seq;
4699 	do {
4700 		ret_sz += tp1->book_size;
4701 		if (tp1->data != NULL) {
4702 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4703 				sctp_flight_size_decrease(tp1);
4704 				sctp_total_flight_decrease(stcb, tp1);
4705 			}
4706 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4707 			stcb->asoc.peers_rwnd += tp1->send_size;
4708 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4709 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4710 			if (tp1->data) {
4711 				sctp_m_freem(tp1->data);
4712 				tp1->data = NULL;
4713 			}
4714 			do_wakeup_routine = 1;
4715 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4716 				stcb->asoc.sent_queue_cnt_removeable--;
4717 			}
4718 		}
4719 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4720 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4721 		    SCTP_DATA_NOT_FRAG) {
4722 			/* not frag'ed we ae done   */
4723 			notdone = 0;
4724 			foundeom = 1;
4725 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4726 			/* end of frag, we are done */
4727 			notdone = 0;
4728 			foundeom = 1;
4729 		} else {
4730 			/*
4731 			 * Its a begin or middle piece, we must mark all of
4732 			 * it
4733 			 */
4734 			notdone = 1;
4735 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4736 		}
4737 	} while (tp1 && notdone);
4738 	if (foundeom == 0) {
4739 		/*
4740 		 * The multi-part message was scattered across the send and
4741 		 * sent queue.
4742 		 */
4743 next_on_sent:
4744 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4745 		/*
4746 		 * recurse throught the send_queue too, starting at the
4747 		 * beginning.
4748 		 */
4749 		if ((tp1) &&
4750 		    (tp1->rec.data.stream_number == stream) &&
4751 		    (tp1->rec.data.stream_seq == seq)) {
4752 			/*
4753 			 * save to chk in case we have some on stream out
4754 			 * queue. If so and we have an un-transmitted one we
4755 			 * don't have to fudge the TSN.
4756 			 */
4757 			chk = tp1;
4758 			ret_sz += tp1->book_size;
4759 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4760 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4761 			if (tp1->data) {
4762 				sctp_m_freem(tp1->data);
4763 				tp1->data = NULL;
4764 			}
4765 			/* No flight involved here book the size to 0 */
4766 			tp1->book_size = 0;
4767 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4768 				foundeom = 1;
4769 			}
4770 			do_wakeup_routine = 1;
4771 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4772 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4773 			/*
4774 			 * on to the sent queue so we can wait for it to be
4775 			 * passed by.
4776 			 */
4777 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4778 			    sctp_next);
4779 			stcb->asoc.send_queue_cnt--;
4780 			stcb->asoc.sent_queue_cnt++;
4781 			goto next_on_sent;
4782 		}
4783 	}
4784 	if (foundeom == 0) {
4785 		/*
4786 		 * Still no eom found. That means there is stuff left on the
4787 		 * stream out queue.. yuck.
4788 		 */
4789 		strq = &stcb->asoc.strmout[stream];
4790 		SCTP_TCB_SEND_LOCK(stcb);
4791 		sp = TAILQ_FIRST(&strq->outqueue);
4792 		while (sp->strseq <= seq) {
4793 			/* Check if its our SEQ */
4794 			if (sp->strseq == seq) {
4795 				sp->discard_rest = 1;
4796 				/*
4797 				 * We may need to put a chunk on the queue
4798 				 * that holds the TSN that would have been
4799 				 * sent with the LAST bit.
4800 				 */
4801 				if (chk == NULL) {
4802 					/* Yep, we have to */
4803 					sctp_alloc_a_chunk(stcb, chk);
4804 					if (chk == NULL) {
4805 						/*
4806 						 * we are hosed. All we can
4807 						 * do is nothing.. which
4808 						 * will cause an abort if
4809 						 * the peer is paying
4810 						 * attention.
4811 						 */
4812 						goto oh_well;
4813 					}
4814 					memset(chk, 0, sizeof(*chk));
4815 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4816 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4817 					chk->asoc = &stcb->asoc;
4818 					chk->rec.data.stream_seq = sp->strseq;
4819 					chk->rec.data.stream_number = sp->stream;
4820 					chk->rec.data.payloadtype = sp->ppid;
4821 					chk->rec.data.context = sp->context;
4822 					chk->flags = sp->act_flags;
4823 					chk->whoTo = sp->net;
4824 					atomic_add_int(&chk->whoTo->ref_count, 1);
4825 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4826 					stcb->asoc.pr_sctp_cnt++;
4827 					chk->pr_sctp_on = 1;
4828 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4829 					stcb->asoc.sent_queue_cnt++;
4830 					stcb->asoc.pr_sctp_cnt++;
4831 				} else {
4832 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4833 				}
4834 		oh_well:
4835 				if (sp->data) {
4836 					/*
4837 					 * Pull any data to free up the SB
4838 					 * and allow sender to "add more"
4839 					 * whilc we will throw away :-)
4840 					 */
4841 					sctp_free_spbufspace(stcb, &stcb->asoc,
4842 					    sp);
4843 					ret_sz += sp->length;
4844 					do_wakeup_routine = 1;
4845 					sp->some_taken = 1;
4846 					sctp_m_freem(sp->data);
4847 					sp->length = 0;
4848 					sp->data = NULL;
4849 					sp->tail_mbuf = NULL;
4850 				}
4851 				break;
4852 			} else {
4853 				/* Next one please */
4854 				sp = TAILQ_NEXT(sp, next);
4855 			}
4856 		}		/* End while */
4857 		SCTP_TCB_SEND_UNLOCK(stcb);
4858 	}
4859 	if (do_wakeup_routine) {
4860 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4861 		struct socket *so;
4862 
4863 		so = SCTP_INP_SO(stcb->sctp_ep);
4864 		if (!so_locked) {
4865 			atomic_add_int(&stcb->asoc.refcnt, 1);
4866 			SCTP_TCB_UNLOCK(stcb);
4867 			SCTP_SOCKET_LOCK(so, 1);
4868 			SCTP_TCB_LOCK(stcb);
4869 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4870 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4871 				/* assoc was freed while we were unlocked */
4872 				SCTP_SOCKET_UNLOCK(so, 1);
4873 				return (ret_sz);
4874 			}
4875 		}
4876 #endif
4877 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4878 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4879 		if (!so_locked) {
4880 			SCTP_SOCKET_UNLOCK(so, 1);
4881 		}
4882 #endif
4883 	}
4884 	return (ret_sz);
4885 }
4886 
4887 /*
4888  * checks to see if the given address, sa, is one that is currently known by
4889  * the kernel note: can't distinguish the same address on multiple interfaces
4890  * and doesn't handle multiple addresses with different zone/scope id's note:
4891  * ifa_ifwithaddr() compares the entire sockaddr struct
4892  */
4893 struct sctp_ifa *
4894 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4895     int holds_lock)
4896 {
4897 	struct sctp_laddr *laddr;
4898 
4899 	if (holds_lock == 0) {
4900 		SCTP_INP_RLOCK(inp);
4901 	}
4902 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4903 		if (laddr->ifa == NULL)
4904 			continue;
4905 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4906 			continue;
4907 		if (addr->sa_family == AF_INET) {
4908 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4909 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4910 				/* found him. */
4911 				if (holds_lock == 0) {
4912 					SCTP_INP_RUNLOCK(inp);
4913 				}
4914 				return (laddr->ifa);
4915 				break;
4916 			}
4917 		}
4918 #ifdef INET6
4919 		if (addr->sa_family == AF_INET6) {
4920 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4921 			    &laddr->ifa->address.sin6)) {
4922 				/* found him. */
4923 				if (holds_lock == 0) {
4924 					SCTP_INP_RUNLOCK(inp);
4925 				}
4926 				return (laddr->ifa);
4927 				break;
4928 			}
4929 		}
4930 #endif
4931 	}
4932 	if (holds_lock == 0) {
4933 		SCTP_INP_RUNLOCK(inp);
4934 	}
4935 	return (NULL);
4936 }
4937 
4938 uint32_t
4939 sctp_get_ifa_hash_val(struct sockaddr *addr)
4940 {
4941 	if (addr->sa_family == AF_INET) {
4942 		struct sockaddr_in *sin;
4943 
4944 		sin = (struct sockaddr_in *)addr;
4945 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4946 	} else if (addr->sa_family == AF_INET6) {
4947 		struct sockaddr_in6 *sin6;
4948 		uint32_t hash_of_addr;
4949 
4950 		sin6 = (struct sockaddr_in6 *)addr;
4951 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4952 		    sin6->sin6_addr.s6_addr32[1] +
4953 		    sin6->sin6_addr.s6_addr32[2] +
4954 		    sin6->sin6_addr.s6_addr32[3]);
4955 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4956 		return (hash_of_addr);
4957 	}
4958 	return (0);
4959 }
4960 
4961 struct sctp_ifa *
4962 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4963 {
4964 	struct sctp_ifa *sctp_ifap;
4965 	struct sctp_vrf *vrf;
4966 	struct sctp_ifalist *hash_head;
4967 	uint32_t hash_of_addr;
4968 
4969 	if (holds_lock == 0)
4970 		SCTP_IPI_ADDR_RLOCK();
4971 
4972 	vrf = sctp_find_vrf(vrf_id);
4973 	if (vrf == NULL) {
4974 stage_right:
4975 		if (holds_lock == 0)
4976 			SCTP_IPI_ADDR_RUNLOCK();
4977 		return (NULL);
4978 	}
4979 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4980 
4981 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4982 	if (hash_head == NULL) {
4983 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4984 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4985 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4986 		sctp_print_address(addr);
4987 		SCTP_PRINTF("No such bucket for address\n");
4988 		if (holds_lock == 0)
4989 			SCTP_IPI_ADDR_RUNLOCK();
4990 
4991 		return (NULL);
4992 	}
4993 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4994 		if (sctp_ifap == NULL) {
4995 #ifdef INVARIANTS
4996 			panic("Huh LIST_FOREACH corrupt");
4997 			goto stage_right;
4998 #else
4999 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5000 			goto stage_right;
5001 #endif
5002 		}
5003 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5004 			continue;
5005 		if (addr->sa_family == AF_INET) {
5006 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5007 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5008 				/* found him. */
5009 				if (holds_lock == 0)
5010 					SCTP_IPI_ADDR_RUNLOCK();
5011 				return (sctp_ifap);
5012 				break;
5013 			}
5014 		}
5015 #ifdef INET6
5016 		if (addr->sa_family == AF_INET6) {
5017 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5018 			    &sctp_ifap->address.sin6)) {
5019 				/* found him. */
5020 				if (holds_lock == 0)
5021 					SCTP_IPI_ADDR_RUNLOCK();
5022 				return (sctp_ifap);
5023 				break;
5024 			}
5025 		}
5026 #endif
5027 	}
5028 	if (holds_lock == 0)
5029 		SCTP_IPI_ADDR_RUNLOCK();
5030 	return (NULL);
5031 }
5032 
5033 static void
5034 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5035     uint32_t rwnd_req)
5036 {
5037 	/* User pulled some data, do we need a rwnd update? */
5038 	int r_unlocked = 0;
5039 	uint32_t dif, rwnd;
5040 	struct socket *so = NULL;
5041 
5042 	if (stcb == NULL)
5043 		return;
5044 
5045 	atomic_add_int(&stcb->asoc.refcnt, 1);
5046 
5047 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5048 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5049 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5050 		/* Pre-check If we are freeing no update */
5051 		goto no_lock;
5052 	}
5053 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5054 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5055 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5056 		goto out;
5057 	}
5058 	so = stcb->sctp_socket;
5059 	if (so == NULL) {
5060 		goto out;
5061 	}
5062 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5063 	/* Have you have freed enough to look */
5064 	*freed_so_far = 0;
5065 	/* Yep, its worth a look and the lock overhead */
5066 
5067 	/* Figure out what the rwnd would be */
5068 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5069 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5070 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5071 	} else {
5072 		dif = 0;
5073 	}
5074 	if (dif >= rwnd_req) {
5075 		if (hold_rlock) {
5076 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5077 			r_unlocked = 1;
5078 		}
5079 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5080 			/*
5081 			 * One last check before we allow the guy possibly
5082 			 * to get in. There is a race, where the guy has not
5083 			 * reached the gate. In that case
5084 			 */
5085 			goto out;
5086 		}
5087 		SCTP_TCB_LOCK(stcb);
5088 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5089 			/* No reports here */
5090 			SCTP_TCB_UNLOCK(stcb);
5091 			goto out;
5092 		}
5093 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5094 		sctp_send_sack(stcb);
5095 
5096 		sctp_chunk_output(stcb->sctp_ep, stcb,
5097 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5098 		/* make sure no timer is running */
5099 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5100 		SCTP_TCB_UNLOCK(stcb);
5101 	} else {
5102 		/* Update how much we have pending */
5103 		stcb->freed_by_sorcv_sincelast = dif;
5104 	}
5105 out:
5106 	if (so && r_unlocked && hold_rlock) {
5107 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5108 	}
5109 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5110 no_lock:
5111 	atomic_add_int(&stcb->asoc.refcnt, -1);
5112 	return;
5113 }
5114 
5115 int
5116 sctp_sorecvmsg(struct socket *so,
5117     struct uio *uio,
5118     struct mbuf **mp,
5119     struct sockaddr *from,
5120     int fromlen,
5121     int *msg_flags,
5122     struct sctp_sndrcvinfo *sinfo,
5123     int filling_sinfo)
5124 {
5125 	/*
5126 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5127 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5128 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5129 	 * On the way out we may send out any combination of:
5130 	 * MSG_NOTIFICATION MSG_EOR
5131 	 *
5132 	 */
5133 	struct sctp_inpcb *inp = NULL;
5134 	int my_len = 0;
5135 	int cp_len = 0, error = 0;
5136 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5137 	struct mbuf *m = NULL, *embuf = NULL;
5138 	struct sctp_tcb *stcb = NULL;
5139 	int wakeup_read_socket = 0;
5140 	int freecnt_applied = 0;
5141 	int out_flags = 0, in_flags = 0;
5142 	int block_allowed = 1;
5143 	uint32_t freed_so_far = 0;
5144 	uint32_t copied_so_far = 0;
5145 	int in_eeor_mode = 0;
5146 	int no_rcv_needed = 0;
5147 	uint32_t rwnd_req = 0;
5148 	int hold_sblock = 0;
5149 	int hold_rlock = 0;
5150 	int slen = 0;
5151 	uint32_t held_length = 0;
5152 	int sockbuf_lock = 0;
5153 
5154 	if (uio == NULL) {
5155 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5156 		return (EINVAL);
5157 	}
5158 	if (msg_flags) {
5159 		in_flags = *msg_flags;
5160 		if (in_flags & MSG_PEEK)
5161 			SCTP_STAT_INCR(sctps_read_peeks);
5162 	} else {
5163 		in_flags = 0;
5164 	}
5165 	slen = uio->uio_resid;
5166 
5167 	/* Pull in and set up our int flags */
5168 	if (in_flags & MSG_OOB) {
5169 		/* Out of band's NOT supported */
5170 		return (EOPNOTSUPP);
5171 	}
5172 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5173 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5174 		return (EINVAL);
5175 	}
5176 	if ((in_flags & (MSG_DONTWAIT
5177 	    | MSG_NBIO
5178 	    )) ||
5179 	    SCTP_SO_IS_NBIO(so)) {
5180 		block_allowed = 0;
5181 	}
5182 	/* setup the endpoint */
5183 	inp = (struct sctp_inpcb *)so->so_pcb;
5184 	if (inp == NULL) {
5185 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5186 		return (EFAULT);
5187 	}
5188 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5189 	/* Must be at least a MTU's worth */
5190 	if (rwnd_req < SCTP_MIN_RWND)
5191 		rwnd_req = SCTP_MIN_RWND;
5192 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5193 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5194 		sctp_misc_ints(SCTP_SORECV_ENTER,
5195 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5196 	}
5197 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5198 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5199 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5200 	}
5201 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5202 	sockbuf_lock = 1;
5203 	if (error) {
5204 		goto release_unlocked;
5205 	}
5206 restart:
5207 
5208 
5209 restart_nosblocks:
5210 	if (hold_sblock == 0) {
5211 		SOCKBUF_LOCK(&so->so_rcv);
5212 		hold_sblock = 1;
5213 	}
5214 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5215 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5216 		goto out;
5217 	}
5218 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5219 		if (so->so_error) {
5220 			error = so->so_error;
5221 			if ((in_flags & MSG_PEEK) == 0)
5222 				so->so_error = 0;
5223 			goto out;
5224 		} else {
5225 			if (so->so_rcv.sb_cc == 0) {
5226 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5227 				/* indicate EOF */
5228 				error = 0;
5229 				goto out;
5230 			}
5231 		}
5232 	}
5233 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5234 		/* we need to wait for data */
5235 		if ((so->so_rcv.sb_cc == 0) &&
5236 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5237 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5238 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5239 				/*
5240 				 * For active open side clear flags for
5241 				 * re-use passive open is blocked by
5242 				 * connect.
5243 				 */
5244 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5245 					/*
5246 					 * You were aborted, passive side
5247 					 * always hits here
5248 					 */
5249 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5250 					error = ECONNRESET;
5251 					/*
5252 					 * You get this once if you are
5253 					 * active open side
5254 					 */
5255 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5256 						/*
5257 						 * Remove flag if on the
5258 						 * active open side
5259 						 */
5260 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5261 					}
5262 				}
5263 				so->so_state &= ~(SS_ISCONNECTING |
5264 				    SS_ISDISCONNECTING |
5265 				    SS_ISCONFIRMING |
5266 				    SS_ISCONNECTED);
5267 				if (error == 0) {
5268 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5269 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5270 						error = ENOTCONN;
5271 					} else {
5272 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5273 					}
5274 				}
5275 				goto out;
5276 			}
5277 		}
5278 		error = sbwait(&so->so_rcv);
5279 		if (error) {
5280 			goto out;
5281 		}
5282 		held_length = 0;
5283 		goto restart_nosblocks;
5284 	} else if (so->so_rcv.sb_cc == 0) {
5285 		if (so->so_error) {
5286 			error = so->so_error;
5287 			if ((in_flags & MSG_PEEK) == 0)
5288 				so->so_error = 0;
5289 		} else {
5290 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5291 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5292 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5293 					/*
5294 					 * For active open side clear flags
5295 					 * for re-use passive open is
5296 					 * blocked by connect.
5297 					 */
5298 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5299 						/*
5300 						 * You were aborted, passive
5301 						 * side always hits here
5302 						 */
5303 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5304 						error = ECONNRESET;
5305 						/*
5306 						 * You get this once if you
5307 						 * are active open side
5308 						 */
5309 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5310 							/*
5311 							 * Remove flag if on
5312 							 * the active open
5313 							 * side
5314 							 */
5315 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5316 						}
5317 					}
5318 					so->so_state &= ~(SS_ISCONNECTING |
5319 					    SS_ISDISCONNECTING |
5320 					    SS_ISCONFIRMING |
5321 					    SS_ISCONNECTED);
5322 					if (error == 0) {
5323 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5324 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5325 							error = ENOTCONN;
5326 						} else {
5327 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5328 						}
5329 					}
5330 					goto out;
5331 				}
5332 			}
5333 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5334 			error = EWOULDBLOCK;
5335 		}
5336 		goto out;
5337 	}
5338 	if (hold_sblock == 1) {
5339 		SOCKBUF_UNLOCK(&so->so_rcv);
5340 		hold_sblock = 0;
5341 	}
5342 	/* we possibly have data we can read */
5343 	/* sa_ignore FREED_MEMORY */
5344 	control = TAILQ_FIRST(&inp->read_queue);
5345 	if (control == NULL) {
5346 		/*
5347 		 * This could be happening since the appender did the
5348 		 * increment but as not yet did the tailq insert onto the
5349 		 * read_queue
5350 		 */
5351 		if (hold_rlock == 0) {
5352 			SCTP_INP_READ_LOCK(inp);
5353 			hold_rlock = 1;
5354 		}
5355 		control = TAILQ_FIRST(&inp->read_queue);
5356 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5357 #ifdef INVARIANTS
5358 			panic("Huh, its non zero and nothing on control?");
5359 #endif
5360 			so->so_rcv.sb_cc = 0;
5361 		}
5362 		SCTP_INP_READ_UNLOCK(inp);
5363 		hold_rlock = 0;
5364 		goto restart;
5365 	}
5366 	if ((control->length == 0) &&
5367 	    (control->do_not_ref_stcb)) {
5368 		/*
5369 		 * Clean up code for freeing assoc that left behind a
5370 		 * pdapi.. maybe a peer in EEOR that just closed after
5371 		 * sending and never indicated a EOR.
5372 		 */
5373 		if (hold_rlock == 0) {
5374 			hold_rlock = 1;
5375 			SCTP_INP_READ_LOCK(inp);
5376 		}
5377 		control->held_length = 0;
5378 		if (control->data) {
5379 			/* Hmm there is data here .. fix */
5380 			struct mbuf *m_tmp;
5381 			int cnt = 0;
5382 
5383 			m_tmp = control->data;
5384 			while (m_tmp) {
5385 				cnt += SCTP_BUF_LEN(m_tmp);
5386 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5387 					control->tail_mbuf = m_tmp;
5388 					control->end_added = 1;
5389 				}
5390 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5391 			}
5392 			control->length = cnt;
5393 		} else {
5394 			/* remove it */
5395 			TAILQ_REMOVE(&inp->read_queue, control, next);
5396 			/* Add back any hiddend data */
5397 			sctp_free_remote_addr(control->whoFrom);
5398 			sctp_free_a_readq(stcb, control);
5399 		}
5400 		if (hold_rlock) {
5401 			hold_rlock = 0;
5402 			SCTP_INP_READ_UNLOCK(inp);
5403 		}
5404 		goto restart;
5405 	}
5406 	if ((control->length == 0) &&
5407 	    (control->end_added == 1)) {
5408 		/*
5409 		 * Do we also need to check for (control->pdapi_aborted ==
5410 		 * 1)?
5411 		 */
5412 		if (hold_rlock == 0) {
5413 			hold_rlock = 1;
5414 			SCTP_INP_READ_LOCK(inp);
5415 		}
5416 		TAILQ_REMOVE(&inp->read_queue, control, next);
5417 		if (control->data) {
5418 #ifdef INVARIANTS
5419 			panic("control->data not null but control->length == 0");
5420 #else
5421 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5422 			sctp_m_freem(control->data);
5423 			control->data = NULL;
5424 #endif
5425 		}
5426 		if (control->aux_data) {
5427 			sctp_m_free(control->aux_data);
5428 			control->aux_data = NULL;
5429 		}
5430 		sctp_free_remote_addr(control->whoFrom);
5431 		sctp_free_a_readq(stcb, control);
5432 		if (hold_rlock) {
5433 			hold_rlock = 0;
5434 			SCTP_INP_READ_UNLOCK(inp);
5435 		}
5436 		goto restart;
5437 	}
5438 	if (control->length == 0) {
5439 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5440 		    (filling_sinfo)) {
5441 			/* find a more suitable one then this */
5442 			ctl = TAILQ_NEXT(control, next);
5443 			while (ctl) {
5444 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5445 				    (ctl->some_taken ||
5446 				    (ctl->spec_flags & M_NOTIFICATION) ||
5447 				    ((ctl->do_not_ref_stcb == 0) &&
5448 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5449 				    ) {
5450 					/*-
5451 					 * If we have a different TCB next, and there is data
5452 					 * present. If we have already taken some (pdapi), OR we can
5453 					 * ref the tcb and no delivery as started on this stream, we
5454 					 * take it. Note we allow a notification on a different
5455 					 * assoc to be delivered..
5456 					 */
5457 					control = ctl;
5458 					goto found_one;
5459 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5460 					    (ctl->length) &&
5461 					    ((ctl->some_taken) ||
5462 					    ((ctl->do_not_ref_stcb == 0) &&
5463 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5464 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5465 					/*-
5466 					 * If we have the same tcb, and there is data present, and we
5467 					 * have the strm interleave feature present. Then if we have
5468 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5469 					 * not started a delivery for this stream, we can take it.
5470 					 * Note we do NOT allow a notificaiton on the same assoc to
5471 					 * be delivered.
5472 					 */
5473 					control = ctl;
5474 					goto found_one;
5475 				}
5476 				ctl = TAILQ_NEXT(ctl, next);
5477 			}
5478 		}
5479 		/*
5480 		 * if we reach here, not suitable replacement is available
5481 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5482 		 * into the our held count, and its time to sleep again.
5483 		 */
5484 		held_length = so->so_rcv.sb_cc;
5485 		control->held_length = so->so_rcv.sb_cc;
5486 		goto restart;
5487 	}
5488 	/* Clear the held length since there is something to read */
5489 	control->held_length = 0;
5490 	if (hold_rlock) {
5491 		SCTP_INP_READ_UNLOCK(inp);
5492 		hold_rlock = 0;
5493 	}
5494 found_one:
5495 	/*
5496 	 * If we reach here, control has a some data for us to read off.
5497 	 * Note that stcb COULD be NULL.
5498 	 */
5499 	control->some_taken++;
5500 	if (hold_sblock) {
5501 		SOCKBUF_UNLOCK(&so->so_rcv);
5502 		hold_sblock = 0;
5503 	}
5504 	stcb = control->stcb;
5505 	if (stcb) {
5506 		if ((control->do_not_ref_stcb == 0) &&
5507 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5508 			if (freecnt_applied == 0)
5509 				stcb = NULL;
5510 		} else if (control->do_not_ref_stcb == 0) {
5511 			/* you can't free it on me please */
5512 			/*
5513 			 * The lock on the socket buffer protects us so the
5514 			 * free code will stop. But since we used the
5515 			 * socketbuf lock and the sender uses the tcb_lock
5516 			 * to increment, we need to use the atomic add to
5517 			 * the refcnt
5518 			 */
5519 			if (freecnt_applied) {
5520 #ifdef INVARIANTS
5521 				panic("refcnt already incremented");
5522 #else
5523 				printf("refcnt already incremented?\n");
5524 #endif
5525 			} else {
5526 				atomic_add_int(&stcb->asoc.refcnt, 1);
5527 				freecnt_applied = 1;
5528 			}
5529 			/*
5530 			 * Setup to remember how much we have not yet told
5531 			 * the peer our rwnd has opened up. Note we grab the
5532 			 * value from the tcb from last time. Note too that
5533 			 * sack sending clears this when a sack is sent,
5534 			 * which is fine. Once we hit the rwnd_req, we then
5535 			 * will go to the sctp_user_rcvd() that will not
5536 			 * lock until it KNOWs it MUST send a WUP-SACK.
5537 			 */
5538 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5539 			stcb->freed_by_sorcv_sincelast = 0;
5540 		}
5541 	}
5542 	if (stcb &&
5543 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5544 	    control->do_not_ref_stcb == 0) {
5545 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5546 	}
5547 	/* First lets get off the sinfo and sockaddr info */
5548 	if ((sinfo) && filling_sinfo) {
5549 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5550 		nxt = TAILQ_NEXT(control, next);
5551 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5552 			struct sctp_extrcvinfo *s_extra;
5553 
5554 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5555 			if ((nxt) &&
5556 			    (nxt->length)) {
5557 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5558 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5559 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5560 				}
5561 				if (nxt->spec_flags & M_NOTIFICATION) {
5562 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5563 				}
5564 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5565 				s_extra->sreinfo_next_length = nxt->length;
5566 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5567 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5568 				if (nxt->tail_mbuf != NULL) {
5569 					if (nxt->end_added) {
5570 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5571 					}
5572 				}
5573 			} else {
5574 				/*
5575 				 * we explicitly 0 this, since the memcpy
5576 				 * got some other things beyond the older
5577 				 * sinfo_ that is on the control's structure
5578 				 * :-D
5579 				 */
5580 				nxt = NULL;
5581 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5582 				s_extra->sreinfo_next_aid = 0;
5583 				s_extra->sreinfo_next_length = 0;
5584 				s_extra->sreinfo_next_ppid = 0;
5585 				s_extra->sreinfo_next_stream = 0;
5586 			}
5587 		}
5588 		/*
5589 		 * update off the real current cum-ack, if we have an stcb.
5590 		 */
5591 		if ((control->do_not_ref_stcb == 0) && stcb)
5592 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5593 		/*
5594 		 * mask off the high bits, we keep the actual chunk bits in
5595 		 * there.
5596 		 */
5597 		sinfo->sinfo_flags &= 0x00ff;
5598 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5599 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5600 		}
5601 	}
5602 #ifdef SCTP_ASOCLOG_OF_TSNS
5603 	{
5604 		int index, newindex;
5605 		struct sctp_pcbtsn_rlog *entry;
5606 
5607 		do {
5608 			index = inp->readlog_index;
5609 			newindex = index + 1;
5610 			if (newindex >= SCTP_READ_LOG_SIZE) {
5611 				newindex = 0;
5612 			}
5613 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5614 		entry = &inp->readlog[index];
5615 		entry->vtag = control->sinfo_assoc_id;
5616 		entry->strm = control->sinfo_stream;
5617 		entry->seq = control->sinfo_ssn;
5618 		entry->sz = control->length;
5619 		entry->flgs = control->sinfo_flags;
5620 	}
5621 #endif
5622 	if (fromlen && from) {
5623 		struct sockaddr *to;
5624 
5625 #ifdef INET
5626 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5627 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5628 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5629 #else
5630 		/* No AF_INET use AF_INET6 */
5631 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5632 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5633 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5634 #endif
5635 
5636 		to = from;
5637 #if defined(INET) && defined(INET6)
5638 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5639 		    (to->sa_family == AF_INET) &&
5640 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5641 			struct sockaddr_in *sin;
5642 			struct sockaddr_in6 sin6;
5643 
5644 			sin = (struct sockaddr_in *)to;
5645 			bzero(&sin6, sizeof(sin6));
5646 			sin6.sin6_family = AF_INET6;
5647 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5648 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5649 			bcopy(&sin->sin_addr,
5650 			    &sin6.sin6_addr.s6_addr32[3],
5651 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5652 			sin6.sin6_port = sin->sin_port;
5653 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5654 		}
5655 #endif
5656 #if defined(INET6)
5657 		{
5658 			struct sockaddr_in6 lsa6, *to6;
5659 
5660 			to6 = (struct sockaddr_in6 *)to;
5661 			sctp_recover_scope_mac(to6, (&lsa6));
5662 		}
5663 #endif
5664 	}
5665 	/* now copy out what data we can */
5666 	if (mp == NULL) {
5667 		/* copy out each mbuf in the chain up to length */
5668 get_more_data:
5669 		m = control->data;
5670 		while (m) {
5671 			/* Move out all we can */
5672 			cp_len = (int)uio->uio_resid;
5673 			my_len = (int)SCTP_BUF_LEN(m);
5674 			if (cp_len > my_len) {
5675 				/* not enough in this buf */
5676 				cp_len = my_len;
5677 			}
5678 			if (hold_rlock) {
5679 				SCTP_INP_READ_UNLOCK(inp);
5680 				hold_rlock = 0;
5681 			}
5682 			if (cp_len > 0)
5683 				error = uiomove(mtod(m, char *), cp_len, uio);
5684 			/* re-read */
5685 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5686 				goto release;
5687 			}
5688 			if ((control->do_not_ref_stcb == 0) && stcb &&
5689 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5690 				no_rcv_needed = 1;
5691 			}
5692 			if (error) {
5693 				/* error we are out of here */
5694 				goto release;
5695 			}
5696 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5697 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5698 			    ((control->end_added == 0) ||
5699 			    (control->end_added &&
5700 			    (TAILQ_NEXT(control, next) == NULL)))
5701 			    ) {
5702 				SCTP_INP_READ_LOCK(inp);
5703 				hold_rlock = 1;
5704 			}
5705 			if (cp_len == SCTP_BUF_LEN(m)) {
5706 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5707 				    (control->end_added)) {
5708 					out_flags |= MSG_EOR;
5709 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5710 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5711 				}
5712 				if (control->spec_flags & M_NOTIFICATION) {
5713 					out_flags |= MSG_NOTIFICATION;
5714 				}
5715 				/* we ate up the mbuf */
5716 				if (in_flags & MSG_PEEK) {
5717 					/* just looking */
5718 					m = SCTP_BUF_NEXT(m);
5719 					copied_so_far += cp_len;
5720 				} else {
5721 					/* dispose of the mbuf */
5722 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5723 						sctp_sblog(&so->so_rcv,
5724 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5725 					}
5726 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5727 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5728 						sctp_sblog(&so->so_rcv,
5729 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5730 					}
5731 					embuf = m;
5732 					copied_so_far += cp_len;
5733 					freed_so_far += cp_len;
5734 					freed_so_far += MSIZE;
5735 					atomic_subtract_int(&control->length, cp_len);
5736 					control->data = sctp_m_free(m);
5737 					m = control->data;
5738 					/*
5739 					 * been through it all, must hold sb
5740 					 * lock ok to null tail
5741 					 */
5742 					if (control->data == NULL) {
5743 #ifdef INVARIANTS
5744 						if ((control->end_added == 0) ||
5745 						    (TAILQ_NEXT(control, next) == NULL)) {
5746 							/*
5747 							 * If the end is not
5748 							 * added, OR the
5749 							 * next is NOT null
5750 							 * we MUST have the
5751 							 * lock.
5752 							 */
5753 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5754 								panic("Hmm we don't own the lock?");
5755 							}
5756 						}
5757 #endif
5758 						control->tail_mbuf = NULL;
5759 #ifdef INVARIANTS
5760 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5761 							panic("end_added, nothing left and no MSG_EOR");
5762 						}
5763 #endif
5764 					}
5765 				}
5766 			} else {
5767 				/* Do we need to trim the mbuf? */
5768 				if (control->spec_flags & M_NOTIFICATION) {
5769 					out_flags |= MSG_NOTIFICATION;
5770 				}
5771 				if ((in_flags & MSG_PEEK) == 0) {
5772 					SCTP_BUF_RESV_UF(m, cp_len);
5773 					SCTP_BUF_LEN(m) -= cp_len;
5774 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5775 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5776 					}
5777 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5778 					if ((control->do_not_ref_stcb == 0) &&
5779 					    stcb) {
5780 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5781 					}
5782 					copied_so_far += cp_len;
5783 					embuf = m;
5784 					freed_so_far += cp_len;
5785 					freed_so_far += MSIZE;
5786 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5787 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5788 						    SCTP_LOG_SBRESULT, 0);
5789 					}
5790 					atomic_subtract_int(&control->length, cp_len);
5791 				} else {
5792 					copied_so_far += cp_len;
5793 				}
5794 			}
5795 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5796 				break;
5797 			}
5798 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5799 			    (control->do_not_ref_stcb == 0) &&
5800 			    (freed_so_far >= rwnd_req)) {
5801 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5802 			}
5803 		}		/* end while(m) */
5804 		/*
5805 		 * At this point we have looked at it all and we either have
5806 		 * a MSG_EOR/or read all the user wants... <OR>
5807 		 * control->length == 0.
5808 		 */
5809 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5810 			/* we are done with this control */
5811 			if (control->length == 0) {
5812 				if (control->data) {
5813 #ifdef INVARIANTS
5814 					panic("control->data not null at read eor?");
5815 #else
5816 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5817 					sctp_m_freem(control->data);
5818 					control->data = NULL;
5819 #endif
5820 				}
5821 		done_with_control:
5822 				if (TAILQ_NEXT(control, next) == NULL) {
5823 					/*
5824 					 * If we don't have a next we need a
5825 					 * lock, if there is a next
5826 					 * interrupt is filling ahead of us
5827 					 * and we don't need a lock to
5828 					 * remove this guy (which is the
5829 					 * head of the queue).
5830 					 */
5831 					if (hold_rlock == 0) {
5832 						SCTP_INP_READ_LOCK(inp);
5833 						hold_rlock = 1;
5834 					}
5835 				}
5836 				TAILQ_REMOVE(&inp->read_queue, control, next);
5837 				/* Add back any hiddend data */
5838 				if (control->held_length) {
5839 					held_length = 0;
5840 					control->held_length = 0;
5841 					wakeup_read_socket = 1;
5842 				}
5843 				if (control->aux_data) {
5844 					sctp_m_free(control->aux_data);
5845 					control->aux_data = NULL;
5846 				}
5847 				no_rcv_needed = control->do_not_ref_stcb;
5848 				sctp_free_remote_addr(control->whoFrom);
5849 				control->data = NULL;
5850 				sctp_free_a_readq(stcb, control);
5851 				control = NULL;
5852 				if ((freed_so_far >= rwnd_req) &&
5853 				    (no_rcv_needed == 0))
5854 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5855 
5856 			} else {
5857 				/*
5858 				 * The user did not read all of this
5859 				 * message, turn off the returned MSG_EOR
5860 				 * since we are leaving more behind on the
5861 				 * control to read.
5862 				 */
5863 #ifdef INVARIANTS
5864 				if (control->end_added &&
5865 				    (control->data == NULL) &&
5866 				    (control->tail_mbuf == NULL)) {
5867 					panic("Gak, control->length is corrupt?");
5868 				}
5869 #endif
5870 				no_rcv_needed = control->do_not_ref_stcb;
5871 				out_flags &= ~MSG_EOR;
5872 			}
5873 		}
5874 		if (out_flags & MSG_EOR) {
5875 			goto release;
5876 		}
5877 		if ((uio->uio_resid == 0) ||
5878 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5879 		    ) {
5880 			goto release;
5881 		}
5882 		/*
5883 		 * If I hit here the receiver wants more and this message is
5884 		 * NOT done (pd-api). So two questions. Can we block? if not
5885 		 * we are done. Did the user NOT set MSG_WAITALL?
5886 		 */
5887 		if (block_allowed == 0) {
5888 			goto release;
5889 		}
5890 		/*
5891 		 * We need to wait for more data a few things: - We don't
5892 		 * sbunlock() so we don't get someone else reading. - We
5893 		 * must be sure to account for the case where what is added
5894 		 * is NOT to our control when we wakeup.
5895 		 */
5896 
5897 		/*
5898 		 * Do we need to tell the transport a rwnd update might be
5899 		 * needed before we go to sleep?
5900 		 */
5901 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5902 		    ((freed_so_far >= rwnd_req) &&
5903 		    (control->do_not_ref_stcb == 0) &&
5904 		    (no_rcv_needed == 0))) {
5905 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5906 		}
5907 wait_some_more:
5908 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5909 			goto release;
5910 		}
5911 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5912 			goto release;
5913 
5914 		if (hold_rlock == 1) {
5915 			SCTP_INP_READ_UNLOCK(inp);
5916 			hold_rlock = 0;
5917 		}
5918 		if (hold_sblock == 0) {
5919 			SOCKBUF_LOCK(&so->so_rcv);
5920 			hold_sblock = 1;
5921 		}
5922 		if ((copied_so_far) && (control->length == 0) &&
5923 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5924 			goto release;
5925 		}
5926 		if (so->so_rcv.sb_cc <= control->held_length) {
5927 			error = sbwait(&so->so_rcv);
5928 			if (error) {
5929 				goto release;
5930 			}
5931 			control->held_length = 0;
5932 		}
5933 		if (hold_sblock) {
5934 			SOCKBUF_UNLOCK(&so->so_rcv);
5935 			hold_sblock = 0;
5936 		}
5937 		if (control->length == 0) {
5938 			/* still nothing here */
5939 			if (control->end_added == 1) {
5940 				/* he aborted, or is done i.e.did a shutdown */
5941 				out_flags |= MSG_EOR;
5942 				if (control->pdapi_aborted) {
5943 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5944 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5945 
5946 					out_flags |= MSG_TRUNC;
5947 				} else {
5948 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5949 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5950 				}
5951 				goto done_with_control;
5952 			}
5953 			if (so->so_rcv.sb_cc > held_length) {
5954 				control->held_length = so->so_rcv.sb_cc;
5955 				held_length = 0;
5956 			}
5957 			goto wait_some_more;
5958 		} else if (control->data == NULL) {
5959 			/*
5960 			 * we must re-sync since data is probably being
5961 			 * added
5962 			 */
5963 			SCTP_INP_READ_LOCK(inp);
5964 			if ((control->length > 0) && (control->data == NULL)) {
5965 				/*
5966 				 * big trouble.. we have the lock and its
5967 				 * corrupt?
5968 				 */
5969 #ifdef INVARIANTS
5970 				panic("Impossible data==NULL length !=0");
5971 #endif
5972 				out_flags |= MSG_EOR;
5973 				out_flags |= MSG_TRUNC;
5974 				control->length = 0;
5975 				SCTP_INP_READ_UNLOCK(inp);
5976 				goto done_with_control;
5977 			}
5978 			SCTP_INP_READ_UNLOCK(inp);
5979 			/* We will fall around to get more data */
5980 		}
5981 		goto get_more_data;
5982 	} else {
5983 		/*-
5984 		 * Give caller back the mbuf chain,
5985 		 * store in uio_resid the length
5986 		 */
5987 		wakeup_read_socket = 0;
5988 		if ((control->end_added == 0) ||
5989 		    (TAILQ_NEXT(control, next) == NULL)) {
5990 			/* Need to get rlock */
5991 			if (hold_rlock == 0) {
5992 				SCTP_INP_READ_LOCK(inp);
5993 				hold_rlock = 1;
5994 			}
5995 		}
5996 		if (control->end_added) {
5997 			out_flags |= MSG_EOR;
5998 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5999 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6000 		}
6001 		if (control->spec_flags & M_NOTIFICATION) {
6002 			out_flags |= MSG_NOTIFICATION;
6003 		}
6004 		uio->uio_resid = control->length;
6005 		*mp = control->data;
6006 		m = control->data;
6007 		while (m) {
6008 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6009 				sctp_sblog(&so->so_rcv,
6010 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6011 			}
6012 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6013 			freed_so_far += SCTP_BUF_LEN(m);
6014 			freed_so_far += MSIZE;
6015 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6016 				sctp_sblog(&so->so_rcv,
6017 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6018 			}
6019 			m = SCTP_BUF_NEXT(m);
6020 		}
6021 		control->data = control->tail_mbuf = NULL;
6022 		control->length = 0;
6023 		if (out_flags & MSG_EOR) {
6024 			/* Done with this control */
6025 			goto done_with_control;
6026 		}
6027 	}
6028 release:
6029 	if (hold_rlock == 1) {
6030 		SCTP_INP_READ_UNLOCK(inp);
6031 		hold_rlock = 0;
6032 	}
6033 	if (hold_sblock == 1) {
6034 		SOCKBUF_UNLOCK(&so->so_rcv);
6035 		hold_sblock = 0;
6036 	}
6037 	sbunlock(&so->so_rcv);
6038 	sockbuf_lock = 0;
6039 
6040 release_unlocked:
6041 	if (hold_sblock) {
6042 		SOCKBUF_UNLOCK(&so->so_rcv);
6043 		hold_sblock = 0;
6044 	}
6045 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6046 		if ((freed_so_far >= rwnd_req) &&
6047 		    (control && (control->do_not_ref_stcb == 0)) &&
6048 		    (no_rcv_needed == 0))
6049 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6050 	}
6051 out:
6052 	if (msg_flags) {
6053 		*msg_flags = out_flags;
6054 	}
6055 	if (((out_flags & MSG_EOR) == 0) &&
6056 	    ((in_flags & MSG_PEEK) == 0) &&
6057 	    (sinfo) &&
6058 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6059 		struct sctp_extrcvinfo *s_extra;
6060 
6061 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6062 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6063 	}
6064 	if (hold_rlock == 1) {
6065 		SCTP_INP_READ_UNLOCK(inp);
6066 		hold_rlock = 0;
6067 	}
6068 	if (hold_sblock) {
6069 		SOCKBUF_UNLOCK(&so->so_rcv);
6070 		hold_sblock = 0;
6071 	}
6072 	if (sockbuf_lock) {
6073 		sbunlock(&so->so_rcv);
6074 	}
6075 	if (freecnt_applied) {
6076 		/*
6077 		 * The lock on the socket buffer protects us so the free
6078 		 * code will stop. But since we used the socketbuf lock and
6079 		 * the sender uses the tcb_lock to increment, we need to use
6080 		 * the atomic add to the refcnt.
6081 		 */
6082 		if (stcb == NULL) {
6083 #ifdef INVARIANTS
6084 			panic("stcb for refcnt has gone NULL?");
6085 			goto stage_left;
6086 #else
6087 			goto stage_left;
6088 #endif
6089 		}
6090 		atomic_add_int(&stcb->asoc.refcnt, -1);
6091 		freecnt_applied = 0;
6092 		/* Save the value back for next time */
6093 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6094 	}
6095 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6096 		if (stcb) {
6097 			sctp_misc_ints(SCTP_SORECV_DONE,
6098 			    freed_so_far,
6099 			    ((uio) ? (slen - uio->uio_resid) : slen),
6100 			    stcb->asoc.my_rwnd,
6101 			    so->so_rcv.sb_cc);
6102 		} else {
6103 			sctp_misc_ints(SCTP_SORECV_DONE,
6104 			    freed_so_far,
6105 			    ((uio) ? (slen - uio->uio_resid) : slen),
6106 			    0,
6107 			    so->so_rcv.sb_cc);
6108 		}
6109 	}
6110 stage_left:
6111 	if (wakeup_read_socket) {
6112 		sctp_sorwakeup(inp, so);
6113 	}
6114 	return (error);
6115 }
6116 
6117 
6118 #ifdef SCTP_MBUF_LOGGING
6119 struct mbuf *
6120 sctp_m_free(struct mbuf *m)
6121 {
6122 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6123 		if (SCTP_BUF_IS_EXTENDED(m)) {
6124 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6125 		}
6126 	}
6127 	return (m_free(m));
6128 }
6129 
6130 void
6131 sctp_m_freem(struct mbuf *mb)
6132 {
6133 	while (mb != NULL)
6134 		mb = sctp_m_free(mb);
6135 }
6136 
6137 #endif
6138 
6139 int
6140 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6141 {
6142 	/*
6143 	 * Given a local address. For all associations that holds the
6144 	 * address, request a peer-set-primary.
6145 	 */
6146 	struct sctp_ifa *ifa;
6147 	struct sctp_laddr *wi;
6148 
6149 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6150 	if (ifa == NULL) {
6151 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6152 		return (EADDRNOTAVAIL);
6153 	}
6154 	/*
6155 	 * Now that we have the ifa we must awaken the iterator with this
6156 	 * message.
6157 	 */
6158 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6159 	if (wi == NULL) {
6160 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6161 		return (ENOMEM);
6162 	}
6163 	/* Now incr the count and int wi structure */
6164 	SCTP_INCR_LADDR_COUNT();
6165 	bzero(wi, sizeof(*wi));
6166 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6167 	wi->ifa = ifa;
6168 	wi->action = SCTP_SET_PRIM_ADDR;
6169 	atomic_add_int(&ifa->refcount, 1);
6170 
6171 	/* Now add it to the work queue */
6172 	SCTP_WQ_ADDR_LOCK();
6173 	/*
6174 	 * Should this really be a tailq? As it is we will process the
6175 	 * newest first :-0
6176 	 */
6177 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6178 	SCTP_WQ_ADDR_UNLOCK();
6179 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6180 	    (struct sctp_inpcb *)NULL,
6181 	    (struct sctp_tcb *)NULL,
6182 	    (struct sctp_nets *)NULL);
6183 	return (0);
6184 }
6185 
6186 
6187 int
6188 sctp_soreceive(struct socket *so,
6189     struct sockaddr **psa,
6190     struct uio *uio,
6191     struct mbuf **mp0,
6192     struct mbuf **controlp,
6193     int *flagsp)
6194 {
6195 	int error, fromlen;
6196 	uint8_t sockbuf[256];
6197 	struct sockaddr *from;
6198 	struct sctp_extrcvinfo sinfo;
6199 	int filling_sinfo = 1;
6200 	struct sctp_inpcb *inp;
6201 
6202 	inp = (struct sctp_inpcb *)so->so_pcb;
6203 	/* pickup the assoc we are reading from */
6204 	if (inp == NULL) {
6205 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6206 		return (EINVAL);
6207 	}
6208 	if ((sctp_is_feature_off(inp,
6209 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6210 	    (controlp == NULL)) {
6211 		/* user does not want the sndrcv ctl */
6212 		filling_sinfo = 0;
6213 	}
6214 	if (psa) {
6215 		from = (struct sockaddr *)sockbuf;
6216 		fromlen = sizeof(sockbuf);
6217 		from->sa_len = 0;
6218 	} else {
6219 		from = NULL;
6220 		fromlen = 0;
6221 	}
6222 
6223 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6224 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6225 	if ((controlp) && (filling_sinfo)) {
6226 		/* copy back the sinfo in a CMSG format */
6227 		if (filling_sinfo)
6228 			*controlp = sctp_build_ctl_nchunk(inp,
6229 			    (struct sctp_sndrcvinfo *)&sinfo);
6230 		else
6231 			*controlp = NULL;
6232 	}
6233 	if (psa) {
6234 		/* copy back the address info */
6235 		if (from && from->sa_len) {
6236 			*psa = sodupsockaddr(from, M_NOWAIT);
6237 		} else {
6238 			*psa = NULL;
6239 		}
6240 	}
6241 	return (error);
6242 }
6243 
6244 
6245 int
6246 sctp_l_soreceive(struct socket *so,
6247     struct sockaddr **name,
6248     struct uio *uio,
6249     char **controlp,
6250     int *controllen,
6251     int *flag)
6252 {
6253 	int error, fromlen;
6254 	uint8_t sockbuf[256];
6255 	struct sockaddr *from;
6256 	struct sctp_extrcvinfo sinfo;
6257 	int filling_sinfo = 1;
6258 	struct sctp_inpcb *inp;
6259 
6260 	inp = (struct sctp_inpcb *)so->so_pcb;
6261 	/* pickup the assoc we are reading from */
6262 	if (inp == NULL) {
6263 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6264 		return (EINVAL);
6265 	}
6266 	if ((sctp_is_feature_off(inp,
6267 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6268 	    (controlp == NULL)) {
6269 		/* user does not want the sndrcv ctl */
6270 		filling_sinfo = 0;
6271 	}
6272 	if (name) {
6273 		from = (struct sockaddr *)sockbuf;
6274 		fromlen = sizeof(sockbuf);
6275 		from->sa_len = 0;
6276 	} else {
6277 		from = NULL;
6278 		fromlen = 0;
6279 	}
6280 
6281 	error = sctp_sorecvmsg(so, uio,
6282 	    (struct mbuf **)NULL,
6283 	    from, fromlen, flag,
6284 	    (struct sctp_sndrcvinfo *)&sinfo,
6285 	    filling_sinfo);
6286 	if ((controlp) && (filling_sinfo)) {
6287 		/*
6288 		 * copy back the sinfo in a CMSG format note that the caller
6289 		 * has reponsibility for freeing the memory.
6290 		 */
6291 		if (filling_sinfo)
6292 			*controlp = sctp_build_ctl_cchunk(inp,
6293 			    controllen,
6294 			    (struct sctp_sndrcvinfo *)&sinfo);
6295 	}
6296 	if (name) {
6297 		/* copy back the address info */
6298 		if (from && from->sa_len) {
6299 			*name = sodupsockaddr(from, M_WAIT);
6300 		} else {
6301 			*name = NULL;
6302 		}
6303 	}
6304 	return (error);
6305 }
6306 
6307 
6308 
6309 
6310 
6311 
6312 
6313 int
6314 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6315     int totaddr, int *error)
6316 {
6317 	int added = 0;
6318 	int i;
6319 	struct sctp_inpcb *inp;
6320 	struct sockaddr *sa;
6321 	size_t incr = 0;
6322 
6323 	sa = addr;
6324 	inp = stcb->sctp_ep;
6325 	*error = 0;
6326 	for (i = 0; i < totaddr; i++) {
6327 		if (sa->sa_family == AF_INET) {
6328 			incr = sizeof(struct sockaddr_in);
6329 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6330 				/* assoc gone no un-lock */
6331 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6332 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6333 				*error = ENOBUFS;
6334 				goto out_now;
6335 			}
6336 			added++;
6337 		} else if (sa->sa_family == AF_INET6) {
6338 			incr = sizeof(struct sockaddr_in6);
6339 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6340 				/* assoc gone no un-lock */
6341 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6342 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6343 				*error = ENOBUFS;
6344 				goto out_now;
6345 			}
6346 			added++;
6347 		}
6348 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6349 	}
6350 out_now:
6351 	return (added);
6352 }
6353 
6354 struct sctp_tcb *
6355 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6356     int *totaddr, int *num_v4, int *num_v6, int *error,
6357     int limit, int *bad_addr)
6358 {
6359 	struct sockaddr *sa;
6360 	struct sctp_tcb *stcb = NULL;
6361 	size_t incr, at, i;
6362 
6363 	at = incr = 0;
6364 	sa = addr;
6365 	*error = *num_v6 = *num_v4 = 0;
6366 	/* account and validate addresses */
6367 	for (i = 0; i < (size_t)*totaddr; i++) {
6368 		if (sa->sa_family == AF_INET) {
6369 			(*num_v4) += 1;
6370 			incr = sizeof(struct sockaddr_in);
6371 			if (sa->sa_len != incr) {
6372 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6373 				*error = EINVAL;
6374 				*bad_addr = 1;
6375 				return (NULL);
6376 			}
6377 		} else if (sa->sa_family == AF_INET6) {
6378 			struct sockaddr_in6 *sin6;
6379 
6380 			sin6 = (struct sockaddr_in6 *)sa;
6381 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6382 				/* Must be non-mapped for connectx */
6383 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6384 				*error = EINVAL;
6385 				*bad_addr = 1;
6386 				return (NULL);
6387 			}
6388 			(*num_v6) += 1;
6389 			incr = sizeof(struct sockaddr_in6);
6390 			if (sa->sa_len != incr) {
6391 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6392 				*error = EINVAL;
6393 				*bad_addr = 1;
6394 				return (NULL);
6395 			}
6396 		} else {
6397 			*totaddr = i;
6398 			/* we are done */
6399 			break;
6400 		}
6401 		SCTP_INP_INCR_REF(inp);
6402 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6403 		if (stcb != NULL) {
6404 			/* Already have or am bring up an association */
6405 			return (stcb);
6406 		} else {
6407 			SCTP_INP_DECR_REF(inp);
6408 		}
6409 		if ((at + incr) > (size_t)limit) {
6410 			*totaddr = i;
6411 			break;
6412 		}
6413 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6414 	}
6415 	return ((struct sctp_tcb *)NULL);
6416 }
6417 
6418 /*
6419  * sctp_bindx(ADD) for one address.
6420  * assumes all arguments are valid/checked by caller.
6421  */
6422 void
6423 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6424     struct sockaddr *sa, sctp_assoc_t assoc_id,
6425     uint32_t vrf_id, int *error, void *p)
6426 {
6427 	struct sockaddr *addr_touse;
6428 
6429 #ifdef INET6
6430 	struct sockaddr_in sin;
6431 
6432 #endif
6433 
6434 	/* see if we're bound all already! */
6435 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6436 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6437 		*error = EINVAL;
6438 		return;
6439 	}
6440 	addr_touse = sa;
6441 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6442 	if (sa->sa_family == AF_INET6) {
6443 		struct sockaddr_in6 *sin6;
6444 
6445 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6446 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6447 			*error = EINVAL;
6448 			return;
6449 		}
6450 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6451 			/* can only bind v6 on PF_INET6 sockets */
6452 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6453 			*error = EINVAL;
6454 			return;
6455 		}
6456 		sin6 = (struct sockaddr_in6 *)addr_touse;
6457 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6458 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6459 			    SCTP_IPV6_V6ONLY(inp)) {
6460 				/* can't bind v4-mapped on PF_INET sockets */
6461 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6462 				*error = EINVAL;
6463 				return;
6464 			}
6465 			in6_sin6_2_sin(&sin, sin6);
6466 			addr_touse = (struct sockaddr *)&sin;
6467 		}
6468 	}
6469 #endif
6470 	if (sa->sa_family == AF_INET) {
6471 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6472 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6473 			*error = EINVAL;
6474 			return;
6475 		}
6476 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6477 		    SCTP_IPV6_V6ONLY(inp)) {
6478 			/* can't bind v4 on PF_INET sockets */
6479 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6480 			*error = EINVAL;
6481 			return;
6482 		}
6483 	}
6484 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6485 		if (p == NULL) {
6486 			/* Can't get proc for Net/Open BSD */
6487 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6488 			*error = EINVAL;
6489 			return;
6490 		}
6491 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6492 		return;
6493 	}
6494 	/*
6495 	 * No locks required here since bind and mgmt_ep_sa all do their own
6496 	 * locking. If we do something for the FIX: below we may need to
6497 	 * lock in that case.
6498 	 */
6499 	if (assoc_id == 0) {
6500 		/* add the address */
6501 		struct sctp_inpcb *lep;
6502 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6503 
6504 		/* validate the incoming port */
6505 		if ((lsin->sin_port != 0) &&
6506 		    (lsin->sin_port != inp->sctp_lport)) {
6507 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6508 			*error = EINVAL;
6509 			return;
6510 		} else {
6511 			/* user specified 0 port, set it to existing port */
6512 			lsin->sin_port = inp->sctp_lport;
6513 		}
6514 
6515 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6516 		if (lep != NULL) {
6517 			/*
6518 			 * We must decrement the refcount since we have the
6519 			 * ep already and are binding. No remove going on
6520 			 * here.
6521 			 */
6522 			SCTP_INP_DECR_REF(lep);
6523 		}
6524 		if (lep == inp) {
6525 			/* already bound to it.. ok */
6526 			return;
6527 		} else if (lep == NULL) {
6528 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6529 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6530 			    SCTP_ADD_IP_ADDRESS,
6531 			    vrf_id, NULL);
6532 		} else {
6533 			*error = EADDRINUSE;
6534 		}
6535 		if (*error)
6536 			return;
6537 	} else {
6538 		/*
6539 		 * FIX: decide whether we allow assoc based bindx
6540 		 */
6541 	}
6542 }
6543 
6544 /*
6545  * sctp_bindx(DELETE) for one address.
6546  * assumes all arguments are valid/checked by caller.
6547  */
6548 void
6549 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6550     struct sockaddr *sa, sctp_assoc_t assoc_id,
6551     uint32_t vrf_id, int *error)
6552 {
6553 	struct sockaddr *addr_touse;
6554 
6555 #ifdef INET6
6556 	struct sockaddr_in sin;
6557 
6558 #endif
6559 
6560 	/* see if we're bound all already! */
6561 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6562 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6563 		*error = EINVAL;
6564 		return;
6565 	}
6566 	addr_touse = sa;
6567 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6568 	if (sa->sa_family == AF_INET6) {
6569 		struct sockaddr_in6 *sin6;
6570 
6571 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6572 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6573 			*error = EINVAL;
6574 			return;
6575 		}
6576 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6577 			/* can only bind v6 on PF_INET6 sockets */
6578 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6579 			*error = EINVAL;
6580 			return;
6581 		}
6582 		sin6 = (struct sockaddr_in6 *)addr_touse;
6583 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6584 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6585 			    SCTP_IPV6_V6ONLY(inp)) {
6586 				/* can't bind mapped-v4 on PF_INET sockets */
6587 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6588 				*error = EINVAL;
6589 				return;
6590 			}
6591 			in6_sin6_2_sin(&sin, sin6);
6592 			addr_touse = (struct sockaddr *)&sin;
6593 		}
6594 	}
6595 #endif
6596 	if (sa->sa_family == AF_INET) {
6597 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6598 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6599 			*error = EINVAL;
6600 			return;
6601 		}
6602 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6603 		    SCTP_IPV6_V6ONLY(inp)) {
6604 			/* can't bind v4 on PF_INET sockets */
6605 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6606 			*error = EINVAL;
6607 			return;
6608 		}
6609 	}
6610 	/*
6611 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6612 	 * below is ever changed we may need to lock before calling
6613 	 * association level binding.
6614 	 */
6615 	if (assoc_id == 0) {
6616 		/* delete the address */
6617 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6618 		    SCTP_DEL_IP_ADDRESS,
6619 		    vrf_id, NULL);
6620 	} else {
6621 		/*
6622 		 * FIX: decide whether we allow assoc based bindx
6623 		 */
6624 	}
6625 }
6626 
6627 /*
6628  * returns the valid local address count for an assoc, taking into account
6629  * all scoping rules
6630  */
6631 int
6632 sctp_local_addr_count(struct sctp_tcb *stcb)
6633 {
6634 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
6635 	int ipv4_addr_legal, ipv6_addr_legal;
6636 	struct sctp_vrf *vrf;
6637 	struct sctp_ifn *sctp_ifn;
6638 	struct sctp_ifa *sctp_ifa;
6639 	int count = 0;
6640 
6641 	/* Turn on all the appropriate scopes */
6642 	loopback_scope = stcb->asoc.loopback_scope;
6643 	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
6644 	local_scope = stcb->asoc.local_scope;
6645 	site_scope = stcb->asoc.site_scope;
6646 	ipv4_addr_legal = ipv6_addr_legal = 0;
6647 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6648 		ipv6_addr_legal = 1;
6649 		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
6650 			ipv4_addr_legal = 1;
6651 		}
6652 	} else {
6653 		ipv4_addr_legal = 1;
6654 	}
6655 
6656 	SCTP_IPI_ADDR_RLOCK();
6657 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6658 	if (vrf == NULL) {
6659 		/* no vrf, no addresses */
6660 		SCTP_IPI_ADDR_RUNLOCK();
6661 		return (0);
6662 	}
6663 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6664 		/*
6665 		 * bound all case: go through all ifns on the vrf
6666 		 */
6667 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6668 			if ((loopback_scope == 0) &&
6669 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6670 				continue;
6671 			}
6672 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6673 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6674 					continue;
6675 				switch (sctp_ifa->address.sa.sa_family) {
6676 				case AF_INET:
6677 					if (ipv4_addr_legal) {
6678 						struct sockaddr_in *sin;
6679 
6680 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
6681 						if (sin->sin_addr.s_addr == 0) {
6682 							/*
6683 							 * skip unspecified
6684 							 * addrs
6685 							 */
6686 							continue;
6687 						}
6688 						if ((ipv4_local_scope == 0) &&
6689 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6690 							continue;
6691 						}
6692 						/* count this one */
6693 						count++;
6694 					} else {
6695 						continue;
6696 					}
6697 					break;
6698 #ifdef INET6
6699 				case AF_INET6:
6700 					if (ipv6_addr_legal) {
6701 						struct sockaddr_in6 *sin6;
6702 
6703 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
6704 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6705 							continue;
6706 						}
6707 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6708 							if (local_scope == 0)
6709 								continue;
6710 							if (sin6->sin6_scope_id == 0) {
6711 								if (sa6_recoverscope(sin6) != 0)
6712 									/*
6713 									 *
6714 									 * bad
6715 									 *
6716 									 * li
6717 									 * nk
6718 									 *
6719 									 * loc
6720 									 * al
6721 									 *
6722 									 * add
6723 									 * re
6724 									 * ss
6725 									 * */
6726 									continue;
6727 							}
6728 						}
6729 						if ((site_scope == 0) &&
6730 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6731 							continue;
6732 						}
6733 						/* count this one */
6734 						count++;
6735 					}
6736 					break;
6737 #endif
6738 				default:
6739 					/* TSNH */
6740 					break;
6741 				}
6742 			}
6743 		}
6744 	} else {
6745 		/*
6746 		 * subset bound case
6747 		 */
6748 		struct sctp_laddr *laddr;
6749 
6750 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6751 		    sctp_nxt_addr) {
6752 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6753 				continue;
6754 			}
6755 			/* count this one */
6756 			count++;
6757 		}
6758 	}
6759 	SCTP_IPI_ADDR_RUNLOCK();
6760 	return (count);
6761 }
6762 
6763 #if defined(SCTP_LOCAL_TRACE_BUF)
6764 
6765 void
6766 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6767 {
6768 	uint32_t saveindex, newindex;
6769 
6770 	do {
6771 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6772 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6773 			newindex = 1;
6774 		} else {
6775 			newindex = saveindex + 1;
6776 		}
6777 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6778 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6779 		saveindex = 0;
6780 	}
6781 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6782 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6783 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6784 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6785 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6786 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6787 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6788 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6789 }
6790 
6791 #endif
6792 /* We will need to add support
6793  * to bind the ports and such here
6794  * so we can do UDP tunneling. In
6795  * the mean-time, we return error
6796  */
6797 #include <netinet/udp.h>
6798 #include <netinet/udp_var.h>
6799 #include <sys/proc.h>
6800 #ifdef INET6
6801 #include <netinet6/sctp6_var.h>
6802 #endif
6803 
6804 static void
6805 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6806 {
6807 	struct ip *iph;
6808 	struct mbuf *sp, *last;
6809 	struct udphdr *uhdr;
6810 	uint16_t port = 0, len;
6811 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6812 
6813 	/*
6814 	 * Split out the mbuf chain. Leave the IP header in m, place the
6815 	 * rest in the sp.
6816 	 */
6817 	if ((m->m_flags & M_PKTHDR) == 0) {
6818 		/* Can't handle one that is not a pkt hdr */
6819 		goto out;
6820 	}
6821 	/* pull the src port */
6822 	iph = mtod(m, struct ip *);
6823 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6824 
6825 	port = uhdr->uh_sport;
6826 	sp = m_split(m, off, M_DONTWAIT);
6827 	if (sp == NULL) {
6828 		/* Gak, drop packet, we can't do a split */
6829 		goto out;
6830 	}
6831 	if (sp->m_pkthdr.len < header_size) {
6832 		/* Gak, packet can't have an SCTP header in it - to small */
6833 		m_freem(sp);
6834 		goto out;
6835 	}
6836 	/* ok now pull up the UDP header and SCTP header together */
6837 	sp = m_pullup(sp, header_size);
6838 	if (sp == NULL) {
6839 		/* Gak pullup failed */
6840 		goto out;
6841 	}
6842 	/* trim out the UDP header */
6843 	m_adj(sp, sizeof(struct udphdr));
6844 
6845 	/* Now reconstruct the mbuf chain */
6846 	/* 1) find last one */
6847 	last = m;
6848 	while (last->m_next != NULL) {
6849 		last = last->m_next;
6850 	}
6851 	last->m_next = sp;
6852 	m->m_pkthdr.len += sp->m_pkthdr.len;
6853 	last = m;
6854 	while (last != NULL) {
6855 		last = last->m_next;
6856 	}
6857 	/* Now its ready for sctp_input or sctp6_input */
6858 	iph = mtod(m, struct ip *);
6859 	switch (iph->ip_v) {
6860 	case IPVERSION:
6861 		{
6862 			/* its IPv4 */
6863 			len = SCTP_GET_IPV4_LENGTH(iph);
6864 			len -= sizeof(struct udphdr);
6865 			SCTP_GET_IPV4_LENGTH(iph) = len;
6866 			sctp_input_with_port(m, off, port);
6867 			break;
6868 		}
6869 #ifdef INET6
6870 	case IPV6_VERSION >> 4:
6871 		{
6872 			/* its IPv6 - NOT supported */
6873 			goto out;
6874 			break;
6875 
6876 		}
6877 #endif
6878 	default:
6879 		{
6880 			m_freem(m);
6881 			break;
6882 		}
6883 	}
6884 	return;
6885 out:
6886 	m_freem(m);
6887 }
6888 
6889 void
6890 sctp_over_udp_stop(void)
6891 {
6892 	struct socket *sop;
6893 
6894 	/*
6895 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6896 	 * for writting!
6897 	 */
6898 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6899 		/* Nothing to do */
6900 		return;
6901 	}
6902 	sop = SCTP_BASE_INFO(udp_tun_socket);
6903 	soclose(sop);
6904 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6905 }
6906 int
6907 sctp_over_udp_start(void)
6908 {
6909 	uint16_t port;
6910 	int ret;
6911 	struct sockaddr_in sin;
6912 	struct socket *sop = NULL;
6913 	struct thread *th;
6914 	struct ucred *cred;
6915 
6916 	/*
6917 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6918 	 * for writting!
6919 	 */
6920 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6921 	if (port == 0) {
6922 		/* Must have a port set */
6923 		return (EINVAL);
6924 	}
6925 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6926 		/* Already running -- must stop first */
6927 		return (EALREADY);
6928 	}
6929 	th = curthread;
6930 	cred = th->td_ucred;
6931 	if ((ret = socreate(PF_INET, &sop,
6932 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6933 		return (ret);
6934 	}
6935 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6936 	/* call the special UDP hook */
6937 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6938 	if (ret) {
6939 		goto exit_stage_left;
6940 	}
6941 	/* Ok we have a socket, bind it to the port */
6942 	memset(&sin, 0, sizeof(sin));
6943 	sin.sin_len = sizeof(sin);
6944 	sin.sin_family = AF_INET;
6945 	sin.sin_port = htons(port);
6946 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6947 	if (ret) {
6948 		/* Close up we cant get the port */
6949 exit_stage_left:
6950 		sctp_over_udp_stop();
6951 		return (ret);
6952 	}
6953 	/*
6954 	 * Ok we should now get UDP packets directly to our input routine
6955 	 * sctp_recv_upd_tunneled_packet().
6956 	 */
6957 	return (0);
6958 }
6959