xref: /freebsd/sys/netinet/sctputil.c (revision 9162f64b58d01ec01481d60b6cdc06ffd8e8c7fc)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_crc32.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_cc_functions.h>
52 
53 #define NUMBER_OF_MTU_SIZES 18
54 
55 
56 #ifndef KTR_SCTP
57 #define KTR_SCTP KTR_SUBSYS
58 #endif
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 
221 }
222 
223 
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252     int from)
253 {
254 	struct sctp_cwnd_log sctp_clog;
255 
256 	if (control == NULL) {
257 		SCTP_PRINTF("Gak log of NULL?\n");
258 		return;
259 	}
260 	sctp_clog.x.strlog.stcb = control->stcb;
261 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 	sctp_clog.x.strlog.strm = control->sinfo_stream;
264 	if (poschk != NULL) {
265 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 	} else {
268 		sctp_clog.x.strlog.e_tsn = 0;
269 		sctp_clog.x.strlog.e_sseq = 0;
270 	}
271 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 	    SCTP_LOG_EVENT_STRM,
273 	    from,
274 	    sctp_clog.x.misc.log1,
275 	    sctp_clog.x.misc.log2,
276 	    sctp_clog.x.misc.log3,
277 	    sctp_clog.x.misc.log4);
278 
279 }
280 
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 	struct sctp_cwnd_log sctp_clog;
285 
286 	sctp_clog.x.cwnd.net = net;
287 	if (stcb->asoc.send_queue_cnt > 255)
288 		sctp_clog.x.cwnd.cnt_in_send = 255;
289 	else
290 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 	if (stcb->asoc.stream_queue_cnt > 255)
292 		sctp_clog.x.cwnd.cnt_in_str = 255;
293 	else
294 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295 
296 	if (net) {
297 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 		sctp_clog.x.cwnd.inflight = net->flight_size;
299 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 	}
303 	if (SCTP_CWNDLOG_PRESEND == from) {
304 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 	}
306 	sctp_clog.x.cwnd.cwnd_augment = augment;
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	    SCTP_LOG_EVENT_CWND,
309 	    from,
310 	    sctp_clog.x.misc.log1,
311 	    sctp_clog.x.misc.log2,
312 	    sctp_clog.x.misc.log3,
313 	    sctp_clog.x.misc.log4);
314 
315 }
316 
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 	struct sctp_cwnd_log sctp_clog;
321 
322 	memset(&sctp_clog, 0, sizeof(sctp_clog));
323 	if (inp) {
324 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325 
326 	} else {
327 		sctp_clog.x.lock.sock = (void *)NULL;
328 	}
329 	sctp_clog.x.lock.inp = (void *)inp;
330 	if (stcb) {
331 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 	} else {
333 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 	}
335 	if (inp) {
336 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 	} else {
339 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 	if (inp->sctp_socket) {
344 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 	} else {
348 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 	    SCTP_LOG_LOCK_EVENT,
354 	    from,
355 	    sctp_clog.x.misc.log1,
356 	    sctp_clog.x.misc.log2,
357 	    sctp_clog.x.misc.log3,
358 	    sctp_clog.x.misc.log4);
359 
360 }
361 
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 	struct sctp_cwnd_log sctp_clog;
366 
367 	memset(&sctp_clog, 0, sizeof(sctp_clog));
368 	sctp_clog.x.cwnd.net = net;
369 	sctp_clog.x.cwnd.cwnd_new_value = error;
370 	sctp_clog.x.cwnd.inflight = net->flight_size;
371 	sctp_clog.x.cwnd.cwnd_augment = burst;
372 	if (stcb->asoc.send_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_send = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 	if (stcb->asoc.stream_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_str = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 	    SCTP_LOG_EVENT_MAXBURST,
382 	    from,
383 	    sctp_clog.x.misc.log1,
384 	    sctp_clog.x.misc.log2,
385 	    sctp_clog.x.misc.log3,
386 	    sctp_clog.x.misc.log4);
387 
388 }
389 
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 	struct sctp_cwnd_log sctp_clog;
394 
395 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 	sctp_clog.x.rwnd.send_size = snd_size;
397 	sctp_clog.x.rwnd.overhead = overhead;
398 	sctp_clog.x.rwnd.new_rwnd = 0;
399 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 	    SCTP_LOG_EVENT_RWND,
401 	    from,
402 	    sctp_clog.x.misc.log1,
403 	    sctp_clog.x.misc.log2,
404 	    sctp_clog.x.misc.log3,
405 	    sctp_clog.x.misc.log4);
406 }
407 
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 	struct sctp_cwnd_log sctp_clog;
412 
413 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 	sctp_clog.x.rwnd.send_size = flight_size;
415 	sctp_clog.x.rwnd.overhead = overhead;
416 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_RWND,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 }
425 
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 	struct sctp_cwnd_log sctp_clog;
430 
431 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 	sctp_clog.x.mbcnt.size_change = book;
433 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 	    SCTP_LOG_EVENT_MBCNT,
437 	    from,
438 	    sctp_clog.x.misc.log1,
439 	    sctp_clog.x.misc.log2,
440 	    sctp_clog.x.misc.log3,
441 	    sctp_clog.x.misc.log4);
442 
443 }
444 
/*
 * Emit four caller-supplied 32-bit values as a generic "misc" KTR trace
 * record; `from` identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
453 
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 	struct sctp_cwnd_log sctp_clog;
458 
459 	sctp_clog.x.wake.stcb = (void *)stcb;
460 	sctp_clog.x.wake.wake_cnt = wake_cnt;
461 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464 
465 	if (stcb->asoc.stream_queue_cnt < 0xff)
466 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 	else
468 		sctp_clog.x.wake.stream_qcnt = 0xff;
469 
470 	if (stcb->asoc.chunks_on_out_queue < 0xff)
471 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 	else
473 		sctp_clog.x.wake.chunks_on_oque = 0xff;
474 
475 	sctp_clog.x.wake.sctpflags = 0;
476 	/* set in the defered mode stuff */
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 		sctp_clog.x.wake.sctpflags |= 1;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 		sctp_clog.x.wake.sctpflags |= 2;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 		sctp_clog.x.wake.sctpflags |= 4;
483 	/* what about the sb */
484 	if (stcb->sctp_socket) {
485 		struct socket *so = stcb->sctp_socket;
486 
487 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 	} else {
489 		sctp_clog.x.wake.sbflags = 0xff;
490 	}
491 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 	    SCTP_LOG_EVENT_WAKE,
493 	    from,
494 	    sctp_clog.x.misc.log1,
495 	    sctp_clog.x.misc.log2,
496 	    sctp_clog.x.misc.log3,
497 	    sctp_clog.x.misc.log4);
498 
499 }
500 
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 	sctp_clog.x.blk.sndlen = sendlen;
513 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 	    SCTP_LOG_EVENT_BLOCK,
515 	    from,
516 	    sctp_clog.x.misc.log1,
517 	    sctp_clog.x.misc.log2,
518 	    sctp_clog.x.misc.log3,
519 	    sctp_clog.x.misc.log4);
520 
521 }
522 
/*
 * Legacy stat-log fill hook.  Tracing now goes through KTR (see the
 * SCTP_CTR6 calls above), so this is a stub that always succeeds;
 * optval/optsize are ignored.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
529 
530 #ifdef SCTP_AUDITING_ENABLED
/* Circular trace of (event, detail) byte pairs; see sctp_audit_log(). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to fill; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
533 
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 	int i;
539 	int cnt;
540 
541 	cnt = 0;
542 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 		if ((sctp_audit_data[i][0] == 0xe0) &&
544 		    (sctp_audit_data[i][1] == 0x01)) {
545 			cnt = 0;
546 			SCTP_PRINTF("\n");
547 		} else if (sctp_audit_data[i][0] == 0xf0) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			SCTP_PRINTF("\n");
553 			cnt = 0;
554 		}
555 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 		    (uint32_t) sctp_audit_data[i][1]);
557 		cnt++;
558 		if ((cnt % 14) == 0)
559 			SCTP_PRINTF("\n");
560 	}
561 	for (i = 0; i < sctp_audit_indx; i++) {
562 		if ((sctp_audit_data[i][0] == 0xe0) &&
563 		    (sctp_audit_data[i][1] == 0x01)) {
564 			cnt = 0;
565 			SCTP_PRINTF("\n");
566 		} else if (sctp_audit_data[i][0] == 0xf0) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			SCTP_PRINTF("\n");
572 			cnt = 0;
573 		}
574 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 		    (uint32_t) sctp_audit_data[i][1]);
576 		cnt++;
577 		if ((cnt % 14) == 0)
578 			SCTP_PRINTF("\n");
579 	}
580 	SCTP_PRINTF("\n");
581 }
582 
583 void
584 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
585     struct sctp_nets *net)
586 {
587 	int resend_cnt, tot_out, rep, tot_book_cnt;
588 	struct sctp_nets *lnet;
589 	struct sctp_tmit_chunk *chk;
590 
591 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
592 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
593 	sctp_audit_indx++;
594 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
595 		sctp_audit_indx = 0;
596 	}
597 	if (inp == NULL) {
598 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
599 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
600 		sctp_audit_indx++;
601 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 			sctp_audit_indx = 0;
603 		}
604 		return;
605 	}
606 	if (stcb == NULL) {
607 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
608 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
609 		sctp_audit_indx++;
610 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
611 			sctp_audit_indx = 0;
612 		}
613 		return;
614 	}
615 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
616 	sctp_audit_data[sctp_audit_indx][1] =
617 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
618 	sctp_audit_indx++;
619 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
620 		sctp_audit_indx = 0;
621 	}
622 	rep = 0;
623 	tot_book_cnt = 0;
624 	resend_cnt = tot_out = 0;
625 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
626 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
627 			resend_cnt++;
628 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
629 			tot_out += chk->book_size;
630 			tot_book_cnt++;
631 		}
632 	}
633 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
634 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
635 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
636 		sctp_audit_indx++;
637 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
638 			sctp_audit_indx = 0;
639 		}
640 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
641 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
642 		rep = 1;
643 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
644 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
645 		sctp_audit_data[sctp_audit_indx][1] =
646 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
647 		sctp_audit_indx++;
648 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
649 			sctp_audit_indx = 0;
650 		}
651 	}
652 	if (tot_out != stcb->asoc.total_flight) {
653 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
654 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
655 		sctp_audit_indx++;
656 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
657 			sctp_audit_indx = 0;
658 		}
659 		rep = 1;
660 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
661 		    (int)stcb->asoc.total_flight);
662 		stcb->asoc.total_flight = tot_out;
663 	}
664 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
665 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
666 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
667 		sctp_audit_indx++;
668 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
669 			sctp_audit_indx = 0;
670 		}
671 		rep = 1;
672 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
673 
674 		stcb->asoc.total_flight_count = tot_book_cnt;
675 	}
676 	tot_out = 0;
677 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
678 		tot_out += lnet->flight_size;
679 	}
680 	if (tot_out != stcb->asoc.total_flight) {
681 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
682 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
683 		sctp_audit_indx++;
684 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
685 			sctp_audit_indx = 0;
686 		}
687 		rep = 1;
688 		SCTP_PRINTF("real flight:%d net total was %d\n",
689 		    stcb->asoc.total_flight, tot_out);
690 		/* now corrective action */
691 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
692 
693 			tot_out = 0;
694 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
695 				if ((chk->whoTo == lnet) &&
696 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
697 					tot_out += chk->book_size;
698 				}
699 			}
700 			if (lnet->flight_size != tot_out) {
701 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
702 				    (uint32_t) lnet, lnet->flight_size,
703 				    tot_out);
704 				lnet->flight_size = tot_out;
705 			}
706 		}
707 	}
708 	if (rep) {
709 		sctp_print_audit_report();
710 	}
711 }
712 
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716 
717 	sctp_audit_data[sctp_audit_indx][0] = ev;
718 	sctp_audit_data[sctp_audit_indx][1] = fd;
719 	sctp_audit_indx++;
720 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 		sctp_audit_indx = 0;
722 	}
723 }
724 
725 #endif
726 
727 /*
728  * a list of sizes based on typical mtu's, used only if next hop size not
729  * returned.
730  */
static int sctp_mtu_sizes[] = {
	/*
	 * NOTE: must stay sorted ascending and contain exactly
	 * NUMBER_OF_MTU_SIZES entries; find_next_best_mtu() iterates it
	 * with that constant as the bound.
	 */
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
751 
752 void
753 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
754 {
755 	struct sctp_association *asoc;
756 	struct sctp_nets *net;
757 
758 	asoc = &stcb->asoc;
759 
760 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
762 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
763 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
764 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
765 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
766 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
767 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
768 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
769 	}
770 }
771 
772 int
773 find_next_best_mtu(int totsz)
774 {
775 	int i, perfer;
776 
777 	/*
778 	 * if we are in here we must find the next best fit based on the
779 	 * size of the dg that failed to be sent.
780 	 */
781 	perfer = 0;
782 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
783 		if (totsz < sctp_mtu_sizes[i]) {
784 			perfer = i - 1;
785 			if (perfer < 0)
786 				perfer = 0;
787 			break;
788 		}
789 	}
790 	return (sctp_mtu_sizes[perfer]);
791 }
792 
/*
 * Refill the endpoint's random_store by hashing random_numbers together
 * with a monotonically increasing counter, and reset store_at so
 * sctp_select_initial_TSN() starts consuming from the front again.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
811 
/*
 * Return a 32-bit pseudo-random value for use as an initial TSN (also
 * used for vtag selection via sctp_select_a_tag()).
 *
 * Values are handed out from the endpoint's precomputed random_store;
 * a lock-free compare-and-set on store_at claims a 4-byte slot, and the
 * store is refilled once a consumer wraps it.  If the sysctl-style
 * debug override (initial_sequence_debug) is set, a simple incrementing
 * counter is returned instead so traces are reproducible.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before running off the usable end of the store. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, store_at+4) atomically; retry if we lost the race. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): the read below is unsynchronized against a
	 * concurrent refill; the surrounding code treats any resulting
	 * garbling as acceptable extra randomness (see the comment in
	 * sctp_fill_random_store()).
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
849 
850 uint32_t
851 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
852 {
853 	u_long x, not_done;
854 	struct timeval now;
855 
856 	(void)SCTP_GETTIME_TIMEVAL(&now);
857 	not_done = 1;
858 	while (not_done) {
859 		x = sctp_select_initial_TSN(&inp->sctp_ep);
860 		if (x == 0) {
861 			/* we never use 0 */
862 			continue;
863 		}
864 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
865 			not_done = 0;
866 		}
867 	}
868 	return (x);
869 }
870 
871 int
872 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
873     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
874 {
875 	struct sctp_association *asoc;
876 
877 	/*
878 	 * Anything set to zero is taken care of by the allocation routine's
879 	 * bzero
880 	 */
881 
882 	/*
883 	 * Up front select what scoping to apply on addresses I tell my peer
884 	 * Not sure what to do with these right now, we will need to come up
885 	 * with a way to set them. We may need to pass them through from the
886 	 * caller in the sctp_aloc_assoc() function.
887 	 */
888 	int i;
889 
890 	asoc = &stcb->asoc;
891 	/* init all variables to a known value. */
892 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
893 	asoc->max_burst = m->sctp_ep.max_burst;
894 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
895 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
896 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
897 	/* EY Init nr_sack variable */
898 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
899 	/* JRS 5/21/07 - Init CMT PF variables */
900 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
901 	asoc->sctp_frag_point = m->sctp_frag_point;
902 #ifdef INET
903 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
904 #else
905 	asoc->default_tos = 0;
906 #endif
907 
908 #ifdef INET6
909 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
910 #else
911 	asoc->default_flowlabel = 0;
912 #endif
913 	asoc->sb_send_resv = 0;
914 	if (override_tag) {
915 #ifdef MICHAELS_EXPERIMENT
916 		if (sctp_is_in_timewait(override_tag, stcb->sctp_ep->sctp_lport, stcb->rport)) {
917 			/*
918 			 * It must be in the time-wait hash, we put it there
919 			 * when we aloc one. If not the peer is playing
920 			 * games.
921 			 */
922 			asoc->my_vtag = override_tag;
923 		} else {
924 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
925 #ifdef INVARIANTS
926 			panic("Huh is_in_timewait fails");
927 #endif
928 			return (ENOMEM);
929 		}
930 #else
931 		asoc->my_vtag = override_tag;
932 #endif
933 	} else {
934 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
935 	}
936 	/* Get the nonce tags */
937 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
938 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
939 	asoc->vrf_id = vrf_id;
940 
941 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
942 		asoc->hb_is_disabled = 1;
943 	else
944 		asoc->hb_is_disabled = 0;
945 
946 #ifdef SCTP_ASOCLOG_OF_TSNS
947 	asoc->tsn_in_at = 0;
948 	asoc->tsn_out_at = 0;
949 	asoc->tsn_in_wrapped = 0;
950 	asoc->tsn_out_wrapped = 0;
951 	asoc->cumack_log_at = 0;
952 	asoc->cumack_log_atsnt = 0;
953 #endif
954 #ifdef SCTP_FS_SPEC_LOG
955 	asoc->fs_index = 0;
956 #endif
957 	asoc->refcnt = 0;
958 	asoc->assoc_up_sent = 0;
959 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
960 	    sctp_select_initial_TSN(&m->sctp_ep);
961 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
962 	/* we are optimisitic here */
963 	asoc->peer_supports_pktdrop = 1;
964 	asoc->peer_supports_nat = 0;
965 	asoc->sent_queue_retran_cnt = 0;
966 
967 	/* for CMT */
968 	asoc->last_net_data_came_from = NULL;
969 
970 	/* This will need to be adjusted */
971 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
972 	asoc->last_acked_seq = asoc->init_seq_number - 1;
973 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
974 	asoc->asconf_seq_in = asoc->last_acked_seq;
975 
976 	/* here we are different, we hold the next one we expect */
977 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
978 
979 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
980 	asoc->initial_rto = m->sctp_ep.initial_rto;
981 
982 	asoc->max_init_times = m->sctp_ep.max_init_times;
983 	asoc->max_send_times = m->sctp_ep.max_send_times;
984 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
985 	asoc->free_chunk_cnt = 0;
986 
987 	asoc->iam_blocking = 0;
988 	/* ECN Nonce initialization */
989 	asoc->context = m->sctp_context;
990 	asoc->def_send = m->def_send;
991 	asoc->ecn_nonce_allowed = 0;
992 	asoc->receiver_nonce_sum = 1;
993 	asoc->nonce_sum_expect_base = 1;
994 	asoc->nonce_sum_check = 1;
995 	asoc->nonce_resync_tsn = 0;
996 	asoc->nonce_wait_for_ecne = 0;
997 	asoc->nonce_wait_tsn = 0;
998 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
999 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1000 	asoc->pr_sctp_cnt = 0;
1001 	asoc->total_output_queue_size = 0;
1002 
1003 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1004 		struct in6pcb *inp6;
1005 
1006 		/* Its a V6 socket */
1007 		inp6 = (struct in6pcb *)m;
1008 		asoc->ipv6_addr_legal = 1;
1009 		/* Now look at the binding flag to see if V4 will be legal */
1010 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1011 			asoc->ipv4_addr_legal = 1;
1012 		} else {
1013 			/* V4 addresses are NOT legal on the association */
1014 			asoc->ipv4_addr_legal = 0;
1015 		}
1016 	} else {
1017 		/* Its a V4 socket, no - V6 */
1018 		asoc->ipv4_addr_legal = 1;
1019 		asoc->ipv6_addr_legal = 0;
1020 	}
1021 
1022 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1023 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1024 
1025 	asoc->smallest_mtu = m->sctp_frag_point;
1026 #ifdef SCTP_PRINT_FOR_B_AND_M
1027 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1028 	    asoc->smallest_mtu);
1029 #endif
1030 	asoc->minrto = m->sctp_ep.sctp_minrto;
1031 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1032 
1033 	asoc->locked_on_sending = NULL;
1034 	asoc->stream_locked_on = 0;
1035 	asoc->ecn_echo_cnt_onq = 0;
1036 	asoc->stream_locked = 0;
1037 
1038 	asoc->send_sack = 1;
1039 
1040 	LIST_INIT(&asoc->sctp_restricted_addrs);
1041 
1042 	TAILQ_INIT(&asoc->nets);
1043 	TAILQ_INIT(&asoc->pending_reply_queue);
1044 	TAILQ_INIT(&asoc->asconf_ack_sent);
1045 	/* Setup to fill the hb random cache at first HB */
1046 	asoc->hb_random_idx = 4;
1047 
1048 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1049 
1050 	/*
1051 	 * JRS - Pick the default congestion control module based on the
1052 	 * sysctl.
1053 	 */
1054 	switch (m->sctp_ep.sctp_default_cc_module) {
1055 		/* JRS - Standard TCP congestion control */
1056 	case SCTP_CC_RFC2581:
1057 		{
1058 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1059 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1067 			break;
1068 		}
1069 		/* JRS - High Speed TCP congestion control (Floyd) */
1070 	case SCTP_CC_HSTCP:
1071 		{
1072 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1073 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1081 			break;
1082 		}
1083 		/* JRS - HTCP congestion control */
1084 	case SCTP_CC_HTCP:
1085 		{
1086 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1087 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1095 			break;
1096 		}
1097 		/* JRS - By default, use RFC2581 */
1098 	default:
1099 		{
1100 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1101 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1102 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1103 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1104 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1105 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1106 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1107 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1108 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1109 			break;
1110 		}
1111 	}
1112 
1113 	/*
1114 	 * Now the stream parameters, here we allocate space for all streams
1115 	 * that we request by default.
1116 	 */
1117 	asoc->streamoutcnt = asoc->pre_open_streams =
1118 	    m->sctp_ep.pre_open_stream_count;
1119 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1120 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1121 	    SCTP_M_STRMO);
1122 	if (asoc->strmout == NULL) {
1123 		/* big trouble no memory */
1124 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1125 		return (ENOMEM);
1126 	}
1127 	for (i = 0; i < asoc->streamoutcnt; i++) {
1128 		/*
1129 		 * inbound side must be set to 0xffff, also NOTE when we get
1130 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1131 		 * count (streamoutcnt) but first check if we sent to any of
1132 		 * the upper streams that were dropped (if some were). Those
1133 		 * that were dropped must be notified to the upper layer as
1134 		 * failed to send.
1135 		 */
1136 		asoc->strmout[i].next_sequence_sent = 0x0;
1137 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1138 		asoc->strmout[i].stream_no = i;
1139 		asoc->strmout[i].last_msg_incomplete = 0;
1140 		asoc->strmout[i].next_spoke.tqe_next = 0;
1141 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1142 	}
1143 	/* Now the mapping array */
1144 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1145 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1146 	    SCTP_M_MAP);
1147 	if (asoc->mapping_array == NULL) {
1148 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1149 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1150 		return (ENOMEM);
1151 	}
1152 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1153 	/* EY  - initialize the nr_mapping_array just like mapping array */
1154 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1155 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1156 	    SCTP_M_MAP);
1157 	/*
1158 	 * if (asoc->nr_mapping_array == NULL) { SCTP_FREE(asoc->strmout,
1159 	 * SCTP_M_STRMO); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL,
1160 	 * SCTP_FROM_SCTPUTIL, ENOMEM); return (ENOMEM); }
1161 	 */
1162 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1163 
1164 	/* Now the init of the other outqueues */
1165 	TAILQ_INIT(&asoc->free_chunks);
1166 	TAILQ_INIT(&asoc->out_wheel);
1167 	TAILQ_INIT(&asoc->control_send_queue);
1168 	TAILQ_INIT(&asoc->asconf_send_queue);
1169 	TAILQ_INIT(&asoc->send_queue);
1170 	TAILQ_INIT(&asoc->sent_queue);
1171 	TAILQ_INIT(&asoc->reasmqueue);
1172 	TAILQ_INIT(&asoc->resetHead);
1173 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1174 	TAILQ_INIT(&asoc->asconf_queue);
1175 	/* authentication fields */
1176 	asoc->authinfo.random = NULL;
1177 	asoc->authinfo.active_keyid = 0;
1178 	asoc->authinfo.assoc_key = NULL;
1179 	asoc->authinfo.assoc_keyid = 0;
1180 	asoc->authinfo.recv_key = NULL;
1181 	asoc->authinfo.recv_keyid = 0;
1182 	LIST_INIT(&asoc->shared_keys);
1183 	asoc->marked_retrans = 0;
1184 	asoc->timoinit = 0;
1185 	asoc->timodata = 0;
1186 	asoc->timosack = 0;
1187 	asoc->timoshutdown = 0;
1188 	asoc->timoheartbeat = 0;
1189 	asoc->timocookie = 0;
1190 	asoc->timoshutdownack = 0;
1191 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1192 	asoc->discontinuity_time = asoc->start_time;
1193 	/*
1194 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1195 	 * freed later whe the association is freed.
1196 	 */
1197 	return (0);
1198 }
1199 
1200 int
1201 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1202 {
1203 	/* mapping array needs to grow */
1204 	uint8_t *new_array;
1205 	uint32_t new_size;
1206 
1207 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1208 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1209 	if (new_array == NULL) {
1210 		/* can't get more, forget it */
1211 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1212 		    new_size);
1213 		return (-1);
1214 	}
1215 	memset(new_array, 0, new_size);
1216 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1217 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1218 	asoc->mapping_array = new_array;
1219 	asoc->mapping_array_size = new_size;
1220 	return (0);
1221 }
1222 
1223 /* EY - nr_sack version of the above method */
1224 int
1225 sctp_expand_nr_mapping_array(struct sctp_association *asoc, uint32_t needed)
1226 {
1227 	/* nr mapping array needs to grow */
1228 	uint8_t *new_array;
1229 	uint32_t new_size;
1230 
1231 	new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1232 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1233 	if (new_array == NULL) {
1234 		/* can't get more, forget it */
1235 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1236 		    new_size);
1237 		return (-1);
1238 	}
1239 	memset(new_array, 0, new_size);
1240 	memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1241 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1242 	asoc->nr_mapping_array = new_array;
1243 	asoc->nr_mapping_array_size = new_size;
1244 	return (0);
1245 }
1246 
1247 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Execute one iterator job: walk SCTP endpoints (and, per endpoint, the
 * associations in the desired state), invoking the callbacks stored in
 * 'it' (function_inp, function_assoc, function_inp_end, function_atend).
 * The iterator structure itself is freed here when the walk completes.
 *
 * Locking is delicate: the global ITERATOR lock is held across the walk,
 * endpoint locks are taken write-then-read, and the loop periodically
 * pauses (dropping/retaking locks) so other threads can make progress.
 * Do not reorder lock operations in this function.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference taken when this inp was scheduled. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator job owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* Downgrade: the walk below only needs read access to the inp. */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no assocs: finish ep. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcnt on the stcb (and a ref on the inp)
			 * so neither can be freed while all locks are
			 * briefly dropped and retaken below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this write-lock/unlock pair looks like a barrier to
	 * let any pending writer on this inp drain before we step past it —
	 * confirm intent before removing.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		/* Advance along the global inp list under the INFO lock. */
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1374 
1375 void
1376 sctp_iterator_worker(void)
1377 {
1378 	struct sctp_iterator *it = NULL;
1379 
1380 	/* This function is called with the WQ lock in place */
1381 
1382 	SCTP_BASE_INFO(iterator_running) = 1;
1383 again:
1384 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1385 	while (it) {
1386 		/* now lets work on this one */
1387 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1388 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1389 		sctp_iterator_work(it);
1390 		SCTP_IPI_ITERATOR_WQ_LOCK();
1391 		/* sa_ignore FREED_MEMORY */
1392 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1393 	}
1394 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1395 		goto again;
1396 	}
1397 	SCTP_BASE_INFO(iterator_running) = 0;
1398 	return;
1399 }
1400 
1401 #endif
1402 
1403 
1404 static void
1405 sctp_handle_addr_wq(void)
1406 {
1407 	/* deal with the ADDR wq from the rtsock calls */
1408 	struct sctp_laddr *wi;
1409 	struct sctp_asconf_iterator *asc;
1410 
1411 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1412 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1413 	if (asc == NULL) {
1414 		/* Try later, no memory */
1415 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1416 		    (struct sctp_inpcb *)NULL,
1417 		    (struct sctp_tcb *)NULL,
1418 		    (struct sctp_nets *)NULL);
1419 		return;
1420 	}
1421 	LIST_INIT(&asc->list_of_work);
1422 	asc->cnt = 0;
1423 	SCTP_IPI_ITERATOR_WQ_LOCK();
1424 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1425 	while (wi != NULL) {
1426 		LIST_REMOVE(wi, sctp_nxt_addr);
1427 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1428 		asc->cnt++;
1429 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1430 	}
1431 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1432 	if (asc->cnt == 0) {
1433 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1434 	} else {
1435 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1436 		    sctp_asconf_iterator_stcb,
1437 		    NULL,	/* No ep end for boundall */
1438 		    SCTP_PCB_FLAGS_BOUNDALL,
1439 		    SCTP_PCB_ANY_FEATURES,
1440 		    SCTP_ASOC_ANY_STATE,
1441 		    (void *)asc, 0,
1442 		    sctp_asconf_iterator_end, NULL, 0);
1443 	}
1444 }
1445 
/*
 * NOTE(review): file-scope scratch written by the SCTP_TIMER_TYPE_SEND
 * case in sctp_timeout_handler() below, with no lock protecting them and
 * external linkage.  They look like debugger/inspection aids rather than
 * real state — confirm nothing else links against them before relying on
 * (or narrowing the scope of) these values.
 */
int retcode = 0;
int cur_oerr = 0;
1448 
/*
 * Common callout handler for every SCTP timer type.  't' points at a
 * struct sctp_timer whose ep/tcb/net fields identify the endpoint,
 * association and destination the timer belongs to.  The handler:
 *   1. sanity-checks the timer (self pointer, valid type),
 *   2. takes an inp reference and an stcb refcount/lock as needed,
 *      bailing out if the object is being torn down,
 *   3. dispatches on tmr->type to the per-timer routine,
 *   4. releases whatever it acquired via the get_out/out_decr/
 *      out_no_decr exit labels.
 * The tmr->stopped_from writes (0xa001..0xa006) are breadcrumbs that
 * record how far a timer got before being abandoned.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately run without an inp. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		/* For iterators the 'ep' slot actually carries the iterator. */
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timer types that must
		 * still run during teardown are allowed to proceed.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the assoc while we decide whether to service it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while we were getting here. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Take the TCB lock; the refcnt above kept stcb alive. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3-rtx retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): after a full TAILQ_FOREACH walk
			 * lnet is NULL here; the callees below are
			 * presumably prepared for a NULL net — confirm.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Guard expired: tear the association down. */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take the socket lock before the TCB
		 * lock, holding a refcnt so the stcb survives the gap.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release whatever this path still holds. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1925 
1926 void
1927 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1928     struct sctp_nets *net)
1929 {
1930 	int to_ticks;
1931 	struct sctp_timer *tmr;
1932 
1933 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1934 		return;
1935 
1936 	to_ticks = 0;
1937 
1938 	tmr = NULL;
1939 	if (stcb) {
1940 		SCTP_TCB_LOCK_ASSERT(stcb);
1941 	}
1942 	switch (t_type) {
1943 	case SCTP_TIMER_TYPE_ZERO_COPY:
1944 		tmr = &inp->sctp_ep.zero_copy_timer;
1945 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1946 		break;
1947 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1948 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1949 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1950 		break;
1951 	case SCTP_TIMER_TYPE_ADDR_WQ:
1952 		/* Only 1 tick away :-) */
1953 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1954 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1955 		break;
1956 	case SCTP_TIMER_TYPE_ITERATOR:
1957 		{
1958 			struct sctp_iterator *it;
1959 
1960 			it = (struct sctp_iterator *)inp;
1961 			tmr = &it->tmr;
1962 			to_ticks = SCTP_ITERATOR_TICKS;
1963 		}
1964 		break;
1965 	case SCTP_TIMER_TYPE_SEND:
1966 		/* Here we use the RTO timer */
1967 		{
1968 			int rto_val;
1969 
1970 			if ((stcb == NULL) || (net == NULL)) {
1971 				return;
1972 			}
1973 			tmr = &net->rxt_timer;
1974 			if (net->RTO == 0) {
1975 				rto_val = stcb->asoc.initial_rto;
1976 			} else {
1977 				rto_val = net->RTO;
1978 			}
1979 			to_ticks = MSEC_TO_TICKS(rto_val);
1980 		}
1981 		break;
1982 	case SCTP_TIMER_TYPE_INIT:
1983 		/*
1984 		 * Here we use the INIT timer default usually about 1
1985 		 * minute.
1986 		 */
1987 		if ((stcb == NULL) || (net == NULL)) {
1988 			return;
1989 		}
1990 		tmr = &net->rxt_timer;
1991 		if (net->RTO == 0) {
1992 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1993 		} else {
1994 			to_ticks = MSEC_TO_TICKS(net->RTO);
1995 		}
1996 		break;
1997 	case SCTP_TIMER_TYPE_RECV:
1998 		/*
1999 		 * Here we use the Delayed-Ack timer value from the inp
2000 		 * ususually about 200ms.
2001 		 */
2002 		if (stcb == NULL) {
2003 			return;
2004 		}
2005 		tmr = &stcb->asoc.dack_timer;
2006 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2007 		break;
2008 	case SCTP_TIMER_TYPE_SHUTDOWN:
2009 		/* Here we use the RTO of the destination. */
2010 		if ((stcb == NULL) || (net == NULL)) {
2011 			return;
2012 		}
2013 		if (net->RTO == 0) {
2014 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2015 		} else {
2016 			to_ticks = MSEC_TO_TICKS(net->RTO);
2017 		}
2018 		tmr = &net->rxt_timer;
2019 		break;
2020 	case SCTP_TIMER_TYPE_HEARTBEAT:
2021 		/*
2022 		 * the net is used here so that we can add in the RTO. Even
2023 		 * though we use a different timer. We also add the HB timer
2024 		 * PLUS a random jitter.
2025 		 */
2026 		if ((inp == NULL) || (stcb == NULL)) {
2027 			return;
2028 		} else {
2029 			uint32_t rndval;
2030 			uint8_t this_random;
2031 			int cnt_of_unconf = 0;
2032 			struct sctp_nets *lnet;
2033 
2034 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2035 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2036 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2037 					cnt_of_unconf++;
2038 				}
2039 			}
2040 			if (cnt_of_unconf) {
2041 				net = lnet = NULL;
2042 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2043 			}
2044 			if (stcb->asoc.hb_random_idx > 3) {
2045 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2046 				memcpy(stcb->asoc.hb_random_values, &rndval,
2047 				    sizeof(stcb->asoc.hb_random_values));
2048 				stcb->asoc.hb_random_idx = 0;
2049 			}
2050 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2051 			stcb->asoc.hb_random_idx++;
2052 			stcb->asoc.hb_ect_randombit = 0;
2053 			/*
2054 			 * this_random will be 0 - 256 ms RTO is in ms.
2055 			 */
2056 			if ((stcb->asoc.hb_is_disabled) &&
2057 			    (cnt_of_unconf == 0)) {
2058 				return;
2059 			}
2060 			if (net) {
2061 				int delay;
2062 
2063 				delay = stcb->asoc.heart_beat_delay;
2064 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2065 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2066 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2067 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2068 						delay = 0;
2069 					}
2070 				}
2071 				if (net->RTO == 0) {
2072 					/* Never been checked */
2073 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2074 				} else {
2075 					/* set rto_val to the ms */
2076 					to_ticks = delay + net->RTO + this_random;
2077 				}
2078 			} else {
2079 				if (cnt_of_unconf) {
2080 					to_ticks = this_random + stcb->asoc.initial_rto;
2081 				} else {
2082 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2083 				}
2084 			}
2085 			/*
2086 			 * Now we must convert the to_ticks that are now in
2087 			 * ms to ticks.
2088 			 */
2089 			to_ticks = MSEC_TO_TICKS(to_ticks);
2090 			tmr = &stcb->asoc.hb_timer;
2091 		}
2092 		break;
2093 	case SCTP_TIMER_TYPE_COOKIE:
2094 		/*
2095 		 * Here we can use the RTO timer from the network since one
2096 		 * RTT was compelete. If a retran happened then we will be
2097 		 * using the RTO initial value.
2098 		 */
2099 		if ((stcb == NULL) || (net == NULL)) {
2100 			return;
2101 		}
2102 		if (net->RTO == 0) {
2103 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2104 		} else {
2105 			to_ticks = MSEC_TO_TICKS(net->RTO);
2106 		}
2107 		tmr = &net->rxt_timer;
2108 		break;
2109 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2110 		/*
2111 		 * nothing needed but the endpoint here ususually about 60
2112 		 * minutes.
2113 		 */
2114 		if (inp == NULL) {
2115 			return;
2116 		}
2117 		tmr = &inp->sctp_ep.signature_change;
2118 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2119 		break;
2120 	case SCTP_TIMER_TYPE_ASOCKILL:
2121 		if (stcb == NULL) {
2122 			return;
2123 		}
2124 		tmr = &stcb->asoc.strreset_timer;
2125 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2126 		break;
2127 	case SCTP_TIMER_TYPE_INPKILL:
2128 		/*
2129 		 * The inp is setup to die. We re-use the signature_chage
2130 		 * timer since that has stopped and we are in the GONE
2131 		 * state.
2132 		 */
2133 		if (inp == NULL) {
2134 			return;
2135 		}
2136 		tmr = &inp->sctp_ep.signature_change;
2137 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2138 		break;
2139 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2140 		/*
2141 		 * Here we use the value found in the EP for PMTU ususually
2142 		 * about 10 minutes.
2143 		 */
2144 		if ((stcb == NULL) || (inp == NULL)) {
2145 			return;
2146 		}
2147 		if (net == NULL) {
2148 			return;
2149 		}
2150 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2151 		tmr = &net->pmtu_timer;
2152 		break;
2153 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2154 		/* Here we use the RTO of the destination */
2155 		if ((stcb == NULL) || (net == NULL)) {
2156 			return;
2157 		}
2158 		if (net->RTO == 0) {
2159 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2160 		} else {
2161 			to_ticks = MSEC_TO_TICKS(net->RTO);
2162 		}
2163 		tmr = &net->rxt_timer;
2164 		break;
2165 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2166 		/*
2167 		 * Here we use the endpoints shutdown guard timer usually
2168 		 * about 3 minutes.
2169 		 */
2170 		if ((inp == NULL) || (stcb == NULL)) {
2171 			return;
2172 		}
2173 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2174 		tmr = &stcb->asoc.shut_guard_timer;
2175 		break;
2176 	case SCTP_TIMER_TYPE_STRRESET:
2177 		/*
2178 		 * Here the timer comes from the stcb but its value is from
2179 		 * the net's RTO.
2180 		 */
2181 		if ((stcb == NULL) || (net == NULL)) {
2182 			return;
2183 		}
2184 		if (net->RTO == 0) {
2185 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2186 		} else {
2187 			to_ticks = MSEC_TO_TICKS(net->RTO);
2188 		}
2189 		tmr = &stcb->asoc.strreset_timer;
2190 		break;
2191 
2192 	case SCTP_TIMER_TYPE_EARLYFR:
2193 		{
2194 			unsigned int msec;
2195 
2196 			if ((stcb == NULL) || (net == NULL)) {
2197 				return;
2198 			}
2199 			if (net->flight_size > net->cwnd) {
2200 				/* no need to start */
2201 				return;
2202 			}
2203 			SCTP_STAT_INCR(sctps_earlyfrstart);
2204 			if (net->lastsa == 0) {
2205 				/* Hmm no rtt estimate yet? */
2206 				msec = stcb->asoc.initial_rto >> 2;
2207 			} else {
2208 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2209 			}
2210 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2211 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2212 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2213 					msec = SCTP_MINFR_MSEC_FLOOR;
2214 				}
2215 			}
2216 			to_ticks = MSEC_TO_TICKS(msec);
2217 			tmr = &net->fr_timer;
2218 		}
2219 		break;
2220 	case SCTP_TIMER_TYPE_ASCONF:
2221 		/*
2222 		 * Here the timer comes from the stcb but its value is from
2223 		 * the net's RTO.
2224 		 */
2225 		if ((stcb == NULL) || (net == NULL)) {
2226 			return;
2227 		}
2228 		if (net->RTO == 0) {
2229 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2230 		} else {
2231 			to_ticks = MSEC_TO_TICKS(net->RTO);
2232 		}
2233 		tmr = &stcb->asoc.asconf_timer;
2234 		break;
2235 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2236 		if ((stcb == NULL) || (net != NULL)) {
2237 			return;
2238 		}
2239 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2240 		tmr = &stcb->asoc.delete_prim_timer;
2241 		break;
2242 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2243 		if (stcb == NULL) {
2244 			return;
2245 		}
2246 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2247 			/*
2248 			 * Really an error since stcb is NOT set to
2249 			 * autoclose
2250 			 */
2251 			return;
2252 		}
2253 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2254 		tmr = &stcb->asoc.autoclose_timer;
2255 		break;
2256 	default:
2257 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2258 		    __FUNCTION__, t_type);
2259 		return;
2260 		break;
2261 	};
2262 	if ((to_ticks <= 0) || (tmr == NULL)) {
2263 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2264 		    __FUNCTION__, t_type, to_ticks, tmr);
2265 		return;
2266 	}
2267 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2268 		/*
2269 		 * we do NOT allow you to have it already running. if it is
2270 		 * we leave the current one up unchanged
2271 		 */
2272 		return;
2273 	}
2274 	/* At this point we can proceed */
2275 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2276 		stcb->asoc.num_send_timers_up++;
2277 	}
2278 	tmr->stopped_from = 0;
2279 	tmr->type = t_type;
2280 	tmr->ep = (void *)inp;
2281 	tmr->tcb = (void *)stcb;
2282 	tmr->net = (void *)net;
2283 	tmr->self = (void *)tmr;
2284 	tmr->ticks = sctp_get_tick_count();
2285 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2286 	return;
2287 }
2288 
2289 void
2290 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2291     struct sctp_nets *net, uint32_t from)
2292 {
2293 	struct sctp_timer *tmr;
2294 
2295 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2296 	    (inp == NULL))
2297 		return;
2298 
2299 	tmr = NULL;
2300 	if (stcb) {
2301 		SCTP_TCB_LOCK_ASSERT(stcb);
2302 	}
2303 	switch (t_type) {
2304 	case SCTP_TIMER_TYPE_ZERO_COPY:
2305 		tmr = &inp->sctp_ep.zero_copy_timer;
2306 		break;
2307 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2308 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2309 		break;
2310 	case SCTP_TIMER_TYPE_ADDR_WQ:
2311 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2312 		break;
2313 	case SCTP_TIMER_TYPE_EARLYFR:
2314 		if ((stcb == NULL) || (net == NULL)) {
2315 			return;
2316 		}
2317 		tmr = &net->fr_timer;
2318 		SCTP_STAT_INCR(sctps_earlyfrstop);
2319 		break;
2320 	case SCTP_TIMER_TYPE_ITERATOR:
2321 		{
2322 			struct sctp_iterator *it;
2323 
2324 			it = (struct sctp_iterator *)inp;
2325 			tmr = &it->tmr;
2326 		}
2327 		break;
2328 	case SCTP_TIMER_TYPE_SEND:
2329 		if ((stcb == NULL) || (net == NULL)) {
2330 			return;
2331 		}
2332 		tmr = &net->rxt_timer;
2333 		break;
2334 	case SCTP_TIMER_TYPE_INIT:
2335 		if ((stcb == NULL) || (net == NULL)) {
2336 			return;
2337 		}
2338 		tmr = &net->rxt_timer;
2339 		break;
2340 	case SCTP_TIMER_TYPE_RECV:
2341 		if (stcb == NULL) {
2342 			return;
2343 		}
2344 		tmr = &stcb->asoc.dack_timer;
2345 		break;
2346 	case SCTP_TIMER_TYPE_SHUTDOWN:
2347 		if ((stcb == NULL) || (net == NULL)) {
2348 			return;
2349 		}
2350 		tmr = &net->rxt_timer;
2351 		break;
2352 	case SCTP_TIMER_TYPE_HEARTBEAT:
2353 		if (stcb == NULL) {
2354 			return;
2355 		}
2356 		tmr = &stcb->asoc.hb_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_COOKIE:
2359 		if ((stcb == NULL) || (net == NULL)) {
2360 			return;
2361 		}
2362 		tmr = &net->rxt_timer;
2363 		break;
2364 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2365 		/* nothing needed but the endpoint here */
2366 		tmr = &inp->sctp_ep.signature_change;
2367 		/*
2368 		 * We re-use the newcookie timer for the INP kill timer. We
2369 		 * must assure that we do not kill it by accident.
2370 		 */
2371 		break;
2372 	case SCTP_TIMER_TYPE_ASOCKILL:
2373 		/*
2374 		 * Stop the asoc kill timer.
2375 		 */
2376 		if (stcb == NULL) {
2377 			return;
2378 		}
2379 		tmr = &stcb->asoc.strreset_timer;
2380 		break;
2381 
2382 	case SCTP_TIMER_TYPE_INPKILL:
2383 		/*
2384 		 * The inp is setup to die. We re-use the signature_chage
2385 		 * timer since that has stopped and we are in the GONE
2386 		 * state.
2387 		 */
2388 		tmr = &inp->sctp_ep.signature_change;
2389 		break;
2390 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2391 		if ((stcb == NULL) || (net == NULL)) {
2392 			return;
2393 		}
2394 		tmr = &net->pmtu_timer;
2395 		break;
2396 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2397 		if ((stcb == NULL) || (net == NULL)) {
2398 			return;
2399 		}
2400 		tmr = &net->rxt_timer;
2401 		break;
2402 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2403 		if (stcb == NULL) {
2404 			return;
2405 		}
2406 		tmr = &stcb->asoc.shut_guard_timer;
2407 		break;
2408 	case SCTP_TIMER_TYPE_STRRESET:
2409 		if (stcb == NULL) {
2410 			return;
2411 		}
2412 		tmr = &stcb->asoc.strreset_timer;
2413 		break;
2414 	case SCTP_TIMER_TYPE_ASCONF:
2415 		if (stcb == NULL) {
2416 			return;
2417 		}
2418 		tmr = &stcb->asoc.asconf_timer;
2419 		break;
2420 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2421 		if (stcb == NULL) {
2422 			return;
2423 		}
2424 		tmr = &stcb->asoc.delete_prim_timer;
2425 		break;
2426 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2427 		if (stcb == NULL) {
2428 			return;
2429 		}
2430 		tmr = &stcb->asoc.autoclose_timer;
2431 		break;
2432 	default:
2433 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2434 		    __FUNCTION__, t_type);
2435 		break;
2436 	};
2437 	if (tmr == NULL) {
2438 		return;
2439 	}
2440 	if ((tmr->type != t_type) && tmr->type) {
2441 		/*
2442 		 * Ok we have a timer that is under joint use. Cookie timer
2443 		 * per chance with the SEND timer. We therefore are NOT
2444 		 * running the timer that the caller wants stopped.  So just
2445 		 * return.
2446 		 */
2447 		return;
2448 	}
2449 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2450 		stcb->asoc.num_send_timers_up--;
2451 		if (stcb->asoc.num_send_timers_up < 0) {
2452 			stcb->asoc.num_send_timers_up = 0;
2453 		}
2454 	}
2455 	tmr->self = NULL;
2456 	tmr->stopped_from = from;
2457 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2458 	return;
2459 }
2460 
2461 #ifdef SCTP_USE_ADLER32
2462 static uint32_t
2463 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2464 {
2465 	uint32_t s1 = adler & 0xffff;
2466 	uint32_t s2 = (adler >> 16) & 0xffff;
2467 	int n;
2468 
2469 	for (n = 0; n < len; n++, buf++) {
2470 		/* s1 = (s1 + buf[n]) % BASE */
2471 		/* first we add */
2472 		s1 = (s1 + *buf);
2473 		/*
2474 		 * now if we need to, we do a mod by subtracting. It seems a
2475 		 * bit faster since I really will only ever do one subtract
2476 		 * at the MOST, since buf[n] is a max of 255.
2477 		 */
2478 		if (s1 >= SCTP_ADLER32_BASE) {
2479 			s1 -= SCTP_ADLER32_BASE;
2480 		}
2481 		/* s2 = (s2 + s1) % BASE */
2482 		/* first we add */
2483 		s2 = (s2 + s1);
2484 		/*
2485 		 * again, it is more efficent (it seems) to subtract since
2486 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2487 		 * worse case. This would then be (2 * BASE) - 2, which will
2488 		 * still only do one subtract. On Intel this is much better
2489 		 * to do this way and avoid the divide. Have not -pg'd on
2490 		 * sparc.
2491 		 */
2492 		if (s2 >= SCTP_ADLER32_BASE) {
2493 			s2 -= SCTP_ADLER32_BASE;
2494 		}
2495 	}
2496 	/* Return the adler32 of the bytes buf[0..len-1] */
2497 	return ((s2 << 16) + s1);
2498 }
2499 
2500 #endif
2501 
2502 
2503 uint32_t
2504 sctp_calculate_len(struct mbuf *m)
2505 {
2506 	uint32_t tlen = 0;
2507 	struct mbuf *at;
2508 
2509 	at = m;
2510 	while (at) {
2511 		tlen += SCTP_BUF_LEN(at);
2512 		at = SCTP_BUF_NEXT(at);
2513 	}
2514 	return (tlen);
2515 }
2516 
2517 #if defined(SCTP_WITH_NO_CSUM)
2518 
2519 uint32_t
2520 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2521 {
2522 	/*
2523 	 * given a mbuf chain with a packetheader offset by 'offset'
2524 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2525 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2526 	 * has a side bonus as it will calculate the total length of the
2527 	 * mbuf chain. Note: if offset is greater than the total mbuf
2528 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2529 	 */
2530 	if (pktlen == NULL)
2531 		return (0);
2532 	*pktlen = sctp_calculate_len(m);
2533 	return (0);
2534 }
2535 
2536 #elif defined(SCTP_USE_INCHKSUM)
2537 
2538 #include <machine/in_cksum.h>
2539 
2540 uint32_t
2541 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2542 {
2543 	/*
2544 	 * given a mbuf chain with a packetheader offset by 'offset'
2545 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2546 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2547 	 * has a side bonus as it will calculate the total length of the
2548 	 * mbuf chain. Note: if offset is greater than the total mbuf
2549 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2550 	 */
2551 	int32_t tlen = 0;
2552 	struct mbuf *at;
2553 	uint32_t the_sum, retsum;
2554 
2555 	at = m;
2556 	while (at) {
2557 		tlen += SCTP_BUF_LEN(at);
2558 		at = SCTP_BUF_NEXT(at);
2559 	}
2560 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2561 	if (pktlen != NULL)
2562 		*pktlen = (tlen - offset);
2563 	retsum = htons(the_sum);
2564 	return (the_sum);
2565 }
2566 
2567 #else
2568 
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

	/* Seed value: 1 for Adler-32, all-ones for CRC-32c. */
#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;

#else
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* Fold each remaining mbuf's data into the running checksum. */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/* Consume the remaining offset so later mbufs start at 0. */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
	/* Finalize: Adler-32 is returned in network order, CRC-32c via the
	 * stack's finalization helper. */
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2639 
2640 
2641 #endif
2642 
2643 void
2644 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2645     struct sctp_association *asoc, uint32_t mtu)
2646 {
2647 	/*
2648 	 * Reset the P-MTU size on this association, this involves changing
2649 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2650 	 * allow the DF flag to be cleared.
2651 	 */
2652 	struct sctp_tmit_chunk *chk;
2653 	unsigned int eff_mtu, ovh;
2654 
2655 #ifdef SCTP_PRINT_FOR_B_AND_M
2656 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2657 	    inp, asoc, mtu);
2658 #endif
2659 	asoc->smallest_mtu = mtu;
2660 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2661 		ovh = SCTP_MIN_OVERHEAD;
2662 	} else {
2663 		ovh = SCTP_MIN_V4_OVERHEAD;
2664 	}
2665 	eff_mtu = mtu - ovh;
2666 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2667 
2668 		if (chk->send_size > eff_mtu) {
2669 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2670 		}
2671 	}
2672 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2673 		if (chk->send_size > eff_mtu) {
2674 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2675 		}
2676 	}
2677 }
2678 
2679 
2680 /*
2681  * given an association and starting time of the current RTT period return
2682  * RTO in number of msecs net should point to the current network
2683  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (avoids a possibly misaligned access) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value (elapsed time now - *old, in msec) */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*
	 * Fixed-point smoothing of the RTT estimate: lastsa holds the
	 * smoothed RTT scaled by 2^SCTP_RTT_SHIFT, lastsv the smoothed
	 * mean deviation scaled by 2^SCTP_RTT_VAR_SHIFT.
	 */
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			/* use |error| when updating the deviation */
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the deviation collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (both unscaled here) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch "satellite network" mode once the RTO looks large enough;
	 * a later small measurement clears it and locks further changes out. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2809 
2810 /*
2811  * return a pointer to a contiguous piece of data from the given mbuf chain
2812  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2813  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2814  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2815  */
2816 caddr_t
2817 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2818 {
2819 	uint32_t count;
2820 	uint8_t *ptr;
2821 
2822 	ptr = in_ptr;
2823 	if ((off < 0) || (len <= 0))
2824 		return (NULL);
2825 
2826 	/* find the desired start location */
2827 	while ((m != NULL) && (off > 0)) {
2828 		if (off < SCTP_BUF_LEN(m))
2829 			break;
2830 		off -= SCTP_BUF_LEN(m);
2831 		m = SCTP_BUF_NEXT(m);
2832 	}
2833 	if (m == NULL)
2834 		return (NULL);
2835 
2836 	/* is the current mbuf large enough (eg. contiguous)? */
2837 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2838 		return (mtod(m, caddr_t)+off);
2839 	} else {
2840 		/* else, it spans more than one mbuf, so save a temp copy... */
2841 		while ((m != NULL) && (len > 0)) {
2842 			count = min(SCTP_BUF_LEN(m) - off, len);
2843 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2844 			len -= count;
2845 			ptr += count;
2846 			off = 0;
2847 			m = SCTP_BUF_NEXT(m);
2848 		}
2849 		if ((m == NULL) && (len > 0))
2850 			return (NULL);
2851 		else
2852 			return ((caddr_t)in_ptr);
2853 	}
2854 }
2855 
2856 
2857 
2858 struct sctp_paramhdr *
2859 sctp_get_next_param(struct mbuf *m,
2860     int offset,
2861     struct sctp_paramhdr *pull,
2862     int pull_limit)
2863 {
2864 	/* This just provides a typed signature to Peter's Pull routine */
2865 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2866 	    (uint8_t *) pull));
2867 }
2868 
2869 
2870 int
2871 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2872 {
2873 	/*
2874 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2875 	 * padlen is > 3 this routine will fail.
2876 	 */
2877 	uint8_t *dp;
2878 	int i;
2879 
2880 	if (padlen > 3) {
2881 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2882 		return (ENOBUFS);
2883 	}
2884 	if (padlen <= M_TRAILINGSPACE(m)) {
2885 		/*
2886 		 * The easy way. We hope the majority of the time we hit
2887 		 * here :)
2888 		 */
2889 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2890 		SCTP_BUF_LEN(m) += padlen;
2891 	} else {
2892 		/* Hard way we must grow the mbuf */
2893 		struct mbuf *tmp;
2894 
2895 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2896 		if (tmp == NULL) {
2897 			/* Out of space GAK! we are in big trouble. */
2898 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2899 			return (ENOSPC);
2900 		}
2901 		/* setup and insert in middle */
2902 		SCTP_BUF_LEN(tmp) = padlen;
2903 		SCTP_BUF_NEXT(tmp) = NULL;
2904 		SCTP_BUF_NEXT(m) = tmp;
2905 		dp = mtod(tmp, uint8_t *);
2906 	}
2907 	/* zero out the pad */
2908 	for (i = 0; i < padlen; i++) {
2909 		*dp = 0;
2910 		dp++;
2911 	}
2912 	return (0);
2913 }
2914 
2915 int
2916 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2917 {
2918 	/* find the last mbuf in chain and pad it */
2919 	struct mbuf *m_at;
2920 
2921 	m_at = m;
2922 	if (last_mbuf) {
2923 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2924 	} else {
2925 		while (m_at) {
2926 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2927 				return (sctp_add_pad_tombuf(m_at, padval));
2928 			}
2929 			m_at = SCTP_BUF_NEXT(m_at);
2930 		}
2931 	}
2932 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2933 	return (EFAULT);
2934 }
2935 
2936 int sctp_asoc_change_wake = 0;
2937 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (event is one of SCTP_COMM_UP,
 * SCTP_COMM_LOST, SCTP_RESTART, SCTP_SHUTDOWN_COMP, SCTP_CANT_STR_ASSOC)
 * on the association's socket receive queue.
 *
 * For one-to-one style (TCP model) sockets, an abortive event additionally
 * sets so_error (ECONNREFUSED if the association never completed setup,
 * ECONNRESET otherwise) and wakes any read/write sleepers so blocked
 * callers fail promptly.
 *
 * so_locked indicates whether the caller already holds the socket lock;
 * it is only acted upon on platforms that take the socket lock here
 * (__APPLE__ / SCTP_SO_LOCK_TESTING).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* association setup never completed: report refusal */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: the socket lock must be taken without
		 * the TCB lock held.  Hold a refcount so the TCB cannot be
		 * freed while unlocked, then re-check that the socket did
		 * not close underneath us.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket went away while we slept; give up */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change structure for the user. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Same lock-order dance as above. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3054 
3055 static void
3056 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3057     struct sockaddr *sa, uint32_t error)
3058 {
3059 	struct mbuf *m_notify;
3060 	struct sctp_paddr_change *spc;
3061 	struct sctp_queued_to_read *control;
3062 
3063 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
3064 		/* event not enabled */
3065 		return;
3066 	}
3067 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
3068 	if (m_notify == NULL)
3069 		return;
3070 	SCTP_BUF_LEN(m_notify) = 0;
3071 	spc = mtod(m_notify, struct sctp_paddr_change *);
3072 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3073 	spc->spc_flags = 0;
3074 	spc->spc_length = sizeof(struct sctp_paddr_change);
3075 	switch (sa->sa_family) {
3076 	case AF_INET:
3077 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3078 		break;
3079 #ifdef INET6
3080 	case AF_INET6:
3081 		{
3082 			struct sockaddr_in6 *sin6;
3083 
3084 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3085 
3086 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3087 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3088 				if (sin6->sin6_scope_id == 0) {
3089 					/* recover scope_id for user */
3090 					(void)sa6_recoverscope(sin6);
3091 				} else {
3092 					/* clear embedded scope_id for user */
3093 					in6_clearscope(&sin6->sin6_addr);
3094 				}
3095 			}
3096 			break;
3097 		}
3098 #endif
3099 	default:
3100 		/* TSNH */
3101 		break;
3102 	}
3103 	spc->spc_state = state;
3104 	spc->spc_error = error;
3105 	spc->spc_assoc_id = sctp_get_associd(stcb);
3106 
3107 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3108 	SCTP_BUF_NEXT(m_notify) = NULL;
3109 
3110 	/* append to socket */
3111 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3112 	    0, 0, 0, 0, 0, 0,
3113 	    m_notify);
3114 	if (control == NULL) {
3115 		/* no memory */
3116 		sctp_m_freem(m_notify);
3117 		return;
3118 	}
3119 	control->length = SCTP_BUF_LEN(m_notify);
3120 	control->spec_flags = M_NOTIFICATION;
3121 	/* not that we need this */
3122 	control->tail_mbuf = m_notify;
3123 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3124 	    control,
3125 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3126 }
3127 
3128 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that was (or was
 * not) sent before the association failed.  error is either
 * SCTP_NOTIFY_DATAGRAM_UNSENT or SCTP_NOTIFY_DATAGRAM_SENT and selects
 * the ssf_flags value.  Ownership of chk->data is transferred to the
 * notification mbuf chain (chk->data is NULLed out below); the data has
 * its SCTP DATA chunk header trimmed so the user sees approximately
 * what was originally submitted.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* user-visible length: header + payload minus the DATA chunk header */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	/* chain the (possibly NULL) user data behind the event header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* frees the whole chain, including the stolen data */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3208 
3209 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still sitting on
 * a stream output queue (never chunked for the wire), as opposed to
 * sctp_notify_send_failed() above which handles already-chunked data.
 * Ownership of sp->data is transferred to the notification mbuf chain
 * (sp->data is NULLed out below).
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already chunked off and sent */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the (possibly NULL) user data behind the event header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* frees the whole chain, including the stolen data */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3282 
3283 
3284 
3285 static void
3286 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3287     uint32_t error)
3288 {
3289 	struct mbuf *m_notify;
3290 	struct sctp_adaptation_event *sai;
3291 	struct sctp_queued_to_read *control;
3292 
3293 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3294 		/* event not enabled */
3295 		return;
3296 	}
3297 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3298 	if (m_notify == NULL)
3299 		/* no space left */
3300 		return;
3301 	SCTP_BUF_LEN(m_notify) = 0;
3302 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3303 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3304 	sai->sai_flags = 0;
3305 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3306 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3307 	sai->sai_assoc_id = sctp_get_associd(stcb);
3308 
3309 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3310 	SCTP_BUF_NEXT(m_notify) = NULL;
3311 
3312 	/* append to socket */
3313 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3314 	    0, 0, 0, 0, 0, 0,
3315 	    m_notify);
3316 	if (control == NULL) {
3317 		/* no memory */
3318 		sctp_m_freem(m_notify);
3319 		return;
3320 	}
3321 	control->length = SCTP_BUF_LEN(m_notify);
3322 	control->spec_flags = M_NOTIFICATION;
3323 	/* not that we need this */
3324 	control->tail_mbuf = m_notify;
3325 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3326 	    control,
3327 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3328 }
3329 
3330 /* This always must be called with the read-queue LOCKED in the INP */
3331 void
3332 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3333     int nolock, uint32_t val)
3334 {
3335 	struct mbuf *m_notify;
3336 	struct sctp_pdapi_event *pdapi;
3337 	struct sctp_queued_to_read *control;
3338 	struct sockbuf *sb;
3339 
3340 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3341 		/* event not enabled */
3342 		return;
3343 	}
3344 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3345 	if (m_notify == NULL)
3346 		/* no space left */
3347 		return;
3348 	SCTP_BUF_LEN(m_notify) = 0;
3349 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3350 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3351 	pdapi->pdapi_flags = 0;
3352 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3353 	pdapi->pdapi_indication = error;
3354 	pdapi->pdapi_stream = (val >> 16);
3355 	pdapi->pdapi_seq = (val & 0x0000ffff);
3356 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3357 
3358 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3359 	SCTP_BUF_NEXT(m_notify) = NULL;
3360 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3361 	    0, 0, 0, 0, 0, 0,
3362 	    m_notify);
3363 	if (control == NULL) {
3364 		/* no memory */
3365 		sctp_m_freem(m_notify);
3366 		return;
3367 	}
3368 	control->spec_flags = M_NOTIFICATION;
3369 	control->length = SCTP_BUF_LEN(m_notify);
3370 	/* not that we need this */
3371 	control->tail_mbuf = m_notify;
3372 	control->held_length = 0;
3373 	control->length = 0;
3374 	if (nolock == 0) {
3375 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3376 	}
3377 	sb = &stcb->sctp_socket->so_rcv;
3378 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3379 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3380 	}
3381 	sctp_sballoc(stcb, sb, m_notify);
3382 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3383 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3384 	}
3385 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3386 	control->end_added = 1;
3387 	if (stcb->asoc.control_pdapi)
3388 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3389 	else {
3390 		/* we really should not see this case */
3391 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3392 	}
3393 	if (nolock == 0) {
3394 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3395 	}
3396 	if (stcb->sctp_ep && stcb->sctp_socket) {
3397 		/* This should always be the case */
3398 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3399 	}
3400 }
3401 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification when the peer has sent a
 * SHUTDOWN.  For one-to-one style (TCP model) sockets the socket is
 * also marked unable to send or receive, waking any sleepers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock to take the socket
		 * lock, holding a refcount so the TCB stays alive, then
		 * re-check that the socket did not close meanwhile.
		 */
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
		socantrcvmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3470 
3471 static void
3472 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3473     int so_locked
3474 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3475     SCTP_UNUSED
3476 #endif
3477 )
3478 {
3479 	struct mbuf *m_notify;
3480 	struct sctp_sender_dry_event *event;
3481 	struct sctp_queued_to_read *control;
3482 
3483 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3484 		/* event not enabled */
3485 		return;
3486 	}
3487 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3488 	if (m_notify == NULL) {
3489 		/* no space left */
3490 		return;
3491 	}
3492 	SCTP_BUF_LEN(m_notify) = 0;
3493 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3494 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3495 	event->sender_dry_flags = 0;
3496 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3497 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3498 
3499 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3500 	SCTP_BUF_NEXT(m_notify) = NULL;
3501 
3502 	/* append to socket */
3503 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3504 	    0, 0, 0, 0, 0, 0, m_notify);
3505 	if (control == NULL) {
3506 		/* no memory */
3507 		sctp_m_freem(m_notify);
3508 		return;
3509 	}
3510 	control->length = SCTP_BUF_LEN(m_notify);
3511 	control->spec_flags = M_NOTIFICATION;
3512 	/* not that we need this */
3513 	control->tail_mbuf = m_notify;
3514 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3515 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3516 }
3517 
3518 static void
3519 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3520     int number_entries, uint16_t * list, int flag)
3521 {
3522 	struct mbuf *m_notify;
3523 	struct sctp_queued_to_read *control;
3524 	struct sctp_stream_reset_event *strreset;
3525 	int len;
3526 
3527 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3528 		/* event not enabled */
3529 		return;
3530 	}
3531 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3532 	if (m_notify == NULL)
3533 		/* no space left */
3534 		return;
3535 	SCTP_BUF_LEN(m_notify) = 0;
3536 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3537 	if (len > M_TRAILINGSPACE(m_notify)) {
3538 		/* never enough room */
3539 		sctp_m_freem(m_notify);
3540 		return;
3541 	}
3542 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3543 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3544 	if (number_entries == 0) {
3545 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3546 	} else {
3547 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3548 	}
3549 	strreset->strreset_length = len;
3550 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3551 	if (number_entries) {
3552 		int i;
3553 
3554 		for (i = 0; i < number_entries; i++) {
3555 			strreset->strreset_list[i] = ntohs(list[i]);
3556 		}
3557 	}
3558 	SCTP_BUF_LEN(m_notify) = len;
3559 	SCTP_BUF_NEXT(m_notify) = NULL;
3560 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3561 		/* no space */
3562 		sctp_m_freem(m_notify);
3563 		return;
3564 	}
3565 	/* append to socket */
3566 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3567 	    0, 0, 0, 0, 0, 0,
3568 	    m_notify);
3569 	if (control == NULL) {
3570 		/* no memory */
3571 		sctp_m_freem(m_notify);
3572 		return;
3573 	}
3574 	control->spec_flags = M_NOTIFICATION;
3575 	control->length = SCTP_BUF_LEN(m_notify);
3576 	/* not that we need this */
3577 	control->tail_mbuf = m_notify;
3578 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3579 	    control,
3580 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3581 }
3582 
3583 
3584 void
3585 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3586     uint32_t error, void *data, int so_locked
3587 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3588     SCTP_UNUSED
3589 #endif
3590 )
3591 {
3592 	if ((stcb == NULL) ||
3593 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3594 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3595 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3596 		/* If the socket is gone we are out of here */
3597 		return;
3598 	}
3599 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3600 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3601 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3602 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3603 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3604 			/* Don't report these in front states */
3605 			return;
3606 		}
3607 	}
3608 	switch (notification) {
3609 	case SCTP_NOTIFY_ASSOC_UP:
3610 		if (stcb->asoc.assoc_up_sent == 0) {
3611 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3612 			stcb->asoc.assoc_up_sent = 1;
3613 		}
3614 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3615 			sctp_notify_adaptation_layer(stcb, error);
3616 		}
3617 		if (stcb->asoc.peer_supports_auth == 0) {
3618 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3619 			    NULL, so_locked);
3620 		}
3621 		break;
3622 	case SCTP_NOTIFY_ASSOC_DOWN:
3623 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3624 		break;
3625 	case SCTP_NOTIFY_INTERFACE_DOWN:
3626 		{
3627 			struct sctp_nets *net;
3628 
3629 			net = (struct sctp_nets *)data;
3630 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3631 			    (struct sockaddr *)&net->ro._l_addr, error);
3632 			break;
3633 		}
3634 	case SCTP_NOTIFY_INTERFACE_UP:
3635 		{
3636 			struct sctp_nets *net;
3637 
3638 			net = (struct sctp_nets *)data;
3639 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3640 			    (struct sockaddr *)&net->ro._l_addr, error);
3641 			break;
3642 		}
3643 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3644 		{
3645 			struct sctp_nets *net;
3646 
3647 			net = (struct sctp_nets *)data;
3648 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3649 			    (struct sockaddr *)&net->ro._l_addr, error);
3650 			break;
3651 		}
3652 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3653 		sctp_notify_send_failed2(stcb, error,
3654 		    (struct sctp_stream_queue_pending *)data, so_locked);
3655 		break;
3656 	case SCTP_NOTIFY_DG_FAIL:
3657 		sctp_notify_send_failed(stcb, error,
3658 		    (struct sctp_tmit_chunk *)data, so_locked);
3659 		break;
3660 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3661 		{
3662 			uint32_t val;
3663 
3664 			val = *((uint32_t *) data);
3665 
3666 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3667 		}
3668 		break;
3669 	case SCTP_NOTIFY_STRDATA_ERR:
3670 		break;
3671 	case SCTP_NOTIFY_ASSOC_ABORTED:
3672 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3673 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3674 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3675 		} else {
3676 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3677 		}
3678 		break;
3679 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3680 		break;
3681 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3682 		break;
3683 	case SCTP_NOTIFY_ASSOC_RESTART:
3684 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3685 		if (stcb->asoc.peer_supports_auth == 0) {
3686 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3687 			    NULL, so_locked);
3688 		}
3689 		break;
3690 	case SCTP_NOTIFY_HB_RESP:
3691 		break;
3692 	case SCTP_NOTIFY_STR_RESET_SEND:
3693 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3694 		break;
3695 	case SCTP_NOTIFY_STR_RESET_RECV:
3696 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3697 		break;
3698 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3699 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3700 		break;
3701 
3702 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3703 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3704 		break;
3705 
3706 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3707 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3708 		    error);
3709 		break;
3710 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3711 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3712 		    error);
3713 		break;
3714 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3715 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3716 		    error);
3717 		break;
3718 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3719 		break;
3720 	case SCTP_NOTIFY_ASCONF_FAILED:
3721 		break;
3722 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3723 		sctp_notify_shutdown_event(stcb);
3724 		break;
3725 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3726 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3727 		    (uint16_t) (uintptr_t) data,
3728 		    so_locked);
3729 		break;
3730 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3731 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3732 		    (uint16_t) (uintptr_t) data,
3733 		    so_locked);
3734 		break;
3735 	case SCTP_NOTIFY_NO_PEER_AUTH:
3736 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3737 		    (uint16_t) (uintptr_t) data,
3738 		    so_locked);
3739 		break;
3740 	case SCTP_NOTIFY_SENDER_DRY:
3741 		sctp_notify_sender_dry_event(stcb, so_locked);
3742 		break;
3743 	default:
3744 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3745 		    __FUNCTION__, notification, notification);
3746 		break;
3747 	}			/* end switch */
3748 }
3749 
3750 void
3751 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3752 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3753     SCTP_UNUSED
3754 #endif
3755 )
3756 {
3757 	struct sctp_association *asoc;
3758 	struct sctp_stream_out *outs;
3759 	struct sctp_tmit_chunk *chk;
3760 	struct sctp_stream_queue_pending *sp;
3761 	int i;
3762 
3763 	asoc = &stcb->asoc;
3764 
3765 	if (stcb == NULL) {
3766 		return;
3767 	}
3768 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3769 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3770 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3771 		return;
3772 	}
3773 	/* now through all the gunk freeing chunks */
3774 	if (holds_lock == 0) {
3775 		SCTP_TCB_SEND_LOCK(stcb);
3776 	}
3777 	/* sent queue SHOULD be empty */
3778 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3779 		chk = TAILQ_FIRST(&asoc->sent_queue);
3780 		while (chk) {
3781 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3782 			asoc->sent_queue_cnt--;
3783 			sctp_free_bufspace(stcb, asoc, chk, 1);
3784 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3785 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3786 			if (chk->data) {
3787 				sctp_m_freem(chk->data);
3788 				chk->data = NULL;
3789 			}
3790 			sctp_free_a_chunk(stcb, chk);
3791 			/* sa_ignore FREED_MEMORY */
3792 			chk = TAILQ_FIRST(&asoc->sent_queue);
3793 		}
3794 	}
3795 	/* pending send queue SHOULD be empty */
3796 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3797 		chk = TAILQ_FIRST(&asoc->send_queue);
3798 		while (chk) {
3799 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3800 			asoc->send_queue_cnt--;
3801 			sctp_free_bufspace(stcb, asoc, chk, 1);
3802 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3803 			if (chk->data) {
3804 				sctp_m_freem(chk->data);
3805 				chk->data = NULL;
3806 			}
3807 			sctp_free_a_chunk(stcb, chk);
3808 			/* sa_ignore FREED_MEMORY */
3809 			chk = TAILQ_FIRST(&asoc->send_queue);
3810 		}
3811 	}
3812 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3813 		/* For each stream */
3814 		outs = &stcb->asoc.strmout[i];
3815 		/* clean up any sends there */
3816 		stcb->asoc.locked_on_sending = NULL;
3817 		sp = TAILQ_FIRST(&outs->outqueue);
3818 		while (sp) {
3819 			stcb->asoc.stream_queue_cnt--;
3820 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3821 			sctp_free_spbufspace(stcb, asoc, sp);
3822 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3823 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3824 			if (sp->data) {
3825 				sctp_m_freem(sp->data);
3826 				sp->data = NULL;
3827 			}
3828 			if (sp->net)
3829 				sctp_free_remote_addr(sp->net);
3830 			sp->net = NULL;
3831 			/* Free the chunk */
3832 			sctp_free_a_strmoq(stcb, sp);
3833 			/* sa_ignore FREED_MEMORY */
3834 			sp = TAILQ_FIRST(&outs->outqueue);
3835 		}
3836 	}
3837 
3838 	if (holds_lock == 0) {
3839 		SCTP_TCB_SEND_UNLOCK(stcb);
3840 	}
3841 }
3842 
3843 void
3844 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3845 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3846     SCTP_UNUSED
3847 #endif
3848 )
3849 {
3850 
3851 	if (stcb == NULL) {
3852 		return;
3853 	}
3854 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3855 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3856 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3857 		return;
3858 	}
3859 	/* Tell them we lost the asoc */
3860 	sctp_report_all_outbound(stcb, 1, so_locked);
3861 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3862 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3863 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3864 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3865 	}
3866 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3867 }
3868 
/*
 * Abort an association: notify the user (if a TCB exists), send an
 * ABORT packet to the peer (echoing their vtag when known), and free
 * the TCB - or, with no TCB, free the inp itself if its socket is
 * already gone and no associations remain.  'm'/'iphlen'/'sh' describe
 * the packet that triggered the abort; 'op_err' is an optional error
 * cause chain consumed by sctp_send_abort().
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take the socket lock without the TCB
		 * lock held (refcount keeps the TCB alive meanwhile);
		 * sctp_free_assoc() requires the socket lock on these
		 * platforms.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone and socket gone: free inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3914 
3915 #ifdef SCTP_ASOCLOG_OF_TSNS
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
	/*
	 * Dump the association's inbound and outbound TSN tracking logs.
	 * Each log is a circular buffer of SCTP_TSN_LOG_SIZE entries; when
	 * a log has wrapped, the entries from the current write index to
	 * the end (oldest) are printed first, then those before the index
	 * (newest).
	 */
#ifdef NOSIY_PRINTS
	/*
	 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS";
	 * as written, this body compiles only when the misspelled macro is
	 * defined -- confirm intent before renaming.
	 */
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		/* nothing received yet */
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* older entries: write index through end of buffer */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newer entries: start of buffer up to the write index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* older entries: write index through end of buffer */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		/* newer entries: start of buffer up to the write index */
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3976 
3977 #endif
3978 
3979 void
3980 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3981     int error, struct mbuf *op_err,
3982     int so_locked
3983 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3984     SCTP_UNUSED
3985 #endif
3986 )
3987 {
3988 	uint32_t vtag;
3989 
3990 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3991 	struct socket *so;
3992 
3993 #endif
3994 
3995 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3996 	so = SCTP_INP_SO(inp);
3997 #endif
3998 	if (stcb == NULL) {
3999 		/* Got to have a TCB */
4000 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4001 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
4002 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4003 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4004 			}
4005 		}
4006 		return;
4007 	} else {
4008 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4009 	}
4010 	vtag = stcb->asoc.peer_vtag;
4011 	/* notify the ulp */
4012 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
4013 		sctp_abort_notification(stcb, error, so_locked);
4014 	/* notify the peer */
4015 #if defined(SCTP_PANIC_ON_ABORT)
4016 	panic("aborting an association");
4017 #endif
4018 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4019 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4020 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4021 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4022 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4023 	}
4024 	/* now free the asoc */
4025 #ifdef SCTP_ASOCLOG_OF_TSNS
4026 	sctp_print_out_track_log(stcb);
4027 #endif
4028 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4029 	if (!so_locked) {
4030 		atomic_add_int(&stcb->asoc.refcnt, 1);
4031 		SCTP_TCB_UNLOCK(stcb);
4032 		SCTP_SOCKET_LOCK(so, 1);
4033 		SCTP_TCB_LOCK(stcb);
4034 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4035 	}
4036 #endif
4037 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4038 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4039 	if (!so_locked) {
4040 		SCTP_SOCKET_UNLOCK(so, 1);
4041 	}
4042 #endif
4043 }
4044 
4045 void
4046 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
4047     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
4048 {
4049 	struct sctp_chunkhdr *ch, chunk_buf;
4050 	unsigned int chk_length;
4051 
4052 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4053 	/* Generate a TO address for future reference */
4054 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4055 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
4056 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4057 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4058 		}
4059 	}
4060 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4061 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4062 	while (ch != NULL) {
4063 		chk_length = ntohs(ch->chunk_length);
4064 		if (chk_length < sizeof(*ch)) {
4065 			/* break to abort land */
4066 			break;
4067 		}
4068 		switch (ch->chunk_type) {
4069 		case SCTP_COOKIE_ECHO:
4070 			/* We hit here only if the assoc is being freed */
4071 			return;
4072 		case SCTP_PACKET_DROPPED:
4073 			/* we don't respond to pkt-dropped */
4074 			return;
4075 		case SCTP_ABORT_ASSOCIATION:
4076 			/* we don't respond with an ABORT to an ABORT */
4077 			return;
4078 		case SCTP_SHUTDOWN_COMPLETE:
4079 			/*
4080 			 * we ignore it since we are not waiting for it and
4081 			 * peer is gone
4082 			 */
4083 			return;
4084 		case SCTP_SHUTDOWN_ACK:
4085 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4086 			return;
4087 		default:
4088 			break;
4089 		}
4090 		offset += SCTP_SIZE32(chk_length);
4091 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4092 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4093 	}
4094 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4095 }
4096 
4097 /*
4098  * check the inbound datagram to make sure there is not an abort inside it,
4099  * if there is return 1, else return 0.
4100  */
4101 int
4102 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4103 {
4104 	struct sctp_chunkhdr *ch;
4105 	struct sctp_init_chunk *init_chk, chunk_buf;
4106 	int offset;
4107 	unsigned int chk_length;
4108 
4109 	offset = iphlen + sizeof(struct sctphdr);
4110 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4111 	    (uint8_t *) & chunk_buf);
4112 	while (ch != NULL) {
4113 		chk_length = ntohs(ch->chunk_length);
4114 		if (chk_length < sizeof(*ch)) {
4115 			/* packet is probably corrupt */
4116 			break;
4117 		}
4118 		/* we seem to be ok, is it an abort? */
4119 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4120 			/* yep, tell them */
4121 			return (1);
4122 		}
4123 		if (ch->chunk_type == SCTP_INITIATION) {
4124 			/* need to update the Vtag */
4125 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4126 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4127 			if (init_chk != NULL) {
4128 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4129 			}
4130 		}
4131 		/* Nope, move to the next chunk */
4132 		offset += SCTP_SIZE32(chk_length);
4133 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4134 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4135 	}
4136 	return (0);
4137 }
4138 
4139 /*
4140  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4141  * set (i.e. it's 0) so, create this function to compare link local scopes
4142  */
4143 #ifdef INET6
4144 uint32_t
4145 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4146 {
4147 	struct sockaddr_in6 a, b;
4148 
4149 	/* save copies */
4150 	a = *addr1;
4151 	b = *addr2;
4152 
4153 	if (a.sin6_scope_id == 0)
4154 		if (sa6_recoverscope(&a)) {
4155 			/* can't get scope, so can't match */
4156 			return (0);
4157 		}
4158 	if (b.sin6_scope_id == 0)
4159 		if (sa6_recoverscope(&b)) {
4160 			/* can't get scope, so can't match */
4161 			return (0);
4162 		}
4163 	if (a.sin6_scope_id != b.sin6_scope_id)
4164 		return (0);
4165 
4166 	return (1);
4167 }
4168 
4169 /*
4170  * returns a sockaddr_in6 with embedded scope recovered and removed
4171  */
4172 struct sockaddr_in6 *
4173 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4174 {
4175 	/* check and strip embedded scope junk */
4176 	if (addr->sin6_family == AF_INET6) {
4177 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4178 			if (addr->sin6_scope_id == 0) {
4179 				*store = *addr;
4180 				if (!sa6_recoverscope(store)) {
4181 					/* use the recovered scope */
4182 					addr = store;
4183 				}
4184 			} else {
4185 				/* else, return the original "to" addr */
4186 				in6_clearscope(&addr->sin6_addr);
4187 			}
4188 		}
4189 	}
4190 	return (addr);
4191 }
4192 
4193 #endif
4194 
4195 /*
4196  * are the two addresses the same?  currently a "scopeless" check returns: 1
4197  * if same, 0 if not
4198  */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/*
	 * Scopeless address comparison: returns 1 when the two sockaddrs
	 * carry the same address, 0 otherwise (including NULL arguments,
	 * family mismatches, and unsupported families).
	 */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		return (0);
	}
	if (sa1->sa_family != sa2->sa_family) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6 addresses */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
	case AF_INET:
		/* IPv4 addresses */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
	default:
		/* we don't do these... */
		return (0);
	}
}
4238 
4239 void
4240 sctp_print_address(struct sockaddr *sa)
4241 {
4242 #ifdef INET6
4243 	char ip6buf[INET6_ADDRSTRLEN];
4244 
4245 	ip6buf[0] = 0;
4246 #endif
4247 
4248 	switch (sa->sa_family) {
4249 #ifdef INET6
4250 	case AF_INET6:
4251 		{
4252 			struct sockaddr_in6 *sin6;
4253 
4254 			sin6 = (struct sockaddr_in6 *)sa;
4255 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4256 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4257 			    ntohs(sin6->sin6_port),
4258 			    sin6->sin6_scope_id);
4259 			break;
4260 		}
4261 #endif
4262 	case AF_INET:
4263 		{
4264 			struct sockaddr_in *sin;
4265 			unsigned char *p;
4266 
4267 			sin = (struct sockaddr_in *)sa;
4268 			p = (unsigned char *)&sin->sin_addr;
4269 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4270 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4271 			break;
4272 		}
4273 	default:
4274 		SCTP_PRINTF("?\n");
4275 		break;
4276 	}
4277 }
4278 
4279 void
4280 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4281 {
4282 	switch (iph->ip_v) {
4283 		case IPVERSION:
4284 		{
4285 			struct sockaddr_in lsa, fsa;
4286 
4287 			bzero(&lsa, sizeof(lsa));
4288 			lsa.sin_len = sizeof(lsa);
4289 			lsa.sin_family = AF_INET;
4290 			lsa.sin_addr = iph->ip_src;
4291 			lsa.sin_port = sh->src_port;
4292 			bzero(&fsa, sizeof(fsa));
4293 			fsa.sin_len = sizeof(fsa);
4294 			fsa.sin_family = AF_INET;
4295 			fsa.sin_addr = iph->ip_dst;
4296 			fsa.sin_port = sh->dest_port;
4297 			SCTP_PRINTF("src: ");
4298 			sctp_print_address((struct sockaddr *)&lsa);
4299 			SCTP_PRINTF("dest: ");
4300 			sctp_print_address((struct sockaddr *)&fsa);
4301 			break;
4302 		}
4303 #ifdef INET6
4304 	case IPV6_VERSION >> 4:
4305 		{
4306 			struct ip6_hdr *ip6;
4307 			struct sockaddr_in6 lsa6, fsa6;
4308 
4309 			ip6 = (struct ip6_hdr *)iph;
4310 			bzero(&lsa6, sizeof(lsa6));
4311 			lsa6.sin6_len = sizeof(lsa6);
4312 			lsa6.sin6_family = AF_INET6;
4313 			lsa6.sin6_addr = ip6->ip6_src;
4314 			lsa6.sin6_port = sh->src_port;
4315 			bzero(&fsa6, sizeof(fsa6));
4316 			fsa6.sin6_len = sizeof(fsa6);
4317 			fsa6.sin6_family = AF_INET6;
4318 			fsa6.sin6_addr = ip6->ip6_dst;
4319 			fsa6.sin6_port = sh->dest_port;
4320 			SCTP_PRINTF("src: ");
4321 			sctp_print_address((struct sockaddr *)&lsa6);
4322 			SCTP_PRINTF("dest: ");
4323 			sctp_print_address((struct sockaddr *)&fsa6);
4324 			break;
4325 		}
4326 #endif
4327 	default:
4328 		/* TSNH */
4329 		break;
4330 	}
4331 }
4332 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.  Used by
	 * peeloff/accept: socket-buffer accounting is released from the
	 * old socket and re-charged to the new one.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against concurrent readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* save the successor before unlinking control */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* un-charge each mbuf from the old socket buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new socket buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4414 
4415 void
4416 sctp_add_to_readq(struct sctp_inpcb *inp,
4417     struct sctp_tcb *stcb,
4418     struct sctp_queued_to_read *control,
4419     struct sockbuf *sb,
4420     int end,
4421     int so_locked
4422 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4423     SCTP_UNUSED
4424 #endif
4425 )
4426 {
4427 	/*
4428 	 * Here we must place the control on the end of the socket read
4429 	 * queue AND increment sb_cc so that select will work properly on
4430 	 * read.
4431 	 */
4432 	struct mbuf *m, *prev = NULL;
4433 
4434 	if (inp == NULL) {
4435 		/* Gak, TSNH!! */
4436 #ifdef INVARIANTS
4437 		panic("Gak, inp NULL on add_to_readq");
4438 #endif
4439 		return;
4440 	}
4441 	SCTP_INP_READ_LOCK(inp);
4442 	if (!(control->spec_flags & M_NOTIFICATION)) {
4443 		atomic_add_int(&inp->total_recvs, 1);
4444 		if (!control->do_not_ref_stcb) {
4445 			atomic_add_int(&stcb->total_recvs, 1);
4446 		}
4447 	}
4448 	m = control->data;
4449 	control->held_length = 0;
4450 	control->length = 0;
4451 	while (m) {
4452 		if (SCTP_BUF_LEN(m) == 0) {
4453 			/* Skip mbufs with NO length */
4454 			if (prev == NULL) {
4455 				/* First one */
4456 				control->data = sctp_m_free(m);
4457 				m = control->data;
4458 			} else {
4459 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4460 				m = SCTP_BUF_NEXT(prev);
4461 			}
4462 			if (m == NULL) {
4463 				control->tail_mbuf = prev;;
4464 			}
4465 			continue;
4466 		}
4467 		prev = m;
4468 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4469 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4470 		}
4471 		sctp_sballoc(stcb, sb, m);
4472 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4473 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4474 		}
4475 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4476 		m = SCTP_BUF_NEXT(m);
4477 	}
4478 	if (prev != NULL) {
4479 		control->tail_mbuf = prev;
4480 	} else {
4481 		/* Everything got collapsed out?? */
4482 		return;
4483 	}
4484 	if (end) {
4485 		control->end_added = 1;
4486 	}
4487 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4488 	SCTP_INP_READ_UNLOCK(inp);
4489 	if (inp && inp->sctp_socket) {
4490 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4491 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4492 		} else {
4493 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4494 			struct socket *so;
4495 
4496 			so = SCTP_INP_SO(inp);
4497 			if (!so_locked) {
4498 				atomic_add_int(&stcb->asoc.refcnt, 1);
4499 				SCTP_TCB_UNLOCK(stcb);
4500 				SCTP_SOCKET_LOCK(so, 1);
4501 				SCTP_TCB_LOCK(stcb);
4502 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4503 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4504 					SCTP_SOCKET_UNLOCK(so, 1);
4505 					return;
4506 				}
4507 			}
4508 #endif
4509 			sctp_sorwakeup(inp, inp->sctp_socket);
4510 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4511 			if (!so_locked) {
4512 				SCTP_SOCKET_UNLOCK(so, 1);
4513 			}
4514 #endif
4515 		}
4516 	}
4517 }
4518 
4519 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when control is NULL/complete or m is
	 * empty.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: drop the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: charge each mbuf to the socket buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/* socket lock before TCB lock; keep a ref meanwhile */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4661 
4662 
4663 
4664 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4665  *************ALTERNATE ROUTING CODE
4666  */
4667 
4668 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4669  *************ALTERNATE ROUTING CODE
4670  */
4671 
4672 struct mbuf *
4673 sctp_generate_invmanparam(int err)
4674 {
4675 	/* Return a MBUF with a invalid mandatory parameter */
4676 	struct mbuf *m;
4677 
4678 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4679 	if (m) {
4680 		struct sctp_paramhdr *ph;
4681 
4682 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4683 		ph = mtod(m, struct sctp_paramhdr *);
4684 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4685 		ph->param_type = htons(err);
4686 	}
4687 	return (m);
4688 }
4689 
4690 #ifdef SCTP_MBCNT_LOGGING
4691 void
4692 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4693     struct sctp_tmit_chunk *tp1, int chk_cnt)
4694 {
4695 	if (tp1->data == NULL) {
4696 		return;
4697 	}
4698 	asoc->chunks_on_out_queue -= chk_cnt;
4699 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4700 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4701 		    asoc->total_output_queue_size,
4702 		    tp1->book_size,
4703 		    0,
4704 		    tp1->mbcnt);
4705 	}
4706 	if (asoc->total_output_queue_size >= tp1->book_size) {
4707 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4708 	} else {
4709 		asoc->total_output_queue_size = 0;
4710 	}
4711 
4712 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4713 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4714 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4715 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4716 		} else {
4717 			stcb->sctp_socket->so_snd.sb_cc = 0;
4718 
4719 		}
4720 	}
4721 }
4722 
4723 #endif
4724 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Abandon a PR-SCTP message starting at chunk tp1 on the given
	 * queue: mark each fragment SCTP_FORWARD_TSN_SKIP, free its data,
	 * notify the ULP of the failure with "reason", and move send-queue
	 * chunks onto the sent queue so a FWD-TSN can cover them.  Returns
	 * the total book_size of all chunks released.
	 */
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_flight_size_decrease(tp1);
			sctp_total_flight_decrease(stcb, tp1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * The sender wakeup needs the socket lock, which
			 * must be taken before the TCB lock; hold a ref
			 * across the drop/re-acquire.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4823 
4824 /*
4825  * checks to see if the given address, sa, is one that is currently known by
4826  * the kernel note: can't distinguish the same address on multiple interfaces
4827  * and doesn't handle multiple addresses with different zone/scope id's note:
4828  * ifa_ifwithaddr() compares the entire sockaddr struct
4829  */
4830 struct sctp_ifa *
4831 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4832     int holds_lock)
4833 {
4834 	struct sctp_laddr *laddr;
4835 
4836 	if (holds_lock == 0) {
4837 		SCTP_INP_RLOCK(inp);
4838 	}
4839 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4840 		if (laddr->ifa == NULL)
4841 			continue;
4842 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4843 			continue;
4844 		if (addr->sa_family == AF_INET) {
4845 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4846 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4847 				/* found him. */
4848 				if (holds_lock == 0) {
4849 					SCTP_INP_RUNLOCK(inp);
4850 				}
4851 				return (laddr->ifa);
4852 				break;
4853 			}
4854 		}
4855 #ifdef INET6
4856 		if (addr->sa_family == AF_INET6) {
4857 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4858 			    &laddr->ifa->address.sin6)) {
4859 				/* found him. */
4860 				if (holds_lock == 0) {
4861 					SCTP_INP_RUNLOCK(inp);
4862 				}
4863 				return (laddr->ifa);
4864 				break;
4865 			}
4866 		}
4867 #endif
4868 	}
4869 	if (holds_lock == 0) {
4870 		SCTP_INP_RUNLOCK(inp);
4871 	}
4872 	return (NULL);
4873 }
4874 
4875 uint32_t
4876 sctp_get_ifa_hash_val(struct sockaddr *addr)
4877 {
4878 	if (addr->sa_family == AF_INET) {
4879 		struct sockaddr_in *sin;
4880 
4881 		sin = (struct sockaddr_in *)addr;
4882 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4883 	} else if (addr->sa_family == AF_INET6) {
4884 		struct sockaddr_in6 *sin6;
4885 		uint32_t hash_of_addr;
4886 
4887 		sin6 = (struct sockaddr_in6 *)addr;
4888 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4889 		    sin6->sin6_addr.s6_addr32[1] +
4890 		    sin6->sin6_addr.s6_addr32[2] +
4891 		    sin6->sin6_addr.s6_addr32[3]);
4892 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4893 		return (hash_of_addr);
4894 	}
4895 	return (0);
4896 }
4897 
4898 struct sctp_ifa *
4899 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4900 {
4901 	struct sctp_ifa *sctp_ifap;
4902 	struct sctp_vrf *vrf;
4903 	struct sctp_ifalist *hash_head;
4904 	uint32_t hash_of_addr;
4905 
4906 	if (holds_lock == 0)
4907 		SCTP_IPI_ADDR_RLOCK();
4908 
4909 	vrf = sctp_find_vrf(vrf_id);
4910 	if (vrf == NULL) {
4911 stage_right:
4912 		if (holds_lock == 0)
4913 			SCTP_IPI_ADDR_RUNLOCK();
4914 		return (NULL);
4915 	}
4916 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4917 
4918 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4919 	if (hash_head == NULL) {
4920 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4921 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4922 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4923 		sctp_print_address(addr);
4924 		SCTP_PRINTF("No such bucket for address\n");
4925 		if (holds_lock == 0)
4926 			SCTP_IPI_ADDR_RUNLOCK();
4927 
4928 		return (NULL);
4929 	}
4930 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4931 		if (sctp_ifap == NULL) {
4932 #ifdef INVARIANTS
4933 			panic("Huh LIST_FOREACH corrupt");
4934 			goto stage_right;
4935 #else
4936 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4937 			goto stage_right;
4938 #endif
4939 		}
4940 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4941 			continue;
4942 		if (addr->sa_family == AF_INET) {
4943 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4944 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4945 				/* found him. */
4946 				if (holds_lock == 0)
4947 					SCTP_IPI_ADDR_RUNLOCK();
4948 				return (sctp_ifap);
4949 				break;
4950 			}
4951 		}
4952 #ifdef INET6
4953 		if (addr->sa_family == AF_INET6) {
4954 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4955 			    &sctp_ifap->address.sin6)) {
4956 				/* found him. */
4957 				if (holds_lock == 0)
4958 					SCTP_IPI_ADDR_RUNLOCK();
4959 				return (sctp_ifap);
4960 				break;
4961 			}
4962 		}
4963 #endif
4964 	}
4965 	if (holds_lock == 0)
4966 		SCTP_IPI_ADDR_RUNLOCK();
4967 	return (NULL);
4968 }
4969 
/*
 * Called after the user has pulled data off the socket, to decide
 * whether the freed receive-buffer space is large enough to warrant
 * sending a window-update SACK to the peer.
 *
 * stcb         - association (may be NULL; then this is a no-op).
 * freed_so_far - in/out running count of bytes freed since the last
 *                update; folded into the tcb and zeroed here.
 * hold_rlock   - non-zero if the caller holds the inp read-queue lock;
 *                it is dropped around the SACK send and re-taken on exit.
 * rwnd_req     - threshold of rwnd growth that triggers an update.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a refcount so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well while we poke at its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the tcb's running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * The window grew enough to report.  Drop the read-queue
		 * lock (if held) so we can take the TCB lock and send.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock; the free may have won the race. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5058 
5059 int
5060 sctp_sorecvmsg(struct socket *so,
5061     struct uio *uio,
5062     struct mbuf **mp,
5063     struct sockaddr *from,
5064     int fromlen,
5065     int *msg_flags,
5066     struct sctp_sndrcvinfo *sinfo,
5067     int filling_sinfo)
5068 {
5069 	/*
5070 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5071 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5072 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5073 	 * On the way out we may send out any combination of:
5074 	 * MSG_NOTIFICATION MSG_EOR
5075 	 *
5076 	 */
5077 	struct sctp_inpcb *inp = NULL;
5078 	int my_len = 0;
5079 	int cp_len = 0, error = 0;
5080 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5081 	struct mbuf *m = NULL, *embuf = NULL;
5082 	struct sctp_tcb *stcb = NULL;
5083 	int wakeup_read_socket = 0;
5084 	int freecnt_applied = 0;
5085 	int out_flags = 0, in_flags = 0;
5086 	int block_allowed = 1;
5087 	uint32_t freed_so_far = 0;
5088 	uint32_t copied_so_far = 0;
5089 	int in_eeor_mode = 0;
5090 	int no_rcv_needed = 0;
5091 	uint32_t rwnd_req = 0;
5092 	int hold_sblock = 0;
5093 	int hold_rlock = 0;
5094 	int slen = 0;
5095 	uint32_t held_length = 0;
5096 	int sockbuf_lock = 0;
5097 
5098 	if (uio == NULL) {
5099 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5100 		return (EINVAL);
5101 	}
5102 	if (msg_flags) {
5103 		in_flags = *msg_flags;
5104 		if (in_flags & MSG_PEEK)
5105 			SCTP_STAT_INCR(sctps_read_peeks);
5106 	} else {
5107 		in_flags = 0;
5108 	}
5109 	slen = uio->uio_resid;
5110 
5111 	/* Pull in and set up our int flags */
5112 	if (in_flags & MSG_OOB) {
5113 		/* Out of band's NOT supported */
5114 		return (EOPNOTSUPP);
5115 	}
5116 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5117 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5118 		return (EINVAL);
5119 	}
5120 	if ((in_flags & (MSG_DONTWAIT
5121 	    | MSG_NBIO
5122 	    )) ||
5123 	    SCTP_SO_IS_NBIO(so)) {
5124 		block_allowed = 0;
5125 	}
5126 	/* setup the endpoint */
5127 	inp = (struct sctp_inpcb *)so->so_pcb;
5128 	if (inp == NULL) {
5129 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5130 		return (EFAULT);
5131 	}
5132 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5133 	/* Must be at least a MTU's worth */
5134 	if (rwnd_req < SCTP_MIN_RWND)
5135 		rwnd_req = SCTP_MIN_RWND;
5136 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5137 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5138 		sctp_misc_ints(SCTP_SORECV_ENTER,
5139 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5140 	}
5141 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5142 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5143 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5144 	}
5145 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5146 	sockbuf_lock = 1;
5147 	if (error) {
5148 		goto release_unlocked;
5149 	}
5150 restart:
5151 
5152 
5153 restart_nosblocks:
5154 	if (hold_sblock == 0) {
5155 		SOCKBUF_LOCK(&so->so_rcv);
5156 		hold_sblock = 1;
5157 	}
5158 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5159 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5160 		goto out;
5161 	}
5162 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5163 		if (so->so_error) {
5164 			error = so->so_error;
5165 			if ((in_flags & MSG_PEEK) == 0)
5166 				so->so_error = 0;
5167 			goto out;
5168 		} else {
5169 			if (so->so_rcv.sb_cc == 0) {
5170 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5171 				/* indicate EOF */
5172 				error = 0;
5173 				goto out;
5174 			}
5175 		}
5176 	}
5177 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5178 		/* we need to wait for data */
5179 		if ((so->so_rcv.sb_cc == 0) &&
5180 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5181 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5182 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5183 				/*
5184 				 * For active open side clear flags for
5185 				 * re-use passive open is blocked by
5186 				 * connect.
5187 				 */
5188 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5189 					/*
5190 					 * You were aborted, passive side
5191 					 * always hits here
5192 					 */
5193 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5194 					error = ECONNRESET;
5195 					/*
5196 					 * You get this once if you are
5197 					 * active open side
5198 					 */
5199 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5200 						/*
5201 						 * Remove flag if on the
5202 						 * active open side
5203 						 */
5204 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5205 					}
5206 				}
5207 				so->so_state &= ~(SS_ISCONNECTING |
5208 				    SS_ISDISCONNECTING |
5209 				    SS_ISCONFIRMING |
5210 				    SS_ISCONNECTED);
5211 				if (error == 0) {
5212 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5213 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5214 						error = ENOTCONN;
5215 					} else {
5216 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5217 					}
5218 				}
5219 				goto out;
5220 			}
5221 		}
5222 		error = sbwait(&so->so_rcv);
5223 		if (error) {
5224 			goto out;
5225 		}
5226 		held_length = 0;
5227 		goto restart_nosblocks;
5228 	} else if (so->so_rcv.sb_cc == 0) {
5229 		if (so->so_error) {
5230 			error = so->so_error;
5231 			if ((in_flags & MSG_PEEK) == 0)
5232 				so->so_error = 0;
5233 		} else {
5234 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5235 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5236 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5237 					/*
5238 					 * For active open side clear flags
5239 					 * for re-use passive open is
5240 					 * blocked by connect.
5241 					 */
5242 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5243 						/*
5244 						 * You were aborted, passive
5245 						 * side always hits here
5246 						 */
5247 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5248 						error = ECONNRESET;
5249 						/*
5250 						 * You get this once if you
5251 						 * are active open side
5252 						 */
5253 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5254 							/*
5255 							 * Remove flag if on
5256 							 * the active open
5257 							 * side
5258 							 */
5259 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5260 						}
5261 					}
5262 					so->so_state &= ~(SS_ISCONNECTING |
5263 					    SS_ISDISCONNECTING |
5264 					    SS_ISCONFIRMING |
5265 					    SS_ISCONNECTED);
5266 					if (error == 0) {
5267 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5268 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5269 							error = ENOTCONN;
5270 						} else {
5271 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5272 						}
5273 					}
5274 					goto out;
5275 				}
5276 			}
5277 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5278 			error = EWOULDBLOCK;
5279 		}
5280 		goto out;
5281 	}
5282 	if (hold_sblock == 1) {
5283 		SOCKBUF_UNLOCK(&so->so_rcv);
5284 		hold_sblock = 0;
5285 	}
5286 	/* we possibly have data we can read */
5287 	/* sa_ignore FREED_MEMORY */
5288 	control = TAILQ_FIRST(&inp->read_queue);
5289 	if (control == NULL) {
5290 		/*
5291 		 * This could be happening since the appender did the
5292 		 * increment but as not yet did the tailq insert onto the
5293 		 * read_queue
5294 		 */
5295 		if (hold_rlock == 0) {
5296 			SCTP_INP_READ_LOCK(inp);
5297 			hold_rlock = 1;
5298 		}
5299 		control = TAILQ_FIRST(&inp->read_queue);
5300 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5301 #ifdef INVARIANTS
5302 			panic("Huh, its non zero and nothing on control?");
5303 #endif
5304 			so->so_rcv.sb_cc = 0;
5305 		}
5306 		SCTP_INP_READ_UNLOCK(inp);
5307 		hold_rlock = 0;
5308 		goto restart;
5309 	}
5310 	if ((control->length == 0) &&
5311 	    (control->do_not_ref_stcb)) {
5312 		/*
5313 		 * Clean up code for freeing assoc that left behind a
5314 		 * pdapi.. maybe a peer in EEOR that just closed after
5315 		 * sending and never indicated a EOR.
5316 		 */
5317 		if (hold_rlock == 0) {
5318 			hold_rlock = 1;
5319 			SCTP_INP_READ_LOCK(inp);
5320 		}
5321 		control->held_length = 0;
5322 		if (control->data) {
5323 			/* Hmm there is data here .. fix */
5324 			struct mbuf *m_tmp;
5325 			int cnt = 0;
5326 
5327 			m_tmp = control->data;
5328 			while (m_tmp) {
5329 				cnt += SCTP_BUF_LEN(m_tmp);
5330 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5331 					control->tail_mbuf = m_tmp;
5332 					control->end_added = 1;
5333 				}
5334 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5335 			}
5336 			control->length = cnt;
5337 		} else {
5338 			/* remove it */
5339 			TAILQ_REMOVE(&inp->read_queue, control, next);
5340 			/* Add back any hiddend data */
5341 			sctp_free_remote_addr(control->whoFrom);
5342 			sctp_free_a_readq(stcb, control);
5343 		}
5344 		if (hold_rlock) {
5345 			hold_rlock = 0;
5346 			SCTP_INP_READ_UNLOCK(inp);
5347 		}
5348 		goto restart;
5349 	}
5350 	if (control->length == 0) {
5351 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5352 		    (filling_sinfo)) {
5353 			/* find a more suitable one then this */
5354 			ctl = TAILQ_NEXT(control, next);
5355 			while (ctl) {
5356 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5357 				    (ctl->some_taken ||
5358 				    (ctl->spec_flags & M_NOTIFICATION) ||
5359 				    ((ctl->do_not_ref_stcb == 0) &&
5360 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5361 				    ) {
5362 					/*-
5363 					 * If we have a different TCB next, and there is data
5364 					 * present. If we have already taken some (pdapi), OR we can
5365 					 * ref the tcb and no delivery as started on this stream, we
5366 					 * take it. Note we allow a notification on a different
5367 					 * assoc to be delivered..
5368 					 */
5369 					control = ctl;
5370 					goto found_one;
5371 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5372 					    (ctl->length) &&
5373 					    ((ctl->some_taken) ||
5374 					    ((ctl->do_not_ref_stcb == 0) &&
5375 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5376 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5377 				    ) {
5378 					/*-
5379 					 * If we have the same tcb, and there is data present, and we
5380 					 * have the strm interleave feature present. Then if we have
5381 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5382 					 * not started a delivery for this stream, we can take it.
5383 					 * Note we do NOT allow a notificaiton on the same assoc to
5384 					 * be delivered.
5385 					 */
5386 					control = ctl;
5387 					goto found_one;
5388 				}
5389 				ctl = TAILQ_NEXT(ctl, next);
5390 			}
5391 		}
5392 		/*
5393 		 * if we reach here, not suitable replacement is available
5394 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5395 		 * into the our held count, and its time to sleep again.
5396 		 */
5397 		held_length = so->so_rcv.sb_cc;
5398 		control->held_length = so->so_rcv.sb_cc;
5399 		goto restart;
5400 	}
5401 	/* Clear the held length since there is something to read */
5402 	control->held_length = 0;
5403 	if (hold_rlock) {
5404 		SCTP_INP_READ_UNLOCK(inp);
5405 		hold_rlock = 0;
5406 	}
5407 found_one:
5408 	/*
5409 	 * If we reach here, control has a some data for us to read off.
5410 	 * Note that stcb COULD be NULL.
5411 	 */
5412 	control->some_taken++;
5413 	if (hold_sblock) {
5414 		SOCKBUF_UNLOCK(&so->so_rcv);
5415 		hold_sblock = 0;
5416 	}
5417 	stcb = control->stcb;
5418 	if (stcb) {
5419 		if ((control->do_not_ref_stcb == 0) &&
5420 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5421 			if (freecnt_applied == 0)
5422 				stcb = NULL;
5423 		} else if (control->do_not_ref_stcb == 0) {
5424 			/* you can't free it on me please */
5425 			/*
5426 			 * The lock on the socket buffer protects us so the
5427 			 * free code will stop. But since we used the
5428 			 * socketbuf lock and the sender uses the tcb_lock
5429 			 * to increment, we need to use the atomic add to
5430 			 * the refcnt
5431 			 */
5432 			if (freecnt_applied) {
5433 #ifdef INVARIANTS
5434 				panic("refcnt already incremented");
5435 #else
5436 				printf("refcnt already incremented?\n");
5437 #endif
5438 			} else {
5439 				atomic_add_int(&stcb->asoc.refcnt, 1);
5440 				freecnt_applied = 1;
5441 			}
5442 			/*
5443 			 * Setup to remember how much we have not yet told
5444 			 * the peer our rwnd has opened up. Note we grab the
5445 			 * value from the tcb from last time. Note too that
5446 			 * sack sending clears this when a sack is sent,
5447 			 * which is fine. Once we hit the rwnd_req, we then
5448 			 * will go to the sctp_user_rcvd() that will not
5449 			 * lock until it KNOWs it MUST send a WUP-SACK.
5450 			 */
5451 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5452 			stcb->freed_by_sorcv_sincelast = 0;
5453 		}
5454 	}
5455 	if (stcb &&
5456 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5457 	    control->do_not_ref_stcb == 0) {
5458 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5459 	}
5460 	/* First lets get off the sinfo and sockaddr info */
5461 	if ((sinfo) && filling_sinfo) {
5462 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5463 		nxt = TAILQ_NEXT(control, next);
5464 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5465 			struct sctp_extrcvinfo *s_extra;
5466 
5467 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5468 			if ((nxt) &&
5469 			    (nxt->length)) {
5470 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5471 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5472 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5473 				}
5474 				if (nxt->spec_flags & M_NOTIFICATION) {
5475 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5476 				}
5477 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5478 				s_extra->sreinfo_next_length = nxt->length;
5479 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5480 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5481 				if (nxt->tail_mbuf != NULL) {
5482 					if (nxt->end_added) {
5483 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5484 					}
5485 				}
5486 			} else {
5487 				/*
5488 				 * we explicitly 0 this, since the memcpy
5489 				 * got some other things beyond the older
5490 				 * sinfo_ that is on the control's structure
5491 				 * :-D
5492 				 */
5493 				nxt = NULL;
5494 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5495 				s_extra->sreinfo_next_aid = 0;
5496 				s_extra->sreinfo_next_length = 0;
5497 				s_extra->sreinfo_next_ppid = 0;
5498 				s_extra->sreinfo_next_stream = 0;
5499 			}
5500 		}
5501 		/*
5502 		 * update off the real current cum-ack, if we have an stcb.
5503 		 */
5504 		if ((control->do_not_ref_stcb == 0) && stcb)
5505 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5506 		/*
5507 		 * mask off the high bits, we keep the actual chunk bits in
5508 		 * there.
5509 		 */
5510 		sinfo->sinfo_flags &= 0x00ff;
5511 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5512 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5513 		}
5514 	}
5515 #ifdef SCTP_ASOCLOG_OF_TSNS
5516 	{
5517 		int index, newindex;
5518 		struct sctp_pcbtsn_rlog *entry;
5519 
5520 		do {
5521 			index = inp->readlog_index;
5522 			newindex = index + 1;
5523 			if (newindex >= SCTP_READ_LOG_SIZE) {
5524 				newindex = 0;
5525 			}
5526 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5527 		entry = &inp->readlog[index];
5528 		entry->vtag = control->sinfo_assoc_id;
5529 		entry->strm = control->sinfo_stream;
5530 		entry->seq = control->sinfo_ssn;
5531 		entry->sz = control->length;
5532 		entry->flgs = control->sinfo_flags;
5533 	}
5534 #endif
5535 	if (fromlen && from) {
5536 		struct sockaddr *to;
5537 
5538 #ifdef INET
5539 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5540 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5541 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5542 #else
5543 		/* No AF_INET use AF_INET6 */
5544 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5545 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5546 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5547 #endif
5548 
5549 		to = from;
5550 #if defined(INET) && defined(INET6)
5551 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5552 		    (to->sa_family == AF_INET) &&
5553 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5554 			struct sockaddr_in *sin;
5555 			struct sockaddr_in6 sin6;
5556 
5557 			sin = (struct sockaddr_in *)to;
5558 			bzero(&sin6, sizeof(sin6));
5559 			sin6.sin6_family = AF_INET6;
5560 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5561 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5562 			bcopy(&sin->sin_addr,
5563 			    &sin6.sin6_addr.s6_addr32[3],
5564 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5565 			sin6.sin6_port = sin->sin_port;
5566 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5567 		}
5568 #endif
5569 #if defined(INET6)
5570 		{
5571 			struct sockaddr_in6 lsa6, *to6;
5572 
5573 			to6 = (struct sockaddr_in6 *)to;
5574 			sctp_recover_scope_mac(to6, (&lsa6));
5575 		}
5576 #endif
5577 	}
5578 	/* now copy out what data we can */
5579 	if (mp == NULL) {
5580 		/* copy out each mbuf in the chain up to length */
5581 get_more_data:
5582 		m = control->data;
5583 		while (m) {
5584 			/* Move out all we can */
5585 			cp_len = (int)uio->uio_resid;
5586 			my_len = (int)SCTP_BUF_LEN(m);
5587 			if (cp_len > my_len) {
5588 				/* not enough in this buf */
5589 				cp_len = my_len;
5590 			}
5591 			if (hold_rlock) {
5592 				SCTP_INP_READ_UNLOCK(inp);
5593 				hold_rlock = 0;
5594 			}
5595 			if (cp_len > 0)
5596 				error = uiomove(mtod(m, char *), cp_len, uio);
5597 			/* re-read */
5598 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5599 				goto release;
5600 			}
5601 			if ((control->do_not_ref_stcb == 0) && stcb &&
5602 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5603 				no_rcv_needed = 1;
5604 			}
5605 			if (error) {
5606 				/* error we are out of here */
5607 				goto release;
5608 			}
5609 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5610 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5611 			    ((control->end_added == 0) ||
5612 			    (control->end_added &&
5613 			    (TAILQ_NEXT(control, next) == NULL)))
5614 			    ) {
5615 				SCTP_INP_READ_LOCK(inp);
5616 				hold_rlock = 1;
5617 			}
5618 			if (cp_len == SCTP_BUF_LEN(m)) {
5619 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5620 				    (control->end_added)) {
5621 					out_flags |= MSG_EOR;
5622 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5623 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5624 				}
5625 				if (control->spec_flags & M_NOTIFICATION) {
5626 					out_flags |= MSG_NOTIFICATION;
5627 				}
5628 				/* we ate up the mbuf */
5629 				if (in_flags & MSG_PEEK) {
5630 					/* just looking */
5631 					m = SCTP_BUF_NEXT(m);
5632 					copied_so_far += cp_len;
5633 				} else {
5634 					/* dispose of the mbuf */
5635 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5636 						sctp_sblog(&so->so_rcv,
5637 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5638 					}
5639 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5640 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5641 						sctp_sblog(&so->so_rcv,
5642 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5643 					}
5644 					embuf = m;
5645 					copied_so_far += cp_len;
5646 					freed_so_far += cp_len;
5647 					freed_so_far += MSIZE;
5648 					atomic_subtract_int(&control->length, cp_len);
5649 					control->data = sctp_m_free(m);
5650 					m = control->data;
5651 					/*
5652 					 * been through it all, must hold sb
5653 					 * lock ok to null tail
5654 					 */
5655 					if (control->data == NULL) {
5656 #ifdef INVARIANTS
5657 						if ((control->end_added == 0) ||
5658 						    (TAILQ_NEXT(control, next) == NULL)) {
5659 							/*
5660 							 * If the end is not
5661 							 * added, OR the
5662 							 * next is NOT null
5663 							 * we MUST have the
5664 							 * lock.
5665 							 */
5666 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5667 								panic("Hmm we don't own the lock?");
5668 							}
5669 						}
5670 #endif
5671 						control->tail_mbuf = NULL;
5672 #ifdef INVARIANTS
5673 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5674 							panic("end_added, nothing left and no MSG_EOR");
5675 						}
5676 #endif
5677 					}
5678 				}
5679 			} else {
5680 				/* Do we need to trim the mbuf? */
5681 				if (control->spec_flags & M_NOTIFICATION) {
5682 					out_flags |= MSG_NOTIFICATION;
5683 				}
5684 				if ((in_flags & MSG_PEEK) == 0) {
5685 					SCTP_BUF_RESV_UF(m, cp_len);
5686 					SCTP_BUF_LEN(m) -= cp_len;
5687 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5688 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5689 					}
5690 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5691 					if ((control->do_not_ref_stcb == 0) &&
5692 					    stcb) {
5693 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5694 					}
5695 					copied_so_far += cp_len;
5696 					embuf = m;
5697 					freed_so_far += cp_len;
5698 					freed_so_far += MSIZE;
5699 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5700 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5701 						    SCTP_LOG_SBRESULT, 0);
5702 					}
5703 					atomic_subtract_int(&control->length, cp_len);
5704 				} else {
5705 					copied_so_far += cp_len;
5706 				}
5707 			}
5708 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5709 				break;
5710 			}
5711 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5712 			    (control->do_not_ref_stcb == 0) &&
5713 			    (freed_so_far >= rwnd_req)) {
5714 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5715 			}
5716 		}		/* end while(m) */
5717 		/*
5718 		 * At this point we have looked at it all and we either have
5719 		 * a MSG_EOR/or read all the user wants... <OR>
5720 		 * control->length == 0.
5721 		 */
5722 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5723 			/* we are done with this control */
5724 			if (control->length == 0) {
5725 				if (control->data) {
5726 #ifdef INVARIANTS
5727 					panic("control->data not null at read eor?");
5728 #else
5729 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5730 					sctp_m_freem(control->data);
5731 					control->data = NULL;
5732 #endif
5733 				}
5734 		done_with_control:
5735 				if (TAILQ_NEXT(control, next) == NULL) {
5736 					/*
5737 					 * If we don't have a next we need a
5738 					 * lock, if there is a next
5739 					 * interrupt is filling ahead of us
5740 					 * and we don't need a lock to
5741 					 * remove this guy (which is the
5742 					 * head of the queue).
5743 					 */
5744 					if (hold_rlock == 0) {
5745 						SCTP_INP_READ_LOCK(inp);
5746 						hold_rlock = 1;
5747 					}
5748 				}
5749 				TAILQ_REMOVE(&inp->read_queue, control, next);
5750 				/* Add back any hiddend data */
5751 				if (control->held_length) {
5752 					held_length = 0;
5753 					control->held_length = 0;
5754 					wakeup_read_socket = 1;
5755 				}
5756 				if (control->aux_data) {
5757 					sctp_m_free(control->aux_data);
5758 					control->aux_data = NULL;
5759 				}
5760 				no_rcv_needed = control->do_not_ref_stcb;
5761 				sctp_free_remote_addr(control->whoFrom);
5762 				control->data = NULL;
5763 				sctp_free_a_readq(stcb, control);
5764 				control = NULL;
5765 				if ((freed_so_far >= rwnd_req) &&
5766 				    (no_rcv_needed == 0))
5767 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5768 
5769 			} else {
5770 				/*
5771 				 * The user did not read all of this
5772 				 * message, turn off the returned MSG_EOR
5773 				 * since we are leaving more behind on the
5774 				 * control to read.
5775 				 */
5776 #ifdef INVARIANTS
5777 				if (control->end_added &&
5778 				    (control->data == NULL) &&
5779 				    (control->tail_mbuf == NULL)) {
5780 					panic("Gak, control->length is corrupt?");
5781 				}
5782 #endif
5783 				no_rcv_needed = control->do_not_ref_stcb;
5784 				out_flags &= ~MSG_EOR;
5785 			}
5786 		}
5787 		if (out_flags & MSG_EOR) {
5788 			goto release;
5789 		}
5790 		if ((uio->uio_resid == 0) ||
5791 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5792 		    ) {
5793 			goto release;
5794 		}
5795 		/*
5796 		 * If I hit here the receiver wants more and this message is
5797 		 * NOT done (pd-api). So two questions. Can we block? if not
5798 		 * we are done. Did the user NOT set MSG_WAITALL?
5799 		 */
5800 		if (block_allowed == 0) {
5801 			goto release;
5802 		}
5803 		/*
5804 		 * We need to wait for more data a few things: - We don't
5805 		 * sbunlock() so we don't get someone else reading. - We
5806 		 * must be sure to account for the case where what is added
5807 		 * is NOT to our control when we wakeup.
5808 		 */
5809 
5810 		/*
5811 		 * Do we need to tell the transport a rwnd update might be
5812 		 * needed before we go to sleep?
5813 		 */
5814 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5815 		    ((freed_so_far >= rwnd_req) &&
5816 		    (control->do_not_ref_stcb == 0) &&
5817 		    (no_rcv_needed == 0))) {
5818 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5819 		}
5820 wait_some_more:
5821 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5822 			goto release;
5823 		}
5824 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5825 			goto release;
5826 
5827 		if (hold_rlock == 1) {
5828 			SCTP_INP_READ_UNLOCK(inp);
5829 			hold_rlock = 0;
5830 		}
5831 		if (hold_sblock == 0) {
5832 			SOCKBUF_LOCK(&so->so_rcv);
5833 			hold_sblock = 1;
5834 		}
5835 		if ((copied_so_far) && (control->length == 0) &&
5836 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5837 		    ) {
5838 			goto release;
5839 		}
5840 		if (so->so_rcv.sb_cc <= control->held_length) {
5841 			error = sbwait(&so->so_rcv);
5842 			if (error) {
5843 				goto release;
5844 			}
5845 			control->held_length = 0;
5846 		}
5847 		if (hold_sblock) {
5848 			SOCKBUF_UNLOCK(&so->so_rcv);
5849 			hold_sblock = 0;
5850 		}
5851 		if (control->length == 0) {
5852 			/* still nothing here */
5853 			if (control->end_added == 1) {
5854 				/* he aborted, or is done i.e.did a shutdown */
5855 				out_flags |= MSG_EOR;
5856 				if (control->pdapi_aborted) {
5857 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5858 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5859 
5860 					out_flags |= MSG_TRUNC;
5861 				} else {
5862 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5863 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5864 				}
5865 				goto done_with_control;
5866 			}
5867 			if (so->so_rcv.sb_cc > held_length) {
5868 				control->held_length = so->so_rcv.sb_cc;
5869 				held_length = 0;
5870 			}
5871 			goto wait_some_more;
5872 		} else if (control->data == NULL) {
5873 			/*
5874 			 * we must re-sync since data is probably being
5875 			 * added
5876 			 */
5877 			SCTP_INP_READ_LOCK(inp);
5878 			if ((control->length > 0) && (control->data == NULL)) {
5879 				/*
5880 				 * big trouble.. we have the lock and its
5881 				 * corrupt?
5882 				 */
5883 #ifdef INVARIANTS
5884 				panic("Impossible data==NULL length !=0");
5885 #endif
5886 				out_flags |= MSG_EOR;
5887 				out_flags |= MSG_TRUNC;
5888 				control->length = 0;
5889 				SCTP_INP_READ_UNLOCK(inp);
5890 				goto done_with_control;
5891 			}
5892 			SCTP_INP_READ_UNLOCK(inp);
5893 			/* We will fall around to get more data */
5894 		}
5895 		goto get_more_data;
5896 	} else {
5897 		/*-
5898 		 * Give caller back the mbuf chain,
5899 		 * store in uio_resid the length
5900 		 */
5901 		wakeup_read_socket = 0;
5902 		if ((control->end_added == 0) ||
5903 		    (TAILQ_NEXT(control, next) == NULL)) {
5904 			/* Need to get rlock */
5905 			if (hold_rlock == 0) {
5906 				SCTP_INP_READ_LOCK(inp);
5907 				hold_rlock = 1;
5908 			}
5909 		}
5910 		if (control->end_added) {
5911 			out_flags |= MSG_EOR;
5912 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5913 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5914 		}
5915 		if (control->spec_flags & M_NOTIFICATION) {
5916 			out_flags |= MSG_NOTIFICATION;
5917 		}
5918 		uio->uio_resid = control->length;
5919 		*mp = control->data;
5920 		m = control->data;
5921 		while (m) {
5922 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5923 				sctp_sblog(&so->so_rcv,
5924 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5925 			}
5926 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5927 			freed_so_far += SCTP_BUF_LEN(m);
5928 			freed_so_far += MSIZE;
5929 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5930 				sctp_sblog(&so->so_rcv,
5931 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5932 			}
5933 			m = SCTP_BUF_NEXT(m);
5934 		}
5935 		control->data = control->tail_mbuf = NULL;
5936 		control->length = 0;
5937 		if (out_flags & MSG_EOR) {
5938 			/* Done with this control */
5939 			goto done_with_control;
5940 		}
5941 	}
5942 release:
5943 	if (hold_rlock == 1) {
5944 		SCTP_INP_READ_UNLOCK(inp);
5945 		hold_rlock = 0;
5946 	}
5947 	if (hold_sblock == 1) {
5948 		SOCKBUF_UNLOCK(&so->so_rcv);
5949 		hold_sblock = 0;
5950 	}
5951 	sbunlock(&so->so_rcv);
5952 	sockbuf_lock = 0;
5953 
5954 release_unlocked:
5955 	if (hold_sblock) {
5956 		SOCKBUF_UNLOCK(&so->so_rcv);
5957 		hold_sblock = 0;
5958 	}
5959 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5960 		if ((freed_so_far >= rwnd_req) &&
5961 		    (control && (control->do_not_ref_stcb == 0)) &&
5962 		    (no_rcv_needed == 0))
5963 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5964 	}
5965 out:
5966 	if (msg_flags) {
5967 		*msg_flags = out_flags;
5968 	}
5969 	if (((out_flags & MSG_EOR) == 0) &&
5970 	    ((in_flags & MSG_PEEK) == 0) &&
5971 	    (sinfo) &&
5972 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5973 		struct sctp_extrcvinfo *s_extra;
5974 
5975 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5976 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5977 	}
5978 	if (hold_rlock == 1) {
5979 		SCTP_INP_READ_UNLOCK(inp);
5980 		hold_rlock = 0;
5981 	}
5982 	if (hold_sblock) {
5983 		SOCKBUF_UNLOCK(&so->so_rcv);
5984 		hold_sblock = 0;
5985 	}
5986 	if (sockbuf_lock) {
5987 		sbunlock(&so->so_rcv);
5988 	}
5989 	if (freecnt_applied) {
5990 		/*
5991 		 * The lock on the socket buffer protects us so the free
5992 		 * code will stop. But since we used the socketbuf lock and
5993 		 * the sender uses the tcb_lock to increment, we need to use
5994 		 * the atomic add to the refcnt.
5995 		 */
5996 		if (stcb == NULL) {
5997 #ifdef INVARIANTS
5998 			panic("stcb for refcnt has gone NULL?");
5999 			goto stage_left;
6000 #else
6001 			goto stage_left;
6002 #endif
6003 		}
6004 		atomic_add_int(&stcb->asoc.refcnt, -1);
6005 		freecnt_applied = 0;
6006 		/* Save the value back for next time */
6007 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6008 	}
6009 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6010 		if (stcb) {
6011 			sctp_misc_ints(SCTP_SORECV_DONE,
6012 			    freed_so_far,
6013 			    ((uio) ? (slen - uio->uio_resid) : slen),
6014 			    stcb->asoc.my_rwnd,
6015 			    so->so_rcv.sb_cc);
6016 		} else {
6017 			sctp_misc_ints(SCTP_SORECV_DONE,
6018 			    freed_so_far,
6019 			    ((uio) ? (slen - uio->uio_resid) : slen),
6020 			    0,
6021 			    so->so_rcv.sb_cc);
6022 		}
6023 	}
6024 stage_left:
6025 	if (wakeup_read_socket) {
6026 		sctp_sorwakeup(inp, so);
6027 	}
6028 	return (error);
6029 }
6030 
6031 
6032 #ifdef SCTP_MBUF_LOGGING
6033 struct mbuf *
6034 sctp_m_free(struct mbuf *m)
6035 {
6036 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6037 		if (SCTP_BUF_IS_EXTENDED(m)) {
6038 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6039 		}
6040 	}
6041 	return (m_free(m));
6042 }
6043 
6044 void
6045 sctp_m_freem(struct mbuf *mb)
6046 {
6047 	while (mb != NULL)
6048 		mb = sctp_m_free(mb);
6049 }
6050 
6051 #endif
6052 
/*
 * Request a peer-set-primary for every association holding the given
 * local address: look the address up, build a work item, queue it on the
 * address work queue and kick the ADDR_WQ timer so the iterator picks it
 * up.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the address is not a known
 * local ifa, or ENOMEM if no work item could be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa for the queued work item. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
6099 
6100 
6101 int
6102 sctp_soreceive(struct socket *so,
6103     struct sockaddr **psa,
6104     struct uio *uio,
6105     struct mbuf **mp0,
6106     struct mbuf **controlp,
6107     int *flagsp)
6108 {
6109 	int error, fromlen;
6110 	uint8_t sockbuf[256];
6111 	struct sockaddr *from;
6112 	struct sctp_extrcvinfo sinfo;
6113 	int filling_sinfo = 1;
6114 	struct sctp_inpcb *inp;
6115 
6116 	inp = (struct sctp_inpcb *)so->so_pcb;
6117 	/* pickup the assoc we are reading from */
6118 	if (inp == NULL) {
6119 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6120 		return (EINVAL);
6121 	}
6122 	if ((sctp_is_feature_off(inp,
6123 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6124 	    (controlp == NULL)) {
6125 		/* user does not want the sndrcv ctl */
6126 		filling_sinfo = 0;
6127 	}
6128 	if (psa) {
6129 		from = (struct sockaddr *)sockbuf;
6130 		fromlen = sizeof(sockbuf);
6131 		from->sa_len = 0;
6132 	} else {
6133 		from = NULL;
6134 		fromlen = 0;
6135 	}
6136 
6137 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6138 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6139 	if ((controlp) && (filling_sinfo)) {
6140 		/* copy back the sinfo in a CMSG format */
6141 		if (filling_sinfo)
6142 			*controlp = sctp_build_ctl_nchunk(inp,
6143 			    (struct sctp_sndrcvinfo *)&sinfo);
6144 		else
6145 			*controlp = NULL;
6146 	}
6147 	if (psa) {
6148 		/* copy back the address info */
6149 		if (from && from->sa_len) {
6150 			*psa = sodupsockaddr(from, M_NOWAIT);
6151 		} else {
6152 			*psa = NULL;
6153 		}
6154 	}
6155 	return (error);
6156 }
6157 
6158 
6159 int
6160 sctp_l_soreceive(struct socket *so,
6161     struct sockaddr **name,
6162     struct uio *uio,
6163     char **controlp,
6164     int *controllen,
6165     int *flag)
6166 {
6167 	int error, fromlen;
6168 	uint8_t sockbuf[256];
6169 	struct sockaddr *from;
6170 	struct sctp_extrcvinfo sinfo;
6171 	int filling_sinfo = 1;
6172 	struct sctp_inpcb *inp;
6173 
6174 	inp = (struct sctp_inpcb *)so->so_pcb;
6175 	/* pickup the assoc we are reading from */
6176 	if (inp == NULL) {
6177 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6178 		return (EINVAL);
6179 	}
6180 	if ((sctp_is_feature_off(inp,
6181 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6182 	    (controlp == NULL)) {
6183 		/* user does not want the sndrcv ctl */
6184 		filling_sinfo = 0;
6185 	}
6186 	if (name) {
6187 		from = (struct sockaddr *)sockbuf;
6188 		fromlen = sizeof(sockbuf);
6189 		from->sa_len = 0;
6190 	} else {
6191 		from = NULL;
6192 		fromlen = 0;
6193 	}
6194 
6195 	error = sctp_sorecvmsg(so, uio,
6196 	    (struct mbuf **)NULL,
6197 	    from, fromlen, flag,
6198 	    (struct sctp_sndrcvinfo *)&sinfo,
6199 	    filling_sinfo);
6200 	if ((controlp) && (filling_sinfo)) {
6201 		/*
6202 		 * copy back the sinfo in a CMSG format note that the caller
6203 		 * has reponsibility for freeing the memory.
6204 		 */
6205 		if (filling_sinfo)
6206 			*controlp = sctp_build_ctl_cchunk(inp,
6207 			    controllen,
6208 			    (struct sctp_sndrcvinfo *)&sinfo);
6209 	}
6210 	if (name) {
6211 		/* copy back the address info */
6212 		if (from && from->sa_len) {
6213 			*name = sodupsockaddr(from, M_WAIT);
6214 		} else {
6215 			*name = NULL;
6216 		}
6217 	}
6218 	return (error);
6219 }
6220 
6221 
6222 
6223 
6224 
6225 
6226 
/*
 * Add "totaddr" packed sockaddrs, starting at "addr", to the existing
 * assoc "stcb" as confirmed remote addresses.  Returns the number of
 * addresses added; if sctp_add_remote_addr() fails the assoc is freed,
 * *error is set to ENOBUFS and the partial count is returned.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for a sockaddr of any other family "incr"
		 * keeps its previous value (0 on the first iteration), so
		 * "sa" may not advance.  This presumably relies on callers
		 * pre-validating the list (e.g. via
		 * sctp_connectx_helper_find()) -- confirm before using with
		 * unchecked input.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6267 
6268 struct sctp_tcb *
6269 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6270     int *totaddr, int *num_v4, int *num_v6, int *error,
6271     int limit, int *bad_addr)
6272 {
6273 	struct sockaddr *sa;
6274 	struct sctp_tcb *stcb = NULL;
6275 	size_t incr, at, i;
6276 
6277 	at = incr = 0;
6278 	sa = addr;
6279 	*error = *num_v6 = *num_v4 = 0;
6280 	/* account and validate addresses */
6281 	for (i = 0; i < (size_t)*totaddr; i++) {
6282 		if (sa->sa_family == AF_INET) {
6283 			(*num_v4) += 1;
6284 			incr = sizeof(struct sockaddr_in);
6285 			if (sa->sa_len != incr) {
6286 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6287 				*error = EINVAL;
6288 				*bad_addr = 1;
6289 				return (NULL);
6290 			}
6291 		} else if (sa->sa_family == AF_INET6) {
6292 			struct sockaddr_in6 *sin6;
6293 
6294 			sin6 = (struct sockaddr_in6 *)sa;
6295 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6296 				/* Must be non-mapped for connectx */
6297 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6298 				*error = EINVAL;
6299 				*bad_addr = 1;
6300 				return (NULL);
6301 			}
6302 			(*num_v6) += 1;
6303 			incr = sizeof(struct sockaddr_in6);
6304 			if (sa->sa_len != incr) {
6305 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6306 				*error = EINVAL;
6307 				*bad_addr = 1;
6308 				return (NULL);
6309 			}
6310 		} else {
6311 			*totaddr = i;
6312 			/* we are done */
6313 			break;
6314 		}
6315 		SCTP_INP_INCR_REF(inp);
6316 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6317 		if (stcb != NULL) {
6318 			/* Already have or am bring up an association */
6319 			return (stcb);
6320 		} else {
6321 			SCTP_INP_DECR_REF(inp);
6322 		}
6323 		if ((at + incr) > (size_t)limit) {
6324 			*totaddr = i;
6325 			break;
6326 		}
6327 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6328 	}
6329 	return ((struct sctp_tcb *)NULL);
6330 }
6331 
6332 /*
6333  * sctp_bindx(ADD) for one address.
6334  * assumes all arguments are valid/checked by caller.
6335  */
6336 void
6337 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6338     struct sockaddr *sa, sctp_assoc_t assoc_id,
6339     uint32_t vrf_id, int *error, void *p)
6340 {
6341 	struct sockaddr *addr_touse;
6342 
6343 #ifdef INET6
6344 	struct sockaddr_in sin;
6345 
6346 #endif
6347 
6348 	/* see if we're bound all already! */
6349 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6350 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6351 		*error = EINVAL;
6352 		return;
6353 	}
6354 	addr_touse = sa;
6355 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6356 	if (sa->sa_family == AF_INET6) {
6357 		struct sockaddr_in6 *sin6;
6358 
6359 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6360 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6361 			*error = EINVAL;
6362 			return;
6363 		}
6364 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6365 			/* can only bind v6 on PF_INET6 sockets */
6366 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6367 			*error = EINVAL;
6368 			return;
6369 		}
6370 		sin6 = (struct sockaddr_in6 *)addr_touse;
6371 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6372 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6373 			    SCTP_IPV6_V6ONLY(inp)) {
6374 				/* can't bind v4-mapped on PF_INET sockets */
6375 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6376 				*error = EINVAL;
6377 				return;
6378 			}
6379 			in6_sin6_2_sin(&sin, sin6);
6380 			addr_touse = (struct sockaddr *)&sin;
6381 		}
6382 	}
6383 #endif
6384 	if (sa->sa_family == AF_INET) {
6385 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6386 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6387 			*error = EINVAL;
6388 			return;
6389 		}
6390 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6391 		    SCTP_IPV6_V6ONLY(inp)) {
6392 			/* can't bind v4 on PF_INET sockets */
6393 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6394 			*error = EINVAL;
6395 			return;
6396 		}
6397 	}
6398 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6399 		if (p == NULL) {
6400 			/* Can't get proc for Net/Open BSD */
6401 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6402 			*error = EINVAL;
6403 			return;
6404 		}
6405 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6406 		return;
6407 	}
6408 	/*
6409 	 * No locks required here since bind and mgmt_ep_sa all do their own
6410 	 * locking. If we do something for the FIX: below we may need to
6411 	 * lock in that case.
6412 	 */
6413 	if (assoc_id == 0) {
6414 		/* add the address */
6415 		struct sctp_inpcb *lep;
6416 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6417 
6418 		/* validate the incoming port */
6419 		if ((lsin->sin_port != 0) &&
6420 		    (lsin->sin_port != inp->sctp_lport)) {
6421 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6422 			*error = EINVAL;
6423 			return;
6424 		} else {
6425 			/* user specified 0 port, set it to existing port */
6426 			lsin->sin_port = inp->sctp_lport;
6427 		}
6428 
6429 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6430 		if (lep != NULL) {
6431 			/*
6432 			 * We must decrement the refcount since we have the
6433 			 * ep already and are binding. No remove going on
6434 			 * here.
6435 			 */
6436 			SCTP_INP_DECR_REF(lep);
6437 		}
6438 		if (lep == inp) {
6439 			/* already bound to it.. ok */
6440 			return;
6441 		} else if (lep == NULL) {
6442 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6443 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6444 			    SCTP_ADD_IP_ADDRESS,
6445 			    vrf_id, NULL);
6446 		} else {
6447 			*error = EADDRINUSE;
6448 		}
6449 		if (*error)
6450 			return;
6451 	} else {
6452 		/*
6453 		 * FIX: decide whether we allow assoc based bindx
6454 		 */
6455 	}
6456 }
6457 
6458 /*
6459  * sctp_bindx(DELETE) for one address.
6460  * assumes all arguments are valid/checked by caller.
6461  */
6462 void
6463 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6464     struct sockaddr *sa, sctp_assoc_t assoc_id,
6465     uint32_t vrf_id, int *error)
6466 {
6467 	struct sockaddr *addr_touse;
6468 
6469 #ifdef INET6
6470 	struct sockaddr_in sin;
6471 
6472 #endif
6473 
6474 	/* see if we're bound all already! */
6475 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6476 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6477 		*error = EINVAL;
6478 		return;
6479 	}
6480 	addr_touse = sa;
6481 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6482 	if (sa->sa_family == AF_INET6) {
6483 		struct sockaddr_in6 *sin6;
6484 
6485 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6486 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6487 			*error = EINVAL;
6488 			return;
6489 		}
6490 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6491 			/* can only bind v6 on PF_INET6 sockets */
6492 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 			*error = EINVAL;
6494 			return;
6495 		}
6496 		sin6 = (struct sockaddr_in6 *)addr_touse;
6497 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6498 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6499 			    SCTP_IPV6_V6ONLY(inp)) {
6500 				/* can't bind mapped-v4 on PF_INET sockets */
6501 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6502 				*error = EINVAL;
6503 				return;
6504 			}
6505 			in6_sin6_2_sin(&sin, sin6);
6506 			addr_touse = (struct sockaddr *)&sin;
6507 		}
6508 	}
6509 #endif
6510 	if (sa->sa_family == AF_INET) {
6511 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6512 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6513 			*error = EINVAL;
6514 			return;
6515 		}
6516 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6517 		    SCTP_IPV6_V6ONLY(inp)) {
6518 			/* can't bind v4 on PF_INET sockets */
6519 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6520 			*error = EINVAL;
6521 			return;
6522 		}
6523 	}
6524 	/*
6525 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6526 	 * below is ever changed we may need to lock before calling
6527 	 * association level binding.
6528 	 */
6529 	if (assoc_id == 0) {
6530 		/* delete the address */
6531 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6532 		    SCTP_DEL_IP_ADDRESS,
6533 		    vrf_id, NULL);
6534 	} else {
6535 		/*
6536 		 * FIX: decide whether we allow assoc based bindx
6537 		 */
6538 	}
6539 }
6540 
6541 /*
6542  * returns the valid local address count for an assoc, taking into account
6543  * all scoping rules
6544  */
6545 int
6546 sctp_local_addr_count(struct sctp_tcb *stcb)
6547 {
6548 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
6549 	int ipv4_addr_legal, ipv6_addr_legal;
6550 	struct sctp_vrf *vrf;
6551 	struct sctp_ifn *sctp_ifn;
6552 	struct sctp_ifa *sctp_ifa;
6553 	int count = 0;
6554 
6555 	/* Turn on all the appropriate scopes */
6556 	loopback_scope = stcb->asoc.loopback_scope;
6557 	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
6558 	local_scope = stcb->asoc.local_scope;
6559 	site_scope = stcb->asoc.site_scope;
6560 	ipv4_addr_legal = ipv6_addr_legal = 0;
6561 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6562 		ipv6_addr_legal = 1;
6563 		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
6564 			ipv4_addr_legal = 1;
6565 		}
6566 	} else {
6567 		ipv4_addr_legal = 1;
6568 	}
6569 
6570 	SCTP_IPI_ADDR_RLOCK();
6571 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6572 	if (vrf == NULL) {
6573 		/* no vrf, no addresses */
6574 		SCTP_IPI_ADDR_RUNLOCK();
6575 		return (0);
6576 	}
6577 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6578 		/*
6579 		 * bound all case: go through all ifns on the vrf
6580 		 */
6581 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6582 			if ((loopback_scope == 0) &&
6583 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6584 				continue;
6585 			}
6586 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6587 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6588 					continue;
6589 				switch (sctp_ifa->address.sa.sa_family) {
6590 				case AF_INET:
6591 					if (ipv4_addr_legal) {
6592 						struct sockaddr_in *sin;
6593 
6594 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
6595 						if (sin->sin_addr.s_addr == 0) {
6596 							/*
6597 							 * skip unspecified
6598 							 * addrs
6599 							 */
6600 							continue;
6601 						}
6602 						if ((ipv4_local_scope == 0) &&
6603 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6604 							continue;
6605 						}
6606 						/* count this one */
6607 						count++;
6608 					} else {
6609 						continue;
6610 					}
6611 					break;
6612 #ifdef INET6
6613 				case AF_INET6:
6614 					if (ipv6_addr_legal) {
6615 						struct sockaddr_in6 *sin6;
6616 
6617 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
6618 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6619 							continue;
6620 						}
6621 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6622 							if (local_scope == 0)
6623 								continue;
6624 							if (sin6->sin6_scope_id == 0) {
6625 								if (sa6_recoverscope(sin6) != 0)
6626 									/*
6627 									 *
6628 									 * bad
6629 									 *
6630 									 * li
6631 									 * nk
6632 									 *
6633 									 * loc
6634 									 * al
6635 									 *
6636 									 * add
6637 									 * re
6638 									 * ss
6639 									 * */
6640 									continue;
6641 							}
6642 						}
6643 						if ((site_scope == 0) &&
6644 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6645 							continue;
6646 						}
6647 						/* count this one */
6648 						count++;
6649 					}
6650 					break;
6651 #endif
6652 				default:
6653 					/* TSNH */
6654 					break;
6655 				}
6656 			}
6657 		}
6658 	} else {
6659 		/*
6660 		 * subset bound case
6661 		 */
6662 		struct sctp_laddr *laddr;
6663 
6664 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6665 		    sctp_nxt_addr) {
6666 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6667 				continue;
6668 			}
6669 			/* count this one */
6670 			count++;
6671 		}
6672 	}
6673 	SCTP_IPI_ADDR_RUNLOCK();
6674 	return (count);
6675 }
6676 
6677 #if defined(SCTP_LOCAL_TRACE_BUF)
6678 
/*
 * Append one entry to the global circular trace buffer.  The slot index
 * is claimed with a lock-free CAS loop, so concurrent writers never
 * block (an unread slot may, however, be overwritten by a later entry).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Atomically reserve a slot: bump the index, wrapping back to 1. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* The writer that observed the wrap writes into slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6704 
6705 #endif
/*
 * We will need to add support for binding the ports and such here so
 * that we can do UDP tunneling.  In the meantime, we return an error.
 */
6711 
void
sctp_over_udp_stop(void)
{
	/* Nothing to tear down yet: SCTP-over-UDP tunneling is unimplemented. */
}
int
sctp_over_udp_start(void)
{
	/* SCTP-over-UDP tunneling is unimplemented: always report failure. */
	return (-1);
}
6722