xref: /freebsd/sys/netinet/sctputil.c (revision a9148abd9da5db2f1c682fb17bed791845fc41c9)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_crc32.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_cc_functions.h>
52 
53 #define NUMBER_OF_MTU_SIZES 18
54 
55 
56 #ifndef KTR_SCTP
57 #define KTR_SCTP KTR_SUBSYS
58 #endif
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 
221 }
222 
223 
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252     int from)
253 {
254 	struct sctp_cwnd_log sctp_clog;
255 
256 	if (control == NULL) {
257 		SCTP_PRINTF("Gak log of NULL?\n");
258 		return;
259 	}
260 	sctp_clog.x.strlog.stcb = control->stcb;
261 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 	sctp_clog.x.strlog.strm = control->sinfo_stream;
264 	if (poschk != NULL) {
265 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 	} else {
268 		sctp_clog.x.strlog.e_tsn = 0;
269 		sctp_clog.x.strlog.e_sseq = 0;
270 	}
271 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 	    SCTP_LOG_EVENT_STRM,
273 	    from,
274 	    sctp_clog.x.misc.log1,
275 	    sctp_clog.x.misc.log2,
276 	    sctp_clog.x.misc.log3,
277 	    sctp_clog.x.misc.log4);
278 
279 }
280 
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 	struct sctp_cwnd_log sctp_clog;
285 
286 	sctp_clog.x.cwnd.net = net;
287 	if (stcb->asoc.send_queue_cnt > 255)
288 		sctp_clog.x.cwnd.cnt_in_send = 255;
289 	else
290 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 	if (stcb->asoc.stream_queue_cnt > 255)
292 		sctp_clog.x.cwnd.cnt_in_str = 255;
293 	else
294 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295 
296 	if (net) {
297 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 		sctp_clog.x.cwnd.inflight = net->flight_size;
299 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 	}
303 	if (SCTP_CWNDLOG_PRESEND == from) {
304 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 	}
306 	sctp_clog.x.cwnd.cwnd_augment = augment;
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	    SCTP_LOG_EVENT_CWND,
309 	    from,
310 	    sctp_clog.x.misc.log1,
311 	    sctp_clog.x.misc.log2,
312 	    sctp_clog.x.misc.log3,
313 	    sctp_clog.x.misc.log4);
314 
315 }
316 
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 	struct sctp_cwnd_log sctp_clog;
321 
322 	memset(&sctp_clog, 0, sizeof(sctp_clog));
323 	if (inp) {
324 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325 
326 	} else {
327 		sctp_clog.x.lock.sock = (void *)NULL;
328 	}
329 	sctp_clog.x.lock.inp = (void *)inp;
330 	if (stcb) {
331 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 	} else {
333 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 	}
335 	if (inp) {
336 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 	} else {
339 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 	if (inp->sctp_socket) {
344 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 	} else {
348 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 	    SCTP_LOG_LOCK_EVENT,
354 	    from,
355 	    sctp_clog.x.misc.log1,
356 	    sctp_clog.x.misc.log2,
357 	    sctp_clog.x.misc.log3,
358 	    sctp_clog.x.misc.log4);
359 
360 }
361 
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 	struct sctp_cwnd_log sctp_clog;
366 
367 	memset(&sctp_clog, 0, sizeof(sctp_clog));
368 	sctp_clog.x.cwnd.net = net;
369 	sctp_clog.x.cwnd.cwnd_new_value = error;
370 	sctp_clog.x.cwnd.inflight = net->flight_size;
371 	sctp_clog.x.cwnd.cwnd_augment = burst;
372 	if (stcb->asoc.send_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_send = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 	if (stcb->asoc.stream_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_str = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 	    SCTP_LOG_EVENT_MAXBURST,
382 	    from,
383 	    sctp_clog.x.misc.log1,
384 	    sctp_clog.x.misc.log2,
385 	    sctp_clog.x.misc.log3,
386 	    sctp_clog.x.misc.log4);
387 
388 }
389 
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 	struct sctp_cwnd_log sctp_clog;
394 
395 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 	sctp_clog.x.rwnd.send_size = snd_size;
397 	sctp_clog.x.rwnd.overhead = overhead;
398 	sctp_clog.x.rwnd.new_rwnd = 0;
399 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 	    SCTP_LOG_EVENT_RWND,
401 	    from,
402 	    sctp_clog.x.misc.log1,
403 	    sctp_clog.x.misc.log2,
404 	    sctp_clog.x.misc.log3,
405 	    sctp_clog.x.misc.log4);
406 }
407 
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 	struct sctp_cwnd_log sctp_clog;
412 
413 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 	sctp_clog.x.rwnd.send_size = flight_size;
415 	sctp_clog.x.rwnd.overhead = overhead;
416 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_RWND,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 }
425 
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 	struct sctp_cwnd_log sctp_clog;
430 
431 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 	sctp_clog.x.mbcnt.size_change = book;
433 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 	    SCTP_LOG_EVENT_MBCNT,
437 	    from,
438 	    sctp_clog.x.misc.log1,
439 	    sctp_clog.x.misc.log2,
440 	    sctp_clog.x.misc.log3,
441 	    sctp_clog.x.misc.log4);
442 
443 }
444 
445 void
446 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
447 {
448 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
449 	    SCTP_LOG_MISC_EVENT,
450 	    from,
451 	    a, b, c, d);
452 }
453 
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 	struct sctp_cwnd_log sctp_clog;
458 
459 	sctp_clog.x.wake.stcb = (void *)stcb;
460 	sctp_clog.x.wake.wake_cnt = wake_cnt;
461 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464 
465 	if (stcb->asoc.stream_queue_cnt < 0xff)
466 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 	else
468 		sctp_clog.x.wake.stream_qcnt = 0xff;
469 
470 	if (stcb->asoc.chunks_on_out_queue < 0xff)
471 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 	else
473 		sctp_clog.x.wake.chunks_on_oque = 0xff;
474 
475 	sctp_clog.x.wake.sctpflags = 0;
476 	/* set in the defered mode stuff */
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 		sctp_clog.x.wake.sctpflags |= 1;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 		sctp_clog.x.wake.sctpflags |= 2;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 		sctp_clog.x.wake.sctpflags |= 4;
483 	/* what about the sb */
484 	if (stcb->sctp_socket) {
485 		struct socket *so = stcb->sctp_socket;
486 
487 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 	} else {
489 		sctp_clog.x.wake.sbflags = 0xff;
490 	}
491 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 	    SCTP_LOG_EVENT_WAKE,
493 	    from,
494 	    sctp_clog.x.misc.log1,
495 	    sctp_clog.x.misc.log2,
496 	    sctp_clog.x.misc.log3,
497 	    sctp_clog.x.misc.log4);
498 
499 }
500 
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 	sctp_clog.x.blk.sndlen = sendlen;
513 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 	    SCTP_LOG_EVENT_BLOCK,
515 	    from,
516 	    sctp_clog.x.misc.log1,
517 	    sctp_clog.x.misc.log2,
518 	    sctp_clog.x.misc.log3,
519 	    sctp_clog.x.misc.log4);
520 
521 }
522 
/*
 * Stub for the socket-option path that would copy out the statistics
 * log. Always succeeds without writing anything; the log is expected
 * to be retrieved through ktrdump instead.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
529 
530 #ifdef SCTP_AUDITING_ENABLED
531 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
532 static int sctp_audit_indx = 0;
533 
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 	int i;
539 	int cnt;
540 
541 	cnt = 0;
542 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 		if ((sctp_audit_data[i][0] == 0xe0) &&
544 		    (sctp_audit_data[i][1] == 0x01)) {
545 			cnt = 0;
546 			SCTP_PRINTF("\n");
547 		} else if (sctp_audit_data[i][0] == 0xf0) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			SCTP_PRINTF("\n");
553 			cnt = 0;
554 		}
555 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 		    (uint32_t) sctp_audit_data[i][1]);
557 		cnt++;
558 		if ((cnt % 14) == 0)
559 			SCTP_PRINTF("\n");
560 	}
561 	for (i = 0; i < sctp_audit_indx; i++) {
562 		if ((sctp_audit_data[i][0] == 0xe0) &&
563 		    (sctp_audit_data[i][1] == 0x01)) {
564 			cnt = 0;
565 			SCTP_PRINTF("\n");
566 		} else if (sctp_audit_data[i][0] == 0xf0) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			SCTP_PRINTF("\n");
572 			cnt = 0;
573 		}
574 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 		    (uint32_t) sctp_audit_data[i][1]);
576 		cnt++;
577 		if ((cnt % 14) == 0)
578 			SCTP_PRINTF("\n");
579 	}
580 	SCTP_PRINTF("\n");
581 }
582 
583 void
584 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
585     struct sctp_nets *net)
586 {
587 	int resend_cnt, tot_out, rep, tot_book_cnt;
588 	struct sctp_nets *lnet;
589 	struct sctp_tmit_chunk *chk;
590 
591 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
592 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
593 	sctp_audit_indx++;
594 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
595 		sctp_audit_indx = 0;
596 	}
597 	if (inp == NULL) {
598 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
599 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
600 		sctp_audit_indx++;
601 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 			sctp_audit_indx = 0;
603 		}
604 		return;
605 	}
606 	if (stcb == NULL) {
607 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
608 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
609 		sctp_audit_indx++;
610 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
611 			sctp_audit_indx = 0;
612 		}
613 		return;
614 	}
615 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
616 	sctp_audit_data[sctp_audit_indx][1] =
617 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
618 	sctp_audit_indx++;
619 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
620 		sctp_audit_indx = 0;
621 	}
622 	rep = 0;
623 	tot_book_cnt = 0;
624 	resend_cnt = tot_out = 0;
625 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
626 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
627 			resend_cnt++;
628 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
629 			tot_out += chk->book_size;
630 			tot_book_cnt++;
631 		}
632 	}
633 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
634 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
635 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
636 		sctp_audit_indx++;
637 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
638 			sctp_audit_indx = 0;
639 		}
640 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
641 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
642 		rep = 1;
643 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
644 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
645 		sctp_audit_data[sctp_audit_indx][1] =
646 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
647 		sctp_audit_indx++;
648 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
649 			sctp_audit_indx = 0;
650 		}
651 	}
652 	if (tot_out != stcb->asoc.total_flight) {
653 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
654 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
655 		sctp_audit_indx++;
656 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
657 			sctp_audit_indx = 0;
658 		}
659 		rep = 1;
660 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
661 		    (int)stcb->asoc.total_flight);
662 		stcb->asoc.total_flight = tot_out;
663 	}
664 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
665 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
666 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
667 		sctp_audit_indx++;
668 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
669 			sctp_audit_indx = 0;
670 		}
671 		rep = 1;
672 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
673 
674 		stcb->asoc.total_flight_count = tot_book_cnt;
675 	}
676 	tot_out = 0;
677 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
678 		tot_out += lnet->flight_size;
679 	}
680 	if (tot_out != stcb->asoc.total_flight) {
681 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
682 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
683 		sctp_audit_indx++;
684 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
685 			sctp_audit_indx = 0;
686 		}
687 		rep = 1;
688 		SCTP_PRINTF("real flight:%d net total was %d\n",
689 		    stcb->asoc.total_flight, tot_out);
690 		/* now corrective action */
691 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
692 
693 			tot_out = 0;
694 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
695 				if ((chk->whoTo == lnet) &&
696 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
697 					tot_out += chk->book_size;
698 				}
699 			}
700 			if (lnet->flight_size != tot_out) {
701 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
702 				    (uint32_t) lnet, lnet->flight_size,
703 				    tot_out);
704 				lnet->flight_size = tot_out;
705 			}
706 		}
707 	}
708 	if (rep) {
709 		sctp_print_audit_report();
710 	}
711 }
712 
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716 
717 	sctp_audit_data[sctp_audit_indx][0] = ev;
718 	sctp_audit_data[sctp_audit_indx][1] = fd;
719 	sctp_audit_indx++;
720 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 		sctp_audit_indx = 0;
722 	}
723 }
724 
725 #endif
726 
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned. Entries are in ascending order; NUMBER_OF_MTU_SIZES must
 * equal the number of entries here (see find_next_best_mtu()).
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
751 
752 void
753 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
754 {
755 	struct sctp_association *asoc;
756 	struct sctp_nets *net;
757 
758 	asoc = &stcb->asoc;
759 
760 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
762 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
763 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
764 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
765 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
766 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
767 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
768 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
769 	}
770 }
771 
772 int
773 find_next_best_mtu(int totsz)
774 {
775 	int i, perfer;
776 
777 	/*
778 	 * if we are in here we must find the next best fit based on the
779 	 * size of the dg that failed to be sent.
780 	 */
781 	perfer = 0;
782 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
783 		if (totsz < sctp_mtu_sizes[i]) {
784 			perfer = i - 1;
785 			if (perfer < 0)
786 				perfer = 0;
787 			break;
788 		}
789 	}
790 	return (sctp_mtu_sizes[perfer]);
791 }
792 
793 void
794 sctp_fill_random_store(struct sctp_pcb *m)
795 {
796 	/*
797 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
798 	 * our counter. The result becomes our good random numbers and we
799 	 * then setup to give these out. Note that we do no locking to
800 	 * protect this. This is ok, since if competing folks call this we
801 	 * will get more gobbled gook in the random store which is what we
802 	 * want. There is a danger that two guys will use the same random
803 	 * numbers, but thats ok too since that is random as well :->
804 	 */
805 	m->store_at = 0;
806 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
807 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
808 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
809 	m->random_counter++;
810 }
811 
812 uint32_t
813 sctp_select_initial_TSN(struct sctp_pcb *inp)
814 {
815 	/*
816 	 * A true implementation should use random selection process to get
817 	 * the initial stream sequence number, using RFC1750 as a good
818 	 * guideline
819 	 */
820 	uint32_t x, *xp;
821 	uint8_t *p;
822 	int store_at, new_store;
823 
824 	if (inp->initial_sequence_debug != 0) {
825 		uint32_t ret;
826 
827 		ret = inp->initial_sequence_debug;
828 		inp->initial_sequence_debug++;
829 		return (ret);
830 	}
831 retry:
832 	store_at = inp->store_at;
833 	new_store = store_at + sizeof(uint32_t);
834 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
835 		new_store = 0;
836 	}
837 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
838 		goto retry;
839 	}
840 	if (new_store == 0) {
841 		/* Refill the random store */
842 		sctp_fill_random_store(inp);
843 	}
844 	p = &inp->random_store[store_at];
845 	xp = (uint32_t *) p;
846 	x = *xp;
847 	return (x);
848 }
849 
850 uint32_t
851 sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
852 {
853 	u_long x, not_done;
854 	struct timeval now;
855 
856 	(void)SCTP_GETTIME_TIMEVAL(&now);
857 	not_done = 1;
858 	while (not_done) {
859 		x = sctp_select_initial_TSN(&inp->sctp_ep);
860 		if (x == 0) {
861 			/* we never use 0 */
862 			continue;
863 		}
864 		if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
865 			not_done = 0;
866 		}
867 	}
868 	return (x);
869 }
870 
871 int
872 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
873     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
874 {
875 	struct sctp_association *asoc;
876 
877 	/*
878 	 * Anything set to zero is taken care of by the allocation routine's
879 	 * bzero
880 	 */
881 
882 	/*
883 	 * Up front select what scoping to apply on addresses I tell my peer
884 	 * Not sure what to do with these right now, we will need to come up
885 	 * with a way to set them. We may need to pass them through from the
886 	 * caller in the sctp_aloc_assoc() function.
887 	 */
888 	int i;
889 
890 	asoc = &stcb->asoc;
891 	/* init all variables to a known value. */
892 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
893 	asoc->max_burst = m->sctp_ep.max_burst;
894 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
895 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
896 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
897 	/* JRS 5/21/07 - Init CMT PF variables */
898 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
899 	asoc->sctp_frag_point = m->sctp_frag_point;
900 #ifdef INET
901 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
902 #else
903 	asoc->default_tos = 0;
904 #endif
905 
906 #ifdef INET6
907 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
908 #else
909 	asoc->default_flowlabel = 0;
910 #endif
911 	asoc->sb_send_resv = 0;
912 	if (override_tag) {
913 		struct timeval now;
914 
915 		(void)SCTP_GETTIME_TIMEVAL(&now);
916 		if (sctp_is_in_timewait(override_tag)) {
917 			/*
918 			 * It must be in the time-wait hash, we put it there
919 			 * when we aloc one. If not the peer is playing
920 			 * games.
921 			 */
922 			asoc->my_vtag = override_tag;
923 		} else {
924 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
925 #ifdef INVARIANTS
926 			panic("Huh is_in_timewait fails");
927 #endif
928 			return (ENOMEM);
929 		}
930 
931 	} else {
932 		asoc->my_vtag = sctp_select_a_tag(m, 1);
933 	}
934 	/* Get the nonce tags */
935 	asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
936 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
937 	asoc->vrf_id = vrf_id;
938 
939 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
940 		asoc->hb_is_disabled = 1;
941 	else
942 		asoc->hb_is_disabled = 0;
943 
944 #ifdef SCTP_ASOCLOG_OF_TSNS
945 	asoc->tsn_in_at = 0;
946 	asoc->tsn_out_at = 0;
947 	asoc->tsn_in_wrapped = 0;
948 	asoc->tsn_out_wrapped = 0;
949 	asoc->cumack_log_at = 0;
950 	asoc->cumack_log_atsnt = 0;
951 #endif
952 #ifdef SCTP_FS_SPEC_LOG
953 	asoc->fs_index = 0;
954 #endif
955 	asoc->refcnt = 0;
956 	asoc->assoc_up_sent = 0;
957 	asoc->assoc_id = asoc->my_vtag;
958 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
959 	    sctp_select_initial_TSN(&m->sctp_ep);
960 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
961 	/* we are optimisitic here */
962 	asoc->peer_supports_pktdrop = 1;
963 
964 	asoc->sent_queue_retran_cnt = 0;
965 
966 	/* for CMT */
967 	asoc->last_net_data_came_from = NULL;
968 
969 	/* This will need to be adjusted */
970 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
971 	asoc->last_acked_seq = asoc->init_seq_number - 1;
972 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
973 	asoc->asconf_seq_in = asoc->last_acked_seq;
974 
975 	/* here we are different, we hold the next one we expect */
976 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
977 
978 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
979 	asoc->initial_rto = m->sctp_ep.initial_rto;
980 
981 	asoc->max_init_times = m->sctp_ep.max_init_times;
982 	asoc->max_send_times = m->sctp_ep.max_send_times;
983 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
984 	asoc->free_chunk_cnt = 0;
985 
986 	asoc->iam_blocking = 0;
987 	/* ECN Nonce initialization */
988 	asoc->context = m->sctp_context;
989 	asoc->def_send = m->def_send;
990 	asoc->ecn_nonce_allowed = 0;
991 	asoc->receiver_nonce_sum = 1;
992 	asoc->nonce_sum_expect_base = 1;
993 	asoc->nonce_sum_check = 1;
994 	asoc->nonce_resync_tsn = 0;
995 	asoc->nonce_wait_for_ecne = 0;
996 	asoc->nonce_wait_tsn = 0;
997 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
998 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
999 	asoc->pr_sctp_cnt = 0;
1000 	asoc->total_output_queue_size = 0;
1001 
1002 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1003 		struct in6pcb *inp6;
1004 
1005 		/* Its a V6 socket */
1006 		inp6 = (struct in6pcb *)m;
1007 		asoc->ipv6_addr_legal = 1;
1008 		/* Now look at the binding flag to see if V4 will be legal */
1009 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1010 			asoc->ipv4_addr_legal = 1;
1011 		} else {
1012 			/* V4 addresses are NOT legal on the association */
1013 			asoc->ipv4_addr_legal = 0;
1014 		}
1015 	} else {
1016 		/* Its a V4 socket, no - V6 */
1017 		asoc->ipv4_addr_legal = 1;
1018 		asoc->ipv6_addr_legal = 0;
1019 	}
1020 
1021 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1022 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1023 
1024 	asoc->smallest_mtu = m->sctp_frag_point;
1025 #ifdef SCTP_PRINT_FOR_B_AND_M
1026 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1027 	    asoc->smallest_mtu);
1028 #endif
1029 	asoc->minrto = m->sctp_ep.sctp_minrto;
1030 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1031 
1032 	asoc->locked_on_sending = NULL;
1033 	asoc->stream_locked_on = 0;
1034 	asoc->ecn_echo_cnt_onq = 0;
1035 	asoc->stream_locked = 0;
1036 
1037 	asoc->send_sack = 1;
1038 
1039 	LIST_INIT(&asoc->sctp_restricted_addrs);
1040 
1041 	TAILQ_INIT(&asoc->nets);
1042 	TAILQ_INIT(&asoc->pending_reply_queue);
1043 	TAILQ_INIT(&asoc->asconf_ack_sent);
1044 	/* Setup to fill the hb random cache at first HB */
1045 	asoc->hb_random_idx = 4;
1046 
1047 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1048 
1049 	/*
1050 	 * JRS - Pick the default congestion control module based on the
1051 	 * sysctl.
1052 	 */
1053 	switch (m->sctp_ep.sctp_default_cc_module) {
1054 		/* JRS - Standard TCP congestion control */
1055 	case SCTP_CC_RFC2581:
1056 		{
1057 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1058 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1066 			break;
1067 		}
1068 		/* JRS - High Speed TCP congestion control (Floyd) */
1069 	case SCTP_CC_HSTCP:
1070 		{
1071 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1072 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1080 			break;
1081 		}
1082 		/* JRS - HTCP congestion control */
1083 	case SCTP_CC_HTCP:
1084 		{
1085 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1086 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1094 			break;
1095 		}
1096 		/* JRS - By default, use RFC2581 */
1097 	default:
1098 		{
1099 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1100 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1101 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1102 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1103 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1104 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1105 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1106 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1107 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1108 			break;
1109 		}
1110 	}
1111 
1112 	/*
1113 	 * Now the stream parameters, here we allocate space for all streams
1114 	 * that we request by default.
1115 	 */
1116 	asoc->streamoutcnt = asoc->pre_open_streams =
1117 	    m->sctp_ep.pre_open_stream_count;
1118 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1119 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1120 	    SCTP_M_STRMO);
1121 	if (asoc->strmout == NULL) {
1122 		/* big trouble no memory */
1123 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1124 		return (ENOMEM);
1125 	}
1126 	for (i = 0; i < asoc->streamoutcnt; i++) {
1127 		/*
1128 		 * inbound side must be set to 0xffff, also NOTE when we get
1129 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1130 		 * count (streamoutcnt) but first check if we sent to any of
1131 		 * the upper streams that were dropped (if some were). Those
1132 		 * that were dropped must be notified to the upper layer as
1133 		 * failed to send.
1134 		 */
1135 		asoc->strmout[i].next_sequence_sent = 0x0;
1136 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1137 		asoc->strmout[i].stream_no = i;
1138 		asoc->strmout[i].last_msg_incomplete = 0;
1139 		asoc->strmout[i].next_spoke.tqe_next = 0;
1140 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1141 	}
1142 	/* Now the mapping array */
1143 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1144 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1145 	    SCTP_M_MAP);
1146 	if (asoc->mapping_array == NULL) {
1147 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1152 	/* Now the init of the other outqueues */
1153 	TAILQ_INIT(&asoc->free_chunks);
1154 	TAILQ_INIT(&asoc->out_wheel);
1155 	TAILQ_INIT(&asoc->control_send_queue);
1156 	TAILQ_INIT(&asoc->asconf_send_queue);
1157 	TAILQ_INIT(&asoc->send_queue);
1158 	TAILQ_INIT(&asoc->sent_queue);
1159 	TAILQ_INIT(&asoc->reasmqueue);
1160 	TAILQ_INIT(&asoc->resetHead);
1161 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1162 	TAILQ_INIT(&asoc->asconf_queue);
1163 	/* authentication fields */
1164 	asoc->authinfo.random = NULL;
1165 	asoc->authinfo.assoc_key = NULL;
1166 	asoc->authinfo.assoc_keyid = 0;
1167 	asoc->authinfo.recv_key = NULL;
1168 	asoc->authinfo.recv_keyid = 0;
1169 	LIST_INIT(&asoc->shared_keys);
1170 	asoc->marked_retrans = 0;
1171 	asoc->timoinit = 0;
1172 	asoc->timodata = 0;
1173 	asoc->timosack = 0;
1174 	asoc->timoshutdown = 0;
1175 	asoc->timoheartbeat = 0;
1176 	asoc->timocookie = 0;
1177 	asoc->timoshutdownack = 0;
1178 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1179 	asoc->discontinuity_time = asoc->start_time;
1180 	/*
1181 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1182 	 * freed later whe the association is freed.
1183 	 */
1184 	return (0);
1185 }
1186 
1187 int
1188 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1189 {
1190 	/* mapping array needs to grow */
1191 	uint8_t *new_array;
1192 	uint32_t new_size;
1193 
1194 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1195 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1196 	if (new_array == NULL) {
1197 		/* can't get more, forget it */
1198 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1199 		    new_size);
1200 		return (-1);
1201 	}
1202 	memset(new_array, 0, new_size);
1203 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1204 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1205 	asoc->mapping_array = new_array;
1206 	asoc->mapping_array_size = new_size;
1207 	return (0);
1208 }
1209 
1210 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Walk all endpoints (inps) and their associations (stcbs), invoking the
 * caller-supplied callbacks stored in 'it' on each one that matches the
 * iterator's pcb_flags/pcb_features and asoc_state filters.  Consumes and
 * frees 'it' when the walk completes.  Called with no locks held; takes
 * the global ITERATOR lock for the duration and the per-inp/per-tcb locks
 * as it goes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* drop the reference taken when the iterator was queued */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* iterator owns itself; free it now that we are done */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints whose flags/features don't match the filters */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): downgrade from write to read lock is not atomic
	 * here (WUNLOCK then RLOCK) — presumably acceptable because the
	 * ITERATOR lock is still held; confirm against the locking rules.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* nothing to iterate on this inp; run the end callback */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* hold a refcnt so the stcb can't vanish while unlocked */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this empty WLOCK/WUNLOCK pair looks like a
	 * deliberate barrier to let pending writers drain — confirm.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1337 
/*
 * Drain the global iterator work queue: pop each queued sctp_iterator
 * and run it via sctp_iterator_work().  The WQ lock is dropped around
 * each work item (sctp_iterator_work takes its own locks) and retaken
 * to fetch the next one, so new iterators may be queued concurrently;
 * the trailing re-check catches anything added after the loop exits.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it = NULL;

	/* This function is called with the WQ lock in place */

	SCTP_BASE_INFO(iterator_running) = 1;
again:
	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
	while (it) {
		/* now lets work on this one */
		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
		/* drop the WQ lock while running; the work takes its own locks */
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		sctp_iterator_work(it);
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
	}
	/* something may have been queued while the lock was dropped */
	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
		goto again;
	}
	SCTP_BASE_INFO(iterator_running) = 0;
	return;
}
1363 
1364 #endif
1365 
1366 
1367 static void
1368 sctp_handle_addr_wq(void)
1369 {
1370 	/* deal with the ADDR wq from the rtsock calls */
1371 	struct sctp_laddr *wi;
1372 	struct sctp_asconf_iterator *asc;
1373 
1374 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1375 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1376 	if (asc == NULL) {
1377 		/* Try later, no memory */
1378 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1379 		    (struct sctp_inpcb *)NULL,
1380 		    (struct sctp_tcb *)NULL,
1381 		    (struct sctp_nets *)NULL);
1382 		return;
1383 	}
1384 	LIST_INIT(&asc->list_of_work);
1385 	asc->cnt = 0;
1386 	SCTP_IPI_ITERATOR_WQ_LOCK();
1387 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1388 	while (wi != NULL) {
1389 		LIST_REMOVE(wi, sctp_nxt_addr);
1390 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1391 		asc->cnt++;
1392 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1393 	}
1394 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1395 	if (asc->cnt == 0) {
1396 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1397 	} else {
1398 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1399 		    sctp_asconf_iterator_stcb,
1400 		    NULL,	/* No ep end for boundall */
1401 		    SCTP_PCB_FLAGS_BOUNDALL,
1402 		    SCTP_PCB_ANY_FEATURES,
1403 		    SCTP_ASOC_ANY_STATE,
1404 		    (void *)asc, 0,
1405 		    sctp_asconf_iterator_end, NULL, 0);
1406 	}
1407 }
1408 
/*
 * NOTE(review): file-scope globals used only by the T3-retransmit case
 * in sctp_timeout_handler() below; being non-static and shared, they
 * are written without synchronization when multiple timers fire
 * concurrently — presumably debugging aids, verify before relying on
 * their values.
 */
int retcode = 0;
int cur_oerr = 0;
1411 
/*
 * Central callout handler for every SCTP timer type.  't' is the
 * sctp_timer embedded in the endpoint/tcb/net that armed it.  The
 * handler validates the timer (stale-pointer and type checks), takes a
 * reference on the inp and/or stcb it touches, locks the tcb, and then
 * dispatches on tmr->type.  Cases that destroy the tcb or inp jump to
 * out_decr/out_no_decr to avoid unlocking freed objects.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer check guards against a stale/freed timer firing */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	/* stopped_from values below are breadcrumbs showing how far we got */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		/* for iterator timers, tmr->ep actually holds the iterator */
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the teardown-related timer
		 * types are still allowed to run.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the tcb while we check its state */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while we were getting here; bail */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* lock the tcb; drop the temporary refcnt now that we hold it */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer for data on 'net' */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): after the FOREACH completes, lnet is
			 * NULL, so sctp_heartbeat_timer/sctp_timer_start are
			 * called with a NULL net here — presumably those
			 * callees handle NULL by selecting a net; confirm.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* shutdown took too long; abort the association */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* destroys the tcb; must not touch stcb afterwards */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* destroys the inp; must not touch inp afterwards */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1877 
1878 void
1879 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1880     struct sctp_nets *net)
1881 {
1882 	int to_ticks;
1883 	struct sctp_timer *tmr;
1884 
1885 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1886 		return;
1887 
1888 	to_ticks = 0;
1889 
1890 	tmr = NULL;
1891 	if (stcb) {
1892 		SCTP_TCB_LOCK_ASSERT(stcb);
1893 	}
1894 	switch (t_type) {
1895 	case SCTP_TIMER_TYPE_ZERO_COPY:
1896 		tmr = &inp->sctp_ep.zero_copy_timer;
1897 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1898 		break;
1899 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1900 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1901 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1902 		break;
1903 	case SCTP_TIMER_TYPE_ADDR_WQ:
1904 		/* Only 1 tick away :-) */
1905 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1906 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1907 		break;
1908 	case SCTP_TIMER_TYPE_ITERATOR:
1909 		{
1910 			struct sctp_iterator *it;
1911 
1912 			it = (struct sctp_iterator *)inp;
1913 			tmr = &it->tmr;
1914 			to_ticks = SCTP_ITERATOR_TICKS;
1915 		}
1916 		break;
1917 	case SCTP_TIMER_TYPE_SEND:
1918 		/* Here we use the RTO timer */
1919 		{
1920 			int rto_val;
1921 
1922 			if ((stcb == NULL) || (net == NULL)) {
1923 				return;
1924 			}
1925 			tmr = &net->rxt_timer;
1926 			if (net->RTO == 0) {
1927 				rto_val = stcb->asoc.initial_rto;
1928 			} else {
1929 				rto_val = net->RTO;
1930 			}
1931 			to_ticks = MSEC_TO_TICKS(rto_val);
1932 		}
1933 		break;
1934 	case SCTP_TIMER_TYPE_INIT:
1935 		/*
1936 		 * Here we use the INIT timer default usually about 1
1937 		 * minute.
1938 		 */
1939 		if ((stcb == NULL) || (net == NULL)) {
1940 			return;
1941 		}
1942 		tmr = &net->rxt_timer;
1943 		if (net->RTO == 0) {
1944 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1945 		} else {
1946 			to_ticks = MSEC_TO_TICKS(net->RTO);
1947 		}
1948 		break;
1949 	case SCTP_TIMER_TYPE_RECV:
1950 		/*
1951 		 * Here we use the Delayed-Ack timer value from the inp
1952 		 * ususually about 200ms.
1953 		 */
1954 		if (stcb == NULL) {
1955 			return;
1956 		}
1957 		tmr = &stcb->asoc.dack_timer;
1958 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1959 		break;
1960 	case SCTP_TIMER_TYPE_SHUTDOWN:
1961 		/* Here we use the RTO of the destination. */
1962 		if ((stcb == NULL) || (net == NULL)) {
1963 			return;
1964 		}
1965 		if (net->RTO == 0) {
1966 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1967 		} else {
1968 			to_ticks = MSEC_TO_TICKS(net->RTO);
1969 		}
1970 		tmr = &net->rxt_timer;
1971 		break;
1972 	case SCTP_TIMER_TYPE_HEARTBEAT:
1973 		/*
1974 		 * the net is used here so that we can add in the RTO. Even
1975 		 * though we use a different timer. We also add the HB timer
1976 		 * PLUS a random jitter.
1977 		 */
1978 		if ((inp == NULL) || (stcb == NULL)) {
1979 			return;
1980 		} else {
1981 			uint32_t rndval;
1982 			uint8_t this_random;
1983 			int cnt_of_unconf = 0;
1984 			struct sctp_nets *lnet;
1985 
1986 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1987 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1988 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1989 					cnt_of_unconf++;
1990 				}
1991 			}
1992 			if (cnt_of_unconf) {
1993 				net = lnet = NULL;
1994 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1995 			}
1996 			if (stcb->asoc.hb_random_idx > 3) {
1997 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1998 				memcpy(stcb->asoc.hb_random_values, &rndval,
1999 				    sizeof(stcb->asoc.hb_random_values));
2000 				stcb->asoc.hb_random_idx = 0;
2001 			}
2002 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2003 			stcb->asoc.hb_random_idx++;
2004 			stcb->asoc.hb_ect_randombit = 0;
2005 			/*
2006 			 * this_random will be 0 - 256 ms RTO is in ms.
2007 			 */
2008 			if ((stcb->asoc.hb_is_disabled) &&
2009 			    (cnt_of_unconf == 0)) {
2010 				return;
2011 			}
2012 			if (net) {
2013 				int delay;
2014 
2015 				delay = stcb->asoc.heart_beat_delay;
2016 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2017 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2018 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2019 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2020 						delay = 0;
2021 					}
2022 				}
2023 				if (net->RTO == 0) {
2024 					/* Never been checked */
2025 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2026 				} else {
2027 					/* set rto_val to the ms */
2028 					to_ticks = delay + net->RTO + this_random;
2029 				}
2030 			} else {
2031 				if (cnt_of_unconf) {
2032 					to_ticks = this_random + stcb->asoc.initial_rto;
2033 				} else {
2034 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2035 				}
2036 			}
2037 			/*
2038 			 * Now we must convert the to_ticks that are now in
2039 			 * ms to ticks.
2040 			 */
2041 			to_ticks = MSEC_TO_TICKS(to_ticks);
2042 			tmr = &stcb->asoc.hb_timer;
2043 		}
2044 		break;
2045 	case SCTP_TIMER_TYPE_COOKIE:
2046 		/*
2047 		 * Here we can use the RTO timer from the network since one
2048 		 * RTT was compelete. If a retran happened then we will be
2049 		 * using the RTO initial value.
2050 		 */
2051 		if ((stcb == NULL) || (net == NULL)) {
2052 			return;
2053 		}
2054 		if (net->RTO == 0) {
2055 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2056 		} else {
2057 			to_ticks = MSEC_TO_TICKS(net->RTO);
2058 		}
2059 		tmr = &net->rxt_timer;
2060 		break;
2061 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2062 		/*
2063 		 * nothing needed but the endpoint here ususually about 60
2064 		 * minutes.
2065 		 */
2066 		if (inp == NULL) {
2067 			return;
2068 		}
2069 		tmr = &inp->sctp_ep.signature_change;
2070 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2071 		break;
2072 	case SCTP_TIMER_TYPE_ASOCKILL:
2073 		if (stcb == NULL) {
2074 			return;
2075 		}
2076 		tmr = &stcb->asoc.strreset_timer;
2077 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2078 		break;
2079 	case SCTP_TIMER_TYPE_INPKILL:
2080 		/*
2081 		 * The inp is setup to die. We re-use the signature_chage
2082 		 * timer since that has stopped and we are in the GONE
2083 		 * state.
2084 		 */
2085 		if (inp == NULL) {
2086 			return;
2087 		}
2088 		tmr = &inp->sctp_ep.signature_change;
2089 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2090 		break;
2091 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2092 		/*
2093 		 * Here we use the value found in the EP for PMTU ususually
2094 		 * about 10 minutes.
2095 		 */
2096 		if ((stcb == NULL) || (inp == NULL)) {
2097 			return;
2098 		}
2099 		if (net == NULL) {
2100 			return;
2101 		}
2102 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2103 		tmr = &net->pmtu_timer;
2104 		break;
2105 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2106 		/* Here we use the RTO of the destination */
2107 		if ((stcb == NULL) || (net == NULL)) {
2108 			return;
2109 		}
2110 		if (net->RTO == 0) {
2111 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2112 		} else {
2113 			to_ticks = MSEC_TO_TICKS(net->RTO);
2114 		}
2115 		tmr = &net->rxt_timer;
2116 		break;
2117 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2118 		/*
2119 		 * Here we use the endpoints shutdown guard timer usually
2120 		 * about 3 minutes.
2121 		 */
2122 		if ((inp == NULL) || (stcb == NULL)) {
2123 			return;
2124 		}
2125 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2126 		tmr = &stcb->asoc.shut_guard_timer;
2127 		break;
2128 	case SCTP_TIMER_TYPE_STRRESET:
2129 		/*
2130 		 * Here the timer comes from the stcb but its value is from
2131 		 * the net's RTO.
2132 		 */
2133 		if ((stcb == NULL) || (net == NULL)) {
2134 			return;
2135 		}
2136 		if (net->RTO == 0) {
2137 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2138 		} else {
2139 			to_ticks = MSEC_TO_TICKS(net->RTO);
2140 		}
2141 		tmr = &stcb->asoc.strreset_timer;
2142 		break;
2143 
2144 	case SCTP_TIMER_TYPE_EARLYFR:
2145 		{
2146 			unsigned int msec;
2147 
2148 			if ((stcb == NULL) || (net == NULL)) {
2149 				return;
2150 			}
2151 			if (net->flight_size > net->cwnd) {
2152 				/* no need to start */
2153 				return;
2154 			}
2155 			SCTP_STAT_INCR(sctps_earlyfrstart);
2156 			if (net->lastsa == 0) {
2157 				/* Hmm no rtt estimate yet? */
2158 				msec = stcb->asoc.initial_rto >> 2;
2159 			} else {
2160 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2161 			}
2162 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2163 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2164 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2165 					msec = SCTP_MINFR_MSEC_FLOOR;
2166 				}
2167 			}
2168 			to_ticks = MSEC_TO_TICKS(msec);
2169 			tmr = &net->fr_timer;
2170 		}
2171 		break;
2172 	case SCTP_TIMER_TYPE_ASCONF:
2173 		/*
2174 		 * Here the timer comes from the stcb but its value is from
2175 		 * the net's RTO.
2176 		 */
2177 		if ((stcb == NULL) || (net == NULL)) {
2178 			return;
2179 		}
2180 		if (net->RTO == 0) {
2181 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2182 		} else {
2183 			to_ticks = MSEC_TO_TICKS(net->RTO);
2184 		}
2185 		tmr = &stcb->asoc.asconf_timer;
2186 		break;
2187 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2188 		if ((stcb == NULL) || (net != NULL)) {
2189 			return;
2190 		}
2191 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2192 		tmr = &stcb->asoc.delete_prim_timer;
2193 		break;
2194 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2195 		if (stcb == NULL) {
2196 			return;
2197 		}
2198 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2199 			/*
2200 			 * Really an error since stcb is NOT set to
2201 			 * autoclose
2202 			 */
2203 			return;
2204 		}
2205 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2206 		tmr = &stcb->asoc.autoclose_timer;
2207 		break;
2208 	default:
2209 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2210 		    __FUNCTION__, t_type);
2211 		return;
2212 		break;
2213 	};
2214 	if ((to_ticks <= 0) || (tmr == NULL)) {
2215 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2216 		    __FUNCTION__, t_type, to_ticks, tmr);
2217 		return;
2218 	}
2219 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2220 		/*
2221 		 * we do NOT allow you to have it already running. if it is
2222 		 * we leave the current one up unchanged
2223 		 */
2224 		return;
2225 	}
2226 	/* At this point we can proceed */
2227 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2228 		stcb->asoc.num_send_timers_up++;
2229 	}
2230 	tmr->stopped_from = 0;
2231 	tmr->type = t_type;
2232 	tmr->ep = (void *)inp;
2233 	tmr->tcb = (void *)stcb;
2234 	tmr->net = (void *)net;
2235 	tmr->self = (void *)tmr;
2236 	tmr->ticks = sctp_get_tick_count();
2237 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2238 	return;
2239 }
2240 
2241 void
2242 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2243     struct sctp_nets *net, uint32_t from)
2244 {
2245 	struct sctp_timer *tmr;
2246 
2247 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2248 	    (inp == NULL))
2249 		return;
2250 
2251 	tmr = NULL;
2252 	if (stcb) {
2253 		SCTP_TCB_LOCK_ASSERT(stcb);
2254 	}
2255 	switch (t_type) {
2256 	case SCTP_TIMER_TYPE_ZERO_COPY:
2257 		tmr = &inp->sctp_ep.zero_copy_timer;
2258 		break;
2259 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2260 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2261 		break;
2262 	case SCTP_TIMER_TYPE_ADDR_WQ:
2263 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2264 		break;
2265 	case SCTP_TIMER_TYPE_EARLYFR:
2266 		if ((stcb == NULL) || (net == NULL)) {
2267 			return;
2268 		}
2269 		tmr = &net->fr_timer;
2270 		SCTP_STAT_INCR(sctps_earlyfrstop);
2271 		break;
2272 	case SCTP_TIMER_TYPE_ITERATOR:
2273 		{
2274 			struct sctp_iterator *it;
2275 
2276 			it = (struct sctp_iterator *)inp;
2277 			tmr = &it->tmr;
2278 		}
2279 		break;
2280 	case SCTP_TIMER_TYPE_SEND:
2281 		if ((stcb == NULL) || (net == NULL)) {
2282 			return;
2283 		}
2284 		tmr = &net->rxt_timer;
2285 		break;
2286 	case SCTP_TIMER_TYPE_INIT:
2287 		if ((stcb == NULL) || (net == NULL)) {
2288 			return;
2289 		}
2290 		tmr = &net->rxt_timer;
2291 		break;
2292 	case SCTP_TIMER_TYPE_RECV:
2293 		if (stcb == NULL) {
2294 			return;
2295 		}
2296 		tmr = &stcb->asoc.dack_timer;
2297 		break;
2298 	case SCTP_TIMER_TYPE_SHUTDOWN:
2299 		if ((stcb == NULL) || (net == NULL)) {
2300 			return;
2301 		}
2302 		tmr = &net->rxt_timer;
2303 		break;
2304 	case SCTP_TIMER_TYPE_HEARTBEAT:
2305 		if (stcb == NULL) {
2306 			return;
2307 		}
2308 		tmr = &stcb->asoc.hb_timer;
2309 		break;
2310 	case SCTP_TIMER_TYPE_COOKIE:
2311 		if ((stcb == NULL) || (net == NULL)) {
2312 			return;
2313 		}
2314 		tmr = &net->rxt_timer;
2315 		break;
2316 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2317 		/* nothing needed but the endpoint here */
2318 		tmr = &inp->sctp_ep.signature_change;
2319 		/*
2320 		 * We re-use the newcookie timer for the INP kill timer. We
2321 		 * must assure that we do not kill it by accident.
2322 		 */
2323 		break;
2324 	case SCTP_TIMER_TYPE_ASOCKILL:
2325 		/*
2326 		 * Stop the asoc kill timer.
2327 		 */
2328 		if (stcb == NULL) {
2329 			return;
2330 		}
2331 		tmr = &stcb->asoc.strreset_timer;
2332 		break;
2333 
2334 	case SCTP_TIMER_TYPE_INPKILL:
2335 		/*
2336 		 * The inp is setup to die. We re-use the signature_chage
2337 		 * timer since that has stopped and we are in the GONE
2338 		 * state.
2339 		 */
2340 		tmr = &inp->sctp_ep.signature_change;
2341 		break;
2342 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2343 		if ((stcb == NULL) || (net == NULL)) {
2344 			return;
2345 		}
2346 		tmr = &net->pmtu_timer;
2347 		break;
2348 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2349 		if ((stcb == NULL) || (net == NULL)) {
2350 			return;
2351 		}
2352 		tmr = &net->rxt_timer;
2353 		break;
2354 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2355 		if (stcb == NULL) {
2356 			return;
2357 		}
2358 		tmr = &stcb->asoc.shut_guard_timer;
2359 		break;
2360 	case SCTP_TIMER_TYPE_STRRESET:
2361 		if (stcb == NULL) {
2362 			return;
2363 		}
2364 		tmr = &stcb->asoc.strreset_timer;
2365 		break;
2366 	case SCTP_TIMER_TYPE_ASCONF:
2367 		if (stcb == NULL) {
2368 			return;
2369 		}
2370 		tmr = &stcb->asoc.asconf_timer;
2371 		break;
2372 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2373 		if (stcb == NULL) {
2374 			return;
2375 		}
2376 		tmr = &stcb->asoc.delete_prim_timer;
2377 		break;
2378 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2379 		if (stcb == NULL) {
2380 			return;
2381 		}
2382 		tmr = &stcb->asoc.autoclose_timer;
2383 		break;
2384 	default:
2385 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2386 		    __FUNCTION__, t_type);
2387 		break;
2388 	};
2389 	if (tmr == NULL) {
2390 		return;
2391 	}
2392 	if ((tmr->type != t_type) && tmr->type) {
2393 		/*
2394 		 * Ok we have a timer that is under joint use. Cookie timer
2395 		 * per chance with the SEND timer. We therefore are NOT
2396 		 * running the timer that the caller wants stopped.  So just
2397 		 * return.
2398 		 */
2399 		return;
2400 	}
2401 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2402 		stcb->asoc.num_send_timers_up--;
2403 		if (stcb->asoc.num_send_timers_up < 0) {
2404 			stcb->asoc.num_send_timers_up = 0;
2405 		}
2406 	}
2407 	tmr->self = NULL;
2408 	tmr->stopped_from = from;
2409 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2410 	return;
2411 }
2412 
2413 #ifdef SCTP_USE_ADLER32
2414 static uint32_t
2415 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2416 {
2417 	uint32_t s1 = adler & 0xffff;
2418 	uint32_t s2 = (adler >> 16) & 0xffff;
2419 	int n;
2420 
2421 	for (n = 0; n < len; n++, buf++) {
2422 		/* s1 = (s1 + buf[n]) % BASE */
2423 		/* first we add */
2424 		s1 = (s1 + *buf);
2425 		/*
2426 		 * now if we need to, we do a mod by subtracting. It seems a
2427 		 * bit faster since I really will only ever do one subtract
2428 		 * at the MOST, since buf[n] is a max of 255.
2429 		 */
2430 		if (s1 >= SCTP_ADLER32_BASE) {
2431 			s1 -= SCTP_ADLER32_BASE;
2432 		}
2433 		/* s2 = (s2 + s1) % BASE */
2434 		/* first we add */
2435 		s2 = (s2 + s1);
2436 		/*
2437 		 * again, it is more efficent (it seems) to subtract since
2438 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2439 		 * worse case. This would then be (2 * BASE) - 2, which will
2440 		 * still only do one subtract. On Intel this is much better
2441 		 * to do this way and avoid the divide. Have not -pg'd on
2442 		 * sparc.
2443 		 */
2444 		if (s2 >= SCTP_ADLER32_BASE) {
2445 			s2 -= SCTP_ADLER32_BASE;
2446 		}
2447 	}
2448 	/* Return the adler32 of the bytes buf[0..len-1] */
2449 	return ((s2 << 16) + s1);
2450 }
2451 
2452 #endif
2453 
2454 
2455 uint32_t
2456 sctp_calculate_len(struct mbuf *m)
2457 {
2458 	uint32_t tlen = 0;
2459 	struct mbuf *at;
2460 
2461 	at = m;
2462 	while (at) {
2463 		tlen += SCTP_BUF_LEN(at);
2464 		at = SCTP_BUF_NEXT(at);
2465 	}
2466 	return (tlen);
2467 }
2468 
2469 #if defined(SCTP_WITH_NO_CSUM)
2470 
2471 uint32_t
2472 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2473 {
2474 	/*
2475 	 * given a mbuf chain with a packetheader offset by 'offset'
2476 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2477 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2478 	 * has a side bonus as it will calculate the total length of the
2479 	 * mbuf chain. Note: if offset is greater than the total mbuf
2480 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2481 	 */
2482 	if (pktlen == NULL)
2483 		return (0);
2484 	*pktlen = sctp_calculate_len(m);
2485 	return (0);
2486 }
2487 
2488 #elif defined(SCTP_USE_INCHKSUM)
2489 
2490 #include <machine/in_cksum.h>
2491 
2492 uint32_t
2493 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2494 {
2495 	/*
2496 	 * given a mbuf chain with a packetheader offset by 'offset'
2497 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2498 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2499 	 * has a side bonus as it will calculate the total length of the
2500 	 * mbuf chain. Note: if offset is greater than the total mbuf
2501 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2502 	 */
2503 	int32_t tlen = 0;
2504 	struct mbuf *at;
2505 	uint32_t the_sum, retsum;
2506 
2507 	at = m;
2508 	while (at) {
2509 		tlen += SCTP_BUF_LEN(at);
2510 		at = SCTP_BUF_NEXT(at);
2511 	}
2512 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2513 	if (pktlen != NULL)
2514 		*pktlen = (tlen - offset);
2515 	retsum = htons(the_sum);
2516 	return (the_sum);
2517 }
2518 
2519 #else
2520 
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

#ifdef SCTP_USE_ADLER32
	/* Adler-32 checksums start from 1. */
	uint32_t base = 1L;

#else
	/* CRC-32c starts from all-ones; finalized below. */
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/*
	 * Walk the rest of the chain, folding each mbuf's data (past the
	 * remaining 'offset' in the first one) into the checksum.
	 */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/*
		 * Consume any leftover offset (the loop above stops when
		 * offset <= SCTP_BUF_LEN(at), so offset may equal the mbuf
		 * length and must be carried into the next mbuf).
		 */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2591 
2592 
2593 #endif
2594 
2595 void
2596 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2597     struct sctp_association *asoc, uint32_t mtu)
2598 {
2599 	/*
2600 	 * Reset the P-MTU size on this association, this involves changing
2601 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2602 	 * allow the DF flag to be cleared.
2603 	 */
2604 	struct sctp_tmit_chunk *chk;
2605 	unsigned int eff_mtu, ovh;
2606 
2607 #ifdef SCTP_PRINT_FOR_B_AND_M
2608 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2609 	    inp, asoc, mtu);
2610 #endif
2611 	asoc->smallest_mtu = mtu;
2612 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2613 		ovh = SCTP_MIN_OVERHEAD;
2614 	} else {
2615 		ovh = SCTP_MIN_V4_OVERHEAD;
2616 	}
2617 	eff_mtu = mtu - ovh;
2618 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2619 
2620 		if (chk->send_size > eff_mtu) {
2621 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2622 		}
2623 	}
2624 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2625 		if (chk->send_size > eff_mtu) {
2626 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2627 		}
2628 	}
2629 }
2630 
2631 
2632 /*
2633  * given an association and starting time of the current RTT period return
2634  * RTO in number of msecs net should point to the current network
2635  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 *
	 * Updates net->lastsa / net->lastsv (Van Jacobson's integer
	 * SRTT/RTTVAR) as a side effect, and bounds the result by the
	 * association's minrto/maxrto.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/*
	 * 'safe' tells us whether 'told' is aligned well enough to be
	 * used in place, or must be copied to a local first.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	/* calc_time ends up as elapsed milliseconds since *old */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		/* Subsequent measurement: exponentially smooth SRTT/RTTVAR. */
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			/* |error| feeds the variance estimator below */
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* keep the variance non-zero so RTO never collapses */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (integer form) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch satellite-network mode once; unlatch only once after that. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2761 
2762 /*
2763  * return a pointer to a contiguous piece of data from the given mbuf chain
2764  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2765  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2766  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2767  */
2768 caddr_t
2769 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2770 {
2771 	uint32_t count;
2772 	uint8_t *ptr;
2773 
2774 	ptr = in_ptr;
2775 	if ((off < 0) || (len <= 0))
2776 		return (NULL);
2777 
2778 	/* find the desired start location */
2779 	while ((m != NULL) && (off > 0)) {
2780 		if (off < SCTP_BUF_LEN(m))
2781 			break;
2782 		off -= SCTP_BUF_LEN(m);
2783 		m = SCTP_BUF_NEXT(m);
2784 	}
2785 	if (m == NULL)
2786 		return (NULL);
2787 
2788 	/* is the current mbuf large enough (eg. contiguous)? */
2789 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2790 		return (mtod(m, caddr_t)+off);
2791 	} else {
2792 		/* else, it spans more than one mbuf, so save a temp copy... */
2793 		while ((m != NULL) && (len > 0)) {
2794 			count = min(SCTP_BUF_LEN(m) - off, len);
2795 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2796 			len -= count;
2797 			ptr += count;
2798 			off = 0;
2799 			m = SCTP_BUF_NEXT(m);
2800 		}
2801 		if ((m == NULL) && (len > 0))
2802 			return (NULL);
2803 		else
2804 			return ((caddr_t)in_ptr);
2805 	}
2806 }
2807 
2808 
2809 
2810 struct sctp_paramhdr *
2811 sctp_get_next_param(struct mbuf *m,
2812     int offset,
2813     struct sctp_paramhdr *pull,
2814     int pull_limit)
2815 {
2816 	/* This just provides a typed signature to Peter's Pull routine */
2817 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2818 	    (uint8_t *) pull));
2819 }
2820 
2821 
2822 int
2823 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2824 {
2825 	/*
2826 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2827 	 * padlen is > 3 this routine will fail.
2828 	 */
2829 	uint8_t *dp;
2830 	int i;
2831 
2832 	if (padlen > 3) {
2833 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2834 		return (ENOBUFS);
2835 	}
2836 	if (padlen <= M_TRAILINGSPACE(m)) {
2837 		/*
2838 		 * The easy way. We hope the majority of the time we hit
2839 		 * here :)
2840 		 */
2841 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2842 		SCTP_BUF_LEN(m) += padlen;
2843 	} else {
2844 		/* Hard way we must grow the mbuf */
2845 		struct mbuf *tmp;
2846 
2847 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2848 		if (tmp == NULL) {
2849 			/* Out of space GAK! we are in big trouble. */
2850 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2851 			return (ENOSPC);
2852 		}
2853 		/* setup and insert in middle */
2854 		SCTP_BUF_LEN(tmp) = padlen;
2855 		SCTP_BUF_NEXT(tmp) = NULL;
2856 		SCTP_BUF_NEXT(m) = tmp;
2857 		dp = mtod(tmp, uint8_t *);
2858 	}
2859 	/* zero out the pad */
2860 	for (i = 0; i < padlen; i++) {
2861 		*dp = 0;
2862 		dp++;
2863 	}
2864 	return (0);
2865 }
2866 
2867 int
2868 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2869 {
2870 	/* find the last mbuf in chain and pad it */
2871 	struct mbuf *m_at;
2872 
2873 	m_at = m;
2874 	if (last_mbuf) {
2875 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2876 	} else {
2877 		while (m_at) {
2878 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2879 				return (sctp_add_pad_tombuf(m_at, padval));
2880 			}
2881 			m_at = SCTP_BUF_NEXT(m_at);
2882 		}
2883 	}
2884 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2885 	return (EFAULT);
2886 }
2887 
/* Debug counter: bumped each time sctp_notify_assoc_change() wakes sleepers. */
int sctp_asoc_change_wake = 0;
2889 
/*
 * Build an SCTP_ASSOC_CHANGE notification for 'event' and queue it on
 * the socket receive queue.  For TCP-model (or connected one-to-many)
 * sockets, a COMM_LOST/CANT_STR_ASSOC event additionally sets so_error
 * (ECONNREFUSED while still in COOKIE_WAIT, else ECONNRESET) and wakes
 * any read/write sleepers.  'so_locked' tells us whether the caller
 * already holds the socket lock (only meaningful on Apple or when
 * SCTP_SO_LOCK_TESTING is on).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * The socket lock must be taken without holding the TCB
		 * lock: hold a refcount, drop the TCB lock, take the
		 * socket lock, then re-take the TCB lock.  Re-check the
		 * CLOSED_SOCKET state in case it changed while unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification body. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap it in a read-queue entry and hand it to the socket. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Same lock-ordering dance as above. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3019 
3020 static void
3021 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3022     struct sockaddr *sa, uint32_t error)
3023 {
3024 	struct mbuf *m_notify;
3025 	struct sctp_paddr_change *spc;
3026 	struct sctp_queued_to_read *control;
3027 
3028 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
3029 		/* event not enabled */
3030 		return;
3031 
3032 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
3033 	if (m_notify == NULL)
3034 		return;
3035 	SCTP_BUF_LEN(m_notify) = 0;
3036 	spc = mtod(m_notify, struct sctp_paddr_change *);
3037 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3038 	spc->spc_flags = 0;
3039 	spc->spc_length = sizeof(struct sctp_paddr_change);
3040 	switch (sa->sa_family) {
3041 	case AF_INET:
3042 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3043 		break;
3044 #ifdef INET6
3045 	case AF_INET6:
3046 		{
3047 			struct sockaddr_in6 *sin6;
3048 
3049 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3050 
3051 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3052 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3053 				if (sin6->sin6_scope_id == 0) {
3054 					/* recover scope_id for user */
3055 					(void)sa6_recoverscope(sin6);
3056 				} else {
3057 					/* clear embedded scope_id for user */
3058 					in6_clearscope(&sin6->sin6_addr);
3059 				}
3060 			}
3061 			break;
3062 		}
3063 #endif
3064 	default:
3065 		/* TSNH */
3066 		break;
3067 	}
3068 	spc->spc_state = state;
3069 	spc->spc_error = error;
3070 	spc->spc_assoc_id = sctp_get_associd(stcb);
3071 
3072 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3073 	SCTP_BUF_NEXT(m_notify) = NULL;
3074 
3075 	/* append to socket */
3076 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3077 	    0, 0, 0, 0, 0, 0,
3078 	    m_notify);
3079 	if (control == NULL) {
3080 		/* no memory */
3081 		sctp_m_freem(m_notify);
3082 		return;
3083 	}
3084 	control->length = SCTP_BUF_LEN(m_notify);
3085 	control->spec_flags = M_NOTIFICATION;
3086 	/* not that we need this */
3087 	control->tail_mbuf = m_notify;
3088 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3089 	    control,
3090 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3091 }
3092 
3093 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that was (or was not)
 * sent but will never be delivered.  The user data attached to 'chk' is
 * stolen (re-parented onto the notification mbuf chain) so the application
 * gets its original payload back with the notification header in front.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify, *tt;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length covers the notification header plus the user
	 * payload; the DATA chunk header is subtracted since it is
	 * stripped below and never shown to the application.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	/* Take off the chunk header */
	m_adj(chk->data, sizeof(struct sctp_data_chunk));

	/*
	 * trim out any 0 len mbufs
	 *
	 * NOTE(review): this loop assumes at least one mbuf with data
	 * remains after m_adj(); if the whole chain were consumed,
	 * SCTP_BUF_LEN(NULL) would be dereferenced — confirm callers
	 * never pass a payload-free chunk here.
	 */
	while (SCTP_BUF_LEN(chk->data) == 0) {
		tt = chk->data;
		chk->data = SCTP_BUF_NEXT(tt);
		SCTP_BUF_NEXT(tt) = NULL;
		sctp_m_freem(tt);
	}

	/* Chain the stolen user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* drops the whole chain, including the stolen user data */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3175 
3176 
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue entry that was
 * never turned into a chunk (message still pending on the stream out-queue).
 * As in sctp_notify_send_failed(), the user data in 'sp' is stolen and
 * chained behind the notification header.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* header plus the still-queued user data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already chunked off and sent */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* drops the stolen user data along with the header */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3249 
3250 
3251 
3252 static void
3253 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3254     uint32_t error)
3255 {
3256 	struct mbuf *m_notify;
3257 	struct sctp_adaptation_event *sai;
3258 	struct sctp_queued_to_read *control;
3259 
3260 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3261 		/* event not enabled */
3262 		return;
3263 
3264 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3265 	if (m_notify == NULL)
3266 		/* no space left */
3267 		return;
3268 	SCTP_BUF_LEN(m_notify) = 0;
3269 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3270 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3271 	sai->sai_flags = 0;
3272 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3273 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3274 	sai->sai_assoc_id = sctp_get_associd(stcb);
3275 
3276 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3277 	SCTP_BUF_NEXT(m_notify) = NULL;
3278 
3279 	/* append to socket */
3280 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3281 	    0, 0, 0, 0, 0, 0,
3282 	    m_notify);
3283 	if (control == NULL) {
3284 		/* no memory */
3285 		sctp_m_freem(m_notify);
3286 		return;
3287 	}
3288 	control->length = SCTP_BUF_LEN(m_notify);
3289 	control->spec_flags = M_NOTIFICATION;
3290 	/* not that we need this */
3291 	control->tail_mbuf = m_notify;
3292 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3293 	    control,
3294 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3295 }
3296 
3297 /* This always must be called with the read-queue LOCKED in the INP */
3298 void
3299 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3300     int nolock, uint32_t val)
3301 {
3302 	struct mbuf *m_notify;
3303 	struct sctp_pdapi_event *pdapi;
3304 	struct sctp_queued_to_read *control;
3305 	struct sockbuf *sb;
3306 
3307 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3308 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3309 		/* event not enabled */
3310 		return;
3311 
3312 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3313 	if (m_notify == NULL)
3314 		/* no space left */
3315 		return;
3316 	SCTP_BUF_LEN(m_notify) = 0;
3317 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3318 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3319 	pdapi->pdapi_flags = 0;
3320 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3321 	pdapi->pdapi_indication = error;
3322 	pdapi->pdapi_stream = (val >> 16);
3323 	pdapi->pdapi_seq = (val & 0x0000ffff);
3324 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3325 
3326 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3327 	SCTP_BUF_NEXT(m_notify) = NULL;
3328 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3329 	    0, 0, 0, 0, 0, 0,
3330 	    m_notify);
3331 	if (control == NULL) {
3332 		/* no memory */
3333 		sctp_m_freem(m_notify);
3334 		return;
3335 	}
3336 	control->spec_flags = M_NOTIFICATION;
3337 	control->length = SCTP_BUF_LEN(m_notify);
3338 	/* not that we need this */
3339 	control->tail_mbuf = m_notify;
3340 	control->held_length = 0;
3341 	control->length = 0;
3342 	if (nolock == 0) {
3343 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3344 	}
3345 	sb = &stcb->sctp_socket->so_rcv;
3346 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3347 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3348 	}
3349 	sctp_sballoc(stcb, sb, m_notify);
3350 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3351 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3352 	}
3353 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3354 	control->end_added = 1;
3355 	if (stcb->asoc.control_pdapi)
3356 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3357 	else {
3358 		/* we really should not see this case */
3359 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3360 	}
3361 	if (nolock == 0) {
3362 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3363 	}
3364 	if (stcb->sctp_ep && stcb->sctp_socket) {
3365 		/* This should always be the case */
3366 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3367 	}
3368 }
3369 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For TCP-model (and
 * connected one-to-many) sockets, also mark the socket unable to send or
 * receive and wake up any sleepers before queueing the event.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Socket lock ordering: drop the TCB lock while taking the
		 * socket lock, holding a refcount so the TCB cannot go
		 * away; re-check the closed state afterwards.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
		socantrcvmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3441 
3442 static void
3443 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3444     int number_entries, uint16_t * list, int flag)
3445 {
3446 	struct mbuf *m_notify;
3447 	struct sctp_queued_to_read *control;
3448 	struct sctp_stream_reset_event *strreset;
3449 	int len;
3450 
3451 	if (stcb == NULL) {
3452 		return;
3453 	}
3454 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
3455 		/* event not enabled */
3456 		return;
3457 
3458 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3459 	if (m_notify == NULL)
3460 		/* no space left */
3461 		return;
3462 	SCTP_BUF_LEN(m_notify) = 0;
3463 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3464 	if (len > M_TRAILINGSPACE(m_notify)) {
3465 		/* never enough room */
3466 		sctp_m_freem(m_notify);
3467 		return;
3468 	}
3469 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3470 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3471 	if (number_entries == 0) {
3472 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3473 	} else {
3474 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3475 	}
3476 	strreset->strreset_length = len;
3477 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3478 	if (number_entries) {
3479 		int i;
3480 
3481 		for (i = 0; i < number_entries; i++) {
3482 			strreset->strreset_list[i] = ntohs(list[i]);
3483 		}
3484 	}
3485 	SCTP_BUF_LEN(m_notify) = len;
3486 	SCTP_BUF_NEXT(m_notify) = NULL;
3487 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3488 		/* no space */
3489 		sctp_m_freem(m_notify);
3490 		return;
3491 	}
3492 	/* append to socket */
3493 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3494 	    0, 0, 0, 0, 0, 0,
3495 	    m_notify);
3496 	if (control == NULL) {
3497 		/* no memory */
3498 		sctp_m_freem(m_notify);
3499 		return;
3500 	}
3501 	control->spec_flags = M_NOTIFICATION;
3502 	control->length = SCTP_BUF_LEN(m_notify);
3503 	/* not that we need this */
3504 	control->tail_mbuf = m_notify;
3505 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3506 	    control,
3507 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3508 }
3509 
3510 
3511 void
3512 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3513     uint32_t error, void *data, int so_locked
3514 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3515     SCTP_UNUSED
3516 #endif
3517 )
3518 {
3519 	if (stcb == NULL) {
3520 		/* unlikely but */
3521 		return;
3522 	}
3523 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3524 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3525 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3526 	    ) {
3527 		/* No notifications up when we are in a no socket state */
3528 		return;
3529 	}
3530 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3531 		/* Can't send up to a closed socket any notifications */
3532 		return;
3533 	}
3534 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3535 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3536 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3537 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3538 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3539 			/* Don't report these in front states */
3540 			return;
3541 		}
3542 	}
3543 	switch (notification) {
3544 	case SCTP_NOTIFY_ASSOC_UP:
3545 		if (stcb->asoc.assoc_up_sent == 0) {
3546 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3547 			stcb->asoc.assoc_up_sent = 1;
3548 		}
3549 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3550 			sctp_notify_adaptation_layer(stcb, error);
3551 		}
3552 		break;
3553 	case SCTP_NOTIFY_ASSOC_DOWN:
3554 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3555 		break;
3556 	case SCTP_NOTIFY_INTERFACE_DOWN:
3557 		{
3558 			struct sctp_nets *net;
3559 
3560 			net = (struct sctp_nets *)data;
3561 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3562 			    (struct sockaddr *)&net->ro._l_addr, error);
3563 			break;
3564 		}
3565 	case SCTP_NOTIFY_INTERFACE_UP:
3566 		{
3567 			struct sctp_nets *net;
3568 
3569 			net = (struct sctp_nets *)data;
3570 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3571 			    (struct sockaddr *)&net->ro._l_addr, error);
3572 			break;
3573 		}
3574 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3575 		{
3576 			struct sctp_nets *net;
3577 
3578 			net = (struct sctp_nets *)data;
3579 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3580 			    (struct sockaddr *)&net->ro._l_addr, error);
3581 			break;
3582 		}
3583 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3584 		sctp_notify_send_failed2(stcb, error,
3585 		    (struct sctp_stream_queue_pending *)data, so_locked);
3586 		break;
3587 	case SCTP_NOTIFY_DG_FAIL:
3588 		sctp_notify_send_failed(stcb, error,
3589 		    (struct sctp_tmit_chunk *)data, so_locked);
3590 		break;
3591 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3592 		{
3593 			uint32_t val;
3594 
3595 			val = *((uint32_t *) data);
3596 
3597 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3598 		}
3599 		break;
3600 	case SCTP_NOTIFY_STRDATA_ERR:
3601 		break;
3602 	case SCTP_NOTIFY_ASSOC_ABORTED:
3603 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3604 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3605 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3606 		} else {
3607 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3608 		}
3609 		break;
3610 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3611 		break;
3612 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3613 		break;
3614 	case SCTP_NOTIFY_ASSOC_RESTART:
3615 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3616 		break;
3617 	case SCTP_NOTIFY_HB_RESP:
3618 		break;
3619 	case SCTP_NOTIFY_STR_RESET_SEND:
3620 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3621 		break;
3622 	case SCTP_NOTIFY_STR_RESET_RECV:
3623 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3624 		break;
3625 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3626 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3627 		break;
3628 
3629 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3630 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3631 		break;
3632 
3633 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3634 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3635 		    error);
3636 		break;
3637 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3638 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3639 		    error);
3640 		break;
3641 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3642 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3643 		    error);
3644 		break;
3645 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3646 		break;
3647 	case SCTP_NOTIFY_ASCONF_FAILED:
3648 		break;
3649 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3650 		sctp_notify_shutdown_event(stcb);
3651 		break;
3652 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3653 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3654 		    (uint16_t) (uintptr_t) data);
3655 		break;
3656 #if 0
3657 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3658 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3659 		    error, (uint16_t) (uintptr_t) data);
3660 		break;
3661 #endif				/* not yet? remove? */
3662 
3663 
3664 	default:
3665 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3666 		    __FUNCTION__, notification, notification);
3667 		break;
3668 	}			/* end switch */
3669 }
3670 
3671 void
3672 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3673 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3674     SCTP_UNUSED
3675 #endif
3676 )
3677 {
3678 	struct sctp_association *asoc;
3679 	struct sctp_stream_out *outs;
3680 	struct sctp_tmit_chunk *chk;
3681 	struct sctp_stream_queue_pending *sp;
3682 	int i;
3683 
3684 	asoc = &stcb->asoc;
3685 
3686 	if (stcb == NULL) {
3687 		return;
3688 	}
3689 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3690 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3691 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3692 		return;
3693 	}
3694 	/* now through all the gunk freeing chunks */
3695 	if (holds_lock == 0) {
3696 		SCTP_TCB_SEND_LOCK(stcb);
3697 	}
3698 	/* sent queue SHOULD be empty */
3699 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3700 		chk = TAILQ_FIRST(&asoc->sent_queue);
3701 		while (chk) {
3702 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3703 			asoc->sent_queue_cnt--;
3704 			if (chk->data) {
3705 				/*
3706 				 * trim off the sctp chunk header(it should
3707 				 * be there)
3708 				 */
3709 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3710 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3711 					sctp_mbuf_crush(chk->data);
3712 					chk->send_size -= sizeof(struct sctp_data_chunk);
3713 				}
3714 			}
3715 			sctp_free_bufspace(stcb, asoc, chk, 1);
3716 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3717 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3718 			if (chk->data) {
3719 				sctp_m_freem(chk->data);
3720 				chk->data = NULL;
3721 			}
3722 			sctp_free_a_chunk(stcb, chk);
3723 			/* sa_ignore FREED_MEMORY */
3724 			chk = TAILQ_FIRST(&asoc->sent_queue);
3725 		}
3726 	}
3727 	/* pending send queue SHOULD be empty */
3728 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3729 		chk = TAILQ_FIRST(&asoc->send_queue);
3730 		while (chk) {
3731 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3732 			asoc->send_queue_cnt--;
3733 			if (chk->data) {
3734 				/*
3735 				 * trim off the sctp chunk header(it should
3736 				 * be there)
3737 				 */
3738 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3739 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3740 					sctp_mbuf_crush(chk->data);
3741 					chk->send_size -= sizeof(struct sctp_data_chunk);
3742 				}
3743 			}
3744 			sctp_free_bufspace(stcb, asoc, chk, 1);
3745 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3746 			if (chk->data) {
3747 				sctp_m_freem(chk->data);
3748 				chk->data = NULL;
3749 			}
3750 			sctp_free_a_chunk(stcb, chk);
3751 			/* sa_ignore FREED_MEMORY */
3752 			chk = TAILQ_FIRST(&asoc->send_queue);
3753 		}
3754 	}
3755 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3756 		/* For each stream */
3757 		outs = &stcb->asoc.strmout[i];
3758 		/* clean up any sends there */
3759 		stcb->asoc.locked_on_sending = NULL;
3760 		sp = TAILQ_FIRST(&outs->outqueue);
3761 		while (sp) {
3762 			stcb->asoc.stream_queue_cnt--;
3763 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3764 			sctp_free_spbufspace(stcb, asoc, sp);
3765 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3766 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3767 			if (sp->data) {
3768 				sctp_m_freem(sp->data);
3769 				sp->data = NULL;
3770 			}
3771 			if (sp->net)
3772 				sctp_free_remote_addr(sp->net);
3773 			sp->net = NULL;
3774 			/* Free the chunk */
3775 			sctp_free_a_strmoq(stcb, sp);
3776 			/* sa_ignore FREED_MEMORY */
3777 			sp = TAILQ_FIRST(&outs->outqueue);
3778 		}
3779 	}
3780 
3781 	if (holds_lock == 0) {
3782 		SCTP_TCB_SEND_UNLOCK(stcb);
3783 	}
3784 }
3785 
3786 void
3787 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3788 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3789     SCTP_UNUSED
3790 #endif
3791 )
3792 {
3793 
3794 	if (stcb == NULL) {
3795 		return;
3796 	}
3797 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3798 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3799 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3800 		return;
3801 	}
3802 	/* Tell them we lost the asoc */
3803 	sctp_report_all_outbound(stcb, 1, so_locked);
3804 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3805 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3806 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3807 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3808 	}
3809 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3810 }
3811 
/*
 * Abort an association in response to an incoming packet: notify the ULP
 * (if a TCB exists), send an ABORT to the peer, and free the association.
 * With no TCB, the ABORT is sent out-of-the-blue style and a socket
 * already marked gone may have its inpcb freed.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* vtag stays 0 for the no-TCB (out-of-the-blue) case */
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock ordering: socket lock must be taken before the TCB
		 * lock, so drop and re-take the TCB lock while holding a
		 * refcount to keep the TCB alive.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: free a gone socket's inpcb once it has no assocs */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3857 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's circular inbound and outbound TSN
 * logs.  Each log is printed oldest-first: the wrapped tail (tsn_*_at to
 * end of array) first, then the entries from index 0 up to tsn_*_at.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for "NOISY_PRINTS"; as
 * written this function compiles to an empty body unless NOSIY_PRINTS is
 * defined.  Confirm intent before renaming the macro.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* print the older, wrapped-around portion first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* print the older, wrapped-around portion first */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3921 
/*
 * Abort an existing association from the local side: notify the ULP, send
 * an ABORT chunk (with optional cause 'op_err') to the peer, update stats,
 * and free the association.  'so_locked' indicates whether the caller
 * already holds the socket lock (Apple / lock-testing builds).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations leave the "current established" gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: take the socket lock before re-taking the TCB
	 * lock, holding a refcount so the TCB cannot be freed meanwhile.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3987 
3988 void
3989 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3990     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3991 {
3992 	struct sctp_chunkhdr *ch, chunk_buf;
3993 	unsigned int chk_length;
3994 
3995 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3996 	/* Generate a TO address for future reference */
3997 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3998 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3999 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4000 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4001 		}
4002 	}
4003 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4004 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4005 	while (ch != NULL) {
4006 		chk_length = ntohs(ch->chunk_length);
4007 		if (chk_length < sizeof(*ch)) {
4008 			/* break to abort land */
4009 			break;
4010 		}
4011 		switch (ch->chunk_type) {
4012 		case SCTP_COOKIE_ECHO:
4013 			/* We hit here only if the assoc is being freed */
4014 			return;
4015 		case SCTP_PACKET_DROPPED:
4016 			/* we don't respond to pkt-dropped */
4017 			return;
4018 		case SCTP_ABORT_ASSOCIATION:
4019 			/* we don't respond with an ABORT to an ABORT */
4020 			return;
4021 		case SCTP_SHUTDOWN_COMPLETE:
4022 			/*
4023 			 * we ignore it since we are not waiting for it and
4024 			 * peer is gone
4025 			 */
4026 			return;
4027 		case SCTP_SHUTDOWN_ACK:
4028 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4029 			return;
4030 		default:
4031 			break;
4032 		}
4033 		offset += SCTP_SIZE32(chk_length);
4034 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4035 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4036 	}
4037 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4038 }
4039 
4040 /*
4041  * check the inbound datagram to make sure there is not an abort inside it,
4042  * if there is return 1, else return 0.
4043  */
4044 int
4045 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4046 {
4047 	struct sctp_chunkhdr *ch;
4048 	struct sctp_init_chunk *init_chk, chunk_buf;
4049 	int offset;
4050 	unsigned int chk_length;
4051 
4052 	offset = iphlen + sizeof(struct sctphdr);
4053 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4054 	    (uint8_t *) & chunk_buf);
4055 	while (ch != NULL) {
4056 		chk_length = ntohs(ch->chunk_length);
4057 		if (chk_length < sizeof(*ch)) {
4058 			/* packet is probably corrupt */
4059 			break;
4060 		}
4061 		/* we seem to be ok, is it an abort? */
4062 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4063 			/* yep, tell them */
4064 			return (1);
4065 		}
4066 		if (ch->chunk_type == SCTP_INITIATION) {
4067 			/* need to update the Vtag */
4068 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4069 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4070 			if (init_chk != NULL) {
4071 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4072 			}
4073 		}
4074 		/* Nope, move to the next chunk */
4075 		offset += SCTP_SIZE32(chk_length);
4076 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4077 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4078 	}
4079 	return (0);
4080 }
4081 
4082 /*
4083  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4084  * set (i.e. it's 0) so, create this function to compare link local scopes
4085  */
4086 #ifdef INET6
4087 uint32_t
4088 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4089 {
4090 	struct sockaddr_in6 a, b;
4091 
4092 	/* save copies */
4093 	a = *addr1;
4094 	b = *addr2;
4095 
4096 	if (a.sin6_scope_id == 0)
4097 		if (sa6_recoverscope(&a)) {
4098 			/* can't get scope, so can't match */
4099 			return (0);
4100 		}
4101 	if (b.sin6_scope_id == 0)
4102 		if (sa6_recoverscope(&b)) {
4103 			/* can't get scope, so can't match */
4104 			return (0);
4105 		}
4106 	if (a.sin6_scope_id != b.sin6_scope_id)
4107 		return (0);
4108 
4109 	return (1);
4110 }
4111 
4112 /*
4113  * returns a sockaddr_in6 with embedded scope recovered and removed
4114  */
4115 struct sockaddr_in6 *
4116 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4117 {
4118 	/* check and strip embedded scope junk */
4119 	if (addr->sin6_family == AF_INET6) {
4120 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4121 			if (addr->sin6_scope_id == 0) {
4122 				*store = *addr;
4123 				if (!sa6_recoverscope(store)) {
4124 					/* use the recovered scope */
4125 					addr = store;
4126 				}
4127 			} else {
4128 				/* else, return the original "to" addr */
4129 				in6_clearscope(&addr->sin6_addr);
4130 			}
4131 		}
4132 	}
4133 	return (addr);
4134 }
4135 
4136 #endif
4137 
4138 /*
4139  * are the two addresses the same?  currently a "scopeless" check returns: 1
4140  * if same, 0 if not
4141  */
4142 int
4143 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4144 {
4145 
4146 	/* must be valid */
4147 	if (sa1 == NULL || sa2 == NULL)
4148 		return (0);
4149 
4150 	/* must be the same family */
4151 	if (sa1->sa_family != sa2->sa_family)
4152 		return (0);
4153 
4154 	switch (sa1->sa_family) {
4155 #ifdef INET6
4156 	case AF_INET6:
4157 		{
4158 			/* IPv6 addresses */
4159 			struct sockaddr_in6 *sin6_1, *sin6_2;
4160 
4161 			sin6_1 = (struct sockaddr_in6 *)sa1;
4162 			sin6_2 = (struct sockaddr_in6 *)sa2;
4163 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4164 			    sin6_2));
4165 		}
4166 #endif
4167 	case AF_INET:
4168 		{
4169 			/* IPv4 addresses */
4170 			struct sockaddr_in *sin_1, *sin_2;
4171 
4172 			sin_1 = (struct sockaddr_in *)sa1;
4173 			sin_2 = (struct sockaddr_in *)sa2;
4174 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4175 		}
4176 	default:
4177 		/* we don't do these... */
4178 		return (0);
4179 	}
4180 }
4181 
4182 void
4183 sctp_print_address(struct sockaddr *sa)
4184 {
4185 #ifdef INET6
4186 	char ip6buf[INET6_ADDRSTRLEN];
4187 
4188 	ip6buf[0] = 0;
4189 #endif
4190 
4191 	switch (sa->sa_family) {
4192 #ifdef INET6
4193 	case AF_INET6:
4194 		{
4195 			struct sockaddr_in6 *sin6;
4196 
4197 			sin6 = (struct sockaddr_in6 *)sa;
4198 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4199 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4200 			    ntohs(sin6->sin6_port),
4201 			    sin6->sin6_scope_id);
4202 			break;
4203 		}
4204 #endif
4205 	case AF_INET:
4206 		{
4207 			struct sockaddr_in *sin;
4208 			unsigned char *p;
4209 
4210 			sin = (struct sockaddr_in *)sa;
4211 			p = (unsigned char *)&sin->sin_addr;
4212 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4213 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4214 			break;
4215 		}
4216 	default:
4217 		SCTP_PRINTF("?\n");
4218 		break;
4219 	}
4220 }
4221 
4222 void
4223 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4224 {
4225 	switch (iph->ip_v) {
4226 		case IPVERSION:
4227 		{
4228 			struct sockaddr_in lsa, fsa;
4229 
4230 			bzero(&lsa, sizeof(lsa));
4231 			lsa.sin_len = sizeof(lsa);
4232 			lsa.sin_family = AF_INET;
4233 			lsa.sin_addr = iph->ip_src;
4234 			lsa.sin_port = sh->src_port;
4235 			bzero(&fsa, sizeof(fsa));
4236 			fsa.sin_len = sizeof(fsa);
4237 			fsa.sin_family = AF_INET;
4238 			fsa.sin_addr = iph->ip_dst;
4239 			fsa.sin_port = sh->dest_port;
4240 			SCTP_PRINTF("src: ");
4241 			sctp_print_address((struct sockaddr *)&lsa);
4242 			SCTP_PRINTF("dest: ");
4243 			sctp_print_address((struct sockaddr *)&fsa);
4244 			break;
4245 		}
4246 #ifdef INET6
4247 	case IPV6_VERSION >> 4:
4248 		{
4249 			struct ip6_hdr *ip6;
4250 			struct sockaddr_in6 lsa6, fsa6;
4251 
4252 			ip6 = (struct ip6_hdr *)iph;
4253 			bzero(&lsa6, sizeof(lsa6));
4254 			lsa6.sin6_len = sizeof(lsa6);
4255 			lsa6.sin6_family = AF_INET6;
4256 			lsa6.sin6_addr = ip6->ip6_src;
4257 			lsa6.sin6_port = sh->src_port;
4258 			bzero(&fsa6, sizeof(fsa6));
4259 			fsa6.sin6_len = sizeof(fsa6);
4260 			fsa6.sin6_family = AF_INET6;
4261 			fsa6.sin6_addr = ip6->ip6_dst;
4262 			fsa6.sin6_port = sh->dest_port;
4263 			SCTP_PRINTF("src: ");
4264 			sctp_print_address((struct sockaddr *)&lsa6);
4265 			SCTP_PRINTF("dest: ");
4266 			sctp_print_address((struct sockaddr *)&fsa6);
4267 			break;
4268 		}
4269 #endif
4270 	default:
4271 		/* TSNH */
4272 		break;
4273 	}
4274 }
4275 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 *
	 * Used when an association is split off to its own socket (e.g.
	 * peeloff/accept): queued-to-read messages for stcb are moved from
	 * old_inp's read queue to new_inp's, and their socket-buffer
	 * accounting is transferred from old_so to new_so.  waitflags is
	 * passed through to sblock().
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* exclude concurrent readers from the old receive buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* remember the next entry before unlinking this one */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge every mbuf of this message from the old sockbuf */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4357 
4358 
4359 void
4360 sctp_add_to_readq(struct sctp_inpcb *inp,
4361     struct sctp_tcb *stcb,
4362     struct sctp_queued_to_read *control,
4363     struct sockbuf *sb,
4364     int end,
4365     int so_locked
4366 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4367     SCTP_UNUSED
4368 #endif
4369 )
4370 {
4371 	/*
4372 	 * Here we must place the control on the end of the socket read
4373 	 * queue AND increment sb_cc so that select will work properly on
4374 	 * read.
4375 	 */
4376 	struct mbuf *m, *prev = NULL;
4377 
4378 	if (inp == NULL) {
4379 		/* Gak, TSNH!! */
4380 #ifdef INVARIANTS
4381 		panic("Gak, inp NULL on add_to_readq");
4382 #endif
4383 		return;
4384 	}
4385 	SCTP_INP_READ_LOCK(inp);
4386 	if (!(control->spec_flags & M_NOTIFICATION)) {
4387 		atomic_add_int(&inp->total_recvs, 1);
4388 		if (!control->do_not_ref_stcb) {
4389 			atomic_add_int(&stcb->total_recvs, 1);
4390 		}
4391 	}
4392 	m = control->data;
4393 	control->held_length = 0;
4394 	control->length = 0;
4395 	while (m) {
4396 		if (SCTP_BUF_LEN(m) == 0) {
4397 			/* Skip mbufs with NO length */
4398 			if (prev == NULL) {
4399 				/* First one */
4400 				control->data = sctp_m_free(m);
4401 				m = control->data;
4402 			} else {
4403 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4404 				m = SCTP_BUF_NEXT(prev);
4405 			}
4406 			if (m == NULL) {
4407 				control->tail_mbuf = prev;;
4408 			}
4409 			continue;
4410 		}
4411 		prev = m;
4412 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4413 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4414 		}
4415 		sctp_sballoc(stcb, sb, m);
4416 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4417 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4418 		}
4419 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4420 		m = SCTP_BUF_NEXT(m);
4421 	}
4422 	if (prev != NULL) {
4423 		control->tail_mbuf = prev;
4424 	} else {
4425 		/* Everything got collapsed out?? */
4426 		return;
4427 	}
4428 	if (end) {
4429 		control->end_added = 1;
4430 	}
4431 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4432 	SCTP_INP_READ_UNLOCK(inp);
4433 	if (inp && inp->sctp_socket) {
4434 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4435 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4436 		} else {
4437 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4438 			struct socket *so;
4439 
4440 			so = SCTP_INP_SO(inp);
4441 			if (!so_locked) {
4442 				atomic_add_int(&stcb->asoc.refcnt, 1);
4443 				SCTP_TCB_UNLOCK(stcb);
4444 				SCTP_SOCKET_LOCK(so, 1);
4445 				SCTP_TCB_LOCK(stcb);
4446 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4447 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4448 					SCTP_SOCKET_UNLOCK(so, 1);
4449 					return;
4450 				}
4451 			}
4452 #endif
4453 			sctp_sorwakeup(inp, inp->sctp_socket);
4454 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4455 			if (!so_locked) {
4456 				SCTP_SOCKET_UNLOCK(so, 1);
4457 			}
4458 #endif
4459 		}
4460 	}
4461 }
4462 
4463 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when there is nothing to append
	 * (NULL/completed control or empty chain).
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common exit: release the read lock (if taken) and fail */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/*
	 * Walk the incoming chain: prune zero-length mbufs, total the
	 * payload bytes, and (when sb is given) charge each mbuf to the
	 * socket buffer.
	 */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/* socket lock must be taken in socket-before-TCB order */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4605 
4606 
4607 
4608 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4609  *************ALTERNATE ROUTING CODE
4610  */
4611 
4612 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4613  *************ALTERNATE ROUTING CODE
4614  */
4615 
4616 struct mbuf *
4617 sctp_generate_invmanparam(int err)
4618 {
4619 	/* Return a MBUF with a invalid mandatory parameter */
4620 	struct mbuf *m;
4621 
4622 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4623 	if (m) {
4624 		struct sctp_paramhdr *ph;
4625 
4626 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4627 		ph = mtod(m, struct sctp_paramhdr *);
4628 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4629 		ph->param_type = htons(err);
4630 	}
4631 	return (m);
4632 }
4633 
4634 #ifdef SCTP_MBCNT_LOGGING
4635 void
4636 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4637     struct sctp_tmit_chunk *tp1, int chk_cnt)
4638 {
4639 	if (tp1->data == NULL) {
4640 		return;
4641 	}
4642 	asoc->chunks_on_out_queue -= chk_cnt;
4643 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4644 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4645 		    asoc->total_output_queue_size,
4646 		    tp1->book_size,
4647 		    0,
4648 		    tp1->mbcnt);
4649 	}
4650 	if (asoc->total_output_queue_size >= tp1->book_size) {
4651 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4652 	} else {
4653 		asoc->total_output_queue_size = 0;
4654 	}
4655 
4656 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4657 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4658 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4659 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4660 		} else {
4661 			stcb->sctp_socket->so_snd.sb_cc = 0;
4662 
4663 		}
4664 	}
4665 }
4666 
4667 #endif
4668 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Abandon a PR-SCTP message starting at chunk tp1 on 'queue':
	 * mark every fragment of the message SCTP_FORWARD_TSN_SKIP, free
	 * its data, notify the ULP of the failure, and move send-queue
	 * chunks onto the sent queue so a FORWARD-TSN can cover them.
	 * Returns the total book_size of the chunks released.  If the
	 * message continues past the sent queue, recurses once over the
	 * send queue.
	 */
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		/* mark so a FORWARD-TSN will skip this TSN */
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/*
			 * NOTE(review): passes SCTP_SO_NOT_LOCKED even
			 * when so_locked is set - confirm this is
			 * intended for the notification path.
			 */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * socket lock must be taken in socket-before-TCB
			 * order; the refcount keeps the stcb alive while
			 * the TCB lock is dropped
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we ae done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse throught the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4765 
4766 /*
4767  * checks to see if the given address, sa, is one that is currently known by
4768  * the kernel note: can't distinguish the same address on multiple interfaces
4769  * and doesn't handle multiple addresses with different zone/scope id's note:
4770  * ifa_ifwithaddr() compares the entire sockaddr struct
4771  */
4772 struct sctp_ifa *
4773 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4774     int holds_lock)
4775 {
4776 	struct sctp_laddr *laddr;
4777 
4778 	if (holds_lock == 0) {
4779 		SCTP_INP_RLOCK(inp);
4780 	}
4781 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4782 		if (laddr->ifa == NULL)
4783 			continue;
4784 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4785 			continue;
4786 		if (addr->sa_family == AF_INET) {
4787 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4788 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4789 				/* found him. */
4790 				if (holds_lock == 0) {
4791 					SCTP_INP_RUNLOCK(inp);
4792 				}
4793 				return (laddr->ifa);
4794 				break;
4795 			}
4796 		}
4797 #ifdef INET6
4798 		if (addr->sa_family == AF_INET6) {
4799 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4800 			    &laddr->ifa->address.sin6)) {
4801 				/* found him. */
4802 				if (holds_lock == 0) {
4803 					SCTP_INP_RUNLOCK(inp);
4804 				}
4805 				return (laddr->ifa);
4806 				break;
4807 			}
4808 		}
4809 #endif
4810 	}
4811 	if (holds_lock == 0) {
4812 		SCTP_INP_RUNLOCK(inp);
4813 	}
4814 	return (NULL);
4815 }
4816 
4817 uint32_t
4818 sctp_get_ifa_hash_val(struct sockaddr *addr)
4819 {
4820 	if (addr->sa_family == AF_INET) {
4821 		struct sockaddr_in *sin;
4822 
4823 		sin = (struct sockaddr_in *)addr;
4824 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4825 	} else if (addr->sa_family == AF_INET6) {
4826 		struct sockaddr_in6 *sin6;
4827 		uint32_t hash_of_addr;
4828 
4829 		sin6 = (struct sockaddr_in6 *)addr;
4830 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4831 		    sin6->sin6_addr.s6_addr32[1] +
4832 		    sin6->sin6_addr.s6_addr32[2] +
4833 		    sin6->sin6_addr.s6_addr32[3]);
4834 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4835 		return (hash_of_addr);
4836 	}
4837 	return (0);
4838 }
4839 
4840 struct sctp_ifa *
4841 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4842 {
4843 	struct sctp_ifa *sctp_ifap;
4844 	struct sctp_vrf *vrf;
4845 	struct sctp_ifalist *hash_head;
4846 	uint32_t hash_of_addr;
4847 
4848 	if (holds_lock == 0)
4849 		SCTP_IPI_ADDR_RLOCK();
4850 
4851 	vrf = sctp_find_vrf(vrf_id);
4852 	if (vrf == NULL) {
4853 stage_right:
4854 		if (holds_lock == 0)
4855 			SCTP_IPI_ADDR_RUNLOCK();
4856 		return (NULL);
4857 	}
4858 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4859 
4860 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4861 	if (hash_head == NULL) {
4862 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4863 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4864 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4865 		sctp_print_address(addr);
4866 		SCTP_PRINTF("No such bucket for address\n");
4867 		if (holds_lock == 0)
4868 			SCTP_IPI_ADDR_RUNLOCK();
4869 
4870 		return (NULL);
4871 	}
4872 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4873 		if (sctp_ifap == NULL) {
4874 #ifdef INVARIANTS
4875 			panic("Huh LIST_FOREACH corrupt");
4876 			goto stage_right;
4877 #else
4878 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4879 			goto stage_right;
4880 #endif
4881 		}
4882 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4883 			continue;
4884 		if (addr->sa_family == AF_INET) {
4885 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4886 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4887 				/* found him. */
4888 				if (holds_lock == 0)
4889 					SCTP_IPI_ADDR_RUNLOCK();
4890 				return (sctp_ifap);
4891 				break;
4892 			}
4893 		}
4894 #ifdef INET6
4895 		if (addr->sa_family == AF_INET6) {
4896 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4897 			    &sctp_ifap->address.sin6)) {
4898 				/* found him. */
4899 				if (holds_lock == 0)
4900 					SCTP_IPI_ADDR_RUNLOCK();
4901 				return (sctp_ifap);
4902 				break;
4903 			}
4904 		}
4905 #endif
4906 	}
4907 	if (holds_lock == 0)
4908 		SCTP_IPI_ADDR_RUNLOCK();
4909 	return (NULL);
4910 }
4911 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?
	 *
	 * Called from the receive path after *freed_so_far bytes were
	 * consumed.  If the newly available receive window has grown by at
	 * least rwnd_req, send a window-update SACK and kick the output
	 * path; otherwise just accumulate the freed byte count.  hold_rlock
	 * tells us whether the caller holds the INP read lock (which must
	 * be dropped around the SACK send and re-taken before returning).
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* keep the stcb alive while we work without the TCB lock */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* window shrank; no growth to report */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough to be worth advertising */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4992 
/*
 * sctp_sorecvmsg() is the SCTP-specific receive path invoked in place of
 * the generic soreceive().  It pulls one (or part of one) message off the
 * endpoint's read_queue and either copies it to userland via 'uio'
 * (mp == NULL) or hands the raw mbuf chain back through '*mp'.
 *
 * Parameters:
 *   so            - the receiving socket.
 *   uio           - destination/size descriptor for the copyout path;
 *                   must not be NULL.
 *   mp            - if non-NULL, the mbuf chain is returned here instead
 *                   of being copied through 'uio' (incompatible with
 *                   MSG_PEEK).
 *   from/fromlen  - optional buffer for the peer's address.
 *   msg_flags     - in: MSG_PEEK/MSG_DONTWAIT/MSG_NBIO etc.;
 *                   out: MSG_EOR/MSG_NOTIFICATION/MSG_TRUNC as applicable.
 *   sinfo         - optional sctp_sndrcvinfo, filled when filling_sinfo
 *                   is set (extended with next-message info when the
 *                   EXT_RCVINFO feature is on).
 *   filling_sinfo - nonzero when 'sinfo' should be populated.
 *
 * Returns 0 on success or an errno (EINVAL, EOPNOTSUPP, EFAULT,
 * ECONNRESET, ENOTCONN, EWOULDBLOCK, ...).
 *
 * Locking: juggles the socket-buffer lock (hold_sblock), the inp read
 * lock (hold_rlock), the sblock() reader lock (sockbuf_lock) and a TCB
 * refcount (freecnt_applied); each is tracked by a flag so the exit
 * paths can release exactly what is held.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL, *embuf = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;
	uint32_t copied_so_far = 0;
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;
	int hold_sblock = 0;
	int hold_rlock = 0;
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	/* Remember the requested length so we can log how much was read. */
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		/* Peeking is only valid on the uio copy path. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	/*
	 * rwnd_req is the amount we must free before telling the peer the
	 * window opened (see sctp_user_rcvd() calls below).
	 */
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize against other readers of this socket buffer. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:


restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is being torn down; bail out. */
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
					/*
					 * You get this once if you are
					 * active open side
					 */
					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
						/*
						 * Remove flag if on the
						 * active open side
						 */
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
					}
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					} else {
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
					}
				}
				goto out;
			}
		}
		/* Sleep until data arrives (drops/re-takes sockbuf lock). */
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Non-blocking and no data queued. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
						/*
						 * You get this once if you
						 * are active open side
						 */
						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
							/*
							 * Remove flag if on
							 * the active open
							 * side
							 */
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
						}
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						} else {
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but has not yet done the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Re-derive length/tail from the chain itself. */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, no suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/* TCB is being freed; don't touch it further. */
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			/* Fill in next-message hints for extended rcvinfo. */
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		/* Record this read in the per-endpoint TSN read log. */
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		/* Copy out the peer's address (mapped to v6 if needed). */
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			/* Convert the v4 address to a v4-mapped v6 one. */
			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				/* Drop the read lock; uiomove() may sleep. */
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				/* Last mbuf may be appended-to; take rlock. */
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					embuf = m;
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					/* Partial read: advance the mbuf data pointer. */
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					embuf = m;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
		    ) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e. did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account each mbuf out of the socket buffer. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Normal exit: drop any held locks and the reader lock. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
		/* Partial delivery: make sure no stale next-msg info leaks. */
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5964 
5965 
5966 #ifdef SCTP_MBUF_LOGGING
5967 struct mbuf *
5968 sctp_m_free(struct mbuf *m)
5969 {
5970 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5971 		if (SCTP_BUF_IS_EXTENDED(m)) {
5972 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5973 		}
5974 	}
5975 	return (m_free(m));
5976 }
5977 
5978 void
5979 sctp_m_freem(struct mbuf *mb)
5980 {
5981 	while (mb != NULL)
5982 		mb = sctp_m_free(mb);
5983 }
5984 
5985 #endif
5986 
5987 int
5988 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5989 {
5990 	/*
5991 	 * Given a local address. For all associations that holds the
5992 	 * address, request a peer-set-primary.
5993 	 */
5994 	struct sctp_ifa *ifa;
5995 	struct sctp_laddr *wi;
5996 
5997 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5998 	if (ifa == NULL) {
5999 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6000 		return (EADDRNOTAVAIL);
6001 	}
6002 	/*
6003 	 * Now that we have the ifa we must awaken the iterator with this
6004 	 * message.
6005 	 */
6006 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6007 	if (wi == NULL) {
6008 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6009 		return (ENOMEM);
6010 	}
6011 	/* Now incr the count and int wi structure */
6012 	SCTP_INCR_LADDR_COUNT();
6013 	bzero(wi, sizeof(*wi));
6014 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6015 	wi->ifa = ifa;
6016 	wi->action = SCTP_SET_PRIM_ADDR;
6017 	atomic_add_int(&ifa->refcount, 1);
6018 
6019 	/* Now add it to the work queue */
6020 	SCTP_IPI_ITERATOR_WQ_LOCK();
6021 	/*
6022 	 * Should this really be a tailq? As it is we will process the
6023 	 * newest first :-0
6024 	 */
6025 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6026 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6027 	    (struct sctp_inpcb *)NULL,
6028 	    (struct sctp_tcb *)NULL,
6029 	    (struct sctp_nets *)NULL);
6030 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
6031 	return (0);
6032 }
6033 
6034 
6035 int
6036 sctp_soreceive(struct socket *so,
6037     struct sockaddr **psa,
6038     struct uio *uio,
6039     struct mbuf **mp0,
6040     struct mbuf **controlp,
6041     int *flagsp)
6042 {
6043 	int error, fromlen;
6044 	uint8_t sockbuf[256];
6045 	struct sockaddr *from;
6046 	struct sctp_extrcvinfo sinfo;
6047 	int filling_sinfo = 1;
6048 	struct sctp_inpcb *inp;
6049 
6050 	inp = (struct sctp_inpcb *)so->so_pcb;
6051 	/* pickup the assoc we are reading from */
6052 	if (inp == NULL) {
6053 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6054 		return (EINVAL);
6055 	}
6056 	if ((sctp_is_feature_off(inp,
6057 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6058 	    (controlp == NULL)) {
6059 		/* user does not want the sndrcv ctl */
6060 		filling_sinfo = 0;
6061 	}
6062 	if (psa) {
6063 		from = (struct sockaddr *)sockbuf;
6064 		fromlen = sizeof(sockbuf);
6065 		from->sa_len = 0;
6066 	} else {
6067 		from = NULL;
6068 		fromlen = 0;
6069 	}
6070 
6071 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6072 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6073 	if ((controlp) && (filling_sinfo)) {
6074 		/* copy back the sinfo in a CMSG format */
6075 		if (filling_sinfo)
6076 			*controlp = sctp_build_ctl_nchunk(inp,
6077 			    (struct sctp_sndrcvinfo *)&sinfo);
6078 		else
6079 			*controlp = NULL;
6080 	}
6081 	if (psa) {
6082 		/* copy back the address info */
6083 		if (from && from->sa_len) {
6084 			*psa = sodupsockaddr(from, M_NOWAIT);
6085 		} else {
6086 			*psa = NULL;
6087 		}
6088 	}
6089 	return (error);
6090 }
6091 
6092 
6093 int
6094 sctp_l_soreceive(struct socket *so,
6095     struct sockaddr **name,
6096     struct uio *uio,
6097     char **controlp,
6098     int *controllen,
6099     int *flag)
6100 {
6101 	int error, fromlen;
6102 	uint8_t sockbuf[256];
6103 	struct sockaddr *from;
6104 	struct sctp_extrcvinfo sinfo;
6105 	int filling_sinfo = 1;
6106 	struct sctp_inpcb *inp;
6107 
6108 	inp = (struct sctp_inpcb *)so->so_pcb;
6109 	/* pickup the assoc we are reading from */
6110 	if (inp == NULL) {
6111 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6112 		return (EINVAL);
6113 	}
6114 	if ((sctp_is_feature_off(inp,
6115 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6116 	    (controlp == NULL)) {
6117 		/* user does not want the sndrcv ctl */
6118 		filling_sinfo = 0;
6119 	}
6120 	if (name) {
6121 		from = (struct sockaddr *)sockbuf;
6122 		fromlen = sizeof(sockbuf);
6123 		from->sa_len = 0;
6124 	} else {
6125 		from = NULL;
6126 		fromlen = 0;
6127 	}
6128 
6129 	error = sctp_sorecvmsg(so, uio,
6130 	    (struct mbuf **)NULL,
6131 	    from, fromlen, flag,
6132 	    (struct sctp_sndrcvinfo *)&sinfo,
6133 	    filling_sinfo);
6134 	if ((controlp) && (filling_sinfo)) {
6135 		/*
6136 		 * copy back the sinfo in a CMSG format note that the caller
6137 		 * has reponsibility for freeing the memory.
6138 		 */
6139 		if (filling_sinfo)
6140 			*controlp = sctp_build_ctl_cchunk(inp,
6141 			    controllen,
6142 			    (struct sctp_sndrcvinfo *)&sinfo);
6143 	}
6144 	if (name) {
6145 		/* copy back the address info */
6146 		if (from && from->sa_len) {
6147 			*name = sodupsockaddr(from, M_WAIT);
6148 		} else {
6149 			*name = NULL;
6150 		}
6151 	}
6152 	return (error);
6153 }
6154 
6155 
6156 
6157 
6158 
6159 
6160 
/*
 * Add each address in the packed sockaddr array 'addr' (totaddr entries)
 * as a confirmed remote address of the association 'stcb'.  Returns the
 * number of addresses actually added.  On an add failure the association
 * has been freed and *error is set to ENOBUFS; callers must not touch
 * stcb in that case.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for any other sa_family, 'incr' keeps its
		 * previous value (0 on the first iteration), so 'sa' may not
		 * advance correctly.  This presumes the caller passes only
		 * addresses already validated by sctp_connectx_helper_find()
		 * -- confirm against callers.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6201 
6202 struct sctp_tcb *
6203 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6204     int *totaddr, int *num_v4, int *num_v6, int *error,
6205     int limit, int *bad_addr)
6206 {
6207 	struct sockaddr *sa;
6208 	struct sctp_tcb *stcb = NULL;
6209 	size_t incr, at, i;
6210 
6211 	at = incr = 0;
6212 	sa = addr;
6213 	*error = *num_v6 = *num_v4 = 0;
6214 	/* account and validate addresses */
6215 	for (i = 0; i < (size_t)*totaddr; i++) {
6216 		if (sa->sa_family == AF_INET) {
6217 			(*num_v4) += 1;
6218 			incr = sizeof(struct sockaddr_in);
6219 			if (sa->sa_len != incr) {
6220 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6221 				*error = EINVAL;
6222 				*bad_addr = 1;
6223 				return (NULL);
6224 			}
6225 		} else if (sa->sa_family == AF_INET6) {
6226 			struct sockaddr_in6 *sin6;
6227 
6228 			sin6 = (struct sockaddr_in6 *)sa;
6229 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6230 				/* Must be non-mapped for connectx */
6231 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6232 				*error = EINVAL;
6233 				*bad_addr = 1;
6234 				return (NULL);
6235 			}
6236 			(*num_v6) += 1;
6237 			incr = sizeof(struct sockaddr_in6);
6238 			if (sa->sa_len != incr) {
6239 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6240 				*error = EINVAL;
6241 				*bad_addr = 1;
6242 				return (NULL);
6243 			}
6244 		} else {
6245 			*totaddr = i;
6246 			/* we are done */
6247 			break;
6248 		}
6249 		SCTP_INP_INCR_REF(inp);
6250 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6251 		if (stcb != NULL) {
6252 			/* Already have or am bring up an association */
6253 			return (stcb);
6254 		} else {
6255 			SCTP_INP_DECR_REF(inp);
6256 		}
6257 		if ((at + incr) > (size_t)limit) {
6258 			*totaddr = i;
6259 			break;
6260 		}
6261 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6262 	}
6263 	return ((struct sctp_tcb *)NULL);
6264 }
6265 
6266 /*
6267  * sctp_bindx(ADD) for one address.
6268  * assumes all arguments are valid/checked by caller.
6269  */
/*
 * Bind 'sa' as an additional local address of the endpoint 'inp'
 * (sctp_bindx ADD semantics).  On failure *error is set to
 * EINVAL/EADDRINUSE or the result of sctp_inpcb_bind(); on success it
 * is left unchanged.  Assumes all arguments were validated by the
 * caller; 'p' is the calling proc (needed for the initial bind).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Unmap the v4-mapped v6 address into a plain v4 one. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this becomes the primary bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Check whether some endpoint already owns this address/port. */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Another endpoint owns this address/port pair. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6391 
6392 /*
6393  * sctp_bindx(DELETE) for one address.
6394  * assumes all arguments are valid/checked by caller.
6395  */
/*
 * Remove 'sa' from the set of local addresses bound to the endpoint
 * 'inp' (sctp_bindx DELETE semantics).  On failure *error is set to
 * EINVAL or the result of sctp_addr_mgmt_ep_sa(); on success it is
 * left unchanged.  Assumes all arguments were validated by the caller.
 */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Unmap the v4-mapped v6 address into a plain v4 one. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6474 
6475 /*
6476  * returns the valid local address count for an assoc, taking into account
6477  * all scoping rules
6478  */
/*
 * Return the number of valid local addresses for the association,
 * applying the association's scoping rules (loopback, IPv4 private,
 * IPv6 link-local and site-local scopes) and skipping restricted and
 * unspecified addresses.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/*
	 * A v6 socket can carry v4 addresses too unless it is marked
	 * v6-only; a non-v6 socket is v4-only.
	 */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link-local
									 * address; skip it
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6610 
6611 #if defined(SCTP_LOCAL_TRACE_BUF)
6612 
/*
 * Record one entry in the circular in-kernel trace log.  A slot is
 * claimed lock-free with a compare-and-swap loop on the shared index,
 * so concurrent tracers never block each other.  'str' is unused here
 * (kept for API compatibility).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Atomically advance the index, wrapping back to 1 at the end. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* Wrap the claimed slot itself back to entry 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/*
	 * NOTE(review): only the index claim is atomic; the fields below
	 * are written without further synchronization, so a reader (or a
	 * fast wrap-around) may observe a partially-written entry --
	 * presumably acceptable for a debug trace buffer.
	 */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6638 
6639 #endif
/* We will need to add support
 * for binding the ports and such here
 * so that we can do UDP tunneling. In
 * the meantime, we return an error.
 */
6645 
void
sctp_over_udp_stop(void)
{
	/* Nothing to tear down: UDP tunneling is not yet implemented. */
}
int
sctp_over_udp_start(void)
{
	/* UDP tunneling is not yet supported, so always report failure. */
	return (-1);
}
6656