xref: /freebsd/sys/netinet/sctputil.c (revision cacdd70cc751fb68dec4b86c5e5b8c969b6e26ef)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_crc32.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_cc_functions.h>
52 
53 #define NUMBER_OF_MTU_SIZES 18
54 
55 
56 #ifndef KTR_SCTP
57 #define KTR_SCTP KTR_SUBSYS
58 #endif
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 
221 }
222 
223 
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252     int from)
253 {
254 	struct sctp_cwnd_log sctp_clog;
255 
256 	if (control == NULL) {
257 		SCTP_PRINTF("Gak log of NULL?\n");
258 		return;
259 	}
260 	sctp_clog.x.strlog.stcb = control->stcb;
261 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 	sctp_clog.x.strlog.strm = control->sinfo_stream;
264 	if (poschk != NULL) {
265 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 	} else {
268 		sctp_clog.x.strlog.e_tsn = 0;
269 		sctp_clog.x.strlog.e_sseq = 0;
270 	}
271 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 	    SCTP_LOG_EVENT_STRM,
273 	    from,
274 	    sctp_clog.x.misc.log1,
275 	    sctp_clog.x.misc.log2,
276 	    sctp_clog.x.misc.log3,
277 	    sctp_clog.x.misc.log4);
278 
279 }
280 
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 	struct sctp_cwnd_log sctp_clog;
285 
286 	sctp_clog.x.cwnd.net = net;
287 	if (stcb->asoc.send_queue_cnt > 255)
288 		sctp_clog.x.cwnd.cnt_in_send = 255;
289 	else
290 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 	if (stcb->asoc.stream_queue_cnt > 255)
292 		sctp_clog.x.cwnd.cnt_in_str = 255;
293 	else
294 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295 
296 	if (net) {
297 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 		sctp_clog.x.cwnd.inflight = net->flight_size;
299 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 	}
303 	if (SCTP_CWNDLOG_PRESEND == from) {
304 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 	}
306 	sctp_clog.x.cwnd.cwnd_augment = augment;
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	    SCTP_LOG_EVENT_CWND,
309 	    from,
310 	    sctp_clog.x.misc.log1,
311 	    sctp_clog.x.misc.log2,
312 	    sctp_clog.x.misc.log3,
313 	    sctp_clog.x.misc.log4);
314 
315 }
316 
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 	struct sctp_cwnd_log sctp_clog;
321 
322 	memset(&sctp_clog, 0, sizeof(sctp_clog));
323 	if (inp) {
324 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325 
326 	} else {
327 		sctp_clog.x.lock.sock = (void *)NULL;
328 	}
329 	sctp_clog.x.lock.inp = (void *)inp;
330 	if (stcb) {
331 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 	} else {
333 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 	}
335 	if (inp) {
336 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 	} else {
339 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 	if (inp->sctp_socket) {
344 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 	} else {
348 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 	    SCTP_LOG_LOCK_EVENT,
354 	    from,
355 	    sctp_clog.x.misc.log1,
356 	    sctp_clog.x.misc.log2,
357 	    sctp_clog.x.misc.log3,
358 	    sctp_clog.x.misc.log4);
359 
360 }
361 
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 	struct sctp_cwnd_log sctp_clog;
366 
367 	memset(&sctp_clog, 0, sizeof(sctp_clog));
368 	sctp_clog.x.cwnd.net = net;
369 	sctp_clog.x.cwnd.cwnd_new_value = error;
370 	sctp_clog.x.cwnd.inflight = net->flight_size;
371 	sctp_clog.x.cwnd.cwnd_augment = burst;
372 	if (stcb->asoc.send_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_send = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 	if (stcb->asoc.stream_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_str = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 	    SCTP_LOG_EVENT_MAXBURST,
382 	    from,
383 	    sctp_clog.x.misc.log1,
384 	    sctp_clog.x.misc.log2,
385 	    sctp_clog.x.misc.log3,
386 	    sctp_clog.x.misc.log4);
387 
388 }
389 
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 	struct sctp_cwnd_log sctp_clog;
394 
395 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 	sctp_clog.x.rwnd.send_size = snd_size;
397 	sctp_clog.x.rwnd.overhead = overhead;
398 	sctp_clog.x.rwnd.new_rwnd = 0;
399 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 	    SCTP_LOG_EVENT_RWND,
401 	    from,
402 	    sctp_clog.x.misc.log1,
403 	    sctp_clog.x.misc.log2,
404 	    sctp_clog.x.misc.log3,
405 	    sctp_clog.x.misc.log4);
406 }
407 
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 	struct sctp_cwnd_log sctp_clog;
412 
413 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 	sctp_clog.x.rwnd.send_size = flight_size;
415 	sctp_clog.x.rwnd.overhead = overhead;
416 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_RWND,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 }
425 
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 	struct sctp_cwnd_log sctp_clog;
430 
431 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 	sctp_clog.x.mbcnt.size_change = book;
433 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 	    SCTP_LOG_EVENT_MBCNT,
437 	    from,
438 	    sctp_clog.x.misc.log1,
439 	    sctp_clog.x.misc.log2,
440 	    sctp_clog.x.misc.log3,
441 	    sctp_clog.x.misc.log4);
442 
443 }
444 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	/*
	 * Emit a generic KTR trace record carrying four caller-chosen
	 * 32-bit values; 'from' identifies the call site.
	 */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
453 
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 	struct sctp_cwnd_log sctp_clog;
458 
459 	sctp_clog.x.wake.stcb = (void *)stcb;
460 	sctp_clog.x.wake.wake_cnt = wake_cnt;
461 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464 
465 	if (stcb->asoc.stream_queue_cnt < 0xff)
466 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 	else
468 		sctp_clog.x.wake.stream_qcnt = 0xff;
469 
470 	if (stcb->asoc.chunks_on_out_queue < 0xff)
471 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 	else
473 		sctp_clog.x.wake.chunks_on_oque = 0xff;
474 
475 	sctp_clog.x.wake.sctpflags = 0;
476 	/* set in the defered mode stuff */
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 		sctp_clog.x.wake.sctpflags |= 1;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 		sctp_clog.x.wake.sctpflags |= 2;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 		sctp_clog.x.wake.sctpflags |= 4;
483 	/* what about the sb */
484 	if (stcb->sctp_socket) {
485 		struct socket *so = stcb->sctp_socket;
486 
487 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 	} else {
489 		sctp_clog.x.wake.sbflags = 0xff;
490 	}
491 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 	    SCTP_LOG_EVENT_WAKE,
493 	    from,
494 	    sctp_clog.x.misc.log1,
495 	    sctp_clog.x.misc.log2,
496 	    sctp_clog.x.misc.log3,
497 	    sctp_clog.x.misc.log4);
498 
499 }
500 
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 	sctp_clog.x.blk.sndlen = sendlen;
513 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 	    SCTP_LOG_EVENT_BLOCK,
515 	    from,
516 	    sctp_clog.x.misc.log1,
517 	    sctp_clog.x.misc.log2,
518 	    sctp_clog.x.misc.log3,
519 	    sctp_clog.x.misc.log4);
520 
521 }
522 
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/*
	 * Stub: the cwnd-log history is retrieved via KTR tracing on
	 * this platform, so there is nothing to copy out here; the
	 * arguments are ignored and the call always succeeds.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
529 
530 #ifdef SCTP_AUDITING_ENABLED
531 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
532 static int sctp_audit_indx = 0;
533 
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 	int i;
539 	int cnt;
540 
541 	cnt = 0;
542 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 		if ((sctp_audit_data[i][0] == 0xe0) &&
544 		    (sctp_audit_data[i][1] == 0x01)) {
545 			cnt = 0;
546 			SCTP_PRINTF("\n");
547 		} else if (sctp_audit_data[i][0] == 0xf0) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			SCTP_PRINTF("\n");
553 			cnt = 0;
554 		}
555 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 		    (uint32_t) sctp_audit_data[i][1]);
557 		cnt++;
558 		if ((cnt % 14) == 0)
559 			SCTP_PRINTF("\n");
560 	}
561 	for (i = 0; i < sctp_audit_indx; i++) {
562 		if ((sctp_audit_data[i][0] == 0xe0) &&
563 		    (sctp_audit_data[i][1] == 0x01)) {
564 			cnt = 0;
565 			SCTP_PRINTF("\n");
566 		} else if (sctp_audit_data[i][0] == 0xf0) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			SCTP_PRINTF("\n");
572 			cnt = 0;
573 		}
574 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 		    (uint32_t) sctp_audit_data[i][1]);
576 		cnt++;
577 		if ((cnt % 14) == 0)
578 			SCTP_PRINTF("\n");
579 	}
580 	SCTP_PRINTF("\n");
581 }
582 
583 void
584 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
585     struct sctp_nets *net)
586 {
587 	int resend_cnt, tot_out, rep, tot_book_cnt;
588 	struct sctp_nets *lnet;
589 	struct sctp_tmit_chunk *chk;
590 
591 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
592 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
593 	sctp_audit_indx++;
594 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
595 		sctp_audit_indx = 0;
596 	}
597 	if (inp == NULL) {
598 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
599 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
600 		sctp_audit_indx++;
601 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 			sctp_audit_indx = 0;
603 		}
604 		return;
605 	}
606 	if (stcb == NULL) {
607 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
608 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
609 		sctp_audit_indx++;
610 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
611 			sctp_audit_indx = 0;
612 		}
613 		return;
614 	}
615 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
616 	sctp_audit_data[sctp_audit_indx][1] =
617 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
618 	sctp_audit_indx++;
619 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
620 		sctp_audit_indx = 0;
621 	}
622 	rep = 0;
623 	tot_book_cnt = 0;
624 	resend_cnt = tot_out = 0;
625 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
626 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
627 			resend_cnt++;
628 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
629 			tot_out += chk->book_size;
630 			tot_book_cnt++;
631 		}
632 	}
633 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
634 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
635 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
636 		sctp_audit_indx++;
637 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
638 			sctp_audit_indx = 0;
639 		}
640 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
641 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
642 		rep = 1;
643 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
644 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
645 		sctp_audit_data[sctp_audit_indx][1] =
646 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
647 		sctp_audit_indx++;
648 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
649 			sctp_audit_indx = 0;
650 		}
651 	}
652 	if (tot_out != stcb->asoc.total_flight) {
653 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
654 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
655 		sctp_audit_indx++;
656 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
657 			sctp_audit_indx = 0;
658 		}
659 		rep = 1;
660 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
661 		    (int)stcb->asoc.total_flight);
662 		stcb->asoc.total_flight = tot_out;
663 	}
664 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
665 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
666 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
667 		sctp_audit_indx++;
668 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
669 			sctp_audit_indx = 0;
670 		}
671 		rep = 1;
672 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
673 
674 		stcb->asoc.total_flight_count = tot_book_cnt;
675 	}
676 	tot_out = 0;
677 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
678 		tot_out += lnet->flight_size;
679 	}
680 	if (tot_out != stcb->asoc.total_flight) {
681 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
682 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
683 		sctp_audit_indx++;
684 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
685 			sctp_audit_indx = 0;
686 		}
687 		rep = 1;
688 		SCTP_PRINTF("real flight:%d net total was %d\n",
689 		    stcb->asoc.total_flight, tot_out);
690 		/* now corrective action */
691 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
692 
693 			tot_out = 0;
694 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
695 				if ((chk->whoTo == lnet) &&
696 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
697 					tot_out += chk->book_size;
698 				}
699 			}
700 			if (lnet->flight_size != tot_out) {
701 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
702 				    (uint32_t) lnet, lnet->flight_size,
703 				    tot_out);
704 				lnet->flight_size = tot_out;
705 			}
706 		}
707 	}
708 	if (rep) {
709 		sctp_print_audit_report();
710 	}
711 }
712 
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716 
717 	sctp_audit_data[sctp_audit_indx][0] = ev;
718 	sctp_audit_data[sctp_audit_indx][1] = fd;
719 	sctp_audit_indx++;
720 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 		sctp_audit_indx = 0;
722 	}
723 }
724 
725 #endif
726 
727 /*
728  * a list of sizes based on typical mtu's, used only if next hop size not
729  * returned.
730  */
/*
 * NOTE: this table must stay sorted in ascending order and must contain
 * exactly NUMBER_OF_MTU_SIZES entries; find_next_best_mtu() relies on
 * both properties when it walks the table with that constant.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
751 
/*
 * Stop the association-wide and per-destination timers that should not
 * fire once the association has entered its shutdown sequence.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers. */
	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Per-destination timers (fast-retransmit and path-MTU). */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
771 
772 int
773 find_next_best_mtu(int totsz)
774 {
775 	int i, perfer;
776 
777 	/*
778 	 * if we are in here we must find the next best fit based on the
779 	 * size of the dg that failed to be sent.
780 	 */
781 	perfer = 0;
782 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
783 		if (totsz < sctp_mtu_sizes[i]) {
784 			perfer = i - 1;
785 			if (perfer < 0)
786 				perfer = 0;
787 			break;
788 		}
789 	}
790 	return (sctp_mtu_sizes[perfer]);
791 }
792 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Restart doling bytes out from the head of the refreshed store. */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill produces different output. */
	m->random_counter++;
}
811 
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/*
	 * Debug hook: a nonzero initial_sequence_debug makes the function
	 * hand out a simple incrementing sequence instead of random data.
	 */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Lock-free claim of 4 bytes from the endpoint's random store:
	 * reserve [store_at, store_at + 4) by advancing store_at with a
	 * compare-and-set, retrying if another thread raced us.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the tail so a full 4-byte read always fits. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
849 
850 uint32_t
851 sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
852 {
853 	u_long x, not_done;
854 	struct timeval now;
855 
856 	(void)SCTP_GETTIME_TIMEVAL(&now);
857 	not_done = 1;
858 	while (not_done) {
859 		x = sctp_select_initial_TSN(&inp->sctp_ep);
860 		if (x == 0) {
861 			/* we never use 0 */
862 			continue;
863 		}
864 		if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
865 			not_done = 0;
866 		}
867 	}
868 	return (x);
869 }
870 
871 int
872 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
873     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
874 {
875 	struct sctp_association *asoc;
876 
877 	/*
878 	 * Anything set to zero is taken care of by the allocation routine's
879 	 * bzero
880 	 */
881 
882 	/*
883 	 * Up front select what scoping to apply on addresses I tell my peer
884 	 * Not sure what to do with these right now, we will need to come up
885 	 * with a way to set them. We may need to pass them through from the
886 	 * caller in the sctp_aloc_assoc() function.
887 	 */
888 	int i;
889 
890 	asoc = &stcb->asoc;
891 	/* init all variables to a known value. */
892 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
893 	asoc->max_burst = m->sctp_ep.max_burst;
894 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
895 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
896 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
897 	/* JRS 5/21/07 - Init CMT PF variables */
898 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
899 	asoc->sctp_frag_point = m->sctp_frag_point;
900 #ifdef INET
901 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
902 #else
903 	asoc->default_tos = 0;
904 #endif
905 
906 #ifdef INET6
907 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
908 #else
909 	asoc->default_flowlabel = 0;
910 #endif
911 	asoc->sb_send_resv = 0;
912 	if (override_tag) {
913 		struct timeval now;
914 
915 		(void)SCTP_GETTIME_TIMEVAL(&now);
916 		if (sctp_is_in_timewait(override_tag)) {
917 			/*
918 			 * It must be in the time-wait hash, we put it there
919 			 * when we aloc one. If not the peer is playing
920 			 * games.
921 			 */
922 			asoc->my_vtag = override_tag;
923 		} else {
924 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
925 			panic("Huh is_in_timewait fails");
926 			return (ENOMEM);
927 		}
928 
929 	} else {
930 		asoc->my_vtag = sctp_select_a_tag(m, 1);
931 	}
932 	/* Get the nonce tags */
933 	asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
934 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
935 	asoc->vrf_id = vrf_id;
936 
937 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
938 		asoc->hb_is_disabled = 1;
939 	else
940 		asoc->hb_is_disabled = 0;
941 
942 #ifdef SCTP_ASOCLOG_OF_TSNS
943 	asoc->tsn_in_at = 0;
944 	asoc->tsn_out_at = 0;
945 	asoc->tsn_in_wrapped = 0;
946 	asoc->tsn_out_wrapped = 0;
947 	asoc->cumack_log_at = 0;
948 	asoc->cumack_log_atsnt = 0;
949 #endif
950 #ifdef SCTP_FS_SPEC_LOG
951 	asoc->fs_index = 0;
952 #endif
953 	asoc->refcnt = 0;
954 	asoc->assoc_up_sent = 0;
955 	asoc->assoc_id = asoc->my_vtag;
956 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
957 	    sctp_select_initial_TSN(&m->sctp_ep);
958 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
960 	asoc->peer_supports_pktdrop = 1;
961 
962 	asoc->sent_queue_retran_cnt = 0;
963 
964 	/* for CMT */
965 	asoc->last_net_data_came_from = NULL;
966 
967 	/* This will need to be adjusted */
968 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
969 	asoc->last_acked_seq = asoc->init_seq_number - 1;
970 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
971 	asoc->asconf_seq_in = asoc->last_acked_seq;
972 
973 	/* here we are different, we hold the next one we expect */
974 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
975 
976 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
977 	asoc->initial_rto = m->sctp_ep.initial_rto;
978 
979 	asoc->max_init_times = m->sctp_ep.max_init_times;
980 	asoc->max_send_times = m->sctp_ep.max_send_times;
981 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
982 	asoc->free_chunk_cnt = 0;
983 
984 	asoc->iam_blocking = 0;
985 	/* ECN Nonce initialization */
986 	asoc->context = m->sctp_context;
987 	asoc->def_send = m->def_send;
988 	asoc->ecn_nonce_allowed = 0;
989 	asoc->receiver_nonce_sum = 1;
990 	asoc->nonce_sum_expect_base = 1;
991 	asoc->nonce_sum_check = 1;
992 	asoc->nonce_resync_tsn = 0;
993 	asoc->nonce_wait_for_ecne = 0;
994 	asoc->nonce_wait_tsn = 0;
995 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
996 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
997 	asoc->pr_sctp_cnt = 0;
998 	asoc->total_output_queue_size = 0;
999 
1000 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1001 		struct in6pcb *inp6;
1002 
1003 		/* Its a V6 socket */
1004 		inp6 = (struct in6pcb *)m;
1005 		asoc->ipv6_addr_legal = 1;
1006 		/* Now look at the binding flag to see if V4 will be legal */
1007 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1008 			asoc->ipv4_addr_legal = 1;
1009 		} else {
1010 			/* V4 addresses are NOT legal on the association */
1011 			asoc->ipv4_addr_legal = 0;
1012 		}
1013 	} else {
1014 		/* Its a V4 socket, no - V6 */
1015 		asoc->ipv4_addr_legal = 1;
1016 		asoc->ipv6_addr_legal = 0;
1017 	}
1018 
1019 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1020 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1021 
1022 	asoc->smallest_mtu = m->sctp_frag_point;
1023 #ifdef SCTP_PRINT_FOR_B_AND_M
1024 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1025 	    asoc->smallest_mtu);
1026 #endif
1027 	asoc->minrto = m->sctp_ep.sctp_minrto;
1028 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1029 
1030 	asoc->locked_on_sending = NULL;
1031 	asoc->stream_locked_on = 0;
1032 	asoc->ecn_echo_cnt_onq = 0;
1033 	asoc->stream_locked = 0;
1034 
1035 	asoc->send_sack = 1;
1036 
1037 	LIST_INIT(&asoc->sctp_restricted_addrs);
1038 
1039 	TAILQ_INIT(&asoc->nets);
1040 	TAILQ_INIT(&asoc->pending_reply_queue);
1041 	TAILQ_INIT(&asoc->asconf_ack_sent);
1042 	/* Setup to fill the hb random cache at first HB */
1043 	asoc->hb_random_idx = 4;
1044 
1045 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1046 
1047 	/*
1048 	 * JRS - Pick the default congestion control module based on the
1049 	 * sysctl.
1050 	 */
1051 	switch (m->sctp_ep.sctp_default_cc_module) {
1052 		/* JRS - Standard TCP congestion control */
1053 	case SCTP_CC_RFC2581:
1054 		{
1055 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1056 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1064 			break;
1065 		}
1066 		/* JRS - High Speed TCP congestion control (Floyd) */
1067 	case SCTP_CC_HSTCP:
1068 		{
1069 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1070 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1078 			break;
1079 		}
1080 		/* JRS - HTCP congestion control */
1081 	case SCTP_CC_HTCP:
1082 		{
1083 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1084 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1092 			break;
1093 		}
1094 		/* JRS - By default, use RFC2581 */
1095 	default:
1096 		{
1097 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1098 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1100 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1101 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1102 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1103 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1104 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1105 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1106 			break;
1107 		}
1108 	}
1109 
1110 	/*
1111 	 * Now the stream parameters, here we allocate space for all streams
1112 	 * that we request by default.
1113 	 */
1114 	asoc->streamoutcnt = asoc->pre_open_streams =
1115 	    m->sctp_ep.pre_open_stream_count;
1116 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1117 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1118 	    SCTP_M_STRMO);
1119 	if (asoc->strmout == NULL) {
1120 		/* big trouble no memory */
1121 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1122 		return (ENOMEM);
1123 	}
1124 	for (i = 0; i < asoc->streamoutcnt; i++) {
1125 		/*
1126 		 * inbound side must be set to 0xffff, also NOTE when we get
1127 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1128 		 * count (streamoutcnt) but first check if we sent to any of
1129 		 * the upper streams that were dropped (if some were). Those
1130 		 * that were dropped must be notified to the upper layer as
1131 		 * failed to send.
1132 		 */
1133 		asoc->strmout[i].next_sequence_sent = 0x0;
1134 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1135 		asoc->strmout[i].stream_no = i;
1136 		asoc->strmout[i].last_msg_incomplete = 0;
1137 		asoc->strmout[i].next_spoke.tqe_next = 0;
1138 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1139 	}
1140 	/* Now the mapping array */
1141 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1142 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1143 	    SCTP_M_MAP);
1144 	if (asoc->mapping_array == NULL) {
1145 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147 		return (ENOMEM);
1148 	}
1149 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1150 	/* Now the init of the other outqueues */
1151 	TAILQ_INIT(&asoc->free_chunks);
1152 	TAILQ_INIT(&asoc->out_wheel);
1153 	TAILQ_INIT(&asoc->control_send_queue);
1154 	TAILQ_INIT(&asoc->asconf_send_queue);
1155 	TAILQ_INIT(&asoc->send_queue);
1156 	TAILQ_INIT(&asoc->sent_queue);
1157 	TAILQ_INIT(&asoc->reasmqueue);
1158 	TAILQ_INIT(&asoc->resetHead);
1159 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1160 	TAILQ_INIT(&asoc->asconf_queue);
1161 	/* authentication fields */
1162 	asoc->authinfo.random = NULL;
1163 	asoc->authinfo.assoc_key = NULL;
1164 	asoc->authinfo.assoc_keyid = 0;
1165 	asoc->authinfo.recv_key = NULL;
1166 	asoc->authinfo.recv_keyid = 0;
1167 	LIST_INIT(&asoc->shared_keys);
1168 	asoc->marked_retrans = 0;
1169 	asoc->timoinit = 0;
1170 	asoc->timodata = 0;
1171 	asoc->timosack = 0;
1172 	asoc->timoshutdown = 0;
1173 	asoc->timoheartbeat = 0;
1174 	asoc->timocookie = 0;
1175 	asoc->timoshutdownack = 0;
1176 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1177 	asoc->discontinuity_time = asoc->start_time;
1178 	/*
1179 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1180 	 * freed later whe the association is freed.
1181 	 */
1182 	return (0);
1183 }
1184 
1185 int
1186 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1187 {
1188 	/* mapping array needs to grow */
1189 	uint8_t *new_array;
1190 	uint32_t new_size;
1191 
1192 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1193 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1194 	if (new_array == NULL) {
1195 		/* can't get more, forget it */
1196 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1197 		    new_size);
1198 		return (-1);
1199 	}
1200 	memset(new_array, 0, new_size);
1201 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1202 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1203 	asoc->mapping_array = new_array;
1204 	asoc->mapping_array_size = new_size;
1205 	return (0);
1206 }
1207 
1208 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Execute one iterator request: walk the endpoint (inp) list starting
 * at it->inp, and for each endpoint whose pcb flags/features match the
 * iterator's selection, run the per-endpoint callback (function_inp),
 * the per-association callback (function_assoc) on every association
 * in the requested state, and the end-of-endpoint callback
 * (function_inp_end).  When the walk finishes, function_atend is
 * invoked and the iterator structure itself is freed.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* the iterator owns itself: free it on completion */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* downgrade: write lock was only needed for the flag check above */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* callback asked to skip, or no associations to visit */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Bump the asoc refcnt so the stcb cannot be freed
			 * while every lock is briefly dropped; likewise
			 * take an inp ref across the unlock/relock window.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): empty write-lock/unlock pair — presumably a
	 * barrier to let any in-flight writer on this inp drain before
	 * we advance; confirm the intent before removing.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1335 
/*
 * Drain the global iterator work queue: pop each queued iterator and
 * run it via sctp_iterator_work().  The WQ lock is dropped around each
 * work item (sctp_iterator_work frees the item), so new requests may
 * be queued concurrently; the re-check after the loop catches any that
 * arrived while we were running.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it = NULL;

	/* This function is called with the WQ lock in place */

	SCTP_BASE_INFO(iterator_running) = 1;
again:
	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
	while (it) {
		/* now lets work on this one */
		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
		/* drop the WQ lock while the (possibly long) walk runs */
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		sctp_iterator_work(it);
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
	}
	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
		goto again;
	}
	SCTP_BASE_INFO(iterator_running) = 0;
	return;
}
1361 
1362 #endif
1363 
1364 
1365 static void
1366 sctp_handle_addr_wq(void)
1367 {
1368 	/* deal with the ADDR wq from the rtsock calls */
1369 	struct sctp_laddr *wi;
1370 	struct sctp_asconf_iterator *asc;
1371 
1372 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1373 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1374 	if (asc == NULL) {
1375 		/* Try later, no memory */
1376 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1377 		    (struct sctp_inpcb *)NULL,
1378 		    (struct sctp_tcb *)NULL,
1379 		    (struct sctp_nets *)NULL);
1380 		return;
1381 	}
1382 	LIST_INIT(&asc->list_of_work);
1383 	asc->cnt = 0;
1384 	SCTP_IPI_ITERATOR_WQ_LOCK();
1385 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1386 	while (wi != NULL) {
1387 		LIST_REMOVE(wi, sctp_nxt_addr);
1388 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1389 		asc->cnt++;
1390 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1391 	}
1392 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1393 	if (asc->cnt == 0) {
1394 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1395 	} else {
1396 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1397 		    sctp_asconf_iterator_stcb,
1398 		    NULL,	/* No ep end for boundall */
1399 		    SCTP_PCB_FLAGS_BOUNDALL,
1400 		    SCTP_PCB_ANY_FEATURES,
1401 		    SCTP_ASOC_ANY_STATE,
1402 		    (void *)asc, 0,
1403 		    sctp_asconf_iterator_end, NULL, 0);
1404 	}
1405 }
1406 
/*
 * NOTE(review): file-scope scratch variables used only by the
 * SCTP_TIMER_TYPE_SEND case in sctp_timeout_handler().  They have
 * external linkage and no lock protecting them, so concurrent timer
 * expirations can clobber each other's values; cur_oerr is written
 * but never read in this file.  Consider making them locals of the
 * handler (or at least static).
 */
int retcode = 0;
int cur_oerr = 0;
1409 
/*
 * Central expiry handler for every SCTP timer type.  't' points at a
 * struct sctp_timer; the endpoint (inp), association (stcb) and
 * destination (net) it was armed for are recovered from it, validated
 * (stale timer, invalid type, socket gone, association being freed),
 * reference-counted for the duration, and then the per-type action is
 * dispatched by the switch below.  All exits funnel through the
 * get_out / out_decr / out_no_decr labels, which release the TCB lock
 * and drop the inp reference as appropriate.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	/*
	 * stopped_from is used as a progress breadcrumb: each distinct
	 * 0xa00x value below records how far validation got before an
	 * early return, for post-mortem inspection.
	 */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		/* ep field was overloaded to carry the iterator pointer */
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the teardown-related timer
		 * types are still allowed to run.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold the association while we validate its state */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while we were getting here */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the TCB lock, then swap the temporary refcnt for
		 * the lock; bail if the association is being torn down
		 * (unless this is the ASOCKILL timer itself).
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer for a destination */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * NOTE(review): cur_oerr and retcode are file-scope
		 * globals shared by all timer instances; see their
		 * definitions above.
		 */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			/* re-arm for the next heartbeat round */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret material */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* guard expired: give up on graceful shutdown and abort */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * On platforms that need the socket lock for freeing,
		 * take a temporary asoc ref so stcb survives the
		 * TCB-unlock / socket-lock / TCB-relock dance.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* common exit: release TCB lock (if held) and inp reference */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1875 
1876 void
1877 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1878     struct sctp_nets *net)
1879 {
1880 	int to_ticks;
1881 	struct sctp_timer *tmr;
1882 
1883 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1884 		return;
1885 
1886 	to_ticks = 0;
1887 
1888 	tmr = NULL;
1889 	if (stcb) {
1890 		SCTP_TCB_LOCK_ASSERT(stcb);
1891 	}
1892 	switch (t_type) {
1893 	case SCTP_TIMER_TYPE_ZERO_COPY:
1894 		tmr = &inp->sctp_ep.zero_copy_timer;
1895 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1896 		break;
1897 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1898 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1899 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1900 		break;
1901 	case SCTP_TIMER_TYPE_ADDR_WQ:
1902 		/* Only 1 tick away :-) */
1903 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1904 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1905 		break;
1906 	case SCTP_TIMER_TYPE_ITERATOR:
1907 		{
1908 			struct sctp_iterator *it;
1909 
1910 			it = (struct sctp_iterator *)inp;
1911 			tmr = &it->tmr;
1912 			to_ticks = SCTP_ITERATOR_TICKS;
1913 		}
1914 		break;
1915 	case SCTP_TIMER_TYPE_SEND:
1916 		/* Here we use the RTO timer */
1917 		{
1918 			int rto_val;
1919 
1920 			if ((stcb == NULL) || (net == NULL)) {
1921 				return;
1922 			}
1923 			tmr = &net->rxt_timer;
1924 			if (net->RTO == 0) {
1925 				rto_val = stcb->asoc.initial_rto;
1926 			} else {
1927 				rto_val = net->RTO;
1928 			}
1929 			to_ticks = MSEC_TO_TICKS(rto_val);
1930 		}
1931 		break;
1932 	case SCTP_TIMER_TYPE_INIT:
1933 		/*
1934 		 * Here we use the INIT timer default usually about 1
1935 		 * minute.
1936 		 */
1937 		if ((stcb == NULL) || (net == NULL)) {
1938 			return;
1939 		}
1940 		tmr = &net->rxt_timer;
1941 		if (net->RTO == 0) {
1942 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1943 		} else {
1944 			to_ticks = MSEC_TO_TICKS(net->RTO);
1945 		}
1946 		break;
1947 	case SCTP_TIMER_TYPE_RECV:
1948 		/*
1949 		 * Here we use the Delayed-Ack timer value from the inp
1950 		 * ususually about 200ms.
1951 		 */
1952 		if (stcb == NULL) {
1953 			return;
1954 		}
1955 		tmr = &stcb->asoc.dack_timer;
1956 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1957 		break;
1958 	case SCTP_TIMER_TYPE_SHUTDOWN:
1959 		/* Here we use the RTO of the destination. */
1960 		if ((stcb == NULL) || (net == NULL)) {
1961 			return;
1962 		}
1963 		if (net->RTO == 0) {
1964 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1965 		} else {
1966 			to_ticks = MSEC_TO_TICKS(net->RTO);
1967 		}
1968 		tmr = &net->rxt_timer;
1969 		break;
1970 	case SCTP_TIMER_TYPE_HEARTBEAT:
1971 		/*
1972 		 * the net is used here so that we can add in the RTO. Even
1973 		 * though we use a different timer. We also add the HB timer
1974 		 * PLUS a random jitter.
1975 		 */
1976 		if ((inp == NULL) || (stcb == NULL)) {
1977 			return;
1978 		} else {
1979 			uint32_t rndval;
1980 			uint8_t this_random;
1981 			int cnt_of_unconf = 0;
1982 			struct sctp_nets *lnet;
1983 
1984 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1985 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1986 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1987 					cnt_of_unconf++;
1988 				}
1989 			}
1990 			if (cnt_of_unconf) {
1991 				net = lnet = NULL;
1992 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1993 			}
1994 			if (stcb->asoc.hb_random_idx > 3) {
1995 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1996 				memcpy(stcb->asoc.hb_random_values, &rndval,
1997 				    sizeof(stcb->asoc.hb_random_values));
1998 				stcb->asoc.hb_random_idx = 0;
1999 			}
2000 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2001 			stcb->asoc.hb_random_idx++;
2002 			stcb->asoc.hb_ect_randombit = 0;
2003 			/*
2004 			 * this_random will be 0 - 256 ms RTO is in ms.
2005 			 */
2006 			if ((stcb->asoc.hb_is_disabled) &&
2007 			    (cnt_of_unconf == 0)) {
2008 				return;
2009 			}
2010 			if (net) {
2011 				int delay;
2012 
2013 				delay = stcb->asoc.heart_beat_delay;
2014 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2015 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2016 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2017 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2018 						delay = 0;
2019 					}
2020 				}
2021 				if (net->RTO == 0) {
2022 					/* Never been checked */
2023 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2024 				} else {
2025 					/* set rto_val to the ms */
2026 					to_ticks = delay + net->RTO + this_random;
2027 				}
2028 			} else {
2029 				if (cnt_of_unconf) {
2030 					to_ticks = this_random + stcb->asoc.initial_rto;
2031 				} else {
2032 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2033 				}
2034 			}
2035 			/*
2036 			 * Now we must convert the to_ticks that are now in
2037 			 * ms to ticks.
2038 			 */
2039 			to_ticks = MSEC_TO_TICKS(to_ticks);
2040 			tmr = &stcb->asoc.hb_timer;
2041 		}
2042 		break;
2043 	case SCTP_TIMER_TYPE_COOKIE:
2044 		/*
2045 		 * Here we can use the RTO timer from the network since one
2046 		 * RTT was compelete. If a retran happened then we will be
2047 		 * using the RTO initial value.
2048 		 */
2049 		if ((stcb == NULL) || (net == NULL)) {
2050 			return;
2051 		}
2052 		if (net->RTO == 0) {
2053 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2054 		} else {
2055 			to_ticks = MSEC_TO_TICKS(net->RTO);
2056 		}
2057 		tmr = &net->rxt_timer;
2058 		break;
2059 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2060 		/*
2061 		 * nothing needed but the endpoint here ususually about 60
2062 		 * minutes.
2063 		 */
2064 		if (inp == NULL) {
2065 			return;
2066 		}
2067 		tmr = &inp->sctp_ep.signature_change;
2068 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2069 		break;
2070 	case SCTP_TIMER_TYPE_ASOCKILL:
2071 		if (stcb == NULL) {
2072 			return;
2073 		}
2074 		tmr = &stcb->asoc.strreset_timer;
2075 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2076 		break;
2077 	case SCTP_TIMER_TYPE_INPKILL:
2078 		/*
2079 		 * The inp is setup to die. We re-use the signature_chage
2080 		 * timer since that has stopped and we are in the GONE
2081 		 * state.
2082 		 */
2083 		if (inp == NULL) {
2084 			return;
2085 		}
2086 		tmr = &inp->sctp_ep.signature_change;
2087 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2088 		break;
2089 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2090 		/*
2091 		 * Here we use the value found in the EP for PMTU ususually
2092 		 * about 10 minutes.
2093 		 */
2094 		if ((stcb == NULL) || (inp == NULL)) {
2095 			return;
2096 		}
2097 		if (net == NULL) {
2098 			return;
2099 		}
2100 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2101 		tmr = &net->pmtu_timer;
2102 		break;
2103 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2104 		/* Here we use the RTO of the destination */
2105 		if ((stcb == NULL) || (net == NULL)) {
2106 			return;
2107 		}
2108 		if (net->RTO == 0) {
2109 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2110 		} else {
2111 			to_ticks = MSEC_TO_TICKS(net->RTO);
2112 		}
2113 		tmr = &net->rxt_timer;
2114 		break;
2115 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2116 		/*
2117 		 * Here we use the endpoints shutdown guard timer usually
2118 		 * about 3 minutes.
2119 		 */
2120 		if ((inp == NULL) || (stcb == NULL)) {
2121 			return;
2122 		}
2123 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2124 		tmr = &stcb->asoc.shut_guard_timer;
2125 		break;
2126 	case SCTP_TIMER_TYPE_STRRESET:
2127 		/*
2128 		 * Here the timer comes from the stcb but its value is from
2129 		 * the net's RTO.
2130 		 */
2131 		if ((stcb == NULL) || (net == NULL)) {
2132 			return;
2133 		}
2134 		if (net->RTO == 0) {
2135 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2136 		} else {
2137 			to_ticks = MSEC_TO_TICKS(net->RTO);
2138 		}
2139 		tmr = &stcb->asoc.strreset_timer;
2140 		break;
2141 
2142 	case SCTP_TIMER_TYPE_EARLYFR:
2143 		{
2144 			unsigned int msec;
2145 
2146 			if ((stcb == NULL) || (net == NULL)) {
2147 				return;
2148 			}
2149 			if (net->flight_size > net->cwnd) {
2150 				/* no need to start */
2151 				return;
2152 			}
2153 			SCTP_STAT_INCR(sctps_earlyfrstart);
2154 			if (net->lastsa == 0) {
2155 				/* Hmm no rtt estimate yet? */
2156 				msec = stcb->asoc.initial_rto >> 2;
2157 			} else {
2158 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2159 			}
2160 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2161 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2162 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2163 					msec = SCTP_MINFR_MSEC_FLOOR;
2164 				}
2165 			}
2166 			to_ticks = MSEC_TO_TICKS(msec);
2167 			tmr = &net->fr_timer;
2168 		}
2169 		break;
2170 	case SCTP_TIMER_TYPE_ASCONF:
2171 		/*
2172 		 * Here the timer comes from the stcb but its value is from
2173 		 * the net's RTO.
2174 		 */
2175 		if ((stcb == NULL) || (net == NULL)) {
2176 			return;
2177 		}
2178 		if (net->RTO == 0) {
2179 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2180 		} else {
2181 			to_ticks = MSEC_TO_TICKS(net->RTO);
2182 		}
2183 		tmr = &stcb->asoc.asconf_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2186 		if ((stcb == NULL) || (net != NULL)) {
2187 			return;
2188 		}
2189 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2190 		tmr = &stcb->asoc.delete_prim_timer;
2191 		break;
2192 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2193 		if (stcb == NULL) {
2194 			return;
2195 		}
2196 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2197 			/*
2198 			 * Really an error since stcb is NOT set to
2199 			 * autoclose
2200 			 */
2201 			return;
2202 		}
2203 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2204 		tmr = &stcb->asoc.autoclose_timer;
2205 		break;
2206 	default:
2207 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2208 		    __FUNCTION__, t_type);
2209 		return;
2210 		break;
2211 	};
2212 	if ((to_ticks <= 0) || (tmr == NULL)) {
2213 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2214 		    __FUNCTION__, t_type, to_ticks, tmr);
2215 		return;
2216 	}
2217 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2218 		/*
2219 		 * we do NOT allow you to have it already running. if it is
2220 		 * we leave the current one up unchanged
2221 		 */
2222 		return;
2223 	}
2224 	/* At this point we can proceed */
2225 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2226 		stcb->asoc.num_send_timers_up++;
2227 	}
2228 	tmr->stopped_from = 0;
2229 	tmr->type = t_type;
2230 	tmr->ep = (void *)inp;
2231 	tmr->tcb = (void *)stcb;
2232 	tmr->net = (void *)net;
2233 	tmr->self = (void *)tmr;
2234 	tmr->ticks = sctp_get_tick_count();
2235 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2236 	return;
2237 }
2238 
/*
 * Stop the timer of type 't_type' that is backed by the given endpoint,
 * association, and/or destination net.  Each case below maps the timer
 * type onto the sctp_timer structure holding it; if a required argument
 * is missing the call is silently a no-op.  'from' is recorded in the
 * timer for debugging, identifying which caller stopped it.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* every timer type except the address work-queue requires an inp */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/*
			 * For the iterator timer the 'inp' argument
			 * actually carries the iterator pointer.
			 */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* keep the association's count of pending SEND timers consistent */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2410 
2411 #ifdef SCTP_USE_ADLER32
2412 static uint32_t
2413 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2414 {
2415 	uint32_t s1 = adler & 0xffff;
2416 	uint32_t s2 = (adler >> 16) & 0xffff;
2417 	int n;
2418 
2419 	for (n = 0; n < len; n++, buf++) {
2420 		/* s1 = (s1 + buf[n]) % BASE */
2421 		/* first we add */
2422 		s1 = (s1 + *buf);
2423 		/*
2424 		 * now if we need to, we do a mod by subtracting. It seems a
2425 		 * bit faster since I really will only ever do one subtract
2426 		 * at the MOST, since buf[n] is a max of 255.
2427 		 */
2428 		if (s1 >= SCTP_ADLER32_BASE) {
2429 			s1 -= SCTP_ADLER32_BASE;
2430 		}
2431 		/* s2 = (s2 + s1) % BASE */
2432 		/* first we add */
2433 		s2 = (s2 + s1);
2434 		/*
2435 		 * again, it is more efficent (it seems) to subtract since
2436 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2437 		 * worse case. This would then be (2 * BASE) - 2, which will
2438 		 * still only do one subtract. On Intel this is much better
2439 		 * to do this way and avoid the divide. Have not -pg'd on
2440 		 * sparc.
2441 		 */
2442 		if (s2 >= SCTP_ADLER32_BASE) {
2443 			s2 -= SCTP_ADLER32_BASE;
2444 		}
2445 	}
2446 	/* Return the adler32 of the bytes buf[0..len-1] */
2447 	return ((s2 << 16) + s1);
2448 }
2449 
2450 #endif
2451 
2452 
2453 uint32_t
2454 sctp_calculate_len(struct mbuf *m)
2455 {
2456 	uint32_t tlen = 0;
2457 	struct mbuf *at;
2458 
2459 	at = m;
2460 	while (at) {
2461 		tlen += SCTP_BUF_LEN(at);
2462 		at = SCTP_BUF_NEXT(at);
2463 	}
2464 	return (tlen);
2465 }
2466 
2467 #if defined(SCTP_WITH_NO_CSUM)
2468 
2469 uint32_t
2470 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2471 {
2472 	/*
2473 	 * given a mbuf chain with a packetheader offset by 'offset'
2474 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2475 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2476 	 * has a side bonus as it will calculate the total length of the
2477 	 * mbuf chain. Note: if offset is greater than the total mbuf
2478 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2479 	 */
2480 	if (pktlen == NULL)
2481 		return (0);
2482 	*pktlen = sctp_calculate_len(m);
2483 	return (0);
2484 }
2485 
2486 #elif defined(SCTP_USE_INCHKSUM)
2487 
2488 #include <machine/in_cksum.h>
2489 
2490 uint32_t
2491 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2492 {
2493 	/*
2494 	 * given a mbuf chain with a packetheader offset by 'offset'
2495 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2496 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2497 	 * has a side bonus as it will calculate the total length of the
2498 	 * mbuf chain. Note: if offset is greater than the total mbuf
2499 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2500 	 */
2501 	int32_t tlen = 0;
2502 	struct mbuf *at;
2503 	uint32_t the_sum, retsum;
2504 
2505 	at = m;
2506 	while (at) {
2507 		tlen += SCTP_BUF_LEN(at);
2508 		at = SCTP_BUF_NEXT(at);
2509 	}
2510 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2511 	if (pktlen != NULL)
2512 		*pktlen = (tlen - offset);
2513 	retsum = htons(the_sum);
2514 	return (the_sum);
2515 }
2516 
2517 #else
2518 
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

#ifdef SCTP_USE_ADLER32
	/* Adler-32 starts from an initial value of 1 */
	uint32_t base = 1L;

#else
	/* CRC-32c starts from an all-ones seed */
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* fold each mbuf's payload into the running checksum */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/* once past the first contributing mbuf, clear the offset */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2589 
2590 
2591 #endif
2592 
2593 void
2594 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2595     struct sctp_association *asoc, uint32_t mtu)
2596 {
2597 	/*
2598 	 * Reset the P-MTU size on this association, this involves changing
2599 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2600 	 * allow the DF flag to be cleared.
2601 	 */
2602 	struct sctp_tmit_chunk *chk;
2603 	unsigned int eff_mtu, ovh;
2604 
2605 #ifdef SCTP_PRINT_FOR_B_AND_M
2606 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2607 	    inp, asoc, mtu);
2608 #endif
2609 	asoc->smallest_mtu = mtu;
2610 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2611 		ovh = SCTP_MIN_OVERHEAD;
2612 	} else {
2613 		ovh = SCTP_MIN_V4_OVERHEAD;
2614 	}
2615 	eff_mtu = mtu - ovh;
2616 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2617 
2618 		if (chk->send_size > eff_mtu) {
2619 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2620 		}
2621 	}
2622 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2623 		if (chk->send_size > eff_mtu) {
2624 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2625 		}
2626 	}
2627 }
2628 
2629 
2630 /*
2631  * given an association and starting time of the current RTT period return
2632  * RTO in number of msecs net should point to the current network
2633  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;	/* measured RTT for this period, in ms */
	int o_calctime;		/* unmodified copy of the measured RTT */
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		/* caller's timeval may be misaligned; work on a copy */
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/*
			 * NOTE(review): unreachable - the <, > and ==
			 * cases above are exhaustive.
			 */
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR, in the scaled integer representation */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2759 
2760 /*
2761  * return a pointer to a contiguous piece of data from the given mbuf chain
2762  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2763  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2764  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2765  */
2766 caddr_t
2767 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2768 {
2769 	uint32_t count;
2770 	uint8_t *ptr;
2771 
2772 	ptr = in_ptr;
2773 	if ((off < 0) || (len <= 0))
2774 		return (NULL);
2775 
2776 	/* find the desired start location */
2777 	while ((m != NULL) && (off > 0)) {
2778 		if (off < SCTP_BUF_LEN(m))
2779 			break;
2780 		off -= SCTP_BUF_LEN(m);
2781 		m = SCTP_BUF_NEXT(m);
2782 	}
2783 	if (m == NULL)
2784 		return (NULL);
2785 
2786 	/* is the current mbuf large enough (eg. contiguous)? */
2787 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2788 		return (mtod(m, caddr_t)+off);
2789 	} else {
2790 		/* else, it spans more than one mbuf, so save a temp copy... */
2791 		while ((m != NULL) && (len > 0)) {
2792 			count = min(SCTP_BUF_LEN(m) - off, len);
2793 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2794 			len -= count;
2795 			ptr += count;
2796 			off = 0;
2797 			m = SCTP_BUF_NEXT(m);
2798 		}
2799 		if ((m == NULL) && (len > 0))
2800 			return (NULL);
2801 		else
2802 			return ((caddr_t)in_ptr);
2803 	}
2804 }
2805 
2806 
2807 
/*
 * Typed wrapper around sctp_m_getptr(): fetch up to 'pull_limit' bytes
 * at 'offset' from mbuf chain 'm' and return them as a parameter
 * header pointer (either directly into the mbuf, or copied into the
 * caller's 'pull' buffer when the bytes span mbufs).
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset,
	    pull_limit, (uint8_t *) pull));
}
2818 
2819 
2820 int
2821 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2822 {
2823 	/*
2824 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2825 	 * padlen is > 3 this routine will fail.
2826 	 */
2827 	uint8_t *dp;
2828 	int i;
2829 
2830 	if (padlen > 3) {
2831 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2832 		return (ENOBUFS);
2833 	}
2834 	if (padlen <= M_TRAILINGSPACE(m)) {
2835 		/*
2836 		 * The easy way. We hope the majority of the time we hit
2837 		 * here :)
2838 		 */
2839 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2840 		SCTP_BUF_LEN(m) += padlen;
2841 	} else {
2842 		/* Hard way we must grow the mbuf */
2843 		struct mbuf *tmp;
2844 
2845 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2846 		if (tmp == NULL) {
2847 			/* Out of space GAK! we are in big trouble. */
2848 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2849 			return (ENOSPC);
2850 		}
2851 		/* setup and insert in middle */
2852 		SCTP_BUF_LEN(tmp) = padlen;
2853 		SCTP_BUF_NEXT(tmp) = NULL;
2854 		SCTP_BUF_NEXT(m) = tmp;
2855 		dp = mtod(tmp, uint8_t *);
2856 	}
2857 	/* zero out the pad */
2858 	for (i = 0; i < padlen; i++) {
2859 		*dp = 0;
2860 		dp++;
2861 	}
2862 	return (0);
2863 }
2864 
2865 int
2866 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2867 {
2868 	/* find the last mbuf in chain and pad it */
2869 	struct mbuf *m_at;
2870 
2871 	m_at = m;
2872 	if (last_mbuf) {
2873 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2874 	} else {
2875 		while (m_at) {
2876 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2877 				return (sctp_add_pad_tombuf(m_at, padval));
2878 			}
2879 			m_at = SCTP_BUF_NEXT(m_at);
2880 		}
2881 	}
2882 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2883 	return (EFAULT);
2884 }
2885 
/* Count of abort-path wakeups done by sctp_notify_assoc_change(). */
int sctp_asoc_change_wake = 0;
2887 
/*
 * Queue an SCTP_ASSOC_CHANGE notification ('event', with 'error') to
 * the socket receive queue.  For TCP-model (and connected UDP-model)
 * sockets an abort-type event additionally sets so_error and wakes any
 * sleepers.  The notification itself is only built when the user has
 * enabled SCTP_PCB_FLAGS_RECVASSOCEVNT.  'so_locked' tells the Apple /
 * lock-testing builds whether the socket lock is already held.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* abort before the handshake completed */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take the socket lock if the caller does not already
		 * hold it; a refcount keeps the tcb alive while the tcb
		 * lock is dropped to respect lock ordering.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket closed while we slept */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* build the sctp_assoc_change notification in the mbuf */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* same lock dance as above for the abort wakeup */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3017 
3018 static void
3019 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3020     struct sockaddr *sa, uint32_t error)
3021 {
3022 	struct mbuf *m_notify;
3023 	struct sctp_paddr_change *spc;
3024 	struct sctp_queued_to_read *control;
3025 
3026 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
3027 		/* event not enabled */
3028 		return;
3029 
3030 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
3031 	if (m_notify == NULL)
3032 		return;
3033 	SCTP_BUF_LEN(m_notify) = 0;
3034 	spc = mtod(m_notify, struct sctp_paddr_change *);
3035 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3036 	spc->spc_flags = 0;
3037 	spc->spc_length = sizeof(struct sctp_paddr_change);
3038 	switch (sa->sa_family) {
3039 	case AF_INET:
3040 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3041 		break;
3042 #ifdef INET6
3043 	case AF_INET6:
3044 		{
3045 			struct sockaddr_in6 *sin6;
3046 
3047 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3048 
3049 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3050 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3051 				if (sin6->sin6_scope_id == 0) {
3052 					/* recover scope_id for user */
3053 					(void)sa6_recoverscope(sin6);
3054 				} else {
3055 					/* clear embedded scope_id for user */
3056 					in6_clearscope(&sin6->sin6_addr);
3057 				}
3058 			}
3059 			break;
3060 		}
3061 #endif
3062 	default:
3063 		/* TSNH */
3064 		break;
3065 	}
3066 	spc->spc_state = state;
3067 	spc->spc_error = error;
3068 	spc->spc_assoc_id = sctp_get_associd(stcb);
3069 
3070 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3071 	SCTP_BUF_NEXT(m_notify) = NULL;
3072 
3073 	/* append to socket */
3074 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3075 	    0, 0, 0, 0, 0, 0,
3076 	    m_notify);
3077 	if (control == NULL) {
3078 		/* no memory */
3079 		sctp_m_freem(m_notify);
3080 		return;
3081 	}
3082 	control->length = SCTP_BUF_LEN(m_notify);
3083 	control->spec_flags = M_NOTIFICATION;
3084 	/* not that we need this */
3085 	control->tail_mbuf = m_notify;
3086 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3087 	    control,
3088 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3089 }
3090 
3091 
3092 static void
3093 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
3094     struct sctp_tmit_chunk *chk, int so_locked
3095 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3096     SCTP_UNUSED
3097 #endif
3098 )
3099 {
3100 	struct mbuf *m_notify, *tt;
3101 	struct sctp_send_failed *ssf;
3102 	struct sctp_queued_to_read *control;
3103 	int length;
3104 
3105 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
3106 		/* event not enabled */
3107 		return;
3108 
3109 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3110 	if (m_notify == NULL)
3111 		/* no space left */
3112 		return;
3113 	length = sizeof(struct sctp_send_failed) + chk->send_size;
3114 	length -= sizeof(struct sctp_data_chunk);
3115 	SCTP_BUF_LEN(m_notify) = 0;
3116 	ssf = mtod(m_notify, struct sctp_send_failed *);
3117 	ssf->ssf_type = SCTP_SEND_FAILED;
3118 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3119 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3120 	else
3121 		ssf->ssf_flags = SCTP_DATA_SENT;
3122 	ssf->ssf_length = length;
3123 	ssf->ssf_error = error;
3124 	/* not exactly what the user sent in, but should be close :) */
3125 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3126 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3127 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
3128 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3129 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3130 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
3131 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3132 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3133 
3134 	/* Take off the chunk header */
3135 	m_adj(chk->data, sizeof(struct sctp_data_chunk));
3136 
3137 	/* trim out any 0 len mbufs */
3138 	while (SCTP_BUF_LEN(chk->data) == 0) {
3139 		tt = chk->data;
3140 		chk->data = SCTP_BUF_NEXT(tt);
3141 		SCTP_BUF_NEXT(tt) = NULL;
3142 		sctp_m_freem(tt);
3143 	}
3144 
3145 	SCTP_BUF_NEXT(m_notify) = chk->data;
3146 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3147 
3148 	/* Steal off the mbuf */
3149 	chk->data = NULL;
3150 	/*
3151 	 * For this case, we check the actual socket buffer, since the assoc
3152 	 * is going away we don't want to overfill the socket buffer for a
3153 	 * non-reader
3154 	 */
3155 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3156 		sctp_m_freem(m_notify);
3157 		return;
3158 	}
3159 	/* append to socket */
3160 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3161 	    0, 0, 0, 0, 0, 0,
3162 	    m_notify);
3163 	if (control == NULL) {
3164 		/* no memory */
3165 		sctp_m_freem(m_notify);
3166 		return;
3167 	}
3168 	control->spec_flags = M_NOTIFICATION;
3169 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3170 	    control,
3171 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3172 }
3173 
3174 
3175 static void
3176 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3177     struct sctp_stream_queue_pending *sp, int so_locked
3178 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3179     SCTP_UNUSED
3180 #endif
3181 )
3182 {
3183 	struct mbuf *m_notify;
3184 	struct sctp_send_failed *ssf;
3185 	struct sctp_queued_to_read *control;
3186 	int length;
3187 
3188 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
3189 		/* event not enabled */
3190 		return;
3191 
3192 	length = sizeof(struct sctp_send_failed) + sp->length;
3193 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3194 	if (m_notify == NULL)
3195 		/* no space left */
3196 		return;
3197 	SCTP_BUF_LEN(m_notify) = 0;
3198 	ssf = mtod(m_notify, struct sctp_send_failed *);
3199 	ssf->ssf_type = SCTP_SEND_FAILED;
3200 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3201 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3202 	else
3203 		ssf->ssf_flags = SCTP_DATA_SENT;
3204 	ssf->ssf_length = length;
3205 	ssf->ssf_error = error;
3206 	/* not exactly what the user sent in, but should be close :) */
3207 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3208 	ssf->ssf_info.sinfo_stream = sp->stream;
3209 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3210 	if (sp->some_taken) {
3211 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3212 	} else {
3213 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3214 	}
3215 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3216 	ssf->ssf_info.sinfo_context = sp->context;
3217 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3218 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3219 	SCTP_BUF_NEXT(m_notify) = sp->data;
3220 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3221 
3222 	/* Steal off the mbuf */
3223 	sp->data = NULL;
3224 	/*
3225 	 * For this case, we check the actual socket buffer, since the assoc
3226 	 * is going away we don't want to overfill the socket buffer for a
3227 	 * non-reader
3228 	 */
3229 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3230 		sctp_m_freem(m_notify);
3231 		return;
3232 	}
3233 	/* append to socket */
3234 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3235 	    0, 0, 0, 0, 0, 0,
3236 	    m_notify);
3237 	if (control == NULL) {
3238 		/* no memory */
3239 		sctp_m_freem(m_notify);
3240 		return;
3241 	}
3242 	control->spec_flags = M_NOTIFICATION;
3243 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3244 	    control,
3245 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3246 }
3247 
3248 
3249 
3250 static void
3251 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3252     uint32_t error)
3253 {
3254 	struct mbuf *m_notify;
3255 	struct sctp_adaptation_event *sai;
3256 	struct sctp_queued_to_read *control;
3257 
3258 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3259 		/* event not enabled */
3260 		return;
3261 
3262 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3263 	if (m_notify == NULL)
3264 		/* no space left */
3265 		return;
3266 	SCTP_BUF_LEN(m_notify) = 0;
3267 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3268 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3269 	sai->sai_flags = 0;
3270 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3271 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3272 	sai->sai_assoc_id = sctp_get_associd(stcb);
3273 
3274 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3275 	SCTP_BUF_NEXT(m_notify) = NULL;
3276 
3277 	/* append to socket */
3278 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3279 	    0, 0, 0, 0, 0, 0,
3280 	    m_notify);
3281 	if (control == NULL) {
3282 		/* no memory */
3283 		sctp_m_freem(m_notify);
3284 		return;
3285 	}
3286 	control->length = SCTP_BUF_LEN(m_notify);
3287 	control->spec_flags = M_NOTIFICATION;
3288 	/* not that we need this */
3289 	control->tail_mbuf = m_notify;
3290 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3291 	    control,
3292 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3293 }
3294 
3295 /* This always must be called with the read-queue LOCKED in the INP */
3296 void
3297 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3298     int nolock, uint32_t val)
3299 {
3300 	struct mbuf *m_notify;
3301 	struct sctp_pdapi_event *pdapi;
3302 	struct sctp_queued_to_read *control;
3303 	struct sockbuf *sb;
3304 
3305 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3306 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3307 		/* event not enabled */
3308 		return;
3309 
3310 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3311 	if (m_notify == NULL)
3312 		/* no space left */
3313 		return;
3314 	SCTP_BUF_LEN(m_notify) = 0;
3315 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3316 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3317 	pdapi->pdapi_flags = 0;
3318 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3319 	pdapi->pdapi_indication = error;
3320 	pdapi->pdapi_stream = (val >> 16);
3321 	pdapi->pdapi_seq = (val & 0x0000ffff);
3322 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3323 
3324 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3325 	SCTP_BUF_NEXT(m_notify) = NULL;
3326 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3327 	    0, 0, 0, 0, 0, 0,
3328 	    m_notify);
3329 	if (control == NULL) {
3330 		/* no memory */
3331 		sctp_m_freem(m_notify);
3332 		return;
3333 	}
3334 	control->spec_flags = M_NOTIFICATION;
3335 	control->length = SCTP_BUF_LEN(m_notify);
3336 	/* not that we need this */
3337 	control->tail_mbuf = m_notify;
3338 	control->held_length = 0;
3339 	control->length = 0;
3340 	if (nolock == 0) {
3341 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3342 	}
3343 	sb = &stcb->sctp_socket->so_rcv;
3344 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3345 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3346 	}
3347 	sctp_sballoc(stcb, sb, m_notify);
3348 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3349 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3350 	}
3351 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3352 	control->end_added = 1;
3353 	if (stcb->asoc.control_pdapi)
3354 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3355 	else {
3356 		/* we really should not see this case */
3357 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3358 	}
3359 	if (nolock == 0) {
3360 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3361 	}
3362 	if (stcb->sctp_ep && stcb->sctp_socket) {
3363 		/* This should always be the case */
3364 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3365 	}
3366 }
3367 
/*
 * Report completion of a SHUTDOWN to the application.  For TCP-model
 * (one-to-one) sockets the socket is first marked unreadable/unwritable
 * and sleepers are woken; then, if the application subscribed to it, an
 * SCTP_SHUTDOWN_EVENT is queued on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: hold a refcount so the TCB cannot go
		 * away while we drop its lock to take the socket lock.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		/* the socket may have closed while the TCB was unlocked */
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
		socantrcvmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3439 
3440 static void
3441 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3442     int number_entries, uint16_t * list, int flag)
3443 {
3444 	struct mbuf *m_notify;
3445 	struct sctp_queued_to_read *control;
3446 	struct sctp_stream_reset_event *strreset;
3447 	int len;
3448 
3449 	if (stcb == NULL) {
3450 		return;
3451 	}
3452 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
3453 		/* event not enabled */
3454 		return;
3455 
3456 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3457 	if (m_notify == NULL)
3458 		/* no space left */
3459 		return;
3460 	SCTP_BUF_LEN(m_notify) = 0;
3461 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3462 	if (len > M_TRAILINGSPACE(m_notify)) {
3463 		/* never enough room */
3464 		sctp_m_freem(m_notify);
3465 		return;
3466 	}
3467 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3468 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3469 	if (number_entries == 0) {
3470 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3471 	} else {
3472 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3473 	}
3474 	strreset->strreset_length = len;
3475 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3476 	if (number_entries) {
3477 		int i;
3478 
3479 		for (i = 0; i < number_entries; i++) {
3480 			strreset->strreset_list[i] = ntohs(list[i]);
3481 		}
3482 	}
3483 	SCTP_BUF_LEN(m_notify) = len;
3484 	SCTP_BUF_NEXT(m_notify) = NULL;
3485 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3486 		/* no space */
3487 		sctp_m_freem(m_notify);
3488 		return;
3489 	}
3490 	/* append to socket */
3491 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3492 	    0, 0, 0, 0, 0, 0,
3493 	    m_notify);
3494 	if (control == NULL) {
3495 		/* no memory */
3496 		sctp_m_freem(m_notify);
3497 		return;
3498 	}
3499 	control->spec_flags = M_NOTIFICATION;
3500 	control->length = SCTP_BUF_LEN(m_notify);
3501 	/* not that we need this */
3502 	control->tail_mbuf = m_notify;
3503 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3504 	    control,
3505 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3506 }
3507 
3508 
3509 void
3510 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3511     uint32_t error, void *data, int so_locked
3512 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3513     SCTP_UNUSED
3514 #endif
3515 )
3516 {
3517 	if (stcb == NULL) {
3518 		/* unlikely but */
3519 		return;
3520 	}
3521 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3522 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3523 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3524 	    ) {
3525 		/* No notifications up when we are in a no socket state */
3526 		return;
3527 	}
3528 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3529 		/* Can't send up to a closed socket any notifications */
3530 		return;
3531 	}
3532 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3533 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3534 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3535 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3536 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3537 			/* Don't report these in front states */
3538 			return;
3539 		}
3540 	}
3541 	switch (notification) {
3542 	case SCTP_NOTIFY_ASSOC_UP:
3543 		if (stcb->asoc.assoc_up_sent == 0) {
3544 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3545 			stcb->asoc.assoc_up_sent = 1;
3546 		}
3547 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3548 			sctp_notify_adaptation_layer(stcb, error);
3549 		}
3550 		break;
3551 	case SCTP_NOTIFY_ASSOC_DOWN:
3552 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3553 		break;
3554 	case SCTP_NOTIFY_INTERFACE_DOWN:
3555 		{
3556 			struct sctp_nets *net;
3557 
3558 			net = (struct sctp_nets *)data;
3559 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3560 			    (struct sockaddr *)&net->ro._l_addr, error);
3561 			break;
3562 		}
3563 	case SCTP_NOTIFY_INTERFACE_UP:
3564 		{
3565 			struct sctp_nets *net;
3566 
3567 			net = (struct sctp_nets *)data;
3568 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3569 			    (struct sockaddr *)&net->ro._l_addr, error);
3570 			break;
3571 		}
3572 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3573 		{
3574 			struct sctp_nets *net;
3575 
3576 			net = (struct sctp_nets *)data;
3577 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3578 			    (struct sockaddr *)&net->ro._l_addr, error);
3579 			break;
3580 		}
3581 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3582 		sctp_notify_send_failed2(stcb, error,
3583 		    (struct sctp_stream_queue_pending *)data, so_locked);
3584 		break;
3585 	case SCTP_NOTIFY_DG_FAIL:
3586 		sctp_notify_send_failed(stcb, error,
3587 		    (struct sctp_tmit_chunk *)data, so_locked);
3588 		break;
3589 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3590 		{
3591 			uint32_t val;
3592 
3593 			val = *((uint32_t *) data);
3594 
3595 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3596 		}
3597 		break;
3598 	case SCTP_NOTIFY_STRDATA_ERR:
3599 		break;
3600 	case SCTP_NOTIFY_ASSOC_ABORTED:
3601 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3602 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3603 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3604 		} else {
3605 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3606 		}
3607 		break;
3608 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3609 		break;
3610 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3611 		break;
3612 	case SCTP_NOTIFY_ASSOC_RESTART:
3613 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3614 		break;
3615 	case SCTP_NOTIFY_HB_RESP:
3616 		break;
3617 	case SCTP_NOTIFY_STR_RESET_SEND:
3618 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3619 		break;
3620 	case SCTP_NOTIFY_STR_RESET_RECV:
3621 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3622 		break;
3623 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3624 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3625 		break;
3626 
3627 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3628 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3629 		break;
3630 
3631 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3632 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3633 		    error);
3634 		break;
3635 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3636 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3637 		    error);
3638 		break;
3639 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3640 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3641 		    error);
3642 		break;
3643 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3644 		break;
3645 	case SCTP_NOTIFY_ASCONF_FAILED:
3646 		break;
3647 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3648 		sctp_notify_shutdown_event(stcb);
3649 		break;
3650 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3651 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3652 		    (uint16_t) (uintptr_t) data);
3653 		break;
3654 #if 0
3655 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3656 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3657 		    error, (uint16_t) (uintptr_t) data);
3658 		break;
3659 #endif				/* not yet? remove? */
3660 
3661 
3662 	default:
3663 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3664 		    __FUNCTION__, notification, notification);
3665 		break;
3666 	}			/* end switch */
3667 }
3668 
3669 void
3670 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3671 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3672     SCTP_UNUSED
3673 #endif
3674 )
3675 {
3676 	struct sctp_association *asoc;
3677 	struct sctp_stream_out *outs;
3678 	struct sctp_tmit_chunk *chk;
3679 	struct sctp_stream_queue_pending *sp;
3680 	int i;
3681 
3682 	asoc = &stcb->asoc;
3683 
3684 	if (stcb == NULL) {
3685 		return;
3686 	}
3687 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3688 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3689 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3690 		return;
3691 	}
3692 	/* now through all the gunk freeing chunks */
3693 	if (holds_lock == 0) {
3694 		SCTP_TCB_SEND_LOCK(stcb);
3695 	}
3696 	/* sent queue SHOULD be empty */
3697 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3698 		chk = TAILQ_FIRST(&asoc->sent_queue);
3699 		while (chk) {
3700 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3701 			asoc->sent_queue_cnt--;
3702 			if (chk->data) {
3703 				/*
3704 				 * trim off the sctp chunk header(it should
3705 				 * be there)
3706 				 */
3707 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3708 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3709 					sctp_mbuf_crush(chk->data);
3710 					chk->send_size -= sizeof(struct sctp_data_chunk);
3711 				}
3712 			}
3713 			sctp_free_bufspace(stcb, asoc, chk, 1);
3714 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3715 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3716 			if (chk->data) {
3717 				sctp_m_freem(chk->data);
3718 				chk->data = NULL;
3719 			}
3720 			sctp_free_a_chunk(stcb, chk);
3721 			/* sa_ignore FREED_MEMORY */
3722 			chk = TAILQ_FIRST(&asoc->sent_queue);
3723 		}
3724 	}
3725 	/* pending send queue SHOULD be empty */
3726 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3727 		chk = TAILQ_FIRST(&asoc->send_queue);
3728 		while (chk) {
3729 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3730 			asoc->send_queue_cnt--;
3731 			if (chk->data) {
3732 				/*
3733 				 * trim off the sctp chunk header(it should
3734 				 * be there)
3735 				 */
3736 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3737 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3738 					sctp_mbuf_crush(chk->data);
3739 					chk->send_size -= sizeof(struct sctp_data_chunk);
3740 				}
3741 			}
3742 			sctp_free_bufspace(stcb, asoc, chk, 1);
3743 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3744 			if (chk->data) {
3745 				sctp_m_freem(chk->data);
3746 				chk->data = NULL;
3747 			}
3748 			sctp_free_a_chunk(stcb, chk);
3749 			/* sa_ignore FREED_MEMORY */
3750 			chk = TAILQ_FIRST(&asoc->send_queue);
3751 		}
3752 	}
3753 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3754 		/* For each stream */
3755 		outs = &stcb->asoc.strmout[i];
3756 		/* clean up any sends there */
3757 		stcb->asoc.locked_on_sending = NULL;
3758 		sp = TAILQ_FIRST(&outs->outqueue);
3759 		while (sp) {
3760 			stcb->asoc.stream_queue_cnt--;
3761 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3762 			sctp_free_spbufspace(stcb, asoc, sp);
3763 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3764 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3765 			if (sp->data) {
3766 				sctp_m_freem(sp->data);
3767 				sp->data = NULL;
3768 			}
3769 			if (sp->net)
3770 				sctp_free_remote_addr(sp->net);
3771 			sp->net = NULL;
3772 			/* Free the chunk */
3773 			sctp_free_a_strmoq(stcb, sp);
3774 			/* sa_ignore FREED_MEMORY */
3775 			sp = TAILQ_FIRST(&outs->outqueue);
3776 		}
3777 	}
3778 
3779 	if (holds_lock == 0) {
3780 		SCTP_TCB_SEND_UNLOCK(stcb);
3781 	}
3782 }
3783 
3784 void
3785 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3786 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3787     SCTP_UNUSED
3788 #endif
3789 )
3790 {
3791 
3792 	if (stcb == NULL) {
3793 		return;
3794 	}
3795 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3796 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3797 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3798 		return;
3799 	}
3800 	/* Tell them we lost the asoc */
3801 	sctp_report_all_outbound(stcb, 1, so_locked);
3802 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3803 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3804 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3805 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3806 	}
3807 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3808 }
3809 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT back using the peer's vtag, and
 * free the association.  With no TCB, an out-of-the-blue ABORT is sent
 * with vtag 0 and the inpcb is cleaned up if it was pending deletion.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a refcount so the TCB survives
		 * while we drop its lock to acquire the socket lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: tear down the endpoint if it was marked gone */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3855 
3856 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN logs.
 * The body is compiled only when NOSIY_PRINTS is defined (note: the
 * macro name is a long-standing typo of "NOISY_PRINTS" — keep it as-is,
 * renaming it would change which builds get the output); otherwise this
 * function is an empty stub.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, the oldest entries are from tsn_in_at to the end */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3917 
3918 #endif
3919 
/*
 * Locally abort an existing association: notify the ULP (unless the
 * socket is gone), send an ABORT chunk to the peer carrying 'op_err',
 * update statistics, and free the association.  Called with the TCB
 * locked when stcb != NULL; sctp_free_assoc consumes that lock.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* aborting an established assoc decrements the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: hold a refcount so the TCB survives while we
	 * drop its lock to acquire the socket lock.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3985 
3986 void
3987 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3988     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3989 {
3990 	struct sctp_chunkhdr *ch, chunk_buf;
3991 	unsigned int chk_length;
3992 
3993 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3994 	/* Generate a TO address for future reference */
3995 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3996 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3997 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3998 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3999 		}
4000 	}
4001 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4002 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4003 	while (ch != NULL) {
4004 		chk_length = ntohs(ch->chunk_length);
4005 		if (chk_length < sizeof(*ch)) {
4006 			/* break to abort land */
4007 			break;
4008 		}
4009 		switch (ch->chunk_type) {
4010 		case SCTP_COOKIE_ECHO:
4011 			/* We hit here only if the assoc is being freed */
4012 			return;
4013 		case SCTP_PACKET_DROPPED:
4014 			/* we don't respond to pkt-dropped */
4015 			return;
4016 		case SCTP_ABORT_ASSOCIATION:
4017 			/* we don't respond with an ABORT to an ABORT */
4018 			return;
4019 		case SCTP_SHUTDOWN_COMPLETE:
4020 			/*
4021 			 * we ignore it since we are not waiting for it and
4022 			 * peer is gone
4023 			 */
4024 			return;
4025 		case SCTP_SHUTDOWN_ACK:
4026 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4027 			return;
4028 		default:
4029 			break;
4030 		}
4031 		offset += SCTP_SIZE32(chk_length);
4032 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4033 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4034 	}
4035 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4036 }
4037 
4038 /*
4039  * check the inbound datagram to make sure there is not an abort inside it,
4040  * if there is return 1, else return 0.
4041  */
4042 int
4043 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4044 {
4045 	struct sctp_chunkhdr *ch;
4046 	struct sctp_init_chunk *init_chk, chunk_buf;
4047 	int offset;
4048 	unsigned int chk_length;
4049 
4050 	offset = iphlen + sizeof(struct sctphdr);
4051 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4052 	    (uint8_t *) & chunk_buf);
4053 	while (ch != NULL) {
4054 		chk_length = ntohs(ch->chunk_length);
4055 		if (chk_length < sizeof(*ch)) {
4056 			/* packet is probably corrupt */
4057 			break;
4058 		}
4059 		/* we seem to be ok, is it an abort? */
4060 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4061 			/* yep, tell them */
4062 			return (1);
4063 		}
4064 		if (ch->chunk_type == SCTP_INITIATION) {
4065 			/* need to update the Vtag */
4066 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4067 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4068 			if (init_chk != NULL) {
4069 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4070 			}
4071 		}
4072 		/* Nope, move to the next chunk */
4073 		offset += SCTP_SIZE32(chk_length);
4074 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4075 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4076 	}
4077 	return (0);
4078 }
4079 
4080 /*
4081  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4082  * set (i.e. it's 0) so, create this function to compare link local scopes
4083  */
4084 #ifdef INET6
4085 uint32_t
4086 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4087 {
4088 	struct sockaddr_in6 a, b;
4089 
4090 	/* save copies */
4091 	a = *addr1;
4092 	b = *addr2;
4093 
4094 	if (a.sin6_scope_id == 0)
4095 		if (sa6_recoverscope(&a)) {
4096 			/* can't get scope, so can't match */
4097 			return (0);
4098 		}
4099 	if (b.sin6_scope_id == 0)
4100 		if (sa6_recoverscope(&b)) {
4101 			/* can't get scope, so can't match */
4102 			return (0);
4103 		}
4104 	if (a.sin6_scope_id != b.sin6_scope_id)
4105 		return (0);
4106 
4107 	return (1);
4108 }
4109 
4110 /*
4111  * returns a sockaddr_in6 with embedded scope recovered and removed
4112  */
4113 struct sockaddr_in6 *
4114 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4115 {
4116 	/* check and strip embedded scope junk */
4117 	if (addr->sin6_family == AF_INET6) {
4118 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4119 			if (addr->sin6_scope_id == 0) {
4120 				*store = *addr;
4121 				if (!sa6_recoverscope(store)) {
4122 					/* use the recovered scope */
4123 					addr = store;
4124 				}
4125 			} else {
4126 				/* else, return the original "to" addr */
4127 				in6_clearscope(&addr->sin6_addr);
4128 			}
4129 		}
4130 	}
4131 	return (addr);
4132 }
4133 
4134 #endif
4135 
4136 /*
4137  * are the two addresses the same?  currently a "scopeless" check returns: 1
4138  * if same, 0 if not
4139  */
4140 int
4141 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4142 {
4143 
4144 	/* must be valid */
4145 	if (sa1 == NULL || sa2 == NULL)
4146 		return (0);
4147 
4148 	/* must be the same family */
4149 	if (sa1->sa_family != sa2->sa_family)
4150 		return (0);
4151 
4152 	switch (sa1->sa_family) {
4153 #ifdef INET6
4154 	case AF_INET6:
4155 		{
4156 			/* IPv6 addresses */
4157 			struct sockaddr_in6 *sin6_1, *sin6_2;
4158 
4159 			sin6_1 = (struct sockaddr_in6 *)sa1;
4160 			sin6_2 = (struct sockaddr_in6 *)sa2;
4161 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4162 			    sin6_2));
4163 		}
4164 #endif
4165 	case AF_INET:
4166 		{
4167 			/* IPv4 addresses */
4168 			struct sockaddr_in *sin_1, *sin_2;
4169 
4170 			sin_1 = (struct sockaddr_in *)sa1;
4171 			sin_2 = (struct sockaddr_in *)sa2;
4172 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4173 		}
4174 	default:
4175 		/* we don't do these... */
4176 		return (0);
4177 	}
4178 }
4179 
4180 void
4181 sctp_print_address(struct sockaddr *sa)
4182 {
4183 #ifdef INET6
4184 	char ip6buf[INET6_ADDRSTRLEN];
4185 
4186 	ip6buf[0] = 0;
4187 #endif
4188 
4189 	switch (sa->sa_family) {
4190 #ifdef INET6
4191 	case AF_INET6:
4192 		{
4193 			struct sockaddr_in6 *sin6;
4194 
4195 			sin6 = (struct sockaddr_in6 *)sa;
4196 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4197 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4198 			    ntohs(sin6->sin6_port),
4199 			    sin6->sin6_scope_id);
4200 			break;
4201 		}
4202 #endif
4203 	case AF_INET:
4204 		{
4205 			struct sockaddr_in *sin;
4206 			unsigned char *p;
4207 
4208 			sin = (struct sockaddr_in *)sa;
4209 			p = (unsigned char *)&sin->sin_addr;
4210 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4211 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4212 			break;
4213 		}
4214 	default:
4215 		SCTP_PRINTF("?\n");
4216 		break;
4217 	}
4218 }
4219 
4220 void
4221 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4222 {
4223 	switch (iph->ip_v) {
4224 		case IPVERSION:
4225 		{
4226 			struct sockaddr_in lsa, fsa;
4227 
4228 			bzero(&lsa, sizeof(lsa));
4229 			lsa.sin_len = sizeof(lsa);
4230 			lsa.sin_family = AF_INET;
4231 			lsa.sin_addr = iph->ip_src;
4232 			lsa.sin_port = sh->src_port;
4233 			bzero(&fsa, sizeof(fsa));
4234 			fsa.sin_len = sizeof(fsa);
4235 			fsa.sin_family = AF_INET;
4236 			fsa.sin_addr = iph->ip_dst;
4237 			fsa.sin_port = sh->dest_port;
4238 			SCTP_PRINTF("src: ");
4239 			sctp_print_address((struct sockaddr *)&lsa);
4240 			SCTP_PRINTF("dest: ");
4241 			sctp_print_address((struct sockaddr *)&fsa);
4242 			break;
4243 		}
4244 #ifdef INET6
4245 	case IPV6_VERSION >> 4:
4246 		{
4247 			struct ip6_hdr *ip6;
4248 			struct sockaddr_in6 lsa6, fsa6;
4249 
4250 			ip6 = (struct ip6_hdr *)iph;
4251 			bzero(&lsa6, sizeof(lsa6));
4252 			lsa6.sin6_len = sizeof(lsa6);
4253 			lsa6.sin6_family = AF_INET6;
4254 			lsa6.sin6_addr = ip6->ip6_src;
4255 			lsa6.sin6_port = sh->src_port;
4256 			bzero(&fsa6, sizeof(fsa6));
4257 			fsa6.sin6_len = sizeof(fsa6);
4258 			fsa6.sin6_family = AF_INET6;
4259 			fsa6.sin6_addr = ip6->ip6_dst;
4260 			fsa6.sin6_port = sh->dest_port;
4261 			SCTP_PRINTF("src: ");
4262 			sctp_print_address((struct sockaddr *)&lsa6);
4263 			SCTP_PRINTF("dest: ");
4264 			sctp_print_address((struct sockaddr *)&fsa6);
4265 			break;
4266 		}
4267 #endif
4268 	default:
4269 		/* TSNH */
4270 		break;
4271 	}
4272 }
4273 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * Go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.  Used on peeloff /
	 * accept so queued-but-unread data follows the association to its
	 * new socket.  Receive-buffer accounting is debited on the old
	 * socket and credited on the new one.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against any thread reading the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for our target stcb; park them on tmp_queue */
	while (control) {
		/* grab the next pointer before unlinking */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket's receive-buffer accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket's receive-buffer accounting */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4355 
4356 
4357 void
4358 sctp_add_to_readq(struct sctp_inpcb *inp,
4359     struct sctp_tcb *stcb,
4360     struct sctp_queued_to_read *control,
4361     struct sockbuf *sb,
4362     int end,
4363     int so_locked
4364 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4365     SCTP_UNUSED
4366 #endif
4367 )
4368 {
4369 	/*
4370 	 * Here we must place the control on the end of the socket read
4371 	 * queue AND increment sb_cc so that select will work properly on
4372 	 * read.
4373 	 */
4374 	struct mbuf *m, *prev = NULL;
4375 
4376 	if (inp == NULL) {
4377 		/* Gak, TSNH!! */
4378 #ifdef INVARIANTS
4379 		panic("Gak, inp NULL on add_to_readq");
4380 #endif
4381 		return;
4382 	}
4383 	SCTP_INP_READ_LOCK(inp);
4384 	if (!(control->spec_flags & M_NOTIFICATION)) {
4385 		atomic_add_int(&inp->total_recvs, 1);
4386 		if (!control->do_not_ref_stcb) {
4387 			atomic_add_int(&stcb->total_recvs, 1);
4388 		}
4389 	}
4390 	m = control->data;
4391 	control->held_length = 0;
4392 	control->length = 0;
4393 	while (m) {
4394 		if (SCTP_BUF_LEN(m) == 0) {
4395 			/* Skip mbufs with NO length */
4396 			if (prev == NULL) {
4397 				/* First one */
4398 				control->data = sctp_m_free(m);
4399 				m = control->data;
4400 			} else {
4401 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4402 				m = SCTP_BUF_NEXT(prev);
4403 			}
4404 			if (m == NULL) {
4405 				control->tail_mbuf = prev;;
4406 			}
4407 			continue;
4408 		}
4409 		prev = m;
4410 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4411 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4412 		}
4413 		sctp_sballoc(stcb, sb, m);
4414 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4415 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4416 		}
4417 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4418 		m = SCTP_BUF_NEXT(m);
4419 	}
4420 	if (prev != NULL) {
4421 		control->tail_mbuf = prev;
4422 	} else {
4423 		/* Everything got collapsed out?? */
4424 		return;
4425 	}
4426 	if (end) {
4427 		control->end_added = 1;
4428 	}
4429 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4430 	SCTP_INP_READ_UNLOCK(inp);
4431 	if (inp && inp->sctp_socket) {
4432 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4433 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4434 		} else {
4435 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4436 			struct socket *so;
4437 
4438 			so = SCTP_INP_SO(inp);
4439 			if (!so_locked) {
4440 				atomic_add_int(&stcb->asoc.refcnt, 1);
4441 				SCTP_TCB_UNLOCK(stcb);
4442 				SCTP_SOCKET_LOCK(so, 1);
4443 				SCTP_TCB_LOCK(stcb);
4444 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4445 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4446 					SCTP_SOCKET_UNLOCK(so, 1);
4447 					return;
4448 				}
4449 			}
4450 #endif
4451 			sctp_sorwakeup(inp, inp->sctp_socket);
4452 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4453 			if (!so_locked) {
4454 				SCTP_SOCKET_UNLOCK(so, 1);
4455 			}
4456 #endif
4457 		}
4458 	}
4459 }
4460 
4461 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success; -1 when control is missing, already
	 * complete, or no data was supplied.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: release the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* walk the new chain, pruning empty mbufs and totalling length */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: account bytes into the sockbuf */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			/* this closes out the active PD-API delivery */
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the lock reshuffle */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4603 
4604 
4605 
4606 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4607  *************ALTERNATE ROUTING CODE
4608  */
4609 
4610 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4611  *************ALTERNATE ROUTING CODE
4612  */
4613 
4614 struct mbuf *
4615 sctp_generate_invmanparam(int err)
4616 {
4617 	/* Return a MBUF with a invalid mandatory parameter */
4618 	struct mbuf *m;
4619 
4620 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4621 	if (m) {
4622 		struct sctp_paramhdr *ph;
4623 
4624 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4625 		ph = mtod(m, struct sctp_paramhdr *);
4626 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4627 		ph->param_type = htons(err);
4628 	}
4629 	return (m);
4630 }
4631 
4632 #ifdef SCTP_MBCNT_LOGGING
4633 void
4634 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4635     struct sctp_tmit_chunk *tp1, int chk_cnt)
4636 {
4637 	if (tp1->data == NULL) {
4638 		return;
4639 	}
4640 	asoc->chunks_on_out_queue -= chk_cnt;
4641 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4642 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4643 		    asoc->total_output_queue_size,
4644 		    tp1->book_size,
4645 		    0,
4646 		    tp1->mbcnt);
4647 	}
4648 	if (asoc->total_output_queue_size >= tp1->book_size) {
4649 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4650 	} else {
4651 		asoc->total_output_queue_size = 0;
4652 	}
4653 
4654 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4655 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4656 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4657 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4658 		} else {
4659 			stcb->sctp_socket->so_snd.sb_cc = 0;
4660 
4661 		}
4662 	}
4663 }
4664 
4665 #endif
4666 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Release a PR-SCTP chunk (and, for a fragmented message, all of
	 * its following fragments) from the given queue: mark each as
	 * FORWARD-TSN-skipped, free its data, notify the ULP of the
	 * failed delivery, and return the total book-kept bytes released.
	 */
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		/* chunk will be covered by a FORWARD-TSN, not retransmitted */
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/*
			 * NOTE(review): SCTP_SO_NOT_LOCKED is passed here
			 * even when so_locked is set - confirm this is
			 * intended.
			 */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				/* hold a ref across the lock reshuffle */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4763 
4764 /*
4765  * checks to see if the given address, sa, is one that is currently known by
4766  * the kernel note: can't distinguish the same address on multiple interfaces
4767  * and doesn't handle multiple addresses with different zone/scope id's note:
4768  * ifa_ifwithaddr() compares the entire sockaddr struct
4769  */
4770 struct sctp_ifa *
4771 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4772     int holds_lock)
4773 {
4774 	struct sctp_laddr *laddr;
4775 
4776 	if (holds_lock == 0) {
4777 		SCTP_INP_RLOCK(inp);
4778 	}
4779 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4780 		if (laddr->ifa == NULL)
4781 			continue;
4782 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4783 			continue;
4784 		if (addr->sa_family == AF_INET) {
4785 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4786 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4787 				/* found him. */
4788 				if (holds_lock == 0) {
4789 					SCTP_INP_RUNLOCK(inp);
4790 				}
4791 				return (laddr->ifa);
4792 				break;
4793 			}
4794 		}
4795 #ifdef INET6
4796 		if (addr->sa_family == AF_INET6) {
4797 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4798 			    &laddr->ifa->address.sin6)) {
4799 				/* found him. */
4800 				if (holds_lock == 0) {
4801 					SCTP_INP_RUNLOCK(inp);
4802 				}
4803 				return (laddr->ifa);
4804 				break;
4805 			}
4806 		}
4807 #endif
4808 	}
4809 	if (holds_lock == 0) {
4810 		SCTP_INP_RUNLOCK(inp);
4811 	}
4812 	return (NULL);
4813 }
4814 
4815 uint32_t
4816 sctp_get_ifa_hash_val(struct sockaddr *addr)
4817 {
4818 	if (addr->sa_family == AF_INET) {
4819 		struct sockaddr_in *sin;
4820 
4821 		sin = (struct sockaddr_in *)addr;
4822 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4823 	} else if (addr->sa_family == AF_INET6) {
4824 		struct sockaddr_in6 *sin6;
4825 		uint32_t hash_of_addr;
4826 
4827 		sin6 = (struct sockaddr_in6 *)addr;
4828 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4829 		    sin6->sin6_addr.s6_addr32[1] +
4830 		    sin6->sin6_addr.s6_addr32[2] +
4831 		    sin6->sin6_addr.s6_addr32[3]);
4832 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4833 		return (hash_of_addr);
4834 	}
4835 	return (0);
4836 }
4837 
4838 struct sctp_ifa *
4839 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4840 {
4841 	struct sctp_ifa *sctp_ifap;
4842 	struct sctp_vrf *vrf;
4843 	struct sctp_ifalist *hash_head;
4844 	uint32_t hash_of_addr;
4845 
4846 	if (holds_lock == 0)
4847 		SCTP_IPI_ADDR_RLOCK();
4848 
4849 	vrf = sctp_find_vrf(vrf_id);
4850 	if (vrf == NULL) {
4851 		if (holds_lock == 0)
4852 			SCTP_IPI_ADDR_RUNLOCK();
4853 		return (NULL);
4854 	}
4855 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4856 
4857 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4858 	if (hash_head == NULL) {
4859 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4860 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4861 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4862 		sctp_print_address(addr);
4863 		SCTP_PRINTF("No such bucket for address\n");
4864 		if (holds_lock == 0)
4865 			SCTP_IPI_ADDR_RUNLOCK();
4866 
4867 		return (NULL);
4868 	}
4869 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4870 		if (sctp_ifap == NULL) {
4871 			panic("Huh LIST_FOREACH corrupt");
4872 		}
4873 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4874 			continue;
4875 		if (addr->sa_family == AF_INET) {
4876 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4877 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4878 				/* found him. */
4879 				if (holds_lock == 0)
4880 					SCTP_IPI_ADDR_RUNLOCK();
4881 				return (sctp_ifap);
4882 				break;
4883 			}
4884 		}
4885 #ifdef INET6
4886 		if (addr->sa_family == AF_INET6) {
4887 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4888 			    &sctp_ifap->address.sin6)) {
4889 				/* found him. */
4890 				if (holds_lock == 0)
4891 					SCTP_IPI_ADDR_RUNLOCK();
4892 				return (sctp_ifap);
4893 				break;
4894 			}
4895 		}
4896 #endif
4897 	}
4898 	if (holds_lock == 0)
4899 		SCTP_IPI_ADDR_RUNLOCK();
4900 	return (NULL);
4901 }
4902 
/*
 * The user has consumed *freed_so_far bytes from the receive queue.
 * Decide whether the receive window opened by at least rwnd_req and, if
 * so, send a window-update SACK immediately.  hold_rlock indicates the
 * caller holds the INP read lock, which is dropped around the TCB work
 * and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the TCB cannot be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough; drop the read lock so we can SACK */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-acquire the read lock if the caller expects it held */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4983 
/*
 * sctp_sorecvmsg() - SCTP-specific receive path.
 *
 * Pulls queued messages off the endpoint's read_queue and delivers them to
 * the caller either by copying through 'uio' (when mp == NULL) or by handing
 * back the raw mbuf chain in '*mp'.  Optionally fills in the source address
 * ('from'/'fromlen'), the per-message sctp_sndrcvinfo ('sinfo', only when
 * 'filling_sinfo' is set), and returns MSG_EOR / MSG_NOTIFICATION / MSG_TRUNC
 * indications through '*msg_flags'.
 *
 * As data is consumed it accounts freed bytes ('freed_so_far') and, once the
 * rwnd_req threshold is crossed, calls sctp_user_rcvd() so the peer can be
 * told our receive window has re-opened.
 *
 * Returns 0 on success or an errno (EINVAL, EOPNOTSUPP, EFAULT, ECONNRESET,
 * ENOTCONN, EWOULDBLOCK, or whatever sblock()/sbwait()/uiomove() reports).
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL, *embuf = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* 1 once we hold a ref on stcb->asoc.refcnt */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;		/* cleared for MSG_DONTWAIT/MSG_NBIO/non-blocking socket */
	uint32_t freed_so_far = 0;	/* bytes released from so_rcv since last rwnd report */
	uint32_t copied_so_far = 0;	/* bytes handed to the user this call */
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;		/* freed-byte threshold before a window-update SACK */
	int hold_sblock = 0;		/* 1 while we hold SOCKBUF_LOCK(&so->so_rcv) */
	int hold_rlock = 0;		/* 1 while we hold SCTP_INP_READ_LOCK(inp) */
	int slen = 0;			/* uio_resid on entry, for logging */
	uint32_t held_length = 0;
	int sockbuf_lock = 0;		/* 1 while we hold the sblock() on so_rcv */

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize against other readers for the whole call. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:


restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
					/*
					 * You get this once if you are
					 * active open side
					 */
					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
						/*
						 * Remove flag if on the
						 * active open side
						 */
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
					}
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					} else {
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Non-blocking path: nothing queued at all. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
						/*
						 * You get this once if you
						 * are active open side
						 */
						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
							/*
							 * Remove flag if on
							 * the active open
							 * side
							 */
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
						}
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						} else {
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				/* We consumed this mbuf entirely. */
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					embuf = m;
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					embuf = m;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
		    ) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
	if (msg_flags)
		*msg_flags = out_flags;
out:
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
			/* NOTE(review): this panic is unconditional (not under INVARIANTS) */
			panic("stcb for refcnt has gone NULL?");
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5948 
5949 
5950 #ifdef SCTP_MBUF_LOGGING
5951 struct mbuf *
5952 sctp_m_free(struct mbuf *m)
5953 {
5954 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5955 		if (SCTP_BUF_IS_EXTENDED(m)) {
5956 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5957 		}
5958 	}
5959 	return (m_free(m));
5960 }
5961 
5962 void
5963 sctp_m_freem(struct mbuf *mb)
5964 {
5965 	while (mb != NULL)
5966 		mb = sctp_m_free(mb);
5967 }
5968 
5969 #endif
5970 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address, for all associations that hold the
	 * address, request a peer-set-primary.  Returns 0 on success or
	 * an errno (EADDRNOTAVAIL, ENOMEM).  The actual work is done
	 * asynchronously by the iterator draining the address work queue.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		/* the address is not known to this stack/vrf */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		/* no zone memory for the work item */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa for the work item's lifetime */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* kick the address work-queue timer so the entry gets processed */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
6017 
6018 
6019 int
6020 sctp_soreceive(struct socket *so,
6021     struct sockaddr **psa,
6022     struct uio *uio,
6023     struct mbuf **mp0,
6024     struct mbuf **controlp,
6025     int *flagsp)
6026 {
6027 	int error, fromlen;
6028 	uint8_t sockbuf[256];
6029 	struct sockaddr *from;
6030 	struct sctp_extrcvinfo sinfo;
6031 	int filling_sinfo = 1;
6032 	struct sctp_inpcb *inp;
6033 
6034 	inp = (struct sctp_inpcb *)so->so_pcb;
6035 	/* pickup the assoc we are reading from */
6036 	if (inp == NULL) {
6037 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6038 		return (EINVAL);
6039 	}
6040 	if ((sctp_is_feature_off(inp,
6041 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6042 	    (controlp == NULL)) {
6043 		/* user does not want the sndrcv ctl */
6044 		filling_sinfo = 0;
6045 	}
6046 	if (psa) {
6047 		from = (struct sockaddr *)sockbuf;
6048 		fromlen = sizeof(sockbuf);
6049 		from->sa_len = 0;
6050 	} else {
6051 		from = NULL;
6052 		fromlen = 0;
6053 	}
6054 
6055 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6056 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6057 	if ((controlp) && (filling_sinfo)) {
6058 		/* copy back the sinfo in a CMSG format */
6059 		if (filling_sinfo)
6060 			*controlp = sctp_build_ctl_nchunk(inp,
6061 			    (struct sctp_sndrcvinfo *)&sinfo);
6062 		else
6063 			*controlp = NULL;
6064 	}
6065 	if (psa) {
6066 		/* copy back the address info */
6067 		if (from && from->sa_len) {
6068 			*psa = sodupsockaddr(from, M_NOWAIT);
6069 		} else {
6070 			*psa = NULL;
6071 		}
6072 	}
6073 	return (error);
6074 }
6075 
6076 
6077 int
6078 sctp_l_soreceive(struct socket *so,
6079     struct sockaddr **name,
6080     struct uio *uio,
6081     char **controlp,
6082     int *controllen,
6083     int *flag)
6084 {
6085 	int error, fromlen;
6086 	uint8_t sockbuf[256];
6087 	struct sockaddr *from;
6088 	struct sctp_extrcvinfo sinfo;
6089 	int filling_sinfo = 1;
6090 	struct sctp_inpcb *inp;
6091 
6092 	inp = (struct sctp_inpcb *)so->so_pcb;
6093 	/* pickup the assoc we are reading from */
6094 	if (inp == NULL) {
6095 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6096 		return (EINVAL);
6097 	}
6098 	if ((sctp_is_feature_off(inp,
6099 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6100 	    (controlp == NULL)) {
6101 		/* user does not want the sndrcv ctl */
6102 		filling_sinfo = 0;
6103 	}
6104 	if (name) {
6105 		from = (struct sockaddr *)sockbuf;
6106 		fromlen = sizeof(sockbuf);
6107 		from->sa_len = 0;
6108 	} else {
6109 		from = NULL;
6110 		fromlen = 0;
6111 	}
6112 
6113 	error = sctp_sorecvmsg(so, uio,
6114 	    (struct mbuf **)NULL,
6115 	    from, fromlen, flag,
6116 	    (struct sctp_sndrcvinfo *)&sinfo,
6117 	    filling_sinfo);
6118 	if ((controlp) && (filling_sinfo)) {
6119 		/*
6120 		 * copy back the sinfo in a CMSG format note that the caller
6121 		 * has reponsibility for freeing the memory.
6122 		 */
6123 		if (filling_sinfo)
6124 			*controlp = sctp_build_ctl_cchunk(inp,
6125 			    controllen,
6126 			    (struct sctp_sndrcvinfo *)&sinfo);
6127 	}
6128 	if (name) {
6129 		/* copy back the address info */
6130 		if (from && from->sa_len) {
6131 			*name = sodupsockaddr(from, M_WAIT);
6132 		} else {
6133 			*name = NULL;
6134 		}
6135 	}
6136 	return (error);
6137 }
6138 
6139 
6140 
6141 
6142 
6143 
6144 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add a packed array of totaddr sockaddrs (as supplied to
	 * sctp_connectx) as remote addresses of the association.
	 * Returns the number added.  On failure *error is set to ENOBUFS
	 * and the association has already been freed by
	 * sctp_free_assoc() -- the caller must not touch stcb.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		/*
		 * NOTE(review): for a family other than AF_INET/AF_INET6,
		 * incr keeps its previous value (0 on the first entry),
		 * so sa may not advance and the same bytes are re-read --
		 * presumably callers pre-validate; confirm.
		 */
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/* step to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6185 
6186 struct sctp_tcb *
6187 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6188     int *totaddr, int *num_v4, int *num_v6, int *error,
6189     int limit, int *bad_addr)
6190 {
6191 	struct sockaddr *sa;
6192 	struct sctp_tcb *stcb = NULL;
6193 	size_t incr, at, i;
6194 
6195 	at = incr = 0;
6196 	sa = addr;
6197 	*error = *num_v6 = *num_v4 = 0;
6198 	/* account and validate addresses */
6199 	for (i = 0; i < (size_t)*totaddr; i++) {
6200 		if (sa->sa_family == AF_INET) {
6201 			(*num_v4) += 1;
6202 			incr = sizeof(struct sockaddr_in);
6203 			if (sa->sa_len != incr) {
6204 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6205 				*error = EINVAL;
6206 				*bad_addr = 1;
6207 				return (NULL);
6208 			}
6209 		} else if (sa->sa_family == AF_INET6) {
6210 			struct sockaddr_in6 *sin6;
6211 
6212 			sin6 = (struct sockaddr_in6 *)sa;
6213 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6214 				/* Must be non-mapped for connectx */
6215 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6216 				*error = EINVAL;
6217 				*bad_addr = 1;
6218 				return (NULL);
6219 			}
6220 			(*num_v6) += 1;
6221 			incr = sizeof(struct sockaddr_in6);
6222 			if (sa->sa_len != incr) {
6223 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6224 				*error = EINVAL;
6225 				*bad_addr = 1;
6226 				return (NULL);
6227 			}
6228 		} else {
6229 			*totaddr = i;
6230 			/* we are done */
6231 			break;
6232 		}
6233 		SCTP_INP_INCR_REF(inp);
6234 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6235 		if (stcb != NULL) {
6236 			/* Already have or am bring up an association */
6237 			return (stcb);
6238 		} else {
6239 			SCTP_INP_DECR_REF(inp);
6240 		}
6241 		if ((at + incr) > (size_t)limit) {
6242 			*totaddr = i;
6243 			break;
6244 		}
6245 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6246 	}
6247 	return ((struct sctp_tcb *)NULL);
6248 }
6249 
6250 /*
6251  * sctp_bindx(ADD) for one address.
6252  * assumes all arguments are valid/checked by caller.
6253  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			/* malformed v6 sockaddr */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the v4-mapped address into a plain v4 one */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	/*
	 * NOTE(review): this tests sa->sa_family, not
	 * addr_touse->sa_family, so a v4-mapped address unwrapped above
	 * bypasses these v4 checks -- confirm this is intentional.
	 */
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			/* malformed v4 sockaddr */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* endpoint not yet bound: this becomes the primary bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/*
			 * user specified 0 port, set it to existing port.
			 * NOTE(review): when addr_touse == sa this writes
			 * through to the caller-supplied sockaddr.
			 */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free: add it to this endpoint */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* some other endpoint owns this address:port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6375 
6376 /*
6377  * sctp_bindx(DELETE) for one address.
6378  * assumes all arguments are valid/checked by caller.
6379  */
6380 void
6381 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6382     struct sockaddr *sa, sctp_assoc_t assoc_id,
6383     uint32_t vrf_id, int *error)
6384 {
6385 	struct sockaddr *addr_touse;
6386 
6387 #ifdef INET6
6388 	struct sockaddr_in sin;
6389 
6390 #endif
6391 
6392 	/* see if we're bound all already! */
6393 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6394 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6395 		*error = EINVAL;
6396 		return;
6397 	}
6398 	addr_touse = sa;
6399 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6400 	if (sa->sa_family == AF_INET6) {
6401 		struct sockaddr_in6 *sin6;
6402 
6403 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6404 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6405 			*error = EINVAL;
6406 			return;
6407 		}
6408 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6409 			/* can only bind v6 on PF_INET6 sockets */
6410 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6411 			*error = EINVAL;
6412 			return;
6413 		}
6414 		sin6 = (struct sockaddr_in6 *)addr_touse;
6415 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6416 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6417 			    SCTP_IPV6_V6ONLY(inp)) {
6418 				/* can't bind mapped-v4 on PF_INET sockets */
6419 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6420 				*error = EINVAL;
6421 				return;
6422 			}
6423 			in6_sin6_2_sin(&sin, sin6);
6424 			addr_touse = (struct sockaddr *)&sin;
6425 		}
6426 	}
6427 #endif
6428 	if (sa->sa_family == AF_INET) {
6429 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6430 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6431 			*error = EINVAL;
6432 			return;
6433 		}
6434 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6435 		    SCTP_IPV6_V6ONLY(inp)) {
6436 			/* can't bind v4 on PF_INET sockets */
6437 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6438 			*error = EINVAL;
6439 			return;
6440 		}
6441 	}
6442 	/*
6443 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6444 	 * below is ever changed we may need to lock before calling
6445 	 * association level binding.
6446 	 */
6447 	if (assoc_id == 0) {
6448 		/* delete the address */
6449 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6450 		    SCTP_DEL_IP_ADDRESS,
6451 		    vrf_id, NULL);
6452 	} else {
6453 		/*
6454 		 * FIX: decide whether we allow assoc based bindx
6455 		 */
6456 	}
6457 }
6458 
6459 /*
6460  * returns the valid local address count for an assoc, taking into account
6461  * all scoping rules
6462  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, v4-private, link-
	 * and site-local) and the endpoint's address-family limits.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	/* a PF_INET6 endpoint may also use v4 unless it is v6-only */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count only the explicitly bound
		 * addresses that are not restricted for this association
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6594 
6595 #if defined(SCTP_LOCAL_TRACE_BUF)
6596 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append one record to the global circular trace buffer without
	 * taking a lock: reserve a slot by atomically advancing the
	 * shared index, retrying on CAS failure.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* wrap; note the index restarts at 1, not 0 */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* an at-or-past-the-end reservation maps to entry 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/* fill in the reserved entry; concurrent writers use other slots */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6622 
6623 #endif
/* We will need to add support here
 * to bind the ports so we can do
 * UDP tunneling.  In the meantime,
 * we return an error.
 */
6629 
void
sctp_over_udp_stop(void)
{
	/* SCTP-over-UDP is not supported yet; nothing to tear down. */
}
int
sctp_over_udp_start(void)
{
	/* SCTP-over-UDP tunneling is not implemented; report failure. */
	return (-1);
}
6640