xref: /freebsd/sys/netinet/sctputil.c (revision 9fd69f37d28cfd7438cac3eeb45fe9dd46b4d7dd)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 void
60 sctp_sblog(struct sockbuf *sb,
61     struct sctp_tcb *stcb, int from, int incr)
62 {
63 	struct sctp_cwnd_log sctp_clog;
64 
65 	sctp_clog.x.sb.stcb = stcb;
66 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
67 	if (stcb)
68 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
69 	else
70 		sctp_clog.x.sb.stcb_sbcc = 0;
71 	sctp_clog.x.sb.incr = incr;
72 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
73 	    SCTP_LOG_EVENT_SB,
74 	    from,
75 	    sctp_clog.x.misc.log1,
76 	    sctp_clog.x.misc.log2,
77 	    sctp_clog.x.misc.log3,
78 	    sctp_clog.x.misc.log4);
79 }
80 
81 void
82 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
83 {
84 	struct sctp_cwnd_log sctp_clog;
85 
86 	sctp_clog.x.close.inp = (void *)inp;
87 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
88 	if (stcb) {
89 		sctp_clog.x.close.stcb = (void *)stcb;
90 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
91 	} else {
92 		sctp_clog.x.close.stcb = 0;
93 		sctp_clog.x.close.state = 0;
94 	}
95 	sctp_clog.x.close.loc = loc;
96 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
97 	    SCTP_LOG_EVENT_CLOSE,
98 	    0,
99 	    sctp_clog.x.misc.log1,
100 	    sctp_clog.x.misc.log2,
101 	    sctp_clog.x.misc.log3,
102 	    sctp_clog.x.misc.log4);
103 }
104 
105 
106 void
107 rto_logging(struct sctp_nets *net, int from)
108 {
109 	struct sctp_cwnd_log sctp_clog;
110 
111 	memset(&sctp_clog, 0, sizeof(sctp_clog));
112 	sctp_clog.x.rto.net = (void *)net;
113 	sctp_clog.x.rto.rtt = net->prev_rtt;
114 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
115 	    SCTP_LOG_EVENT_RTT,
116 	    from,
117 	    sctp_clog.x.misc.log1,
118 	    sctp_clog.x.misc.log2,
119 	    sctp_clog.x.misc.log3,
120 	    sctp_clog.x.misc.log4);
121 
122 }
123 
124 void
125 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
126 {
127 	struct sctp_cwnd_log sctp_clog;
128 
129 	sctp_clog.x.strlog.stcb = stcb;
130 	sctp_clog.x.strlog.n_tsn = tsn;
131 	sctp_clog.x.strlog.n_sseq = sseq;
132 	sctp_clog.x.strlog.e_tsn = 0;
133 	sctp_clog.x.strlog.e_sseq = 0;
134 	sctp_clog.x.strlog.strm = stream;
135 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
136 	    SCTP_LOG_EVENT_STRM,
137 	    from,
138 	    sctp_clog.x.misc.log1,
139 	    sctp_clog.x.misc.log2,
140 	    sctp_clog.x.misc.log3,
141 	    sctp_clog.x.misc.log4);
142 
143 }
144 
145 void
146 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
147 {
148 	struct sctp_cwnd_log sctp_clog;
149 
150 	sctp_clog.x.nagle.stcb = (void *)stcb;
151 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
152 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
153 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
154 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
155 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
156 	    SCTP_LOG_EVENT_NAGLE,
157 	    action,
158 	    sctp_clog.x.misc.log1,
159 	    sctp_clog.x.misc.log2,
160 	    sctp_clog.x.misc.log3,
161 	    sctp_clog.x.misc.log4);
162 }
163 
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	memset(&sctp_clog, 0, sizeof(sctp_clog));
209 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
210 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
211 	sctp_clog.x.fr.tsn = tsn;
212 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
213 	    SCTP_LOG_EVENT_FR,
214 	    from,
215 	    sctp_clog.x.misc.log1,
216 	    sctp_clog.x.misc.log2,
217 	    sctp_clog.x.misc.log3,
218 	    sctp_clog.x.misc.log4);
219 
220 }
221 
222 
223 void
224 sctp_log_mb(struct mbuf *m, int from)
225 {
226 	struct sctp_cwnd_log sctp_clog;
227 
228 	sctp_clog.x.mb.mp = m;
229 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
230 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
231 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
232 	if (SCTP_BUF_IS_EXTENDED(m)) {
233 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
234 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
235 	} else {
236 		sctp_clog.x.mb.ext = 0;
237 		sctp_clog.x.mb.refcnt = 0;
238 	}
239 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
240 	    SCTP_LOG_EVENT_MBUF,
241 	    from,
242 	    sctp_clog.x.misc.log1,
243 	    sctp_clog.x.misc.log2,
244 	    sctp_clog.x.misc.log3,
245 	    sctp_clog.x.misc.log4);
246 }
247 
248 
249 void
250 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
251     int from)
252 {
253 	struct sctp_cwnd_log sctp_clog;
254 
255 	if (control == NULL) {
256 		SCTP_PRINTF("Gak log of NULL?\n");
257 		return;
258 	}
259 	sctp_clog.x.strlog.stcb = control->stcb;
260 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
261 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
262 	sctp_clog.x.strlog.strm = control->sinfo_stream;
263 	if (poschk != NULL) {
264 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
265 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
266 	} else {
267 		sctp_clog.x.strlog.e_tsn = 0;
268 		sctp_clog.x.strlog.e_sseq = 0;
269 	}
270 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
271 	    SCTP_LOG_EVENT_STRM,
272 	    from,
273 	    sctp_clog.x.misc.log1,
274 	    sctp_clog.x.misc.log2,
275 	    sctp_clog.x.misc.log3,
276 	    sctp_clog.x.misc.log4);
277 
278 }
279 
280 void
281 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
282 {
283 	struct sctp_cwnd_log sctp_clog;
284 
285 	sctp_clog.x.cwnd.net = net;
286 	if (stcb->asoc.send_queue_cnt > 255)
287 		sctp_clog.x.cwnd.cnt_in_send = 255;
288 	else
289 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
290 	if (stcb->asoc.stream_queue_cnt > 255)
291 		sctp_clog.x.cwnd.cnt_in_str = 255;
292 	else
293 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
294 
295 	if (net) {
296 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
297 		sctp_clog.x.cwnd.inflight = net->flight_size;
298 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
299 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
300 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
301 	}
302 	if (SCTP_CWNDLOG_PRESEND == from) {
303 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
304 	}
305 	sctp_clog.x.cwnd.cwnd_augment = augment;
306 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
307 	    SCTP_LOG_EVENT_CWND,
308 	    from,
309 	    sctp_clog.x.misc.log1,
310 	    sctp_clog.x.misc.log2,
311 	    sctp_clog.x.misc.log3,
312 	    sctp_clog.x.misc.log4);
313 
314 }
315 
316 void
317 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
318 {
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	memset(&sctp_clog, 0, sizeof(sctp_clog));
322 	if (inp) {
323 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
324 
325 	} else {
326 		sctp_clog.x.lock.sock = (void *)NULL;
327 	}
328 	sctp_clog.x.lock.inp = (void *)inp;
329 	if (stcb) {
330 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
331 	} else {
332 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
333 	}
334 	if (inp) {
335 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
336 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
337 	} else {
338 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
339 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
340 	}
341 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
342 	if (inp->sctp_socket) {
343 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
344 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
346 	} else {
347 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
348 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
350 	}
351 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
352 	    SCTP_LOG_LOCK_EVENT,
353 	    from,
354 	    sctp_clog.x.misc.log1,
355 	    sctp_clog.x.misc.log2,
356 	    sctp_clog.x.misc.log3,
357 	    sctp_clog.x.misc.log4);
358 
359 }
360 
361 void
362 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
363 {
364 	struct sctp_cwnd_log sctp_clog;
365 
366 	memset(&sctp_clog, 0, sizeof(sctp_clog));
367 	sctp_clog.x.cwnd.net = net;
368 	sctp_clog.x.cwnd.cwnd_new_value = error;
369 	sctp_clog.x.cwnd.inflight = net->flight_size;
370 	sctp_clog.x.cwnd.cwnd_augment = burst;
371 	if (stcb->asoc.send_queue_cnt > 255)
372 		sctp_clog.x.cwnd.cnt_in_send = 255;
373 	else
374 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
375 	if (stcb->asoc.stream_queue_cnt > 255)
376 		sctp_clog.x.cwnd.cnt_in_str = 255;
377 	else
378 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
379 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
380 	    SCTP_LOG_EVENT_MAXBURST,
381 	    from,
382 	    sctp_clog.x.misc.log1,
383 	    sctp_clog.x.misc.log2,
384 	    sctp_clog.x.misc.log3,
385 	    sctp_clog.x.misc.log4);
386 
387 }
388 
389 void
390 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
391 {
392 	struct sctp_cwnd_log sctp_clog;
393 
394 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
395 	sctp_clog.x.rwnd.send_size = snd_size;
396 	sctp_clog.x.rwnd.overhead = overhead;
397 	sctp_clog.x.rwnd.new_rwnd = 0;
398 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
399 	    SCTP_LOG_EVENT_RWND,
400 	    from,
401 	    sctp_clog.x.misc.log1,
402 	    sctp_clog.x.misc.log2,
403 	    sctp_clog.x.misc.log3,
404 	    sctp_clog.x.misc.log4);
405 }
406 
407 void
408 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
409 {
410 	struct sctp_cwnd_log sctp_clog;
411 
412 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
413 	sctp_clog.x.rwnd.send_size = flight_size;
414 	sctp_clog.x.rwnd.overhead = overhead;
415 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
416 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
417 	    SCTP_LOG_EVENT_RWND,
418 	    from,
419 	    sctp_clog.x.misc.log1,
420 	    sctp_clog.x.misc.log2,
421 	    sctp_clog.x.misc.log3,
422 	    sctp_clog.x.misc.log4);
423 }
424 
425 void
426 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
427 {
428 	struct sctp_cwnd_log sctp_clog;
429 
430 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
431 	sctp_clog.x.mbcnt.size_change = book;
432 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
433 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
434 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
435 	    SCTP_LOG_EVENT_MBCNT,
436 	    from,
437 	    sctp_clog.x.misc.log1,
438 	    sctp_clog.x.misc.log2,
439 	    sctp_clog.x.misc.log3,
440 	    sctp_clog.x.misc.log4);
441 
442 }
443 
444 void
445 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
446 {
447 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
448 	    SCTP_LOG_MISC_EVENT,
449 	    from,
450 	    a, b, c, d);
451 }
452 
453 void
454 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
455 {
456 	struct sctp_cwnd_log sctp_clog;
457 
458 	sctp_clog.x.wake.stcb = (void *)stcb;
459 	sctp_clog.x.wake.wake_cnt = wake_cnt;
460 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
461 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
462 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
463 
464 	if (stcb->asoc.stream_queue_cnt < 0xff)
465 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
466 	else
467 		sctp_clog.x.wake.stream_qcnt = 0xff;
468 
469 	if (stcb->asoc.chunks_on_out_queue < 0xff)
470 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
471 	else
472 		sctp_clog.x.wake.chunks_on_oque = 0xff;
473 
474 	sctp_clog.x.wake.sctpflags = 0;
475 	/* set in the defered mode stuff */
476 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
477 		sctp_clog.x.wake.sctpflags |= 1;
478 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
479 		sctp_clog.x.wake.sctpflags |= 2;
480 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
481 		sctp_clog.x.wake.sctpflags |= 4;
482 	/* what about the sb */
483 	if (stcb->sctp_socket) {
484 		struct socket *so = stcb->sctp_socket;
485 
486 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
487 	} else {
488 		sctp_clog.x.wake.sbflags = 0xff;
489 	}
490 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
491 	    SCTP_LOG_EVENT_WAKE,
492 	    from,
493 	    sctp_clog.x.misc.log1,
494 	    sctp_clog.x.misc.log2,
495 	    sctp_clog.x.misc.log3,
496 	    sctp_clog.x.misc.log4);
497 
498 }
499 
500 void
501 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
502 {
503 	struct sctp_cwnd_log sctp_clog;
504 
505 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
506 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
507 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
508 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
509 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
510 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
511 	sctp_clog.x.blk.sndlen = sendlen;
512 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
513 	    SCTP_LOG_EVENT_BLOCK,
514 	    from,
515 	    sctp_clog.x.misc.log1,
516 	    sctp_clog.x.misc.log2,
517 	    sctp_clog.x.misc.log3,
518 	    sctp_clog.x.misc.log4);
519 
520 }
521 
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/*
	 * Stub: always reports success without copying any data into
	 * optval or updating optsize; the KTR trace buffer (dumped
	 * externally) is used for stat logging instead.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
528 
529 #ifdef SCTP_AUDITING_ENABLED
/* Circular buffer of (event, detail) byte pairs recorded by the auditor. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
532 
533 static
534 void
535 sctp_print_audit_report(void)
536 {
537 	int i;
538 	int cnt;
539 
540 	cnt = 0;
541 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
542 		if ((sctp_audit_data[i][0] == 0xe0) &&
543 		    (sctp_audit_data[i][1] == 0x01)) {
544 			cnt = 0;
545 			SCTP_PRINTF("\n");
546 		} else if (sctp_audit_data[i][0] == 0xf0) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			SCTP_PRINTF("\n");
552 			cnt = 0;
553 		}
554 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
555 		    (uint32_t) sctp_audit_data[i][1]);
556 		cnt++;
557 		if ((cnt % 14) == 0)
558 			SCTP_PRINTF("\n");
559 	}
560 	for (i = 0; i < sctp_audit_indx; i++) {
561 		if ((sctp_audit_data[i][0] == 0xe0) &&
562 		    (sctp_audit_data[i][1] == 0x01)) {
563 			cnt = 0;
564 			SCTP_PRINTF("\n");
565 		} else if (sctp_audit_data[i][0] == 0xf0) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			SCTP_PRINTF("\n");
571 			cnt = 0;
572 		}
573 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
574 		    (uint32_t) sctp_audit_data[i][1]);
575 		cnt++;
576 		if ((cnt % 14) == 0)
577 			SCTP_PRINTF("\n");
578 	}
579 	SCTP_PRINTF("\n");
580 }
581 
582 void
583 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
584     struct sctp_nets *net)
585 {
586 	int resend_cnt, tot_out, rep, tot_book_cnt;
587 	struct sctp_nets *lnet;
588 	struct sctp_tmit_chunk *chk;
589 
590 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
591 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
592 	sctp_audit_indx++;
593 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
594 		sctp_audit_indx = 0;
595 	}
596 	if (inp == NULL) {
597 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
598 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
599 		sctp_audit_indx++;
600 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
601 			sctp_audit_indx = 0;
602 		}
603 		return;
604 	}
605 	if (stcb == NULL) {
606 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
607 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
608 		sctp_audit_indx++;
609 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
610 			sctp_audit_indx = 0;
611 		}
612 		return;
613 	}
614 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
615 	sctp_audit_data[sctp_audit_indx][1] =
616 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
617 	sctp_audit_indx++;
618 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
619 		sctp_audit_indx = 0;
620 	}
621 	rep = 0;
622 	tot_book_cnt = 0;
623 	resend_cnt = tot_out = 0;
624 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
625 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
626 			resend_cnt++;
627 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
628 			tot_out += chk->book_size;
629 			tot_book_cnt++;
630 		}
631 	}
632 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
633 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
634 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
635 		sctp_audit_indx++;
636 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
637 			sctp_audit_indx = 0;
638 		}
639 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
640 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
641 		rep = 1;
642 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
643 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
644 		sctp_audit_data[sctp_audit_indx][1] =
645 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
646 		sctp_audit_indx++;
647 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
648 			sctp_audit_indx = 0;
649 		}
650 	}
651 	if (tot_out != stcb->asoc.total_flight) {
652 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
653 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 		rep = 1;
659 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
660 		    (int)stcb->asoc.total_flight);
661 		stcb->asoc.total_flight = tot_out;
662 	}
663 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
664 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
665 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
666 		sctp_audit_indx++;
667 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 			sctp_audit_indx = 0;
669 		}
670 		rep = 1;
671 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
672 
673 		stcb->asoc.total_flight_count = tot_book_cnt;
674 	}
675 	tot_out = 0;
676 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
677 		tot_out += lnet->flight_size;
678 	}
679 	if (tot_out != stcb->asoc.total_flight) {
680 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
681 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
682 		sctp_audit_indx++;
683 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
684 			sctp_audit_indx = 0;
685 		}
686 		rep = 1;
687 		SCTP_PRINTF("real flight:%d net total was %d\n",
688 		    stcb->asoc.total_flight, tot_out);
689 		/* now corrective action */
690 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
691 
692 			tot_out = 0;
693 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
694 				if ((chk->whoTo == lnet) &&
695 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
696 					tot_out += chk->book_size;
697 				}
698 			}
699 			if (lnet->flight_size != tot_out) {
700 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
701 				    (uint32_t) lnet, lnet->flight_size,
702 				    tot_out);
703 				lnet->flight_size = tot_out;
704 			}
705 		}
706 	}
707 	if (rep) {
708 		sctp_print_audit_report();
709 	}
710 }
711 
712 void
713 sctp_audit_log(uint8_t ev, uint8_t fd)
714 {
715 
716 	sctp_audit_data[sctp_audit_indx][0] = ev;
717 	sctp_audit_data[sctp_audit_indx][1] = fd;
718 	sctp_audit_indx++;
719 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
720 		sctp_audit_indx = 0;
721 	}
722 }
723 
724 #endif
725 
726 /*
727  * a list of sizes based on typical mtu's, used only if next hop size not
728  * returned.
729  */
static int sctp_mtu_sizes[] = {
	/*
	 * Must remain sorted ascending, and the entry count must match
	 * NUMBER_OF_MTU_SIZES (18) -- find_next_best_mtu() depends on
	 * both.
	 */
	68,			/* minimum IPv4 MTU (RFC 791) */
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,			/* Ethernet */
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535			/* maximum IP datagram size */
};
750 
751 void
752 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
753 {
754 	struct sctp_association *asoc;
755 	struct sctp_nets *net;
756 
757 	asoc = &stcb->asoc;
758 
759 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
760 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
762 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
763 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
764 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
765 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
766 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
767 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
768 	}
769 }
770 
771 int
772 find_next_best_mtu(int totsz)
773 {
774 	int i, perfer;
775 
776 	/*
777 	 * if we are in here we must find the next best fit based on the
778 	 * size of the dg that failed to be sent.
779 	 */
780 	perfer = 0;
781 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
782 		if (totsz < sctp_mtu_sizes[i]) {
783 			perfer = i - 1;
784 			if (perfer < 0)
785 				perfer = 0;
786 			break;
787 		}
788 	}
789 	return (sctp_mtu_sizes[perfer]);
790 }
791 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Restart consumption at the head of the refilled store. */
	m->store_at = 0;
	/* random_store = HMAC(random_numbers, random_counter). */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill produces fresh output. */
	m->random_counter++;
}
810 
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug hook: a nonzero seed makes TSNs strictly sequential. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Lock-free claim of a 4-byte slot in the endpoint's random
	 * store: advance store_at via compare-and-swap, retrying if
	 * another thread raced us to the slot.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/*
		 * Refill the random store.  NOTE(review): a racing
		 * consumer may read bytes while the refill runs; the
		 * surrounding design treats that as extra entropy
		 * mixing (see sctp_fill_random_store's comment).
		 */
		sctp_fill_random_store(inp);
	}
	/* Read the claimed 4 bytes as the candidate TSN. */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
848 
849 uint32_t
850 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
851 {
852 	uint32_t x, not_done;
853 	struct timeval now;
854 
855 	(void)SCTP_GETTIME_TIMEVAL(&now);
856 	not_done = 1;
857 	while (not_done) {
858 		x = sctp_select_initial_TSN(&inp->sctp_ep);
859 		if (x == 0) {
860 			/* we never use 0 */
861 			continue;
862 		}
863 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
864 			not_done = 0;
865 		}
866 	}
867 	return (x);
868 }
869 
870 int
871 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
872     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
873 {
874 	struct sctp_association *asoc;
875 
876 	/*
877 	 * Anything set to zero is taken care of by the allocation routine's
878 	 * bzero
879 	 */
880 
881 	/*
882 	 * Up front select what scoping to apply on addresses I tell my peer
883 	 * Not sure what to do with these right now, we will need to come up
884 	 * with a way to set them. We may need to pass them through from the
885 	 * caller in the sctp_aloc_assoc() function.
886 	 */
887 	int i;
888 
889 	asoc = &stcb->asoc;
890 	/* init all variables to a known value. */
891 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
892 	asoc->max_burst = m->sctp_ep.max_burst;
893 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
894 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
895 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
896 	/* EY Init nr_sack variable */
897 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
898 	/* JRS 5/21/07 - Init CMT PF variables */
899 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
900 	asoc->sctp_frag_point = m->sctp_frag_point;
901 #ifdef INET
902 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
903 #else
904 	asoc->default_tos = 0;
905 #endif
906 
907 #ifdef INET6
908 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
909 #else
910 	asoc->default_flowlabel = 0;
911 #endif
912 	asoc->sb_send_resv = 0;
913 	if (override_tag) {
914 		asoc->my_vtag = override_tag;
915 	} else {
916 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
917 	}
918 	/* Get the nonce tags */
919 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
920 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
921 	asoc->vrf_id = vrf_id;
922 
923 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
924 		asoc->hb_is_disabled = 1;
925 	else
926 		asoc->hb_is_disabled = 0;
927 
928 #ifdef SCTP_ASOCLOG_OF_TSNS
929 	asoc->tsn_in_at = 0;
930 	asoc->tsn_out_at = 0;
931 	asoc->tsn_in_wrapped = 0;
932 	asoc->tsn_out_wrapped = 0;
933 	asoc->cumack_log_at = 0;
934 	asoc->cumack_log_atsnt = 0;
935 #endif
936 #ifdef SCTP_FS_SPEC_LOG
937 	asoc->fs_index = 0;
938 #endif
939 	asoc->refcnt = 0;
940 	asoc->assoc_up_sent = 0;
941 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
942 	    sctp_select_initial_TSN(&m->sctp_ep);
943 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
944 	/* we are optimisitic here */
945 	asoc->peer_supports_pktdrop = 1;
946 	asoc->peer_supports_nat = 0;
947 	asoc->sent_queue_retran_cnt = 0;
948 
949 	/* for CMT */
950 	asoc->last_net_cmt_send_started = NULL;
951 
952 	/* This will need to be adjusted */
953 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
954 	asoc->last_acked_seq = asoc->init_seq_number - 1;
955 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
956 	asoc->asconf_seq_in = asoc->last_acked_seq;
957 
958 	/* here we are different, we hold the next one we expect */
959 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
960 
961 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
962 	asoc->initial_rto = m->sctp_ep.initial_rto;
963 
964 	asoc->max_init_times = m->sctp_ep.max_init_times;
965 	asoc->max_send_times = m->sctp_ep.max_send_times;
966 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
967 	asoc->free_chunk_cnt = 0;
968 
969 	asoc->iam_blocking = 0;
970 	/* ECN Nonce initialization */
971 	asoc->context = m->sctp_context;
972 	asoc->def_send = m->def_send;
973 	asoc->ecn_nonce_allowed = 0;
974 	asoc->receiver_nonce_sum = 1;
975 	asoc->nonce_sum_expect_base = 1;
976 	asoc->nonce_sum_check = 1;
977 	asoc->nonce_resync_tsn = 0;
978 	asoc->nonce_wait_for_ecne = 0;
979 	asoc->nonce_wait_tsn = 0;
980 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
981 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
982 	asoc->pr_sctp_cnt = 0;
983 	asoc->total_output_queue_size = 0;
984 
985 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
986 		struct in6pcb *inp6;
987 
988 		/* Its a V6 socket */
989 		inp6 = (struct in6pcb *)m;
990 		asoc->ipv6_addr_legal = 1;
991 		/* Now look at the binding flag to see if V4 will be legal */
992 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
993 			asoc->ipv4_addr_legal = 1;
994 		} else {
995 			/* V4 addresses are NOT legal on the association */
996 			asoc->ipv4_addr_legal = 0;
997 		}
998 	} else {
999 		/* Its a V4 socket, no - V6 */
1000 		asoc->ipv4_addr_legal = 1;
1001 		asoc->ipv6_addr_legal = 0;
1002 	}
1003 
1004 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1005 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1006 
1007 	asoc->smallest_mtu = m->sctp_frag_point;
1008 #ifdef SCTP_PRINT_FOR_B_AND_M
1009 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1010 	    asoc->smallest_mtu);
1011 #endif
1012 	asoc->minrto = m->sctp_ep.sctp_minrto;
1013 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1014 
1015 	asoc->locked_on_sending = NULL;
1016 	asoc->stream_locked_on = 0;
1017 	asoc->ecn_echo_cnt_onq = 0;
1018 	asoc->stream_locked = 0;
1019 
1020 	asoc->send_sack = 1;
1021 
1022 	LIST_INIT(&asoc->sctp_restricted_addrs);
1023 
1024 	TAILQ_INIT(&asoc->nets);
1025 	TAILQ_INIT(&asoc->pending_reply_queue);
1026 	TAILQ_INIT(&asoc->asconf_ack_sent);
1027 	/* Setup to fill the hb random cache at first HB */
1028 	asoc->hb_random_idx = 4;
1029 
1030 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1031 
1032 	/*
1033 	 * JRS - Pick the default congestion control module based on the
1034 	 * sysctl.
1035 	 */
1036 	switch (m->sctp_ep.sctp_default_cc_module) {
1037 		/* JRS - Standard TCP congestion control */
1038 	case SCTP_CC_RFC2581:
1039 		{
1040 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1041 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1042 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1043 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1044 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1045 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1046 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1047 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1049 			break;
1050 		}
1051 		/* JRS - High Speed TCP congestion control (Floyd) */
1052 	case SCTP_CC_HSTCP:
1053 		{
1054 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1055 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1056 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1063 			break;
1064 		}
1065 		/* JRS - HTCP congestion control */
1066 	case SCTP_CC_HTCP:
1067 		{
1068 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1069 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1070 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1077 			break;
1078 		}
1079 		/* JRS - By default, use RFC2581 */
1080 	default:
1081 		{
1082 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1083 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1084 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1091 			break;
1092 		}
1093 	}
1094 
1095 	/*
1096 	 * Now the stream parameters, here we allocate space for all streams
1097 	 * that we request by default.
1098 	 */
1099 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1100 	    m->sctp_ep.pre_open_stream_count;
1101 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1102 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1103 	    SCTP_M_STRMO);
1104 	if (asoc->strmout == NULL) {
1105 		/* big trouble no memory */
1106 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1107 		return (ENOMEM);
1108 	}
1109 	for (i = 0; i < asoc->streamoutcnt; i++) {
1110 		/*
1111 		 * inbound side must be set to 0xffff, also NOTE when we get
1112 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1113 		 * count (streamoutcnt) but first check if we sent to any of
1114 		 * the upper streams that were dropped (if some were). Those
1115 		 * that were dropped must be notified to the upper layer as
1116 		 * failed to send.
1117 		 */
1118 		asoc->strmout[i].next_sequence_sent = 0x0;
1119 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1120 		asoc->strmout[i].stream_no = i;
1121 		asoc->strmout[i].last_msg_incomplete = 0;
1122 		asoc->strmout[i].next_spoke.tqe_next = 0;
1123 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1124 	}
1125 	/* Now the mapping array */
1126 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1127 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1128 	    SCTP_M_MAP);
1129 	if (asoc->mapping_array == NULL) {
1130 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1131 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1132 		return (ENOMEM);
1133 	}
1134 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1135 	/* EY  - initialize the nr_mapping_array just like mapping array */
1136 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1137 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1138 	    SCTP_M_MAP);
1139 	if (asoc->nr_mapping_array == NULL) {
1140 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1141 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1142 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1143 		return (ENOMEM);
1144 	}
1145 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1146 
1147 	/* Now the init of the other outqueues */
1148 	TAILQ_INIT(&asoc->free_chunks);
1149 	TAILQ_INIT(&asoc->out_wheel);
1150 	TAILQ_INIT(&asoc->control_send_queue);
1151 	TAILQ_INIT(&asoc->asconf_send_queue);
1152 	TAILQ_INIT(&asoc->send_queue);
1153 	TAILQ_INIT(&asoc->sent_queue);
1154 	TAILQ_INIT(&asoc->reasmqueue);
1155 	TAILQ_INIT(&asoc->resetHead);
1156 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1157 	TAILQ_INIT(&asoc->asconf_queue);
1158 	/* authentication fields */
1159 	asoc->authinfo.random = NULL;
1160 	asoc->authinfo.active_keyid = 0;
1161 	asoc->authinfo.assoc_key = NULL;
1162 	asoc->authinfo.assoc_keyid = 0;
1163 	asoc->authinfo.recv_key = NULL;
1164 	asoc->authinfo.recv_keyid = 0;
1165 	LIST_INIT(&asoc->shared_keys);
1166 	asoc->marked_retrans = 0;
1167 	asoc->timoinit = 0;
1168 	asoc->timodata = 0;
1169 	asoc->timosack = 0;
1170 	asoc->timoshutdown = 0;
1171 	asoc->timoheartbeat = 0;
1172 	asoc->timocookie = 0;
1173 	asoc->timoshutdownack = 0;
1174 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1175 	asoc->discontinuity_time = asoc->start_time;
1176 	/*
1177 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1178 	 * freed later whe the association is freed.
1179 	 */
1180 	return (0);
1181 }
1182 
1183 int
1184 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1185 {
1186 	/* mapping array needs to grow */
1187 	uint8_t *new_array;
1188 	uint32_t new_size;
1189 
1190 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1191 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1192 	if (new_array == NULL) {
1193 		/* can't get more, forget it */
1194 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1195 		    new_size);
1196 		return (-1);
1197 	}
1198 	memset(new_array, 0, new_size);
1199 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1200 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1201 	asoc->mapping_array = new_array;
1202 	asoc->mapping_array_size = new_size;
1203 	if (asoc->peer_supports_nr_sack) {
1204 		new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1205 		SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1206 		if (new_array == NULL) {
1207 			/* can't get more, forget it */
1208 			SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1209 			    new_size);
1210 			return (-1);
1211 		}
1212 		memset(new_array, 0, new_size);
1213 		memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1214 		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1215 		asoc->nr_mapping_array = new_array;
1216 		asoc->nr_mapping_array_size = new_size;
1217 	}
1218 	return (0);
1219 }
1220 
1221 
1222 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Walk every endpoint (inp) and association (stcb) that matches the
 * iterator's pcb_flags/pcb_features/asoc_state filters, invoking the
 * iterator's callbacks along the way.  Consumes and frees "it" when the
 * walk completes.  Lock order here (ITERATOR -> INP -> TCB) is
 * delicate; do not reorder statements.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints that do not match the requested flags/features */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* downgrade: hold only the read lock while visiting associations */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb/inp with refcounts so they survive
			 * while we briefly release and re-take the locks.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this empty write-lock/unlock cycle presumably
	 * exists to drain concurrent writers before advancing -- confirm
	 * against the locking design before removing.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1349 
1350 void
1351 sctp_iterator_worker(void)
1352 {
1353 	struct sctp_iterator *it = NULL;
1354 
1355 	/* This function is called with the WQ lock in place */
1356 
1357 	SCTP_BASE_INFO(iterator_running) = 1;
1358 again:
1359 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1360 	while (it) {
1361 		/* now lets work on this one */
1362 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1363 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1364 		sctp_iterator_work(it);
1365 		SCTP_IPI_ITERATOR_WQ_LOCK();
1366 		/* sa_ignore FREED_MEMORY */
1367 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1368 	}
1369 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1370 		goto again;
1371 	}
1372 	SCTP_BASE_INFO(iterator_running) = 0;
1373 	return;
1374 }
1375 
1376 #endif
1377 
1378 
1379 static void
1380 sctp_handle_addr_wq(void)
1381 {
1382 	/* deal with the ADDR wq from the rtsock calls */
1383 	struct sctp_laddr *wi;
1384 	struct sctp_asconf_iterator *asc;
1385 
1386 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1387 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1388 	if (asc == NULL) {
1389 		/* Try later, no memory */
1390 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1391 		    (struct sctp_inpcb *)NULL,
1392 		    (struct sctp_tcb *)NULL,
1393 		    (struct sctp_nets *)NULL);
1394 		return;
1395 	}
1396 	LIST_INIT(&asc->list_of_work);
1397 	asc->cnt = 0;
1398 	SCTP_IPI_ITERATOR_WQ_LOCK();
1399 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1400 	while (wi != NULL) {
1401 		LIST_REMOVE(wi, sctp_nxt_addr);
1402 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1403 		asc->cnt++;
1404 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1405 	}
1406 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1407 	if (asc->cnt == 0) {
1408 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1409 	} else {
1410 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1411 		    sctp_asconf_iterator_stcb,
1412 		    NULL,	/* No ep end for boundall */
1413 		    SCTP_PCB_FLAGS_BOUNDALL,
1414 		    SCTP_PCB_ANY_FEATURES,
1415 		    SCTP_ASOC_ANY_STATE,
1416 		    (void *)asc, 0,
1417 		    sctp_asconf_iterator_end, NULL, 0);
1418 	}
1419 }
1420 
/*
 * NOTE(review): file-scope scratch variables written by the
 * SCTP_TIMER_TYPE_SEND case in sctp_timeout_handler() below.  They look
 * like debugging/post-mortem aids; being non-static globals they are
 * shared across all timers and CPUs, so their values are racy -- verify
 * before relying on them (and consider making them local or static).
 */
int retcode = 0;
int cur_oerr = 0;
1423 
/*
 * Central callout handler for every SCTP timer type.  Decodes the
 * sctp_timer cookie passed by the callout system, revalidates the
 * endpoint/association it points at (either may have been torn down
 * while the callout was pending), takes the needed references and
 * locks, then dispatches on tmr->type.  The tmr->stopped_from
 * assignments are breadcrumbs recording how far the handler got before
 * bailing out, for debugging.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer check guards against a stale/reused timer cookie */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		/* pin the endpoint; dropped at out_decr */
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timer types listed below
		 * are still allowed to run (they handle teardown paths).
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the association while we validate its state */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while this callout was in flight */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* ASOCKILL must run even on a dying association */
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/* cur_oerr/retcode are file-scope globals (see above) */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): when the FOREACH above runs to
			 * completion, lnet is NULL here; the calls below
			 * presumably tolerate a NULL net -- confirm.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* drop the endpoint reference taken at entry */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1910 
/*
 * Arm the timer of the given type for an endpoint/association/destination.
 * Each case validates the arguments that timer type requires, selects the
 * sctp_timer structure that backs it, and computes the timeout in ticks.
 * Most retransmission-style timers (SEND, INIT, COOKIE, SHUTDOWN, ...)
 * use the destination's smoothed RTO, falling back to the association's
 * initial_rto when no RTT measurement exists yet (net->RTO == 0).
 * If the underlying OS timer is already pending, the running timer is
 * left unchanged and this call is a no-op.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be started without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/*
			 * NOTE(review): for this timer type the inp
			 * argument actually carries a struct
			 * sctp_iterator pointer, cast here.
			 */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* No RTT measurement yet: use initial RTO. */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			/* Refill the random-jitter pool when exhausted. */
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 256 ms RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						/*
						 * An unconfirmed address
						 * exists: heartbeat without
						 * extra delay.
						 */
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* Clamp to the configured early-FR floor. */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	/* Every case must have selected a timer and a positive timeout. */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record the context the timeout handler will run with. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2274 
/*
 * Stop (disarm) the timer of the given type.  Mirrors sctp_timer_start():
 * each case selects the sctp_timer structure backing that timer type and
 * validates the arguments it needs.  Because some sctp_timer structures
 * are shared between timer types (e.g. strreset_timer, signature_change,
 * rxt_timer), the timer is only stopped when its recorded type matches
 * the requested one.  'from' records the caller's location for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/*
			 * NOTE(review): as in sctp_timer_start(), inp
			 * actually carries a struct sctp_iterator
			 * pointer for this timer type.
			 */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the count of running send timers in step (never below 0). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2446 
2447 uint32_t
2448 sctp_calculate_len(struct mbuf *m)
2449 {
2450 	uint32_t tlen = 0;
2451 	struct mbuf *at;
2452 
2453 	at = m;
2454 	while (at) {
2455 		tlen += SCTP_BUF_LEN(at);
2456 		at = SCTP_BUF_NEXT(at);
2457 	}
2458 	return (tlen);
2459 }
2460 
2461 void
2462 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2463     struct sctp_association *asoc, uint32_t mtu)
2464 {
2465 	/*
2466 	 * Reset the P-MTU size on this association, this involves changing
2467 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2468 	 * allow the DF flag to be cleared.
2469 	 */
2470 	struct sctp_tmit_chunk *chk;
2471 	unsigned int eff_mtu, ovh;
2472 
2473 #ifdef SCTP_PRINT_FOR_B_AND_M
2474 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2475 	    inp, asoc, mtu);
2476 #endif
2477 	asoc->smallest_mtu = mtu;
2478 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2479 		ovh = SCTP_MIN_OVERHEAD;
2480 	} else {
2481 		ovh = SCTP_MIN_V4_OVERHEAD;
2482 	}
2483 	eff_mtu = mtu - ovh;
2484 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2485 
2486 		if (chk->send_size > eff_mtu) {
2487 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2488 		}
2489 	}
2490 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2491 		if (chk->send_size > eff_mtu) {
2492 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2493 		}
2494 	}
2495 }
2496 
2497 
2498 /*
2499  * given an association and starting time of the current RTT period return
2500  * RTO in number of msecs net should point to the current network
2501  */
/*
 * Compute a new RTO (in msec) for 'net' from the RTT sample that started
 * at *told and ends now.  Updates net->rtt/lastsa/lastsv using Van
 * Jacobson's integer SRTT/RTTVAR algorithm and bounds the result by the
 * association's minrto/maxrto.  'safe' selects whether *told must be
 * copied to an aligned local first (sparc64 alignment workaround).
 * Returns 0 on a bad 'safe' argument.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		/* Subsequent measurement: exponential smoothing. */
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			/* |error| feeds the variance estimator */
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (per RFC 4960 section 6.3.1, C3) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2627 
2628 /*
2629  * return a pointer to a contiguous piece of data from the given mbuf chain
2630  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2631  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2632  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2633  */
2634 caddr_t
2635 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2636 {
2637 	uint32_t count;
2638 	uint8_t *ptr;
2639 
2640 	ptr = in_ptr;
2641 	if ((off < 0) || (len <= 0))
2642 		return (NULL);
2643 
2644 	/* find the desired start location */
2645 	while ((m != NULL) && (off > 0)) {
2646 		if (off < SCTP_BUF_LEN(m))
2647 			break;
2648 		off -= SCTP_BUF_LEN(m);
2649 		m = SCTP_BUF_NEXT(m);
2650 	}
2651 	if (m == NULL)
2652 		return (NULL);
2653 
2654 	/* is the current mbuf large enough (eg. contiguous)? */
2655 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2656 		return (mtod(m, caddr_t)+off);
2657 	} else {
2658 		/* else, it spans more than one mbuf, so save a temp copy... */
2659 		while ((m != NULL) && (len > 0)) {
2660 			count = min(SCTP_BUF_LEN(m) - off, len);
2661 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2662 			len -= count;
2663 			ptr += count;
2664 			off = 0;
2665 			m = SCTP_BUF_NEXT(m);
2666 		}
2667 		if ((m == NULL) && (len > 0))
2668 			return (NULL);
2669 		else
2670 			return ((caddr_t)in_ptr);
2671 	}
2672 }
2673 
2674 
2675 
2676 struct sctp_paramhdr *
2677 sctp_get_next_param(struct mbuf *m,
2678     int offset,
2679     struct sctp_paramhdr *pull,
2680     int pull_limit)
2681 {
2682 	/* This just provides a typed signature to Peter's Pull routine */
2683 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2684 	    (uint8_t *) pull));
2685 }
2686 
2687 
2688 int
2689 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2690 {
2691 	/*
2692 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2693 	 * padlen is > 3 this routine will fail.
2694 	 */
2695 	uint8_t *dp;
2696 	int i;
2697 
2698 	if (padlen > 3) {
2699 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2700 		return (ENOBUFS);
2701 	}
2702 	if (padlen <= M_TRAILINGSPACE(m)) {
2703 		/*
2704 		 * The easy way. We hope the majority of the time we hit
2705 		 * here :)
2706 		 */
2707 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2708 		SCTP_BUF_LEN(m) += padlen;
2709 	} else {
2710 		/* Hard way we must grow the mbuf */
2711 		struct mbuf *tmp;
2712 
2713 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2714 		if (tmp == NULL) {
2715 			/* Out of space GAK! we are in big trouble. */
2716 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2717 			return (ENOSPC);
2718 		}
2719 		/* setup and insert in middle */
2720 		SCTP_BUF_LEN(tmp) = padlen;
2721 		SCTP_BUF_NEXT(tmp) = NULL;
2722 		SCTP_BUF_NEXT(m) = tmp;
2723 		dp = mtod(tmp, uint8_t *);
2724 	}
2725 	/* zero out the pad */
2726 	for (i = 0; i < padlen; i++) {
2727 		*dp = 0;
2728 		dp++;
2729 	}
2730 	return (0);
2731 }
2732 
2733 int
2734 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2735 {
2736 	/* find the last mbuf in chain and pad it */
2737 	struct mbuf *m_at;
2738 
2739 	m_at = m;
2740 	if (last_mbuf) {
2741 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2742 	} else {
2743 		while (m_at) {
2744 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2745 				return (sctp_add_pad_tombuf(m_at, padval));
2746 			}
2747 			m_at = SCTP_BUF_NEXT(m_at);
2748 		}
2749 	}
2750 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2751 	return (EFAULT);
2752 }
2753 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event' with 'error') to
 * the application via the socket's read queue.  For one-to-one style
 * (and connected one-to-many) sockets, a COMM_LOST/CANT_STR_ASSOC event
 * additionally sets so_error (ECONNREFUSED while still in COOKIE_WAIT,
 * ECONNRESET otherwise) and wakes any sleepers.  'so_locked' tells the
 * Apple/lock-testing builds whether the socket lock is already held.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Take a refcount while dropping the TCB lock so
			 * the socket lock can be acquired in the right
			 * order, then re-lock and re-check liveness.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Build the sctp_assoc_change notification in the fresh mbuf. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2870 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * with the given 'state' and 'error' to the application via the
 * socket's read queue, if the event is enabled.  For link-local IPv6
 * addresses the scope_id is made user-visible (recovered, or the
 * embedded form cleared) before delivery.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the peer address, per family. */
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2945 
2946 
/*
 * Queue an SCTP_SEND_FAILED notification for a transmit chunk that could
 * not be delivered, stealing the chunk's data mbufs so the (trimmed)
 * payload is handed back to the application on the read queue.
 *
 * error: SCTP_NOTIFY_DATAGRAM_UNSENT selects SCTP_DATA_UNSENT in
 *        ssf_flags, anything else yields SCTP_DATA_SENT.
 * chk:   the failed chunk; chk->data is consumed (set to NULL here).
 * so_locked: passed through to sctp_add_to_readq().
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* reported length: notification header + payload sans DATA chunk header */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* chain the user data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3028 
3029 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still sitting on
 * a stream output queue (it never made it onto the send queue), stealing
 * sp->data so the payload is handed back to the application.
 *
 * error: SCTP_NOTIFY_DATAGRAM_UNSENT selects SCTP_DATA_UNSENT in
 *        ssf_flags, anything else yields SCTP_DATA_SENT.
 * sp:    the pending stream queue entry; sp->data is consumed.
 * so_locked: passed through to sctp_add_to_readq().
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of this message was already pulled into chunks */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3102 
3103 
3104 
3105 static void
3106 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3107     uint32_t error)
3108 {
3109 	struct mbuf *m_notify;
3110 	struct sctp_adaptation_event *sai;
3111 	struct sctp_queued_to_read *control;
3112 
3113 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3114 		/* event not enabled */
3115 		return;
3116 	}
3117 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3118 	if (m_notify == NULL)
3119 		/* no space left */
3120 		return;
3121 	SCTP_BUF_LEN(m_notify) = 0;
3122 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3123 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3124 	sai->sai_flags = 0;
3125 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3126 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3127 	sai->sai_assoc_id = sctp_get_associd(stcb);
3128 
3129 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3130 	SCTP_BUF_NEXT(m_notify) = NULL;
3131 
3132 	/* append to socket */
3133 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3134 	    0, 0, 0, 0, 0, 0,
3135 	    m_notify);
3136 	if (control == NULL) {
3137 		/* no memory */
3138 		sctp_m_freem(m_notify);
3139 		return;
3140 	}
3141 	control->length = SCTP_BUF_LEN(m_notify);
3142 	control->spec_flags = M_NOTIFICATION;
3143 	/* not that we need this */
3144 	control->tail_mbuf = m_notify;
3145 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3146 	    control,
3147 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3148 }
3149 
3150 /* This always must be called with the read-queue LOCKED in the INP */
3151 static void
3152 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3153     uint32_t val, int so_locked
3154 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3155     SCTP_UNUSED
3156 #endif
3157 )
3158 {
3159 	struct mbuf *m_notify;
3160 	struct sctp_pdapi_event *pdapi;
3161 	struct sctp_queued_to_read *control;
3162 	struct sockbuf *sb;
3163 
3164 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3165 		/* event not enabled */
3166 		return;
3167 	}
3168 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3169 	if (m_notify == NULL)
3170 		/* no space left */
3171 		return;
3172 	SCTP_BUF_LEN(m_notify) = 0;
3173 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3174 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3175 	pdapi->pdapi_flags = 0;
3176 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3177 	pdapi->pdapi_indication = error;
3178 	pdapi->pdapi_stream = (val >> 16);
3179 	pdapi->pdapi_seq = (val & 0x0000ffff);
3180 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3181 
3182 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3183 	SCTP_BUF_NEXT(m_notify) = NULL;
3184 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3185 	    0, 0, 0, 0, 0, 0,
3186 	    m_notify);
3187 	if (control == NULL) {
3188 		/* no memory */
3189 		sctp_m_freem(m_notify);
3190 		return;
3191 	}
3192 	control->spec_flags = M_NOTIFICATION;
3193 	control->length = SCTP_BUF_LEN(m_notify);
3194 	/* not that we need this */
3195 	control->tail_mbuf = m_notify;
3196 	control->held_length = 0;
3197 	control->length = 0;
3198 	sb = &stcb->sctp_socket->so_rcv;
3199 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3200 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3201 	}
3202 	sctp_sballoc(stcb, sb, m_notify);
3203 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3204 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3205 	}
3206 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3207 	control->end_added = 1;
3208 	if (stcb->asoc.control_pdapi)
3209 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3210 	else {
3211 		/* we really should not see this case */
3212 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3213 	}
3214 	if (stcb->sctp_ep && stcb->sctp_socket) {
3215 		/* This should always be the case */
3216 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3217 		struct socket *so;
3218 
3219 		so = SCTP_INP_SO(stcb->sctp_ep);
3220 		if (!so_locked) {
3221 			atomic_add_int(&stcb->asoc.refcnt, 1);
3222 			SCTP_TCB_UNLOCK(stcb);
3223 			SCTP_SOCKET_LOCK(so, 1);
3224 			SCTP_TCB_LOCK(stcb);
3225 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3226 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3227 				SCTP_SOCKET_UNLOCK(so, 1);
3228 				return;
3229 			}
3230 		}
3231 #endif
3232 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3233 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3234 		if (!so_locked) {
3235 			SCTP_SOCKET_UNLOCK(so, 1);
3236 		}
3237 #endif
3238 	}
3239 }
3240 
/*
 * Notify the ULP that the peer's SHUTDOWN has completed.  For TCP-model
 * sockets (and UDP-model sockets in the TCP pool) the socket is first
 * marked as unable to send more; then, if the application subscribed to
 * shutdown events, an SCTP_SHUTDOWN_EVENT is queued on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* drop the TCB lock while acquiring the socket lock */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while the locks were dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3308 
3309 static void
3310 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3311     int so_locked
3312 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3313     SCTP_UNUSED
3314 #endif
3315 )
3316 {
3317 	struct mbuf *m_notify;
3318 	struct sctp_sender_dry_event *event;
3319 	struct sctp_queued_to_read *control;
3320 
3321 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3322 		/* event not enabled */
3323 		return;
3324 	}
3325 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3326 	if (m_notify == NULL) {
3327 		/* no space left */
3328 		return;
3329 	}
3330 	SCTP_BUF_LEN(m_notify) = 0;
3331 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3332 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3333 	event->sender_dry_flags = 0;
3334 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3335 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3336 
3337 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3338 	SCTP_BUF_NEXT(m_notify) = NULL;
3339 
3340 	/* append to socket */
3341 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3342 	    0, 0, 0, 0, 0, 0, m_notify);
3343 	if (control == NULL) {
3344 		/* no memory */
3345 		sctp_m_freem(m_notify);
3346 		return;
3347 	}
3348 	control->length = SCTP_BUF_LEN(m_notify);
3349 	control->spec_flags = M_NOTIFICATION;
3350 	/* not that we need this */
3351 	control->tail_mbuf = m_notify;
3352 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3353 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3354 }
3355 
3356 
3357 static void
3358 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3359 {
3360 	struct mbuf *m_notify;
3361 	struct sctp_queued_to_read *control;
3362 	struct sctp_stream_reset_event *strreset;
3363 	int len;
3364 
3365 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3366 		/* event not enabled */
3367 		return;
3368 	}
3369 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3370 	if (m_notify == NULL)
3371 		/* no space left */
3372 		return;
3373 	SCTP_BUF_LEN(m_notify) = 0;
3374 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3375 	if (len > M_TRAILINGSPACE(m_notify)) {
3376 		/* never enough room */
3377 		sctp_m_freem(m_notify);
3378 		return;
3379 	}
3380 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3381 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3382 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3383 	strreset->strreset_length = len;
3384 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3385 	strreset->strreset_list[0] = number_entries;
3386 
3387 	SCTP_BUF_LEN(m_notify) = len;
3388 	SCTP_BUF_NEXT(m_notify) = NULL;
3389 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3390 		/* no space */
3391 		sctp_m_freem(m_notify);
3392 		return;
3393 	}
3394 	/* append to socket */
3395 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3396 	    0, 0, 0, 0, 0, 0,
3397 	    m_notify);
3398 	if (control == NULL) {
3399 		/* no memory */
3400 		sctp_m_freem(m_notify);
3401 		return;
3402 	}
3403 	control->spec_flags = M_NOTIFICATION;
3404 	control->length = SCTP_BUF_LEN(m_notify);
3405 	/* not that we need this */
3406 	control->tail_mbuf = m_notify;
3407 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3408 	    control,
3409 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3410 }
3411 
3412 
3413 static void
3414 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3415     int number_entries, uint16_t * list, int flag)
3416 {
3417 	struct mbuf *m_notify;
3418 	struct sctp_queued_to_read *control;
3419 	struct sctp_stream_reset_event *strreset;
3420 	int len;
3421 
3422 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3423 		/* event not enabled */
3424 		return;
3425 	}
3426 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3427 	if (m_notify == NULL)
3428 		/* no space left */
3429 		return;
3430 	SCTP_BUF_LEN(m_notify) = 0;
3431 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3432 	if (len > M_TRAILINGSPACE(m_notify)) {
3433 		/* never enough room */
3434 		sctp_m_freem(m_notify);
3435 		return;
3436 	}
3437 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3438 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3439 	if (number_entries == 0) {
3440 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3441 	} else {
3442 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3443 	}
3444 	strreset->strreset_length = len;
3445 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3446 	if (number_entries) {
3447 		int i;
3448 
3449 		for (i = 0; i < number_entries; i++) {
3450 			strreset->strreset_list[i] = ntohs(list[i]);
3451 		}
3452 	}
3453 	SCTP_BUF_LEN(m_notify) = len;
3454 	SCTP_BUF_NEXT(m_notify) = NULL;
3455 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3456 		/* no space */
3457 		sctp_m_freem(m_notify);
3458 		return;
3459 	}
3460 	/* append to socket */
3461 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3462 	    0, 0, 0, 0, 0, 0,
3463 	    m_notify);
3464 	if (control == NULL) {
3465 		/* no memory */
3466 		sctp_m_freem(m_notify);
3467 		return;
3468 	}
3469 	control->spec_flags = M_NOTIFICATION;
3470 	control->length = SCTP_BUF_LEN(m_notify);
3471 	/* not that we need this */
3472 	control->tail_mbuf = m_notify;
3473 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3474 	    control,
3475 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3476 }
3477 
3478 
/*
 * Central notification dispatcher: translate an internal notification
 * code into the matching ULP event and hand it to the specific
 * sctp_notify_*() helper.  Silently returns if the socket is gone,
 * closed for reading, or (for address events) the association is still
 * in a front state (COOKIE_WAIT/COOKIE_ECHOED).
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* reader side shut down; nobody would see the event */
		return;
	}
	/*
	 * NOTE(review): stcb is already known non-NULL at this point, so
	 * the "stcb &&" below is redundant.
	 */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* only report COMM_UP once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb, error);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			/* recurse to deliver the no-peer-auth event */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_DG_FAIL:
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* (constant name spelling below matches its definition elsewhere) */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_STRDATA_ERR:
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		/* aborts in front states are reported as setup failures */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_HB_RESP:
		break;
	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
		break;

	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3655 
3656 void
3657 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3658 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3659     SCTP_UNUSED
3660 #endif
3661 )
3662 {
3663 	struct sctp_association *asoc;
3664 	struct sctp_stream_out *outs;
3665 	struct sctp_tmit_chunk *chk;
3666 	struct sctp_stream_queue_pending *sp;
3667 	int i;
3668 
3669 	asoc = &stcb->asoc;
3670 
3671 	if (stcb == NULL) {
3672 		return;
3673 	}
3674 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3675 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3676 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3677 		return;
3678 	}
3679 	/* now through all the gunk freeing chunks */
3680 	if (holds_lock == 0) {
3681 		SCTP_TCB_SEND_LOCK(stcb);
3682 	}
3683 	/* sent queue SHOULD be empty */
3684 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3685 		chk = TAILQ_FIRST(&asoc->sent_queue);
3686 		while (chk) {
3687 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3688 			asoc->sent_queue_cnt--;
3689 			if (chk->data != NULL) {
3690 				sctp_free_bufspace(stcb, asoc, chk, 1);
3691 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3692 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3693 				if (chk->data) {
3694 					sctp_m_freem(chk->data);
3695 					chk->data = NULL;
3696 				}
3697 			}
3698 			sctp_free_a_chunk(stcb, chk);
3699 			/* sa_ignore FREED_MEMORY */
3700 			chk = TAILQ_FIRST(&asoc->sent_queue);
3701 		}
3702 	}
3703 	/* pending send queue SHOULD be empty */
3704 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3705 		chk = TAILQ_FIRST(&asoc->send_queue);
3706 		while (chk) {
3707 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3708 			asoc->send_queue_cnt--;
3709 			if (chk->data != NULL) {
3710 				sctp_free_bufspace(stcb, asoc, chk, 1);
3711 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3712 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3713 				if (chk->data) {
3714 					sctp_m_freem(chk->data);
3715 					chk->data = NULL;
3716 				}
3717 			}
3718 			sctp_free_a_chunk(stcb, chk);
3719 			/* sa_ignore FREED_MEMORY */
3720 			chk = TAILQ_FIRST(&asoc->send_queue);
3721 		}
3722 	}
3723 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3724 		/* For each stream */
3725 		outs = &stcb->asoc.strmout[i];
3726 		/* clean up any sends there */
3727 		stcb->asoc.locked_on_sending = NULL;
3728 		sp = TAILQ_FIRST(&outs->outqueue);
3729 		while (sp) {
3730 			stcb->asoc.stream_queue_cnt--;
3731 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3732 			sctp_free_spbufspace(stcb, asoc, sp);
3733 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3734 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3735 			if (sp->data) {
3736 				sctp_m_freem(sp->data);
3737 				sp->data = NULL;
3738 			}
3739 			if (sp->net)
3740 				sctp_free_remote_addr(sp->net);
3741 			sp->net = NULL;
3742 			/* Free the chunk */
3743 			sctp_free_a_strmoq(stcb, sp);
3744 			/* sa_ignore FREED_MEMORY */
3745 			sp = TAILQ_FIRST(&outs->outqueue);
3746 		}
3747 	}
3748 
3749 	if (holds_lock == 0) {
3750 		SCTP_TCB_SEND_UNLOCK(stcb);
3751 	}
3752 }
3753 
3754 void
3755 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3756 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3757     SCTP_UNUSED
3758 #endif
3759 )
3760 {
3761 
3762 	if (stcb == NULL) {
3763 		return;
3764 	}
3765 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3766 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3767 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3768 		return;
3769 	}
3770 	/* Tell them we lost the asoc */
3771 	sctp_report_all_outbound(stcb, 1, so_locked);
3772 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3773 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3774 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3775 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3776 	}
3777 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3778 }
3779 
/*
 * Abort an association in response to an incoming packet: send an ABORT
 * to the peer and, if a TCB exists, notify the ULP and free the
 * association.  With no TCB, the inp itself may be freed when it was
 * already marked gone and holds no other associations.
 *
 * m/iphlen/sh describe the packet that triggered the abort; op_err is an
 * optional error-cause mbuf handed to sctp_send_abort().
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* ABORT goes out with the peer's vtag (0 if we never had a TCB) */
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* drop the TCB lock while acquiring the socket lock */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/* socket already gone and no assocs left: free the inp */
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3825 
3826 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN logs
 * (oldest entries first when the ring has wrapped).
 *
 * NOTE(review): "NOSIY_PRINTS" below looks like a typo of
 * "NOISY_PRINTS"; as written, the whole body compiles out unless that
 * exact misspelling is defined — confirm intent before changing, since
 * the gate affects behavior.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* if wrapped, print the tail half of the ring first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* if wrapped, print the tail half of the ring first */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3887 
3888 #endif
3889 
/*
 * Abort an existing association: notify the ULP (unless the socket is
 * gone), send an ABORT chunk to the peer, and free the TCB.
 *
 * inp/stcb  - endpoint and association to abort.  If stcb is NULL the
 *             association is already gone and we only (possibly) finish
 *             releasing a dangling endpoint.
 * error     - error code handed to the ULP abort notification.
 * op_err    - optional mbuf chain with an error cause, carried inside
 *             the ABORT chunk sent to the peer.
 * so_locked - non-zero if the caller already holds the socket lock
 *             (only meaningful where the socket lock is taken here).
 *
 * The TCB (and its lock) is gone when this returns.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/*
				 * Socket is gone and this was the last
				 * association: finish freeing the endpoint.
				 */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is captured but never used below - looks vestigial */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established association going away drops the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock order is socket lock before TCB lock: drop the TCB lock
	 * (holding a ref so the assoc cannot vanish), take the socket
	 * lock, then re-take the TCB lock.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3955 
/*
 * Handle an "out of the blue" packet: one that matched no existing
 * association.  Walk its chunks; for chunk types that must not provoke
 * a reply we stay silent, a SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE, and
 * anything else falls through to an ABORT carrying op_err
 * (per the RFC 4960 section 8.4 rules).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/*
			 * Socket is gone with no associations left:
			 * finish tearing down the endpoint.
			 */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	/* walk the chunk headers, each copied out into chunk_buf */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* no chunk forbade a reply: answer with an ABORT */
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
4007 
4008 /*
4009  * check the inbound datagram to make sure there is not an abort inside it,
4010  * if there is return 1, else return 0.
4011  */
4012 int
4013 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4014 {
4015 	struct sctp_chunkhdr *ch;
4016 	struct sctp_init_chunk *init_chk, chunk_buf;
4017 	int offset;
4018 	unsigned int chk_length;
4019 
4020 	offset = iphlen + sizeof(struct sctphdr);
4021 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4022 	    (uint8_t *) & chunk_buf);
4023 	while (ch != NULL) {
4024 		chk_length = ntohs(ch->chunk_length);
4025 		if (chk_length < sizeof(*ch)) {
4026 			/* packet is probably corrupt */
4027 			break;
4028 		}
4029 		/* we seem to be ok, is it an abort? */
4030 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4031 			/* yep, tell them */
4032 			return (1);
4033 		}
4034 		if (ch->chunk_type == SCTP_INITIATION) {
4035 			/* need to update the Vtag */
4036 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4037 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4038 			if (init_chk != NULL) {
4039 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4040 			}
4041 		}
4042 		/* Nope, move to the next chunk */
4043 		offset += SCTP_SIZE32(chk_length);
4044 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4045 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4046 	}
4047 	return (0);
4048 }
4049 
4050 /*
4051  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4052  * set (i.e. it's 0) so, create this function to compare link local scopes
4053  */
4054 #ifdef INET6
4055 uint32_t
4056 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4057 {
4058 	struct sockaddr_in6 a, b;
4059 
4060 	/* save copies */
4061 	a = *addr1;
4062 	b = *addr2;
4063 
4064 	if (a.sin6_scope_id == 0)
4065 		if (sa6_recoverscope(&a)) {
4066 			/* can't get scope, so can't match */
4067 			return (0);
4068 		}
4069 	if (b.sin6_scope_id == 0)
4070 		if (sa6_recoverscope(&b)) {
4071 			/* can't get scope, so can't match */
4072 			return (0);
4073 		}
4074 	if (a.sin6_scope_id != b.sin6_scope_id)
4075 		return (0);
4076 
4077 	return (1);
4078 }
4079 
4080 /*
4081  * returns a sockaddr_in6 with embedded scope recovered and removed
4082  */
4083 struct sockaddr_in6 *
4084 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4085 {
4086 	/* check and strip embedded scope junk */
4087 	if (addr->sin6_family == AF_INET6) {
4088 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4089 			if (addr->sin6_scope_id == 0) {
4090 				*store = *addr;
4091 				if (!sa6_recoverscope(store)) {
4092 					/* use the recovered scope */
4093 					addr = store;
4094 				}
4095 			} else {
4096 				/* else, return the original "to" addr */
4097 				in6_clearscope(&addr->sin6_addr);
4098 			}
4099 		}
4100 	}
4101 	return (addr);
4102 }
4103 
4104 #endif
4105 
4106 /*
4107  * are the two addresses the same?  currently a "scopeless" check returns: 1
4108  * if same, 0 if not
4109  */
4110 int
4111 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4112 {
4113 
4114 	/* must be valid */
4115 	if (sa1 == NULL || sa2 == NULL)
4116 		return (0);
4117 
4118 	/* must be the same family */
4119 	if (sa1->sa_family != sa2->sa_family)
4120 		return (0);
4121 
4122 	switch (sa1->sa_family) {
4123 #ifdef INET6
4124 	case AF_INET6:
4125 		{
4126 			/* IPv6 addresses */
4127 			struct sockaddr_in6 *sin6_1, *sin6_2;
4128 
4129 			sin6_1 = (struct sockaddr_in6 *)sa1;
4130 			sin6_2 = (struct sockaddr_in6 *)sa2;
4131 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4132 			    sin6_2));
4133 		}
4134 #endif
4135 	case AF_INET:
4136 		{
4137 			/* IPv4 addresses */
4138 			struct sockaddr_in *sin_1, *sin_2;
4139 
4140 			sin_1 = (struct sockaddr_in *)sa1;
4141 			sin_2 = (struct sockaddr_in *)sa2;
4142 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4143 		}
4144 	default:
4145 		/* we don't do these... */
4146 		return (0);
4147 	}
4148 }
4149 
4150 void
4151 sctp_print_address(struct sockaddr *sa)
4152 {
4153 #ifdef INET6
4154 	char ip6buf[INET6_ADDRSTRLEN];
4155 
4156 	ip6buf[0] = 0;
4157 #endif
4158 
4159 	switch (sa->sa_family) {
4160 #ifdef INET6
4161 	case AF_INET6:
4162 		{
4163 			struct sockaddr_in6 *sin6;
4164 
4165 			sin6 = (struct sockaddr_in6 *)sa;
4166 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4167 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4168 			    ntohs(sin6->sin6_port),
4169 			    sin6->sin6_scope_id);
4170 			break;
4171 		}
4172 #endif
4173 	case AF_INET:
4174 		{
4175 			struct sockaddr_in *sin;
4176 			unsigned char *p;
4177 
4178 			sin = (struct sockaddr_in *)sa;
4179 			p = (unsigned char *)&sin->sin_addr;
4180 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4181 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4182 			break;
4183 		}
4184 	default:
4185 		SCTP_PRINTF("?\n");
4186 		break;
4187 	}
4188 }
4189 
4190 void
4191 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4192 {
4193 	switch (iph->ip_v) {
4194 		case IPVERSION:
4195 		{
4196 			struct sockaddr_in lsa, fsa;
4197 
4198 			bzero(&lsa, sizeof(lsa));
4199 			lsa.sin_len = sizeof(lsa);
4200 			lsa.sin_family = AF_INET;
4201 			lsa.sin_addr = iph->ip_src;
4202 			lsa.sin_port = sh->src_port;
4203 			bzero(&fsa, sizeof(fsa));
4204 			fsa.sin_len = sizeof(fsa);
4205 			fsa.sin_family = AF_INET;
4206 			fsa.sin_addr = iph->ip_dst;
4207 			fsa.sin_port = sh->dest_port;
4208 			SCTP_PRINTF("src: ");
4209 			sctp_print_address((struct sockaddr *)&lsa);
4210 			SCTP_PRINTF("dest: ");
4211 			sctp_print_address((struct sockaddr *)&fsa);
4212 			break;
4213 		}
4214 #ifdef INET6
4215 	case IPV6_VERSION >> 4:
4216 		{
4217 			struct ip6_hdr *ip6;
4218 			struct sockaddr_in6 lsa6, fsa6;
4219 
4220 			ip6 = (struct ip6_hdr *)iph;
4221 			bzero(&lsa6, sizeof(lsa6));
4222 			lsa6.sin6_len = sizeof(lsa6);
4223 			lsa6.sin6_family = AF_INET6;
4224 			lsa6.sin6_addr = ip6->ip6_src;
4225 			lsa6.sin6_port = sh->src_port;
4226 			bzero(&fsa6, sizeof(fsa6));
4227 			fsa6.sin6_len = sizeof(fsa6);
4228 			fsa6.sin6_family = AF_INET6;
4229 			fsa6.sin6_addr = ip6->ip6_dst;
4230 			fsa6.sin6_port = sh->dest_port;
4231 			SCTP_PRINTF("src: ");
4232 			sctp_print_address((struct sockaddr *)&lsa6);
4233 			SCTP_PRINTF("dest: ");
4234 			sctp_print_address((struct sockaddr *)&fsa6);
4235 			break;
4236 		}
4237 #endif
4238 	default:
4239 		/* TSNH */
4240 		break;
4241 	}
4242 }
4243 
/*
 * Migrate read-queue entries that belong to stcb from old_inp's socket
 * to new_inp's socket (peeloff/accept path).  Each migrated mbuf is
 * uncredited from the old socket buffer and re-credited against the
 * new one so both sockets' sb_cc accounting stays correct.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* block readers of the old socket while we unhook its data */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all entries for our target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncredit every mbuf from the old socket buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit every mbuf against the new socket buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4325 
/*
 * Queue a control (and its mbuf chain) onto the endpoint's read queue
 * and account each mbuf in socket buffer sb so select()/poll() see the
 * data.  Zero-length mbufs are freed while walking the chain.
 *
 * end                - non-zero marks the message complete (end_added).
 * inp_read_lock_held - caller already holds the INP read lock.
 * so_locked          - caller already holds the socket lock (only used
 *                      on platforms that take the socket lock here).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	/* notifications are not counted as user data receives */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* credit this mbuf in the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake any reader sleeping on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Take the socket lock respecting lock order
			 * (socket before TCB), holding a ref across the
			 * unlocked window; bail if the socket went away.
			 */
			if (!so_locked) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4434 
4435 
/*
 * Append mbuf chain m to an existing read-queue entry (partial delivery
 * in progress, or appending on the reassembly queue).  When sb is
 * non-NULL the bytes are also accounted in that socket buffer.
 *
 * end != 0 marks the message complete; ctls_cumack is the highest TSN
 * represented by this mbuf (see the note near the bottom).
 *
 * Returns 0 on success, -1 when there is no control, the control is
 * already complete, or m is NULL.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* credit this mbuf in the socket buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake any reader sleeping on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * Lock order: socket before TCB; hold a ref across
			 * the unlocked window and bail if the socket died.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4577 
4578 
4579 
4580 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4581  *************ALTERNATE ROUTING CODE
4582  */
4583 
4584 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4585  *************ALTERNATE ROUTING CODE
4586  */
4587 
4588 struct mbuf *
4589 sctp_generate_invmanparam(int err)
4590 {
4591 	/* Return a MBUF with a invalid mandatory parameter */
4592 	struct mbuf *m;
4593 
4594 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4595 	if (m) {
4596 		struct sctp_paramhdr *ph;
4597 
4598 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4599 		ph = mtod(m, struct sctp_paramhdr *);
4600 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4601 		ph->param_type = htons(err);
4602 	}
4603 	return (m);
4604 }
4605 
4606 #ifdef SCTP_MBCNT_LOGGING
/*
 * Return the bookkeeping space held by chunk tp1 as it is freed: drop
 * chk_cnt from the outbound chunk count, shrink the association's
 * total output queue size, and - for TCP-model sockets - the socket
 * send-buffer count.  Both counters are clamped at zero rather than
 * being allowed to go below it.  No-op when the chunk has no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero instead of letting the total go negative */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* only TCP-model (or in-TCP-pool) sockets account sends in sb_cc */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4638 
4639 #endif
4640 
4641 int
4642 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4643     int reason, int so_locked
4644 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4645     SCTP_UNUSED
4646 #endif
4647 )
4648 {
4649 	struct sctp_stream_out *strq;
4650 	struct sctp_tmit_chunk *chk = NULL;
4651 	struct sctp_stream_queue_pending *sp;
4652 	uint16_t stream = 0, seq = 0;
4653 	uint8_t foundeom = 0;
4654 	int ret_sz = 0;
4655 	int notdone;
4656 	int do_wakeup_routine = 0;
4657 
4658 	stream = tp1->rec.data.stream_number;
4659 	seq = tp1->rec.data.stream_seq;
4660 	do {
4661 		ret_sz += tp1->book_size;
4662 		if (tp1->data != NULL) {
4663 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4664 				sctp_flight_size_decrease(tp1);
4665 				sctp_total_flight_decrease(stcb, tp1);
4666 			}
4667 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4668 			stcb->asoc.peers_rwnd += tp1->send_size;
4669 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4670 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4671 			if (tp1->data) {
4672 				sctp_m_freem(tp1->data);
4673 				tp1->data = NULL;
4674 			}
4675 			do_wakeup_routine = 1;
4676 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4677 				stcb->asoc.sent_queue_cnt_removeable--;
4678 			}
4679 		}
4680 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4681 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4682 		    SCTP_DATA_NOT_FRAG) {
4683 			/* not frag'ed we ae done   */
4684 			notdone = 0;
4685 			foundeom = 1;
4686 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4687 			/* end of frag, we are done */
4688 			notdone = 0;
4689 			foundeom = 1;
4690 		} else {
4691 			/*
4692 			 * Its a begin or middle piece, we must mark all of
4693 			 * it
4694 			 */
4695 			notdone = 1;
4696 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4697 		}
4698 	} while (tp1 && notdone);
4699 	if (foundeom == 0) {
4700 		/*
4701 		 * The multi-part message was scattered across the send and
4702 		 * sent queue.
4703 		 */
4704 next_on_sent:
4705 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4706 		/*
4707 		 * recurse throught the send_queue too, starting at the
4708 		 * beginning.
4709 		 */
4710 		if ((tp1) &&
4711 		    (tp1->rec.data.stream_number == stream) &&
4712 		    (tp1->rec.data.stream_seq == seq)
4713 		    ) {
4714 			/*
4715 			 * save to chk in case we have some on stream out
4716 			 * queue. If so and we have an un-transmitted one we
4717 			 * don't have to fudge the TSN.
4718 			 */
4719 			chk = tp1;
4720 			ret_sz += tp1->book_size;
4721 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4722 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4723 			if (tp1->data) {
4724 				sctp_m_freem(tp1->data);
4725 				tp1->data = NULL;
4726 			}
4727 			/* No flight involved here book the size to 0 */
4728 			tp1->book_size = 0;
4729 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4730 				foundeom = 1;
4731 			}
4732 			do_wakeup_routine = 1;
4733 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4734 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4735 			/*
4736 			 * on to the sent queue so we can wait for it to be
4737 			 * passed by.
4738 			 */
4739 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4740 			    sctp_next);
4741 			stcb->asoc.send_queue_cnt--;
4742 			stcb->asoc.sent_queue_cnt++;
4743 			goto next_on_sent;
4744 		}
4745 	}
4746 	if (foundeom == 0) {
4747 		/*
4748 		 * Still no eom found. That means there is stuff left on the
4749 		 * stream out queue.. yuck.
4750 		 */
4751 		strq = &stcb->asoc.strmout[stream];
4752 		SCTP_TCB_SEND_LOCK(stcb);
4753 		sp = TAILQ_FIRST(&strq->outqueue);
4754 		while (sp->strseq <= seq) {
4755 			/* Check if its our SEQ */
4756 			if (sp->strseq == seq) {
4757 				sp->discard_rest = 1;
4758 				/*
4759 				 * We may need to put a chunk on the queue
4760 				 * that holds the TSN that would have been
4761 				 * sent with the LAST bit.
4762 				 */
4763 				if (chk == NULL) {
4764 					/* Yep, we have to */
4765 					sctp_alloc_a_chunk(stcb, chk);
4766 					if (chk == NULL) {
4767 						/*
4768 						 * we are hosed. All we can
4769 						 * do is nothing.. which
4770 						 * will cause an abort if
4771 						 * the peer is paying
4772 						 * attention.
4773 						 */
4774 						goto oh_well;
4775 					}
4776 					memset(chk, 0, sizeof(*chk));
4777 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4778 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4779 					chk->asoc = &stcb->asoc;
4780 					chk->rec.data.stream_seq = sp->strseq;
4781 					chk->rec.data.stream_number = sp->stream;
4782 					chk->rec.data.payloadtype = sp->ppid;
4783 					chk->rec.data.context = sp->context;
4784 					chk->flags = sp->act_flags;
4785 					chk->whoTo = sp->net;
4786 					atomic_add_int(&chk->whoTo->ref_count, 1);
4787 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4788 					stcb->asoc.pr_sctp_cnt++;
4789 					chk->pr_sctp_on = 1;
4790 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4791 					stcb->asoc.sent_queue_cnt++;
4792 					stcb->asoc.pr_sctp_cnt++;
4793 				} else {
4794 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4795 				}
4796 		oh_well:
4797 				if (sp->data) {
4798 					/*
4799 					 * Pull any data to free up the SB
4800 					 * and allow sender to "add more"
4801 					 * whilc we will throw away :-)
4802 					 */
4803 					sctp_free_spbufspace(stcb, &stcb->asoc,
4804 					    sp);
4805 					ret_sz += sp->length;
4806 					do_wakeup_routine = 1;
4807 					sp->some_taken = 1;
4808 					sctp_m_freem(sp->data);
4809 					sp->length = 0;
4810 					sp->data = NULL;
4811 					sp->tail_mbuf = NULL;
4812 				}
4813 				break;
4814 			} else {
4815 				/* Next one please */
4816 				sp = TAILQ_NEXT(sp, next);
4817 			}
4818 		}		/* End while */
4819 		SCTP_TCB_SEND_UNLOCK(stcb);
4820 	}
4821 	if (do_wakeup_routine) {
4822 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4823 		struct socket *so;
4824 
4825 		so = SCTP_INP_SO(stcb->sctp_ep);
4826 		if (!so_locked) {
4827 			atomic_add_int(&stcb->asoc.refcnt, 1);
4828 			SCTP_TCB_UNLOCK(stcb);
4829 			SCTP_SOCKET_LOCK(so, 1);
4830 			SCTP_TCB_LOCK(stcb);
4831 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4832 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4833 				/* assoc was freed while we were unlocked */
4834 				SCTP_SOCKET_UNLOCK(so, 1);
4835 				return (ret_sz);
4836 			}
4837 		}
4838 #endif
4839 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4840 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4841 		if (!so_locked) {
4842 			SCTP_SOCKET_UNLOCK(so, 1);
4843 		}
4844 #endif
4845 	}
4846 	return (ret_sz);
4847 }
4848 
4849 /*
4850  * checks to see if the given address, sa, is one that is currently known by
4851  * the kernel note: can't distinguish the same address on multiple interfaces
4852  * and doesn't handle multiple addresses with different zone/scope id's note:
4853  * ifa_ifwithaddr() compares the entire sockaddr struct
4854  */
4855 struct sctp_ifa *
4856 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4857     int holds_lock)
4858 {
4859 	struct sctp_laddr *laddr;
4860 
4861 	if (holds_lock == 0) {
4862 		SCTP_INP_RLOCK(inp);
4863 	}
4864 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4865 		if (laddr->ifa == NULL)
4866 			continue;
4867 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4868 			continue;
4869 		if (addr->sa_family == AF_INET) {
4870 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4871 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4872 				/* found him. */
4873 				if (holds_lock == 0) {
4874 					SCTP_INP_RUNLOCK(inp);
4875 				}
4876 				return (laddr->ifa);
4877 				break;
4878 			}
4879 		}
4880 #ifdef INET6
4881 		if (addr->sa_family == AF_INET6) {
4882 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4883 			    &laddr->ifa->address.sin6)) {
4884 				/* found him. */
4885 				if (holds_lock == 0) {
4886 					SCTP_INP_RUNLOCK(inp);
4887 				}
4888 				return (laddr->ifa);
4889 				break;
4890 			}
4891 		}
4892 #endif
4893 	}
4894 	if (holds_lock == 0) {
4895 		SCTP_INP_RUNLOCK(inp);
4896 	}
4897 	return (NULL);
4898 }
4899 
/*
 * Reduce an IPv4 or IPv6 address to a 32-bit hash value for the VRF
 * address-hash buckets.  Unsupported families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)addr;

		/* fold the top half of the v4 address into the bottom */
		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
	}
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
		uint32_t w[4];
		uint32_t h;

		/*
		 * Sum the four 32-bit words of the v6 address (copied out
		 * to dodge alignment/aliasing concerns), then fold the top
		 * half into the bottom.
		 */
		memcpy(w, &sin6->sin6_addr, sizeof(w));
		h = w[0] + w[1] + w[2] + w[3];
		return (h ^ (h >> 16));
	}
	return (0);
}
4922 
4923 struct sctp_ifa *
4924 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4925 {
4926 	struct sctp_ifa *sctp_ifap;
4927 	struct sctp_vrf *vrf;
4928 	struct sctp_ifalist *hash_head;
4929 	uint32_t hash_of_addr;
4930 
4931 	if (holds_lock == 0)
4932 		SCTP_IPI_ADDR_RLOCK();
4933 
4934 	vrf = sctp_find_vrf(vrf_id);
4935 	if (vrf == NULL) {
4936 stage_right:
4937 		if (holds_lock == 0)
4938 			SCTP_IPI_ADDR_RUNLOCK();
4939 		return (NULL);
4940 	}
4941 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4942 
4943 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4944 	if (hash_head == NULL) {
4945 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4946 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4947 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4948 		sctp_print_address(addr);
4949 		SCTP_PRINTF("No such bucket for address\n");
4950 		if (holds_lock == 0)
4951 			SCTP_IPI_ADDR_RUNLOCK();
4952 
4953 		return (NULL);
4954 	}
4955 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4956 		if (sctp_ifap == NULL) {
4957 #ifdef INVARIANTS
4958 			panic("Huh LIST_FOREACH corrupt");
4959 			goto stage_right;
4960 #else
4961 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4962 			goto stage_right;
4963 #endif
4964 		}
4965 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4966 			continue;
4967 		if (addr->sa_family == AF_INET) {
4968 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4969 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4970 				/* found him. */
4971 				if (holds_lock == 0)
4972 					SCTP_IPI_ADDR_RUNLOCK();
4973 				return (sctp_ifap);
4974 				break;
4975 			}
4976 		}
4977 #ifdef INET6
4978 		if (addr->sa_family == AF_INET6) {
4979 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4980 			    &sctp_ifap->address.sin6)) {
4981 				/* found him. */
4982 				if (holds_lock == 0)
4983 					SCTP_IPI_ADDR_RUNLOCK();
4984 				return (sctp_ifap);
4985 				break;
4986 			}
4987 		}
4988 #endif
4989 	}
4990 	if (holds_lock == 0)
4991 		SCTP_IPI_ADDR_RUNLOCK();
4992 	return (NULL);
4993 }
4994 
/*
 * Called after the user has pulled data off the socket: decide whether a
 * window-update SACK (or NR-SACK) should be sent to tell the peer our
 * receive window has re-opened.
 *
 * stcb         - the association; if NULL nothing is done.
 * freed_so_far - in/out: bytes freed by the reader since the last update;
 *                folded into stcb->freed_by_sorcv_sincelast and reset to 0.
 * hold_rlock   - non-zero if the caller holds the endpoint's read-queue
 *                lock; it is dropped around the SACK send and re-acquired
 *                before returning.
 * rwnd_req     - threshold: an update is only sent once the window has
 *                opened by at least this many bytes since last reported.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is going away; no one to update. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Credit what the reader has freed since the last update. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window opened by at least rwnd_req: send a WUP-SACK. */
		if (hold_rlock) {
			/* Drop the read-queue lock while sending. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5083 
5084 int
5085 sctp_sorecvmsg(struct socket *so,
5086     struct uio *uio,
5087     struct mbuf **mp,
5088     struct sockaddr *from,
5089     int fromlen,
5090     int *msg_flags,
5091     struct sctp_sndrcvinfo *sinfo,
5092     int filling_sinfo)
5093 {
5094 	/*
5095 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5096 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5097 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5098 	 * On the way out we may send out any combination of:
5099 	 * MSG_NOTIFICATION MSG_EOR
5100 	 *
5101 	 */
5102 	struct sctp_inpcb *inp = NULL;
5103 	int my_len = 0;
5104 	int cp_len = 0, error = 0;
5105 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5106 	struct mbuf *m = NULL, *embuf = NULL;
5107 	struct sctp_tcb *stcb = NULL;
5108 	int wakeup_read_socket = 0;
5109 	int freecnt_applied = 0;
5110 	int out_flags = 0, in_flags = 0;
5111 	int block_allowed = 1;
5112 	uint32_t freed_so_far = 0;
5113 	uint32_t copied_so_far = 0;
5114 	int in_eeor_mode = 0;
5115 	int no_rcv_needed = 0;
5116 	uint32_t rwnd_req = 0;
5117 	int hold_sblock = 0;
5118 	int hold_rlock = 0;
5119 	int slen = 0;
5120 	uint32_t held_length = 0;
5121 	int sockbuf_lock = 0;
5122 
5123 	if (uio == NULL) {
5124 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5125 		return (EINVAL);
5126 	}
5127 	if (msg_flags) {
5128 		in_flags = *msg_flags;
5129 		if (in_flags & MSG_PEEK)
5130 			SCTP_STAT_INCR(sctps_read_peeks);
5131 	} else {
5132 		in_flags = 0;
5133 	}
5134 	slen = uio->uio_resid;
5135 
5136 	/* Pull in and set up our int flags */
5137 	if (in_flags & MSG_OOB) {
5138 		/* Out of band's NOT supported */
5139 		return (EOPNOTSUPP);
5140 	}
5141 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5142 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5143 		return (EINVAL);
5144 	}
5145 	if ((in_flags & (MSG_DONTWAIT
5146 	    | MSG_NBIO
5147 	    )) ||
5148 	    SCTP_SO_IS_NBIO(so)) {
5149 		block_allowed = 0;
5150 	}
5151 	/* setup the endpoint */
5152 	inp = (struct sctp_inpcb *)so->so_pcb;
5153 	if (inp == NULL) {
5154 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5155 		return (EFAULT);
5156 	}
5157 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5158 	/* Must be at least a MTU's worth */
5159 	if (rwnd_req < SCTP_MIN_RWND)
5160 		rwnd_req = SCTP_MIN_RWND;
5161 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5162 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5163 		sctp_misc_ints(SCTP_SORECV_ENTER,
5164 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5165 	}
5166 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5167 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5168 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5169 	}
5170 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5171 	sockbuf_lock = 1;
5172 	if (error) {
5173 		goto release_unlocked;
5174 	}
5175 restart:
5176 
5177 
5178 restart_nosblocks:
5179 	if (hold_sblock == 0) {
5180 		SOCKBUF_LOCK(&so->so_rcv);
5181 		hold_sblock = 1;
5182 	}
5183 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5184 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5185 		goto out;
5186 	}
5187 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5188 		if (so->so_error) {
5189 			error = so->so_error;
5190 			if ((in_flags & MSG_PEEK) == 0)
5191 				so->so_error = 0;
5192 			goto out;
5193 		} else {
5194 			if (so->so_rcv.sb_cc == 0) {
5195 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5196 				/* indicate EOF */
5197 				error = 0;
5198 				goto out;
5199 			}
5200 		}
5201 	}
5202 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5203 		/* we need to wait for data */
5204 		if ((so->so_rcv.sb_cc == 0) &&
5205 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5206 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5207 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5208 				/*
5209 				 * For active open side clear flags for
5210 				 * re-use passive open is blocked by
5211 				 * connect.
5212 				 */
5213 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5214 					/*
5215 					 * You were aborted, passive side
5216 					 * always hits here
5217 					 */
5218 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5219 					error = ECONNRESET;
5220 					/*
5221 					 * You get this once if you are
5222 					 * active open side
5223 					 */
5224 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5225 						/*
5226 						 * Remove flag if on the
5227 						 * active open side
5228 						 */
5229 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5230 					}
5231 				}
5232 				so->so_state &= ~(SS_ISCONNECTING |
5233 				    SS_ISDISCONNECTING |
5234 				    SS_ISCONFIRMING |
5235 				    SS_ISCONNECTED);
5236 				if (error == 0) {
5237 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5238 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5239 						error = ENOTCONN;
5240 					} else {
5241 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5242 					}
5243 				}
5244 				goto out;
5245 			}
5246 		}
5247 		error = sbwait(&so->so_rcv);
5248 		if (error) {
5249 			goto out;
5250 		}
5251 		held_length = 0;
5252 		goto restart_nosblocks;
5253 	} else if (so->so_rcv.sb_cc == 0) {
5254 		if (so->so_error) {
5255 			error = so->so_error;
5256 			if ((in_flags & MSG_PEEK) == 0)
5257 				so->so_error = 0;
5258 		} else {
5259 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5260 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5261 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5262 					/*
5263 					 * For active open side clear flags
5264 					 * for re-use passive open is
5265 					 * blocked by connect.
5266 					 */
5267 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5268 						/*
5269 						 * You were aborted, passive
5270 						 * side always hits here
5271 						 */
5272 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5273 						error = ECONNRESET;
5274 						/*
5275 						 * You get this once if you
5276 						 * are active open side
5277 						 */
5278 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5279 							/*
5280 							 * Remove flag if on
5281 							 * the active open
5282 							 * side
5283 							 */
5284 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5285 						}
5286 					}
5287 					so->so_state &= ~(SS_ISCONNECTING |
5288 					    SS_ISDISCONNECTING |
5289 					    SS_ISCONFIRMING |
5290 					    SS_ISCONNECTED);
5291 					if (error == 0) {
5292 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5293 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5294 							error = ENOTCONN;
5295 						} else {
5296 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5297 						}
5298 					}
5299 					goto out;
5300 				}
5301 			}
5302 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5303 			error = EWOULDBLOCK;
5304 		}
5305 		goto out;
5306 	}
5307 	if (hold_sblock == 1) {
5308 		SOCKBUF_UNLOCK(&so->so_rcv);
5309 		hold_sblock = 0;
5310 	}
5311 	/* we possibly have data we can read */
5312 	/* sa_ignore FREED_MEMORY */
5313 	control = TAILQ_FIRST(&inp->read_queue);
5314 	if (control == NULL) {
5315 		/*
5316 		 * This could be happening since the appender did the
5317 		 * increment but as not yet did the tailq insert onto the
5318 		 * read_queue
5319 		 */
5320 		if (hold_rlock == 0) {
5321 			SCTP_INP_READ_LOCK(inp);
5322 			hold_rlock = 1;
5323 		}
5324 		control = TAILQ_FIRST(&inp->read_queue);
5325 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5326 #ifdef INVARIANTS
5327 			panic("Huh, its non zero and nothing on control?");
5328 #endif
5329 			so->so_rcv.sb_cc = 0;
5330 		}
5331 		SCTP_INP_READ_UNLOCK(inp);
5332 		hold_rlock = 0;
5333 		goto restart;
5334 	}
5335 	if ((control->length == 0) &&
5336 	    (control->do_not_ref_stcb)) {
5337 		/*
5338 		 * Clean up code for freeing assoc that left behind a
5339 		 * pdapi.. maybe a peer in EEOR that just closed after
5340 		 * sending and never indicated a EOR.
5341 		 */
5342 		if (hold_rlock == 0) {
5343 			hold_rlock = 1;
5344 			SCTP_INP_READ_LOCK(inp);
5345 		}
5346 		control->held_length = 0;
5347 		if (control->data) {
5348 			/* Hmm there is data here .. fix */
5349 			struct mbuf *m_tmp;
5350 			int cnt = 0;
5351 
5352 			m_tmp = control->data;
5353 			while (m_tmp) {
5354 				cnt += SCTP_BUF_LEN(m_tmp);
5355 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5356 					control->tail_mbuf = m_tmp;
5357 					control->end_added = 1;
5358 				}
5359 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5360 			}
5361 			control->length = cnt;
5362 		} else {
5363 			/* remove it */
5364 			TAILQ_REMOVE(&inp->read_queue, control, next);
5365 			/* Add back any hiddend data */
5366 			sctp_free_remote_addr(control->whoFrom);
5367 			sctp_free_a_readq(stcb, control);
5368 		}
5369 		if (hold_rlock) {
5370 			hold_rlock = 0;
5371 			SCTP_INP_READ_UNLOCK(inp);
5372 		}
5373 		goto restart;
5374 	}
5375 	if ((control->length == 0) &&
5376 	    (control->end_added == 1)) {
5377 		/*
5378 		 * Do we also need to check for (control->pdapi_aborted ==
5379 		 * 1)?
5380 		 */
5381 		if (hold_rlock == 0) {
5382 			hold_rlock = 1;
5383 			SCTP_INP_READ_LOCK(inp);
5384 		}
5385 		TAILQ_REMOVE(&inp->read_queue, control, next);
5386 		if (control->data) {
5387 #ifdef INVARIANTS
5388 			panic("control->data not null but control->length == 0");
5389 #else
5390 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5391 			sctp_m_freem(control->data);
5392 			control->data = NULL;
5393 #endif
5394 		}
5395 		if (control->aux_data) {
5396 			sctp_m_free(control->aux_data);
5397 			control->aux_data = NULL;
5398 		}
5399 		sctp_free_remote_addr(control->whoFrom);
5400 		sctp_free_a_readq(stcb, control);
5401 		if (hold_rlock) {
5402 			hold_rlock = 0;
5403 			SCTP_INP_READ_UNLOCK(inp);
5404 		}
5405 		goto restart;
5406 	}
5407 	if (control->length == 0) {
5408 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5409 		    (filling_sinfo)) {
5410 			/* find a more suitable one then this */
5411 			ctl = TAILQ_NEXT(control, next);
5412 			while (ctl) {
5413 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5414 				    (ctl->some_taken ||
5415 				    (ctl->spec_flags & M_NOTIFICATION) ||
5416 				    ((ctl->do_not_ref_stcb == 0) &&
5417 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5418 				    ) {
5419 					/*-
5420 					 * If we have a different TCB next, and there is data
5421 					 * present. If we have already taken some (pdapi), OR we can
5422 					 * ref the tcb and no delivery as started on this stream, we
5423 					 * take it. Note we allow a notification on a different
5424 					 * assoc to be delivered..
5425 					 */
5426 					control = ctl;
5427 					goto found_one;
5428 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5429 					    (ctl->length) &&
5430 					    ((ctl->some_taken) ||
5431 					    ((ctl->do_not_ref_stcb == 0) &&
5432 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5433 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5434 				    ) {
5435 					/*-
5436 					 * If we have the same tcb, and there is data present, and we
5437 					 * have the strm interleave feature present. Then if we have
5438 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5439 					 * not started a delivery for this stream, we can take it.
5440 					 * Note we do NOT allow a notificaiton on the same assoc to
5441 					 * be delivered.
5442 					 */
5443 					control = ctl;
5444 					goto found_one;
5445 				}
5446 				ctl = TAILQ_NEXT(ctl, next);
5447 			}
5448 		}
5449 		/*
5450 		 * if we reach here, not suitable replacement is available
5451 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5452 		 * into the our held count, and its time to sleep again.
5453 		 */
5454 		held_length = so->so_rcv.sb_cc;
5455 		control->held_length = so->so_rcv.sb_cc;
5456 		goto restart;
5457 	}
5458 	/* Clear the held length since there is something to read */
5459 	control->held_length = 0;
5460 	if (hold_rlock) {
5461 		SCTP_INP_READ_UNLOCK(inp);
5462 		hold_rlock = 0;
5463 	}
5464 found_one:
5465 	/*
5466 	 * If we reach here, control has a some data for us to read off.
5467 	 * Note that stcb COULD be NULL.
5468 	 */
5469 	control->some_taken++;
5470 	if (hold_sblock) {
5471 		SOCKBUF_UNLOCK(&so->so_rcv);
5472 		hold_sblock = 0;
5473 	}
5474 	stcb = control->stcb;
5475 	if (stcb) {
5476 		if ((control->do_not_ref_stcb == 0) &&
5477 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5478 			if (freecnt_applied == 0)
5479 				stcb = NULL;
5480 		} else if (control->do_not_ref_stcb == 0) {
5481 			/* you can't free it on me please */
5482 			/*
5483 			 * The lock on the socket buffer protects us so the
5484 			 * free code will stop. But since we used the
5485 			 * socketbuf lock and the sender uses the tcb_lock
5486 			 * to increment, we need to use the atomic add to
5487 			 * the refcnt
5488 			 */
5489 			if (freecnt_applied) {
5490 #ifdef INVARIANTS
5491 				panic("refcnt already incremented");
5492 #else
5493 				printf("refcnt already incremented?\n");
5494 #endif
5495 			} else {
5496 				atomic_add_int(&stcb->asoc.refcnt, 1);
5497 				freecnt_applied = 1;
5498 			}
5499 			/*
5500 			 * Setup to remember how much we have not yet told
5501 			 * the peer our rwnd has opened up. Note we grab the
5502 			 * value from the tcb from last time. Note too that
5503 			 * sack sending clears this when a sack is sent,
5504 			 * which is fine. Once we hit the rwnd_req, we then
5505 			 * will go to the sctp_user_rcvd() that will not
5506 			 * lock until it KNOWs it MUST send a WUP-SACK.
5507 			 */
5508 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5509 			stcb->freed_by_sorcv_sincelast = 0;
5510 		}
5511 	}
5512 	if (stcb &&
5513 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5514 	    control->do_not_ref_stcb == 0) {
5515 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5516 	}
5517 	/* First lets get off the sinfo and sockaddr info */
5518 	if ((sinfo) && filling_sinfo) {
5519 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5520 		nxt = TAILQ_NEXT(control, next);
5521 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5522 			struct sctp_extrcvinfo *s_extra;
5523 
5524 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5525 			if ((nxt) &&
5526 			    (nxt->length)) {
5527 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5528 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5529 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5530 				}
5531 				if (nxt->spec_flags & M_NOTIFICATION) {
5532 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5533 				}
5534 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5535 				s_extra->sreinfo_next_length = nxt->length;
5536 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5537 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5538 				if (nxt->tail_mbuf != NULL) {
5539 					if (nxt->end_added) {
5540 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5541 					}
5542 				}
5543 			} else {
5544 				/*
5545 				 * we explicitly 0 this, since the memcpy
5546 				 * got some other things beyond the older
5547 				 * sinfo_ that is on the control's structure
5548 				 * :-D
5549 				 */
5550 				nxt = NULL;
5551 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5552 				s_extra->sreinfo_next_aid = 0;
5553 				s_extra->sreinfo_next_length = 0;
5554 				s_extra->sreinfo_next_ppid = 0;
5555 				s_extra->sreinfo_next_stream = 0;
5556 			}
5557 		}
5558 		/*
5559 		 * update off the real current cum-ack, if we have an stcb.
5560 		 */
5561 		if ((control->do_not_ref_stcb == 0) && stcb)
5562 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5563 		/*
5564 		 * mask off the high bits, we keep the actual chunk bits in
5565 		 * there.
5566 		 */
5567 		sinfo->sinfo_flags &= 0x00ff;
5568 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5569 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5570 		}
5571 	}
5572 #ifdef SCTP_ASOCLOG_OF_TSNS
5573 	{
5574 		int index, newindex;
5575 		struct sctp_pcbtsn_rlog *entry;
5576 
5577 		do {
5578 			index = inp->readlog_index;
5579 			newindex = index + 1;
5580 			if (newindex >= SCTP_READ_LOG_SIZE) {
5581 				newindex = 0;
5582 			}
5583 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5584 		entry = &inp->readlog[index];
5585 		entry->vtag = control->sinfo_assoc_id;
5586 		entry->strm = control->sinfo_stream;
5587 		entry->seq = control->sinfo_ssn;
5588 		entry->sz = control->length;
5589 		entry->flgs = control->sinfo_flags;
5590 	}
5591 #endif
5592 	if (fromlen && from) {
5593 		struct sockaddr *to;
5594 
5595 #ifdef INET
5596 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5597 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5598 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5599 #else
5600 		/* No AF_INET use AF_INET6 */
5601 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5602 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5603 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5604 #endif
5605 
5606 		to = from;
5607 #if defined(INET) && defined(INET6)
5608 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5609 		    (to->sa_family == AF_INET) &&
5610 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5611 			struct sockaddr_in *sin;
5612 			struct sockaddr_in6 sin6;
5613 
5614 			sin = (struct sockaddr_in *)to;
5615 			bzero(&sin6, sizeof(sin6));
5616 			sin6.sin6_family = AF_INET6;
5617 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5618 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5619 			bcopy(&sin->sin_addr,
5620 			    &sin6.sin6_addr.s6_addr32[3],
5621 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5622 			sin6.sin6_port = sin->sin_port;
5623 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5624 		}
5625 #endif
5626 #if defined(INET6)
5627 		{
5628 			struct sockaddr_in6 lsa6, *to6;
5629 
5630 			to6 = (struct sockaddr_in6 *)to;
5631 			sctp_recover_scope_mac(to6, (&lsa6));
5632 		}
5633 #endif
5634 	}
5635 	/* now copy out what data we can */
5636 	if (mp == NULL) {
5637 		/* copy out each mbuf in the chain up to length */
5638 get_more_data:
5639 		m = control->data;
5640 		while (m) {
5641 			/* Move out all we can */
5642 			cp_len = (int)uio->uio_resid;
5643 			my_len = (int)SCTP_BUF_LEN(m);
5644 			if (cp_len > my_len) {
5645 				/* not enough in this buf */
5646 				cp_len = my_len;
5647 			}
5648 			if (hold_rlock) {
5649 				SCTP_INP_READ_UNLOCK(inp);
5650 				hold_rlock = 0;
5651 			}
5652 			if (cp_len > 0)
5653 				error = uiomove(mtod(m, char *), cp_len, uio);
5654 			/* re-read */
5655 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5656 				goto release;
5657 			}
5658 			if ((control->do_not_ref_stcb == 0) && stcb &&
5659 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5660 				no_rcv_needed = 1;
5661 			}
5662 			if (error) {
5663 				/* error we are out of here */
5664 				goto release;
5665 			}
5666 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5667 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5668 			    ((control->end_added == 0) ||
5669 			    (control->end_added &&
5670 			    (TAILQ_NEXT(control, next) == NULL)))
5671 			    ) {
5672 				SCTP_INP_READ_LOCK(inp);
5673 				hold_rlock = 1;
5674 			}
5675 			if (cp_len == SCTP_BUF_LEN(m)) {
5676 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5677 				    (control->end_added)) {
5678 					out_flags |= MSG_EOR;
5679 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5680 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5681 				}
5682 				if (control->spec_flags & M_NOTIFICATION) {
5683 					out_flags |= MSG_NOTIFICATION;
5684 				}
5685 				/* we ate up the mbuf */
5686 				if (in_flags & MSG_PEEK) {
5687 					/* just looking */
5688 					m = SCTP_BUF_NEXT(m);
5689 					copied_so_far += cp_len;
5690 				} else {
5691 					/* dispose of the mbuf */
5692 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5693 						sctp_sblog(&so->so_rcv,
5694 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5695 					}
5696 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5697 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5698 						sctp_sblog(&so->so_rcv,
5699 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5700 					}
5701 					embuf = m;
5702 					copied_so_far += cp_len;
5703 					freed_so_far += cp_len;
5704 					freed_so_far += MSIZE;
5705 					atomic_subtract_int(&control->length, cp_len);
5706 					control->data = sctp_m_free(m);
5707 					m = control->data;
5708 					/*
5709 					 * been through it all, must hold sb
5710 					 * lock ok to null tail
5711 					 */
5712 					if (control->data == NULL) {
5713 #ifdef INVARIANTS
5714 						if ((control->end_added == 0) ||
5715 						    (TAILQ_NEXT(control, next) == NULL)) {
5716 							/*
5717 							 * If the end is not
5718 							 * added, OR the
5719 							 * next is NOT null
5720 							 * we MUST have the
5721 							 * lock.
5722 							 */
5723 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5724 								panic("Hmm we don't own the lock?");
5725 							}
5726 						}
5727 #endif
5728 						control->tail_mbuf = NULL;
5729 #ifdef INVARIANTS
5730 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5731 							panic("end_added, nothing left and no MSG_EOR");
5732 						}
5733 #endif
5734 					}
5735 				}
5736 			} else {
5737 				/* Do we need to trim the mbuf? */
5738 				if (control->spec_flags & M_NOTIFICATION) {
5739 					out_flags |= MSG_NOTIFICATION;
5740 				}
5741 				if ((in_flags & MSG_PEEK) == 0) {
5742 					SCTP_BUF_RESV_UF(m, cp_len);
5743 					SCTP_BUF_LEN(m) -= cp_len;
5744 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5745 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5746 					}
5747 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5748 					if ((control->do_not_ref_stcb == 0) &&
5749 					    stcb) {
5750 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5751 					}
5752 					copied_so_far += cp_len;
5753 					embuf = m;
5754 					freed_so_far += cp_len;
5755 					freed_so_far += MSIZE;
5756 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5757 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5758 						    SCTP_LOG_SBRESULT, 0);
5759 					}
5760 					atomic_subtract_int(&control->length, cp_len);
5761 				} else {
5762 					copied_so_far += cp_len;
5763 				}
5764 			}
5765 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5766 				break;
5767 			}
5768 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5769 			    (control->do_not_ref_stcb == 0) &&
5770 			    (freed_so_far >= rwnd_req)) {
5771 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5772 			}
5773 		}		/* end while(m) */
5774 		/*
5775 		 * At this point we have looked at it all and we either have
5776 		 * a MSG_EOR/or read all the user wants... <OR>
5777 		 * control->length == 0.
5778 		 */
5779 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5780 			/* we are done with this control */
5781 			if (control->length == 0) {
5782 				if (control->data) {
5783 #ifdef INVARIANTS
5784 					panic("control->data not null at read eor?");
5785 #else
5786 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5787 					sctp_m_freem(control->data);
5788 					control->data = NULL;
5789 #endif
5790 				}
5791 		done_with_control:
5792 				if (TAILQ_NEXT(control, next) == NULL) {
5793 					/*
5794 					 * If we don't have a next we need a
5795 					 * lock, if there is a next
5796 					 * interrupt is filling ahead of us
5797 					 * and we don't need a lock to
5798 					 * remove this guy (which is the
5799 					 * head of the queue).
5800 					 */
5801 					if (hold_rlock == 0) {
5802 						SCTP_INP_READ_LOCK(inp);
5803 						hold_rlock = 1;
5804 					}
5805 				}
5806 				TAILQ_REMOVE(&inp->read_queue, control, next);
5807 				/* Add back any hiddend data */
5808 				if (control->held_length) {
5809 					held_length = 0;
5810 					control->held_length = 0;
5811 					wakeup_read_socket = 1;
5812 				}
5813 				if (control->aux_data) {
5814 					sctp_m_free(control->aux_data);
5815 					control->aux_data = NULL;
5816 				}
5817 				no_rcv_needed = control->do_not_ref_stcb;
5818 				sctp_free_remote_addr(control->whoFrom);
5819 				control->data = NULL;
5820 				sctp_free_a_readq(stcb, control);
5821 				control = NULL;
5822 				if ((freed_so_far >= rwnd_req) &&
5823 				    (no_rcv_needed == 0))
5824 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5825 
5826 			} else {
5827 				/*
5828 				 * The user did not read all of this
5829 				 * message, turn off the returned MSG_EOR
5830 				 * since we are leaving more behind on the
5831 				 * control to read.
5832 				 */
5833 #ifdef INVARIANTS
5834 				if (control->end_added &&
5835 				    (control->data == NULL) &&
5836 				    (control->tail_mbuf == NULL)) {
5837 					panic("Gak, control->length is corrupt?");
5838 				}
5839 #endif
5840 				no_rcv_needed = control->do_not_ref_stcb;
5841 				out_flags &= ~MSG_EOR;
5842 			}
5843 		}
5844 		if (out_flags & MSG_EOR) {
5845 			goto release;
5846 		}
5847 		if ((uio->uio_resid == 0) ||
5848 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5849 		    ) {
5850 			goto release;
5851 		}
5852 		/*
5853 		 * If I hit here the receiver wants more and this message is
5854 		 * NOT done (pd-api). So two questions. Can we block? if not
5855 		 * we are done. Did the user NOT set MSG_WAITALL?
5856 		 */
5857 		if (block_allowed == 0) {
5858 			goto release;
5859 		}
5860 		/*
5861 		 * We need to wait for more data a few things: - We don't
5862 		 * sbunlock() so we don't get someone else reading. - We
5863 		 * must be sure to account for the case where what is added
5864 		 * is NOT to our control when we wakeup.
5865 		 */
5866 
5867 		/*
5868 		 * Do we need to tell the transport a rwnd update might be
5869 		 * needed before we go to sleep?
5870 		 */
5871 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5872 		    ((freed_so_far >= rwnd_req) &&
5873 		    (control->do_not_ref_stcb == 0) &&
5874 		    (no_rcv_needed == 0))) {
5875 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5876 		}
5877 wait_some_more:
5878 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5879 			goto release;
5880 		}
5881 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5882 			goto release;
5883 
5884 		if (hold_rlock == 1) {
5885 			SCTP_INP_READ_UNLOCK(inp);
5886 			hold_rlock = 0;
5887 		}
5888 		if (hold_sblock == 0) {
5889 			SOCKBUF_LOCK(&so->so_rcv);
5890 			hold_sblock = 1;
5891 		}
5892 		if ((copied_so_far) && (control->length == 0) &&
5893 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5894 		    ) {
5895 			goto release;
5896 		}
5897 		if (so->so_rcv.sb_cc <= control->held_length) {
5898 			error = sbwait(&so->so_rcv);
5899 			if (error) {
5900 				goto release;
5901 			}
5902 			control->held_length = 0;
5903 		}
5904 		if (hold_sblock) {
5905 			SOCKBUF_UNLOCK(&so->so_rcv);
5906 			hold_sblock = 0;
5907 		}
5908 		if (control->length == 0) {
5909 			/* still nothing here */
5910 			if (control->end_added == 1) {
5911 				/* he aborted, or is done i.e.did a shutdown */
5912 				out_flags |= MSG_EOR;
5913 				if (control->pdapi_aborted) {
5914 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5915 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5916 
5917 					out_flags |= MSG_TRUNC;
5918 				} else {
5919 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5920 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5921 				}
5922 				goto done_with_control;
5923 			}
5924 			if (so->so_rcv.sb_cc > held_length) {
5925 				control->held_length = so->so_rcv.sb_cc;
5926 				held_length = 0;
5927 			}
5928 			goto wait_some_more;
5929 		} else if (control->data == NULL) {
5930 			/*
5931 			 * we must re-sync since data is probably being
5932 			 * added
5933 			 */
5934 			SCTP_INP_READ_LOCK(inp);
5935 			if ((control->length > 0) && (control->data == NULL)) {
5936 				/*
5937 				 * big trouble.. we have the lock and its
5938 				 * corrupt?
5939 				 */
5940 #ifdef INVARIANTS
5941 				panic("Impossible data==NULL length !=0");
5942 #endif
5943 				out_flags |= MSG_EOR;
5944 				out_flags |= MSG_TRUNC;
5945 				control->length = 0;
5946 				SCTP_INP_READ_UNLOCK(inp);
5947 				goto done_with_control;
5948 			}
5949 			SCTP_INP_READ_UNLOCK(inp);
5950 			/* We will fall around to get more data */
5951 		}
5952 		goto get_more_data;
5953 	} else {
5954 		/*-
5955 		 * Give caller back the mbuf chain,
5956 		 * store in uio_resid the length
5957 		 */
5958 		wakeup_read_socket = 0;
5959 		if ((control->end_added == 0) ||
5960 		    (TAILQ_NEXT(control, next) == NULL)) {
5961 			/* Need to get rlock */
5962 			if (hold_rlock == 0) {
5963 				SCTP_INP_READ_LOCK(inp);
5964 				hold_rlock = 1;
5965 			}
5966 		}
5967 		if (control->end_added) {
5968 			out_flags |= MSG_EOR;
5969 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5970 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5971 		}
5972 		if (control->spec_flags & M_NOTIFICATION) {
5973 			out_flags |= MSG_NOTIFICATION;
5974 		}
5975 		uio->uio_resid = control->length;
5976 		*mp = control->data;
5977 		m = control->data;
5978 		while (m) {
5979 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5980 				sctp_sblog(&so->so_rcv,
5981 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5982 			}
5983 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5984 			freed_so_far += SCTP_BUF_LEN(m);
5985 			freed_so_far += MSIZE;
5986 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5987 				sctp_sblog(&so->so_rcv,
5988 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5989 			}
5990 			m = SCTP_BUF_NEXT(m);
5991 		}
5992 		control->data = control->tail_mbuf = NULL;
5993 		control->length = 0;
5994 		if (out_flags & MSG_EOR) {
5995 			/* Done with this control */
5996 			goto done_with_control;
5997 		}
5998 	}
5999 release:
6000 	if (hold_rlock == 1) {
6001 		SCTP_INP_READ_UNLOCK(inp);
6002 		hold_rlock = 0;
6003 	}
6004 	if (hold_sblock == 1) {
6005 		SOCKBUF_UNLOCK(&so->so_rcv);
6006 		hold_sblock = 0;
6007 	}
6008 	sbunlock(&so->so_rcv);
6009 	sockbuf_lock = 0;
6010 
6011 release_unlocked:
6012 	if (hold_sblock) {
6013 		SOCKBUF_UNLOCK(&so->so_rcv);
6014 		hold_sblock = 0;
6015 	}
6016 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6017 		if ((freed_so_far >= rwnd_req) &&
6018 		    (control && (control->do_not_ref_stcb == 0)) &&
6019 		    (no_rcv_needed == 0))
6020 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6021 	}
6022 out:
6023 	if (msg_flags) {
6024 		*msg_flags = out_flags;
6025 	}
6026 	if (((out_flags & MSG_EOR) == 0) &&
6027 	    ((in_flags & MSG_PEEK) == 0) &&
6028 	    (sinfo) &&
6029 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6030 		struct sctp_extrcvinfo *s_extra;
6031 
6032 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6033 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6034 	}
6035 	if (hold_rlock == 1) {
6036 		SCTP_INP_READ_UNLOCK(inp);
6037 		hold_rlock = 0;
6038 	}
6039 	if (hold_sblock) {
6040 		SOCKBUF_UNLOCK(&so->so_rcv);
6041 		hold_sblock = 0;
6042 	}
6043 	if (sockbuf_lock) {
6044 		sbunlock(&so->so_rcv);
6045 	}
6046 	if (freecnt_applied) {
6047 		/*
6048 		 * The lock on the socket buffer protects us so the free
6049 		 * code will stop. But since we used the socketbuf lock and
6050 		 * the sender uses the tcb_lock to increment, we need to use
6051 		 * the atomic add to the refcnt.
6052 		 */
6053 		if (stcb == NULL) {
6054 #ifdef INVARIANTS
6055 			panic("stcb for refcnt has gone NULL?");
6056 			goto stage_left;
6057 #else
6058 			goto stage_left;
6059 #endif
6060 		}
6061 		atomic_add_int(&stcb->asoc.refcnt, -1);
6062 		freecnt_applied = 0;
6063 		/* Save the value back for next time */
6064 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6065 	}
6066 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6067 		if (stcb) {
6068 			sctp_misc_ints(SCTP_SORECV_DONE,
6069 			    freed_so_far,
6070 			    ((uio) ? (slen - uio->uio_resid) : slen),
6071 			    stcb->asoc.my_rwnd,
6072 			    so->so_rcv.sb_cc);
6073 		} else {
6074 			sctp_misc_ints(SCTP_SORECV_DONE,
6075 			    freed_so_far,
6076 			    ((uio) ? (slen - uio->uio_resid) : slen),
6077 			    0,
6078 			    so->so_rcv.sb_cc);
6079 		}
6080 	}
6081 stage_left:
6082 	if (wakeup_read_socket) {
6083 		sctp_sorwakeup(inp, so);
6084 	}
6085 	return (error);
6086 }
6087 
6088 
6089 #ifdef SCTP_MBUF_LOGGING
6090 struct mbuf *
6091 sctp_m_free(struct mbuf *m)
6092 {
6093 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6094 		if (SCTP_BUF_IS_EXTENDED(m)) {
6095 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6096 		}
6097 	}
6098 	return (m_free(m));
6099 }
6100 
6101 void
6102 sctp_m_freem(struct mbuf *mb)
6103 {
6104 	while (mb != NULL)
6105 		mb = sctp_m_free(mb);
6106 }
6107 
6108 #endif
6109 
6110 int
6111 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6112 {
6113 	/*
6114 	 * Given a local address. For all associations that holds the
6115 	 * address, request a peer-set-primary.
6116 	 */
6117 	struct sctp_ifa *ifa;
6118 	struct sctp_laddr *wi;
6119 
6120 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6121 	if (ifa == NULL) {
6122 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6123 		return (EADDRNOTAVAIL);
6124 	}
6125 	/*
6126 	 * Now that we have the ifa we must awaken the iterator with this
6127 	 * message.
6128 	 */
6129 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6130 	if (wi == NULL) {
6131 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6132 		return (ENOMEM);
6133 	}
6134 	/* Now incr the count and int wi structure */
6135 	SCTP_INCR_LADDR_COUNT();
6136 	bzero(wi, sizeof(*wi));
6137 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6138 	wi->ifa = ifa;
6139 	wi->action = SCTP_SET_PRIM_ADDR;
6140 	atomic_add_int(&ifa->refcount, 1);
6141 
6142 	/* Now add it to the work queue */
6143 	SCTP_IPI_ITERATOR_WQ_LOCK();
6144 	/*
6145 	 * Should this really be a tailq? As it is we will process the
6146 	 * newest first :-0
6147 	 */
6148 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6149 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
6150 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6151 	    (struct sctp_inpcb *)NULL,
6152 	    (struct sctp_tcb *)NULL,
6153 	    (struct sctp_nets *)NULL);
6154 	return (0);
6155 }
6156 
6157 
6158 int
6159 sctp_soreceive(struct socket *so,
6160     struct sockaddr **psa,
6161     struct uio *uio,
6162     struct mbuf **mp0,
6163     struct mbuf **controlp,
6164     int *flagsp)
6165 {
6166 	int error, fromlen;
6167 	uint8_t sockbuf[256];
6168 	struct sockaddr *from;
6169 	struct sctp_extrcvinfo sinfo;
6170 	int filling_sinfo = 1;
6171 	struct sctp_inpcb *inp;
6172 
6173 	inp = (struct sctp_inpcb *)so->so_pcb;
6174 	/* pickup the assoc we are reading from */
6175 	if (inp == NULL) {
6176 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6177 		return (EINVAL);
6178 	}
6179 	if ((sctp_is_feature_off(inp,
6180 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6181 	    (controlp == NULL)) {
6182 		/* user does not want the sndrcv ctl */
6183 		filling_sinfo = 0;
6184 	}
6185 	if (psa) {
6186 		from = (struct sockaddr *)sockbuf;
6187 		fromlen = sizeof(sockbuf);
6188 		from->sa_len = 0;
6189 	} else {
6190 		from = NULL;
6191 		fromlen = 0;
6192 	}
6193 
6194 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6195 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6196 	if ((controlp) && (filling_sinfo)) {
6197 		/* copy back the sinfo in a CMSG format */
6198 		if (filling_sinfo)
6199 			*controlp = sctp_build_ctl_nchunk(inp,
6200 			    (struct sctp_sndrcvinfo *)&sinfo);
6201 		else
6202 			*controlp = NULL;
6203 	}
6204 	if (psa) {
6205 		/* copy back the address info */
6206 		if (from && from->sa_len) {
6207 			*psa = sodupsockaddr(from, M_NOWAIT);
6208 		} else {
6209 			*psa = NULL;
6210 		}
6211 	}
6212 	return (error);
6213 }
6214 
6215 
6216 int
6217 sctp_l_soreceive(struct socket *so,
6218     struct sockaddr **name,
6219     struct uio *uio,
6220     char **controlp,
6221     int *controllen,
6222     int *flag)
6223 {
6224 	int error, fromlen;
6225 	uint8_t sockbuf[256];
6226 	struct sockaddr *from;
6227 	struct sctp_extrcvinfo sinfo;
6228 	int filling_sinfo = 1;
6229 	struct sctp_inpcb *inp;
6230 
6231 	inp = (struct sctp_inpcb *)so->so_pcb;
6232 	/* pickup the assoc we are reading from */
6233 	if (inp == NULL) {
6234 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6235 		return (EINVAL);
6236 	}
6237 	if ((sctp_is_feature_off(inp,
6238 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6239 	    (controlp == NULL)) {
6240 		/* user does not want the sndrcv ctl */
6241 		filling_sinfo = 0;
6242 	}
6243 	if (name) {
6244 		from = (struct sockaddr *)sockbuf;
6245 		fromlen = sizeof(sockbuf);
6246 		from->sa_len = 0;
6247 	} else {
6248 		from = NULL;
6249 		fromlen = 0;
6250 	}
6251 
6252 	error = sctp_sorecvmsg(so, uio,
6253 	    (struct mbuf **)NULL,
6254 	    from, fromlen, flag,
6255 	    (struct sctp_sndrcvinfo *)&sinfo,
6256 	    filling_sinfo);
6257 	if ((controlp) && (filling_sinfo)) {
6258 		/*
6259 		 * copy back the sinfo in a CMSG format note that the caller
6260 		 * has reponsibility for freeing the memory.
6261 		 */
6262 		if (filling_sinfo)
6263 			*controlp = sctp_build_ctl_cchunk(inp,
6264 			    controllen,
6265 			    (struct sctp_sndrcvinfo *)&sinfo);
6266 	}
6267 	if (name) {
6268 		/* copy back the address info */
6269 		if (from && from->sa_len) {
6270 			*name = sodupsockaddr(from, M_WAIT);
6271 		} else {
6272 			*name = NULL;
6273 		}
6274 	}
6275 	return (error);
6276 }
6277 
6278 
6279 
6280 
6281 
6282 
6283 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add up to totaddr packed sockaddrs (a mix of AF_INET and
	 * AF_INET6 entries laid out back to back starting at addr) to
	 * the association as confirmed remote addresses.  Returns the
	 * number of addresses added.  On failure the association has
	 * already been freed, *error is set to ENOBUFS, and the caller
	 * must NOT touch (or unlock) stcb afterwards.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for any other sa_family, incr keeps its
		 * previous value (0 on the first iteration), so sa does
		 * not advance and the same bytes are re-examined each
		 * pass -- confirm callers pre-validate the list (see
		 * sctp_connectx_helper_find, which stops totaddr at the
		 * first unknown family).
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6324 
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	/*
	 * Walk a packed list of up to *totaddr sockaddrs, validating
	 * each entry's length/family and counting v4 vs. v6 addresses,
	 * while checking whether any of them already belongs to an
	 * association on this endpoint.
	 *
	 * Returns an existing stcb if one is found -- in that case the
	 * inp reference taken below is deliberately NOT dropped; the
	 * caller inherits it.  Returns NULL otherwise.  On a malformed
	 * address, *error is EINVAL and *bad_addr is set.  *totaddr is
	 * trimmed at the first unknown family or when the byte limit
	 * would be exceeded.
	 */
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				/* mis-sized sockaddr_in entry */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else if (sa->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v6) += 1;
			incr = sizeof(struct sockaddr_in6);
			if (sa->sa_len != incr) {
				/* mis-sized sockaddr_in6 entry */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else {
			*totaddr = i;
			/* we are done */
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/*
			 * Already have or am bringing up an association:
			 * return it, keeping the inp ref for the caller.
			 */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		if ((at + incr) > (size_t)limit) {
			/*
			 * NOTE(review): `at` is never advanced, so this
			 * compares only one entry's size against limit --
			 * TODO confirm whether `at += incr` was intended.
			 */
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6388 
6389 /*
6390  * sctp_bindx(ADD) for one address.
6391  * assumes all arguments are valid/checked by caller.
6392  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Add one address to a subset-bound endpoint.  On any failure
	 * *error is set (EINVAL, EADDRINUSE, or whatever the bind path
	 * returns); on success *error is left untouched.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			/* mis-sized sockaddr_in6 */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the mapped address and bind it as v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			/* mis-sized sockaddr_in */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address: this becomes the primary bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether some other endpoint already owns addr:port */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* port must be 0 for the address-management call */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* another endpoint holds this address/port pair */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6514 
6515 /*
6516  * sctp_bindx(DELETE) for one address.
6517  * assumes all arguments are valid/checked by caller.
6518  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one address from a subset-bound endpoint.  On failure
	 * *error is set to EINVAL or the address-management result; on
	 * success it is left untouched.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			/* mis-sized sockaddr_in6 */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the mapped address and delete it as v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			/* mis-sized sockaddr_in */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6597 
6598 /*
6599  * returns the valid local address count for an assoc, taking into account
6600  * all scoping rules
6601  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, v4-private,
	 * link-local, site-local) and the endpoint's address-family
	 * restrictions.  Returns the count; 0 if the VRF is missing.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* Which address families may this endpoint use? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a v6 socket also accepts v4 unless it is v6-only */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6733 
6734 #if defined(SCTP_LOCAL_TRACE_BUF)
6735 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the global lock-free trace ring.  A CAS
	 * loop reserves a slot index; the entry fields are then filled
	 * in without further synchronization (a concurrent wrap could
	 * in principle reuse the slot -- accepted for a debug trace).
	 */
	uint32_t saveindex, newindex;

	/* Reserve a slot: advance index, wrapping back to 1 at the end. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* On wrap we write entry 0 (the next writer will use slot 1). */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6761 
6762 #endif
6763 /* We will need to add support
6764  * to bind the ports and such here
6765  * so we can do UDP tunneling. In
6766  * the mean-time, we return error
6767  */
6768 #include <netinet/udp.h>
6769 #include <netinet/udp_var.h>
6770 #include <sys/proc.h>
6771 #ifdef INET6
6772 #include <netinet6/sctp6_var.h>
6773 #endif
6774 
6775 static void
6776 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6777 {
6778 	struct ip *iph;
6779 	struct mbuf *sp, *last;
6780 	struct udphdr *uhdr;
6781 	uint16_t port = 0, len;
6782 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6783 
6784 	/*
6785 	 * Split out the mbuf chain. Leave the IP header in m, place the
6786 	 * rest in the sp.
6787 	 */
6788 	if ((m->m_flags & M_PKTHDR) == 0) {
6789 		/* Can't handle one that is not a pkt hdr */
6790 		goto out;
6791 	}
6792 	/* pull the src port */
6793 	iph = mtod(m, struct ip *);
6794 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6795 
6796 	port = uhdr->uh_sport;
6797 	sp = m_split(m, off, M_DONTWAIT);
6798 	if (sp == NULL) {
6799 		/* Gak, drop packet, we can't do a split */
6800 		goto out;
6801 	}
6802 	if (sp->m_pkthdr.len < header_size) {
6803 		/* Gak, packet can't have an SCTP header in it - to small */
6804 		m_freem(sp);
6805 		goto out;
6806 	}
6807 	/* ok now pull up the UDP header and SCTP header together */
6808 	sp = m_pullup(sp, header_size);
6809 	if (sp == NULL) {
6810 		/* Gak pullup failed */
6811 		goto out;
6812 	}
6813 	/* trim out the UDP header */
6814 	m_adj(sp, sizeof(struct udphdr));
6815 
6816 	/* Now reconstruct the mbuf chain */
6817 	/* 1) find last one */
6818 	last = m;
6819 	while (last->m_next != NULL) {
6820 		last = last->m_next;
6821 	}
6822 	last->m_next = sp;
6823 	m->m_pkthdr.len += sp->m_pkthdr.len;
6824 	last = m;
6825 	while (last != NULL) {
6826 		last = last->m_next;
6827 	}
6828 	/* Now its ready for sctp_input or sctp6_input */
6829 	iph = mtod(m, struct ip *);
6830 	switch (iph->ip_v) {
6831 	case IPVERSION:
6832 		{
6833 			/* its IPv4 */
6834 			len = SCTP_GET_IPV4_LENGTH(iph);
6835 			len -= sizeof(struct udphdr);
6836 			SCTP_GET_IPV4_LENGTH(iph) = len;
6837 			sctp_input_with_port(m, off, port);
6838 			break;
6839 		}
6840 #ifdef INET6
6841 	case IPV6_VERSION >> 4:
6842 		{
6843 			/* its IPv6 - NOT supported */
6844 			goto out;
6845 			break;
6846 
6847 		}
6848 #endif
6849 	default:
6850 		{
6851 			m_freem(m);
6852 			break;
6853 		}
6854 	}
6855 	return;
6856 out:
6857 	m_freem(m);
6858 }
6859 
6860 void
6861 sctp_over_udp_stop(void)
6862 {
6863 	struct socket *sop;
6864 
6865 	/*
6866 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6867 	 * for writting!
6868 	 */
6869 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6870 		/* Nothing to do */
6871 		return;
6872 	}
6873 	sop = SCTP_BASE_INFO(udp_tun_socket);
6874 	soclose(sop);
6875 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6876 }
6877 int
6878 sctp_over_udp_start(void)
6879 {
6880 	uint16_t port;
6881 	int ret;
6882 	struct sockaddr_in sin;
6883 	struct socket *sop = NULL;
6884 	struct thread *th;
6885 	struct ucred *cred;
6886 
6887 	/*
6888 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6889 	 * for writting!
6890 	 */
6891 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6892 	if (port == 0) {
6893 		/* Must have a port set */
6894 		return (EINVAL);
6895 	}
6896 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6897 		/* Already running -- must stop first */
6898 		return (EALREADY);
6899 	}
6900 	th = curthread;
6901 	cred = th->td_ucred;
6902 	if ((ret = socreate(PF_INET, &sop,
6903 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6904 		return (ret);
6905 	}
6906 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6907 	/* call the special UDP hook */
6908 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6909 	if (ret) {
6910 		goto exit_stage_left;
6911 	}
6912 	/* Ok we have a socket, bind it to the port */
6913 	memset(&sin, 0, sizeof(sin));
6914 	sin.sin_len = sizeof(sin);
6915 	sin.sin_family = AF_INET;
6916 	sin.sin_port = htons(port);
6917 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6918 	if (ret) {
6919 		/* Close up we cant get the port */
6920 exit_stage_left:
6921 		sctp_over_udp_stop();
6922 		return (ret);
6923 	}
6924 	/*
6925 	 * Ok we should now get UDP packets directly to our input routine
6926 	 * sctp_recv_upd_tunneled_packet().
6927 	 */
6928 	return (0);
6929 }
6930