xref: /freebsd/sys/netinet/sctputil.c (revision aa64588d28258aef88cc33b8043112e8856948d0)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 #include <netinet/sctp_bsd_addr.h>
52 
53 #define NUMBER_OF_MTU_SIZES 18
54 
55 
56 #ifndef KTR_SCTP
57 #define KTR_SCTP KTR_SUBSYS
58 #endif
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 
221 }
222 
223 
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252     int from)
253 {
254 	struct sctp_cwnd_log sctp_clog;
255 
256 	if (control == NULL) {
257 		SCTP_PRINTF("Gak log of NULL?\n");
258 		return;
259 	}
260 	sctp_clog.x.strlog.stcb = control->stcb;
261 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 	sctp_clog.x.strlog.strm = control->sinfo_stream;
264 	if (poschk != NULL) {
265 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 	} else {
268 		sctp_clog.x.strlog.e_tsn = 0;
269 		sctp_clog.x.strlog.e_sseq = 0;
270 	}
271 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 	    SCTP_LOG_EVENT_STRM,
273 	    from,
274 	    sctp_clog.x.misc.log1,
275 	    sctp_clog.x.misc.log2,
276 	    sctp_clog.x.misc.log3,
277 	    sctp_clog.x.misc.log4);
278 
279 }
280 
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 	struct sctp_cwnd_log sctp_clog;
285 
286 	sctp_clog.x.cwnd.net = net;
287 	if (stcb->asoc.send_queue_cnt > 255)
288 		sctp_clog.x.cwnd.cnt_in_send = 255;
289 	else
290 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 	if (stcb->asoc.stream_queue_cnt > 255)
292 		sctp_clog.x.cwnd.cnt_in_str = 255;
293 	else
294 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295 
296 	if (net) {
297 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 		sctp_clog.x.cwnd.inflight = net->flight_size;
299 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 	}
303 	if (SCTP_CWNDLOG_PRESEND == from) {
304 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 	}
306 	sctp_clog.x.cwnd.cwnd_augment = augment;
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	    SCTP_LOG_EVENT_CWND,
309 	    from,
310 	    sctp_clog.x.misc.log1,
311 	    sctp_clog.x.misc.log2,
312 	    sctp_clog.x.misc.log3,
313 	    sctp_clog.x.misc.log4);
314 
315 }
316 
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 	struct sctp_cwnd_log sctp_clog;
321 
322 	memset(&sctp_clog, 0, sizeof(sctp_clog));
323 	if (inp) {
324 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325 
326 	} else {
327 		sctp_clog.x.lock.sock = (void *)NULL;
328 	}
329 	sctp_clog.x.lock.inp = (void *)inp;
330 	if (stcb) {
331 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 	} else {
333 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 	}
335 	if (inp) {
336 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 	} else {
339 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 	if (inp->sctp_socket) {
344 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 	} else {
348 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 	    SCTP_LOG_LOCK_EVENT,
354 	    from,
355 	    sctp_clog.x.misc.log1,
356 	    sctp_clog.x.misc.log2,
357 	    sctp_clog.x.misc.log3,
358 	    sctp_clog.x.misc.log4);
359 
360 }
361 
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 	struct sctp_cwnd_log sctp_clog;
366 
367 	memset(&sctp_clog, 0, sizeof(sctp_clog));
368 	sctp_clog.x.cwnd.net = net;
369 	sctp_clog.x.cwnd.cwnd_new_value = error;
370 	sctp_clog.x.cwnd.inflight = net->flight_size;
371 	sctp_clog.x.cwnd.cwnd_augment = burst;
372 	if (stcb->asoc.send_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_send = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 	if (stcb->asoc.stream_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_str = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 	    SCTP_LOG_EVENT_MAXBURST,
382 	    from,
383 	    sctp_clog.x.misc.log1,
384 	    sctp_clog.x.misc.log2,
385 	    sctp_clog.x.misc.log3,
386 	    sctp_clog.x.misc.log4);
387 
388 }
389 
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 	struct sctp_cwnd_log sctp_clog;
394 
395 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 	sctp_clog.x.rwnd.send_size = snd_size;
397 	sctp_clog.x.rwnd.overhead = overhead;
398 	sctp_clog.x.rwnd.new_rwnd = 0;
399 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 	    SCTP_LOG_EVENT_RWND,
401 	    from,
402 	    sctp_clog.x.misc.log1,
403 	    sctp_clog.x.misc.log2,
404 	    sctp_clog.x.misc.log3,
405 	    sctp_clog.x.misc.log4);
406 }
407 
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 	struct sctp_cwnd_log sctp_clog;
412 
413 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 	sctp_clog.x.rwnd.send_size = flight_size;
415 	sctp_clog.x.rwnd.overhead = overhead;
416 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_RWND,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 }
425 
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 	struct sctp_cwnd_log sctp_clog;
430 
431 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 	sctp_clog.x.mbcnt.size_change = book;
433 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 	    SCTP_LOG_EVENT_MBCNT,
437 	    from,
438 	    sctp_clog.x.misc.log1,
439 	    sctp_clog.x.misc.log2,
440 	    sctp_clog.x.misc.log3,
441 	    sctp_clog.x.misc.log4);
442 
443 }
444 
/*
 * Log four caller-supplied 32-bit values as a miscellaneous KTR event.
 * No interpretation is applied here; the meaning of a-d depends on the
 * 'from' location code supplied by the caller.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
453 
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 	struct sctp_cwnd_log sctp_clog;
458 
459 	sctp_clog.x.wake.stcb = (void *)stcb;
460 	sctp_clog.x.wake.wake_cnt = wake_cnt;
461 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464 
465 	if (stcb->asoc.stream_queue_cnt < 0xff)
466 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 	else
468 		sctp_clog.x.wake.stream_qcnt = 0xff;
469 
470 	if (stcb->asoc.chunks_on_out_queue < 0xff)
471 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 	else
473 		sctp_clog.x.wake.chunks_on_oque = 0xff;
474 
475 	sctp_clog.x.wake.sctpflags = 0;
476 	/* set in the defered mode stuff */
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 		sctp_clog.x.wake.sctpflags |= 1;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 		sctp_clog.x.wake.sctpflags |= 2;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 		sctp_clog.x.wake.sctpflags |= 4;
483 	/* what about the sb */
484 	if (stcb->sctp_socket) {
485 		struct socket *so = stcb->sctp_socket;
486 
487 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 	} else {
489 		sctp_clog.x.wake.sbflags = 0xff;
490 	}
491 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 	    SCTP_LOG_EVENT_WAKE,
493 	    from,
494 	    sctp_clog.x.misc.log1,
495 	    sctp_clog.x.misc.log2,
496 	    sctp_clog.x.misc.log3,
497 	    sctp_clog.x.misc.log4);
498 
499 }
500 
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 	sctp_clog.x.blk.sndlen = sendlen;
513 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 	    SCTP_LOG_EVENT_BLOCK,
515 	    from,
516 	    sctp_clog.x.misc.log1,
517 	    sctp_clog.x.misc.log2,
518 	    sctp_clog.x.misc.log3,
519 	    sctp_clog.x.misc.log4);
520 
521 }
522 
/*
 * Stub for copying the statistics log out to user space.  KTR-based
 * tracing made an in-kernel copy-out unnecessary, so optval/optsize are
 * ignored and the call always reports success (0).
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
529 
530 #ifdef SCTP_AUDITING_ENABLED
531 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
532 static int sctp_audit_indx = 0;
533 
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 	int i;
539 	int cnt;
540 
541 	cnt = 0;
542 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 		if ((sctp_audit_data[i][0] == 0xe0) &&
544 		    (sctp_audit_data[i][1] == 0x01)) {
545 			cnt = 0;
546 			SCTP_PRINTF("\n");
547 		} else if (sctp_audit_data[i][0] == 0xf0) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			SCTP_PRINTF("\n");
553 			cnt = 0;
554 		}
555 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 		    (uint32_t) sctp_audit_data[i][1]);
557 		cnt++;
558 		if ((cnt % 14) == 0)
559 			SCTP_PRINTF("\n");
560 	}
561 	for (i = 0; i < sctp_audit_indx; i++) {
562 		if ((sctp_audit_data[i][0] == 0xe0) &&
563 		    (sctp_audit_data[i][1] == 0x01)) {
564 			cnt = 0;
565 			SCTP_PRINTF("\n");
566 		} else if (sctp_audit_data[i][0] == 0xf0) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			SCTP_PRINTF("\n");
572 			cnt = 0;
573 		}
574 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 		    (uint32_t) sctp_audit_data[i][1]);
576 		cnt++;
577 		if ((cnt % 14) == 0)
578 			SCTP_PRINTF("\n");
579 	}
580 	SCTP_PRINTF("\n");
581 }
582 
/*
 * Audit the association's retransmission and flight-size bookkeeping
 * against the actual contents of the sent queue, recording trace pairs
 * in the circular sctp_audit_data[] buffer.  Any mismatch is logged,
 * corrected in place, and triggers a full audit-report dump.
 * 'net' is accepted but not used by this function; 'from' identifies
 * the calling location (low byte recorded in the trace).
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit-entry record; low byte is the caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* 0xAF/0x01: bail, no endpoint to audit. */
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xAF/0x02: bail, no association to audit. */
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the association's current retran count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue: count chunks marked for resend, and sum the
	 * booked bytes / chunk count of everything still in flight
	 * (sent state below SCTP_DATAGRAM_RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* 0xAF/0xA1: retran count mismatch — report and correct. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* 0xAF/0xA2: total flight (bytes) mismatch — report and correct. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* 0xAF/0xA5: flight chunk count mismatch — report and correct. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/*
	 * Cross-check: the per-net flight sizes must also add up to the
	 * association total computed above.
	 */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* 0xAF/0xA3: per-net flight sums disagree — recompute each net. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			/* Re-sum this net's in-flight booked bytes. */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	/* Any correction above triggers a full dump of the audit trace. */
	if (rep) {
		sctp_print_audit_report();
	}
}
712 
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716 
717 	sctp_audit_data[sctp_audit_indx][0] = ev;
718 	sctp_audit_data[sctp_audit_indx][1] = fd;
719 	sctp_audit_indx++;
720 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 		sctp_audit_indx = 0;
722 	}
723 }
724 
725 #endif
726 
727 /*
728  * a list of sizes based on typical mtu's, used only if next hop size not
729  * returned.
730  */
731 static int sctp_mtu_sizes[] = {
732 	68,
733 	296,
734 	508,
735 	512,
736 	544,
737 	576,
738 	1006,
739 	1492,
740 	1500,
741 	1536,
742 	2002,
743 	2048,
744 	4352,
745 	4464,
746 	8166,
747 	17914,
748 	32000,
749 	65535
750 };
751 
752 void
753 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
754 {
755 	struct sctp_association *asoc;
756 	struct sctp_nets *net;
757 
758 	asoc = &stcb->asoc;
759 
760 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
762 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
763 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
764 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
765 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
766 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
767 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
768 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
769 	}
770 }
771 
772 int
773 find_next_best_mtu(int totsz)
774 {
775 	int i, perfer;
776 
777 	/*
778 	 * if we are in here we must find the next best fit based on the
779 	 * size of the dg that failed to be sent.
780 	 */
781 	perfer = 0;
782 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
783 		if (totsz < sctp_mtu_sizes[i]) {
784 			perfer = i - 1;
785 			if (perfer < 0)
786 				perfer = 0;
787 			break;
788 		}
789 	}
790 	return (sctp_mtu_sizes[perfer]);
791 }
792 
/*
 * Refill the endpoint's random_store by HMAC-ing the endpoint's random
 * numbers with a monotonically increasing counter, then reset store_at
 * so sctp_select_initial_TSN() starts handing out the fresh values.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
811 
/*
 * Return a pseudo-random 32-bit value drawn from the endpoint's random
 * store, used as an initial TSN (and for vtag selection).  A lock-free
 * compare-and-swap loop claims a 4-byte slot; when the store is
 * exhausted it is refilled via sctp_fill_random_store().  If the
 * debug sequence override is set, a simple incrementing counter is
 * returned instead.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug mode: deterministic, monotonically increasing sequence. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the signature tail of the store is reached. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* CAS claims [store_at, store_at+4); lost races simply retry. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
849 
850 uint32_t
851 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
852 {
853 	uint32_t x, not_done;
854 	struct timeval now;
855 
856 	(void)SCTP_GETTIME_TIMEVAL(&now);
857 	not_done = 1;
858 	while (not_done) {
859 		x = sctp_select_initial_TSN(&inp->sctp_ep);
860 		if (x == 0) {
861 			/* we never use 0 */
862 			continue;
863 		}
864 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
865 			not_done = 0;
866 		}
867 	}
868 	return (x);
869 }
870 
871 int
872 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
873     uint32_t override_tag, uint32_t vrf_id)
874 {
875 	struct sctp_association *asoc;
876 
877 	/*
878 	 * Anything set to zero is taken care of by the allocation routine's
879 	 * bzero
880 	 */
881 
882 	/*
883 	 * Up front select what scoping to apply on addresses I tell my peer
884 	 * Not sure what to do with these right now, we will need to come up
885 	 * with a way to set them. We may need to pass them through from the
886 	 * caller in the sctp_aloc_assoc() function.
887 	 */
888 	int i;
889 
890 	asoc = &stcb->asoc;
891 	/* init all variables to a known value. */
892 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
893 	asoc->max_burst = m->sctp_ep.max_burst;
894 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
895 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
896 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
897 	/* EY Init nr_sack variable */
898 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
899 	/* JRS 5/21/07 - Init CMT PF variables */
900 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
901 	asoc->sctp_frag_point = m->sctp_frag_point;
902 #ifdef INET
903 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
904 #else
905 	asoc->default_tos = 0;
906 #endif
907 
908 #ifdef INET6
909 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
910 #else
911 	asoc->default_flowlabel = 0;
912 #endif
913 	asoc->sb_send_resv = 0;
914 	if (override_tag) {
915 		asoc->my_vtag = override_tag;
916 	} else {
917 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
918 	}
919 	/* Get the nonce tags */
920 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
921 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
922 	asoc->vrf_id = vrf_id;
923 
924 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
925 		asoc->hb_is_disabled = 1;
926 	else
927 		asoc->hb_is_disabled = 0;
928 
929 #ifdef SCTP_ASOCLOG_OF_TSNS
930 	asoc->tsn_in_at = 0;
931 	asoc->tsn_out_at = 0;
932 	asoc->tsn_in_wrapped = 0;
933 	asoc->tsn_out_wrapped = 0;
934 	asoc->cumack_log_at = 0;
935 	asoc->cumack_log_atsnt = 0;
936 #endif
937 #ifdef SCTP_FS_SPEC_LOG
938 	asoc->fs_index = 0;
939 #endif
940 	asoc->refcnt = 0;
941 	asoc->assoc_up_sent = 0;
942 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
943 	    sctp_select_initial_TSN(&m->sctp_ep);
944 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
945 	/* we are optimisitic here */
946 	asoc->peer_supports_pktdrop = 1;
947 	asoc->peer_supports_nat = 0;
948 	asoc->sent_queue_retran_cnt = 0;
949 
950 	/* for CMT */
951 	asoc->last_net_cmt_send_started = NULL;
952 
953 	/* This will need to be adjusted */
954 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
955 	asoc->last_acked_seq = asoc->init_seq_number - 1;
956 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
957 	asoc->asconf_seq_in = asoc->last_acked_seq;
958 
959 	/* here we are different, we hold the next one we expect */
960 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
961 
962 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
963 	asoc->initial_rto = m->sctp_ep.initial_rto;
964 
965 	asoc->max_init_times = m->sctp_ep.max_init_times;
966 	asoc->max_send_times = m->sctp_ep.max_send_times;
967 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
968 	asoc->free_chunk_cnt = 0;
969 
970 	asoc->iam_blocking = 0;
971 	/* ECN Nonce initialization */
972 	asoc->context = m->sctp_context;
973 	asoc->def_send = m->def_send;
974 	asoc->ecn_nonce_allowed = 0;
975 	asoc->receiver_nonce_sum = 1;
976 	asoc->nonce_sum_expect_base = 1;
977 	asoc->nonce_sum_check = 1;
978 	asoc->nonce_resync_tsn = 0;
979 	asoc->nonce_wait_for_ecne = 0;
980 	asoc->nonce_wait_tsn = 0;
981 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
982 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
983 	asoc->pr_sctp_cnt = 0;
984 	asoc->total_output_queue_size = 0;
985 
986 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
987 		struct in6pcb *inp6;
988 
989 		/* Its a V6 socket */
990 		inp6 = (struct in6pcb *)m;
991 		asoc->ipv6_addr_legal = 1;
992 		/* Now look at the binding flag to see if V4 will be legal */
993 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
994 			asoc->ipv4_addr_legal = 1;
995 		} else {
996 			/* V4 addresses are NOT legal on the association */
997 			asoc->ipv4_addr_legal = 0;
998 		}
999 	} else {
1000 		/* Its a V4 socket, no - V6 */
1001 		asoc->ipv4_addr_legal = 1;
1002 		asoc->ipv6_addr_legal = 0;
1003 	}
1004 
1005 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1006 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1007 
1008 	asoc->smallest_mtu = m->sctp_frag_point;
1009 #ifdef SCTP_PRINT_FOR_B_AND_M
1010 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1011 	    asoc->smallest_mtu);
1012 #endif
1013 	asoc->minrto = m->sctp_ep.sctp_minrto;
1014 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1015 
1016 	asoc->locked_on_sending = NULL;
1017 	asoc->stream_locked_on = 0;
1018 	asoc->ecn_echo_cnt_onq = 0;
1019 	asoc->stream_locked = 0;
1020 
1021 	asoc->send_sack = 1;
1022 
1023 	LIST_INIT(&asoc->sctp_restricted_addrs);
1024 
1025 	TAILQ_INIT(&asoc->nets);
1026 	TAILQ_INIT(&asoc->pending_reply_queue);
1027 	TAILQ_INIT(&asoc->asconf_ack_sent);
1028 	/* Setup to fill the hb random cache at first HB */
1029 	asoc->hb_random_idx = 4;
1030 
1031 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1032 
1033 	/*
1034 	 * JRS - Pick the default congestion control module based on the
1035 	 * sysctl.
1036 	 */
1037 	switch (m->sctp_ep.sctp_default_cc_module) {
1038 		/* JRS - Standard TCP congestion control */
1039 	case SCTP_CC_RFC2581:
1040 		{
1041 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1042 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1043 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1044 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1045 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1046 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1047 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1050 			break;
1051 		}
1052 		/* JRS - High Speed TCP congestion control (Floyd) */
1053 	case SCTP_CC_HSTCP:
1054 		{
1055 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1056 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1064 			break;
1065 		}
1066 		/* JRS - HTCP congestion control */
1067 	case SCTP_CC_HTCP:
1068 		{
1069 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1070 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1078 			break;
1079 		}
1080 		/* JRS - By default, use RFC2581 */
1081 	default:
1082 		{
1083 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1084 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1092 			break;
1093 		}
1094 	}
1095 
1096 	/*
1097 	 * Now the stream parameters, here we allocate space for all streams
1098 	 * that we request by default.
1099 	 */
1100 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1101 	    m->sctp_ep.pre_open_stream_count;
1102 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1103 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1104 	    SCTP_M_STRMO);
1105 	if (asoc->strmout == NULL) {
1106 		/* big trouble no memory */
1107 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1108 		return (ENOMEM);
1109 	}
1110 	for (i = 0; i < asoc->streamoutcnt; i++) {
1111 		/*
1112 		 * inbound side must be set to 0xffff, also NOTE when we get
1113 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1114 		 * count (streamoutcnt) but first check if we sent to any of
1115 		 * the upper streams that were dropped (if some were). Those
1116 		 * that were dropped must be notified to the upper layer as
1117 		 * failed to send.
1118 		 */
1119 		asoc->strmout[i].next_sequence_sent = 0x0;
1120 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1121 		asoc->strmout[i].stream_no = i;
1122 		asoc->strmout[i].last_msg_incomplete = 0;
1123 		asoc->strmout[i].next_spoke.tqe_next = 0;
1124 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1125 	}
1126 	/* Now the mapping array */
1127 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1128 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1129 	    SCTP_M_MAP);
1130 	if (asoc->mapping_array == NULL) {
1131 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1132 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1133 		return (ENOMEM);
1134 	}
1135 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1136 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1137 	    SCTP_M_MAP);
1138 	if (asoc->nr_mapping_array == NULL) {
1139 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1140 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1141 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1142 		return (ENOMEM);
1143 	}
1144 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1145 
1146 	/* Now the init of the other outqueues */
1147 	TAILQ_INIT(&asoc->free_chunks);
1148 	TAILQ_INIT(&asoc->out_wheel);
1149 	TAILQ_INIT(&asoc->control_send_queue);
1150 	TAILQ_INIT(&asoc->asconf_send_queue);
1151 	TAILQ_INIT(&asoc->send_queue);
1152 	TAILQ_INIT(&asoc->sent_queue);
1153 	TAILQ_INIT(&asoc->reasmqueue);
1154 	TAILQ_INIT(&asoc->resetHead);
1155 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1156 	TAILQ_INIT(&asoc->asconf_queue);
1157 	/* authentication fields */
1158 	asoc->authinfo.random = NULL;
1159 	asoc->authinfo.active_keyid = 0;
1160 	asoc->authinfo.assoc_key = NULL;
1161 	asoc->authinfo.assoc_keyid = 0;
1162 	asoc->authinfo.recv_key = NULL;
1163 	asoc->authinfo.recv_keyid = 0;
1164 	LIST_INIT(&asoc->shared_keys);
1165 	asoc->marked_retrans = 0;
1166 	asoc->timoinit = 0;
1167 	asoc->timodata = 0;
1168 	asoc->timosack = 0;
1169 	asoc->timoshutdown = 0;
1170 	asoc->timoheartbeat = 0;
1171 	asoc->timocookie = 0;
1172 	asoc->timoshutdownack = 0;
1173 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1174 	asoc->discontinuity_time = asoc->start_time;
1175 	/*
1176 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1177 	 * freed later when the association is freed.
1178 	 */
1179 	return (0);
1180 }
1181 
1182 void
1183 sctp_print_mapping_array(struct sctp_association *asoc)
1184 {
1185 	unsigned int i, limit;
1186 
1187 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1188 	    asoc->mapping_array_size,
1189 	    asoc->mapping_array_base_tsn,
1190 	    asoc->cumulative_tsn,
1191 	    asoc->highest_tsn_inside_map,
1192 	    asoc->highest_tsn_inside_nr_map);
1193 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1194 		if (asoc->mapping_array[limit - 1]) {
1195 			break;
1196 		}
1197 	}
1198 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1199 	for (i = 0; i < limit; i++) {
1200 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1201 		if (((i + 1) % 16) == 0)
1202 			printf("\n");
1203 	}
1204 	if (limit % 16)
1205 		printf("\n");
1206 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1207 		if (asoc->nr_mapping_array[limit - 1]) {
1208 			break;
1209 		}
1210 	}
1211 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1212 	for (i = 0; i < limit; i++) {
1213 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1214 	}
1215 	if (limit % 16)
1216 		printf("\n");
1217 }
1218 
1219 int
1220 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1221 {
1222 	/* mapping array needs to grow */
1223 	uint8_t *new_array1, *new_array2;
1224 	uint32_t new_size;
1225 
1226 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1227 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1228 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1229 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1230 		/* can't get more, forget it */
1231 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1232 		if (new_array1) {
1233 			SCTP_FREE(new_array1, SCTP_M_MAP);
1234 		}
1235 		if (new_array2) {
1236 			SCTP_FREE(new_array2, SCTP_M_MAP);
1237 		}
1238 		return (-1);
1239 	}
1240 	memset(new_array1, 0, new_size);
1241 	memset(new_array2, 0, new_size);
1242 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1243 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1244 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1245 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1246 	asoc->mapping_array = new_array1;
1247 	asoc->nr_mapping_array = new_array2;
1248 	asoc->mapping_array_size = new_size;
1249 	return (0);
1250 }
1251 
1252 
/*
 * Execute one queued iterator: walk the global endpoint list starting at
 * it->inp, and for every endpoint whose pcb flags/features match the
 * iterator's filters, run the optional per-endpoint function
 * (it->function_inp), then the per-association function
 * (it->function_assoc) on each association in the requested state.
 * When the walk completes (or is aborted via sctp_it_ctl flags), the
 * end-of-iterator callback runs and the iterator is freed.
 *
 * Locking: runs with the global iterator lock held for the duration,
 * taking the endpoint read lock and each TCB lock as it goes; it
 * periodically drops and re-acquires the iterator lock (the "Pause"
 * block) so other iterator users are not starved.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference the submitter took on the starting endpoint. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* Iterator owns itself; this is the single free point. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_RLOCK(it->inp);
	/* Skip endpoints that don't match the requested flags/features. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_RUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* Per-inp function asked to skip, or endpoint has no associations. */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the TCB and endpoint with refcounts, drop all
			 * locks, then cycle the iterator lock so waiters can
			 * run.  On return, sctp_it_ctl.iterator_flags may
			 * order us to exit or to skip the rest of this
			 * iterator/endpoint.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire in lock order and drop the pin refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1394 
1395 void
1396 sctp_iterator_worker(void)
1397 {
1398 	struct sctp_iterator *it = NULL;
1399 
1400 	/* This function is called with the WQ lock in place */
1401 
1402 	sctp_it_ctl.iterator_running = 1;
1403 	sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1404 	while (it) {
1405 		/* now lets work on this one */
1406 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1407 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1408 		CURVNET_SET(it->vn);
1409 		sctp_iterator_work(it);
1410 
1411 		CURVNET_RESTORE();
1412 		SCTP_IPI_ITERATOR_WQ_LOCK();
1413 		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1414 			sctp_it_ctl.cur_it = NULL;
1415 			break;
1416 		}
1417 		/* sa_ignore FREED_MEMORY */
1418 		sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1419 	}
1420 	sctp_it_ctl.iterator_running = 0;
1421 	return;
1422 }
1423 
1424 
1425 static void
1426 sctp_handle_addr_wq(void)
1427 {
1428 	/* deal with the ADDR wq from the rtsock calls */
1429 	struct sctp_laddr *wi;
1430 	struct sctp_asconf_iterator *asc;
1431 
1432 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1433 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1434 	if (asc == NULL) {
1435 		/* Try later, no memory */
1436 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1437 		    (struct sctp_inpcb *)NULL,
1438 		    (struct sctp_tcb *)NULL,
1439 		    (struct sctp_nets *)NULL);
1440 		return;
1441 	}
1442 	LIST_INIT(&asc->list_of_work);
1443 	asc->cnt = 0;
1444 
1445 	SCTP_WQ_ADDR_LOCK();
1446 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1447 	while (wi != NULL) {
1448 		LIST_REMOVE(wi, sctp_nxt_addr);
1449 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1450 		asc->cnt++;
1451 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1452 	}
1453 	SCTP_WQ_ADDR_UNLOCK();
1454 
1455 	if (asc->cnt == 0) {
1456 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1457 	} else {
1458 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1459 		    sctp_asconf_iterator_stcb,
1460 		    NULL,	/* No ep end for boundall */
1461 		    SCTP_PCB_FLAGS_BOUNDALL,
1462 		    SCTP_PCB_ANY_FEATURES,
1463 		    SCTP_ASOC_ANY_STATE,
1464 		    (void *)asc, 0,
1465 		    sctp_asconf_iterator_end, NULL, 0);
1466 	}
1467 }
1468 
/*
 * NOTE(review): file-scope, non-static scratch variables.  Within this
 * view they are written only by the SCTP_TIMER_TYPE_SEND case of
 * sctp_timeout_handler() (cur_oerr is set but never read there), they
 * pollute the kernel's global namespace, and they carry no
 * synchronization of their own — they look like candidates to become
 * locals of sctp_timeout_handler(); confirm no other file references
 * them before changing linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1471 
/*
 * Callout handler for all SCTP timer types.
 *
 * 't' is the struct sctp_timer embedded in the owning object; from it
 * we recover the endpoint (inp), association (stcb) and destination
 * (net) the timer was armed against.  The function first runs a series
 * of sanity checks (recording progress in tmr->stopped_from), takes a
 * reference on the endpoint and the association and the TCB lock as
 * appropriate, then dispatches on tmr->type.  Cleanup is via the
 * get_out / out_decr / out_no_decr goto chain; cases whose type-
 * specific routine reports the TCB is already gone jump straight to
 * out_decr so the (now invalid) TCB is not unlocked.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* A stale timer no longer points back at itself; ignore it. */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately fire without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		/*
		 * Pin the endpoint while we run.  If its socket is already
		 * gone, bail out unless this timer type must still be
		 * serviced on a socket-less endpoint.
		 */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association; state 0 means it is being torn down. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while we were getting here. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the TCB lock (dropping our temporary refcount) and
		 * re-check the association state under the lock; ASOCKILL
		 * must proceed even for an about-to-be-freed association.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count unconfirmed-but-reachable destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): lnet is NULL here after the full
			 * TAILQ_FOREACH walk, so sctp_heartbeat_timer() and
			 * the calls below receive a NULL net -- presumably
			 * they select a destination themselves; confirm.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Shutdown took too long; abort the association. */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1937 
/*
 * Arm the SCTP timer of type 't_type'.  Based on the type this selects the
 * backing sctp_timer structure (from the endpoint, the association, or the
 * destination net) and computes the timeout in ticks (most retransmission
 * style timers use the net's RTO, falling back to the association's
 * initial_rto when RTO is still 0).  Returns silently when a required
 * argument for the type (inp/stcb/net) is missing, when no usable
 * timeout/timer was derived, or when the timer is already pending.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * NOTE(review): 'net' is deliberately
				 * cleared here so the generic path below is
				 * taken; heartbeat selection is delegated to
				 * sctp_heartbeat_timer().
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			/* Refill the 4-byte jitter pool once it is used up. */
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 256 ms RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here usually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* Re-uses the stream-reset timer slot for the kill delay. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record context so sctp_timeout_handler() can recover it. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2292 
/*
 * Stop the SCTP timer of type 't_type'.  Mirrors sctp_timer_start(): the
 * type selects which sctp_timer structure is meant, and the timer is only
 * cancelled if it is actually running as that type (several timer slots are
 * shared between types, e.g. strreset/asockill and signature_change/inpkill).
 * 'from' records the caller location for later debugging via 'stopped_from'.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the outstanding-send-timer count from going negative. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2456 
2457 uint32_t
2458 sctp_calculate_len(struct mbuf *m)
2459 {
2460 	uint32_t tlen = 0;
2461 	struct mbuf *at;
2462 
2463 	at = m;
2464 	while (at) {
2465 		tlen += SCTP_BUF_LEN(at);
2466 		at = SCTP_BUF_NEXT(at);
2467 	}
2468 	return (tlen);
2469 }
2470 
2471 void
2472 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2473     struct sctp_association *asoc, uint32_t mtu)
2474 {
2475 	/*
2476 	 * Reset the P-MTU size on this association, this involves changing
2477 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2478 	 * allow the DF flag to be cleared.
2479 	 */
2480 	struct sctp_tmit_chunk *chk;
2481 	unsigned int eff_mtu, ovh;
2482 
2483 #ifdef SCTP_PRINT_FOR_B_AND_M
2484 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2485 	    inp, asoc, mtu);
2486 #endif
2487 	asoc->smallest_mtu = mtu;
2488 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2489 		ovh = SCTP_MIN_OVERHEAD;
2490 	} else {
2491 		ovh = SCTP_MIN_V4_OVERHEAD;
2492 	}
2493 	eff_mtu = mtu - ovh;
2494 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2495 
2496 		if (chk->send_size > eff_mtu) {
2497 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2498 		}
2499 	}
2500 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2501 		if (chk->send_size > eff_mtu) {
2502 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2503 		}
2504 	}
2505 }
2506 
2507 
2508 /*
2509  * given an association and starting time of the current RTT period return
2510  * RTO in number of msecs net should point to the current network
2511  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 *
	 * Uses Van Jacobson's integer SRTT/RTTVAR update on net->lastsa and
	 * net->lastsv, then bounds the result by the association's
	 * minrto/maxrto (RFC 4960 Section 5.3.1 rules C6/C7).
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/* (avoids a potentially misaligned direct access to *told) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value (elapsed time in ms since *old) */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (integer form) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2637 
2638 /*
2639  * return a pointer to a contiguous piece of data from the given mbuf chain
2640  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2641  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2642  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2643  */
2644 caddr_t
2645 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2646 {
2647 	uint32_t count;
2648 	uint8_t *ptr;
2649 
2650 	ptr = in_ptr;
2651 	if ((off < 0) || (len <= 0))
2652 		return (NULL);
2653 
2654 	/* find the desired start location */
2655 	while ((m != NULL) && (off > 0)) {
2656 		if (off < SCTP_BUF_LEN(m))
2657 			break;
2658 		off -= SCTP_BUF_LEN(m);
2659 		m = SCTP_BUF_NEXT(m);
2660 	}
2661 	if (m == NULL)
2662 		return (NULL);
2663 
2664 	/* is the current mbuf large enough (eg. contiguous)? */
2665 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2666 		return (mtod(m, caddr_t)+off);
2667 	} else {
2668 		/* else, it spans more than one mbuf, so save a temp copy... */
2669 		while ((m != NULL) && (len > 0)) {
2670 			count = min(SCTP_BUF_LEN(m) - off, len);
2671 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2672 			len -= count;
2673 			ptr += count;
2674 			off = 0;
2675 			m = SCTP_BUF_NEXT(m);
2676 		}
2677 		if ((m == NULL) && (len > 0))
2678 			return (NULL);
2679 		else
2680 			return ((caddr_t)in_ptr);
2681 	}
2682 }
2683 
2684 
2685 
2686 struct sctp_paramhdr *
2687 sctp_get_next_param(struct mbuf *m,
2688     int offset,
2689     struct sctp_paramhdr *pull,
2690     int pull_limit)
2691 {
2692 	/* This just provides a typed signature to Peter's Pull routine */
2693 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2694 	    (uint8_t *) pull));
2695 }
2696 
2697 
2698 int
2699 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2700 {
2701 	/*
2702 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2703 	 * padlen is > 3 this routine will fail.
2704 	 */
2705 	uint8_t *dp;
2706 	int i;
2707 
2708 	if (padlen > 3) {
2709 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2710 		return (ENOBUFS);
2711 	}
2712 	if (padlen <= M_TRAILINGSPACE(m)) {
2713 		/*
2714 		 * The easy way. We hope the majority of the time we hit
2715 		 * here :)
2716 		 */
2717 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2718 		SCTP_BUF_LEN(m) += padlen;
2719 	} else {
2720 		/* Hard way we must grow the mbuf */
2721 		struct mbuf *tmp;
2722 
2723 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2724 		if (tmp == NULL) {
2725 			/* Out of space GAK! we are in big trouble. */
2726 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2727 			return (ENOSPC);
2728 		}
2729 		/* setup and insert in middle */
2730 		SCTP_BUF_LEN(tmp) = padlen;
2731 		SCTP_BUF_NEXT(tmp) = NULL;
2732 		SCTP_BUF_NEXT(m) = tmp;
2733 		dp = mtod(tmp, uint8_t *);
2734 	}
2735 	/* zero out the pad */
2736 	for (i = 0; i < padlen; i++) {
2737 		*dp = 0;
2738 		dp++;
2739 	}
2740 	return (0);
2741 }
2742 
2743 int
2744 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2745 {
2746 	/* find the last mbuf in chain and pad it */
2747 	struct mbuf *m_at;
2748 
2749 	m_at = m;
2750 	if (last_mbuf) {
2751 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2752 	} else {
2753 		while (m_at) {
2754 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2755 				return (sctp_add_pad_tombuf(m_at, padval));
2756 			}
2757 			m_at = SCTP_BUF_NEXT(m_at);
2758 		}
2759 	}
2760 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2761 	return (EFAULT);
2762 }
2763 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event' with code 'error') to
 * the association's socket.  For one-to-one style (and connected one-to-many)
 * sockets a COMM_LOST/CANT_STR_ASSOC event also sets so_error and wakes any
 * sleepers.  The notification itself is only queued if the application
 * enabled SCTP_PCB_FLAGS_RECVASSOCEVNT.  'data' is currently unused.
 * 'so_locked' tells the locking variants whether the socket lock is already
 * held (only meaningful on __APPLE__/SCTP_SO_LOCK_TESTING builds).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock while taking the
		 * socket lock, holding a refcount so the stcb cannot go
		 * away; bail out if the socket closed meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification payload. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2880 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given 'state' and 'error' code on the association's socket read queue.
 * Does nothing unless the application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, fixing up the IPv6 scope id for user space. */
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2955 
2956 
/*
 * Deliver an SCTP_SEND_FAILED notification for a chunk taken off the
 * sent/send queue.  The user data still attached to 'chk' is stolen
 * (chk->data is set to NULL) and chained behind the notification header
 * so the application receives the failed payload with the event.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length covers the notification header plus the user
	 * data; the DATA chunk header is subtracted here and physically
	 * trimmed off the data below.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* chain the (now trimmed) user data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3038 
3039 
3040 static void
3041 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3042     struct sctp_stream_queue_pending *sp, int so_locked
3043 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3044     SCTP_UNUSED
3045 #endif
3046 )
3047 {
3048 	struct mbuf *m_notify;
3049 	struct sctp_send_failed *ssf;
3050 	struct sctp_queued_to_read *control;
3051 	int length;
3052 
3053 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
3054 		/* event not enabled */
3055 		return;
3056 	}
3057 	length = sizeof(struct sctp_send_failed) + sp->length;
3058 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3059 	if (m_notify == NULL)
3060 		/* no space left */
3061 		return;
3062 	SCTP_BUF_LEN(m_notify) = 0;
3063 	ssf = mtod(m_notify, struct sctp_send_failed *);
3064 	ssf->ssf_type = SCTP_SEND_FAILED;
3065 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3066 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3067 	else
3068 		ssf->ssf_flags = SCTP_DATA_SENT;
3069 	ssf->ssf_length = length;
3070 	ssf->ssf_error = error;
3071 	/* not exactly what the user sent in, but should be close :) */
3072 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3073 	ssf->ssf_info.sinfo_stream = sp->stream;
3074 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3075 	if (sp->some_taken) {
3076 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3077 	} else {
3078 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3079 	}
3080 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3081 	ssf->ssf_info.sinfo_context = sp->context;
3082 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3083 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3084 	SCTP_BUF_NEXT(m_notify) = sp->data;
3085 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3086 
3087 	/* Steal off the mbuf */
3088 	sp->data = NULL;
3089 	/*
3090 	 * For this case, we check the actual socket buffer, since the assoc
3091 	 * is going away we don't want to overfill the socket buffer for a
3092 	 * non-reader
3093 	 */
3094 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3095 		sctp_m_freem(m_notify);
3096 		return;
3097 	}
3098 	/* append to socket */
3099 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3100 	    0, 0, 0, 0, 0, 0,
3101 	    m_notify);
3102 	if (control == NULL) {
3103 		/* no memory */
3104 		sctp_m_freem(m_notify);
3105 		return;
3106 	}
3107 	control->spec_flags = M_NOTIFICATION;
3108 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3109 	    control,
3110 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3111 }
3112 
3113 
3114 
3115 static void
3116 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3117     uint32_t error)
3118 {
3119 	struct mbuf *m_notify;
3120 	struct sctp_adaptation_event *sai;
3121 	struct sctp_queued_to_read *control;
3122 
3123 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3124 		/* event not enabled */
3125 		return;
3126 	}
3127 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3128 	if (m_notify == NULL)
3129 		/* no space left */
3130 		return;
3131 	SCTP_BUF_LEN(m_notify) = 0;
3132 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3133 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3134 	sai->sai_flags = 0;
3135 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3136 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3137 	sai->sai_assoc_id = sctp_get_associd(stcb);
3138 
3139 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3140 	SCTP_BUF_NEXT(m_notify) = NULL;
3141 
3142 	/* append to socket */
3143 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3144 	    0, 0, 0, 0, 0, 0,
3145 	    m_notify);
3146 	if (control == NULL) {
3147 		/* no memory */
3148 		sctp_m_freem(m_notify);
3149 		return;
3150 	}
3151 	control->length = SCTP_BUF_LEN(m_notify);
3152 	control->spec_flags = M_NOTIFICATION;
3153 	/* not that we need this */
3154 	control->tail_mbuf = m_notify;
3155 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3156 	    control,
3157 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3158 }
3159 
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT directly behind the partially
 * delivered message it refers to and wake the reader.
 * This always must be called with the read-queue LOCKED in the INP.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* 'val' packs stream number (upper 16 bits) and seq (lower 16) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* length is rebuilt below by the atomic add */
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* insert right after the partially delivered message, if known */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* hold the assoc while swapping TCB/socket locks */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while the TCB was unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3250 
/*
 * Notify the application that the peer has initiated a SHUTDOWN.  For
 * one-to-one style (and one-to-many sockets in the TCP pool) the socket
 * is additionally marked so no more data can be sent on it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/* hold the assoc while swapping TCB/socket locks */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was torn down while the TCB was unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3318 
3319 static void
3320 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3321     int so_locked
3322 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3323     SCTP_UNUSED
3324 #endif
3325 )
3326 {
3327 	struct mbuf *m_notify;
3328 	struct sctp_sender_dry_event *event;
3329 	struct sctp_queued_to_read *control;
3330 
3331 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3332 		/* event not enabled */
3333 		return;
3334 	}
3335 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3336 	if (m_notify == NULL) {
3337 		/* no space left */
3338 		return;
3339 	}
3340 	SCTP_BUF_LEN(m_notify) = 0;
3341 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3342 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3343 	event->sender_dry_flags = 0;
3344 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3345 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3346 
3347 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3348 	SCTP_BUF_NEXT(m_notify) = NULL;
3349 
3350 	/* append to socket */
3351 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3352 	    0, 0, 0, 0, 0, 0, m_notify);
3353 	if (control == NULL) {
3354 		/* no memory */
3355 		sctp_m_freem(m_notify);
3356 		return;
3357 	}
3358 	control->length = SCTP_BUF_LEN(m_notify);
3359 	control->spec_flags = M_NOTIFICATION;
3360 	/* not that we need this */
3361 	control->tail_mbuf = m_notify;
3362 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3363 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3364 }
3365 
3366 
3367 static void
3368 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3369 {
3370 	struct mbuf *m_notify;
3371 	struct sctp_queued_to_read *control;
3372 	struct sctp_stream_reset_event *strreset;
3373 	int len;
3374 
3375 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3376 		/* event not enabled */
3377 		return;
3378 	}
3379 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3380 	if (m_notify == NULL)
3381 		/* no space left */
3382 		return;
3383 	SCTP_BUF_LEN(m_notify) = 0;
3384 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3385 	if (len > M_TRAILINGSPACE(m_notify)) {
3386 		/* never enough room */
3387 		sctp_m_freem(m_notify);
3388 		return;
3389 	}
3390 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3391 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3392 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3393 	strreset->strreset_length = len;
3394 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3395 	strreset->strreset_list[0] = number_entries;
3396 
3397 	SCTP_BUF_LEN(m_notify) = len;
3398 	SCTP_BUF_NEXT(m_notify) = NULL;
3399 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3400 		/* no space */
3401 		sctp_m_freem(m_notify);
3402 		return;
3403 	}
3404 	/* append to socket */
3405 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3406 	    0, 0, 0, 0, 0, 0,
3407 	    m_notify);
3408 	if (control == NULL) {
3409 		/* no memory */
3410 		sctp_m_freem(m_notify);
3411 		return;
3412 	}
3413 	control->spec_flags = M_NOTIFICATION;
3414 	control->length = SCTP_BUF_LEN(m_notify);
3415 	/* not that we need this */
3416 	control->tail_mbuf = m_notify;
3417 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3418 	    control,
3419 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3420 }
3421 
3422 
3423 static void
3424 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3425     int number_entries, uint16_t * list, int flag)
3426 {
3427 	struct mbuf *m_notify;
3428 	struct sctp_queued_to_read *control;
3429 	struct sctp_stream_reset_event *strreset;
3430 	int len;
3431 
3432 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3433 		/* event not enabled */
3434 		return;
3435 	}
3436 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3437 	if (m_notify == NULL)
3438 		/* no space left */
3439 		return;
3440 	SCTP_BUF_LEN(m_notify) = 0;
3441 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3442 	if (len > M_TRAILINGSPACE(m_notify)) {
3443 		/* never enough room */
3444 		sctp_m_freem(m_notify);
3445 		return;
3446 	}
3447 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3448 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3449 	if (number_entries == 0) {
3450 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3451 	} else {
3452 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3453 	}
3454 	strreset->strreset_length = len;
3455 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3456 	if (number_entries) {
3457 		int i;
3458 
3459 		for (i = 0; i < number_entries; i++) {
3460 			strreset->strreset_list[i] = ntohs(list[i]);
3461 		}
3462 	}
3463 	SCTP_BUF_LEN(m_notify) = len;
3464 	SCTP_BUF_NEXT(m_notify) = NULL;
3465 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3466 		/* no space */
3467 		sctp_m_freem(m_notify);
3468 		return;
3469 	}
3470 	/* append to socket */
3471 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3472 	    0, 0, 0, 0, 0, 0,
3473 	    m_notify);
3474 	if (control == NULL) {
3475 		/* no memory */
3476 		sctp_m_freem(m_notify);
3477 		return;
3478 	}
3479 	control->spec_flags = M_NOTIFICATION;
3480 	control->length = SCTP_BUF_LEN(m_notify);
3481 	/* not that we need this */
3482 	control->tail_mbuf = m_notify;
3483 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3484 	    control,
3485 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3486 }
3487 
3488 
3489 void
3490 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3491     uint32_t error, void *data, int so_locked
3492 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3493     SCTP_UNUSED
3494 #endif
3495 )
3496 {
3497 	if ((stcb == NULL) ||
3498 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3499 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3500 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3501 		/* If the socket is gone we are out of here */
3502 		return;
3503 	}
3504 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3505 		return;
3506 	}
3507 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3508 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3509 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3510 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3511 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3512 			/* Don't report these in front states */
3513 			return;
3514 		}
3515 	}
3516 	switch (notification) {
3517 	case SCTP_NOTIFY_ASSOC_UP:
3518 		if (stcb->asoc.assoc_up_sent == 0) {
3519 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3520 			stcb->asoc.assoc_up_sent = 1;
3521 		}
3522 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3523 			sctp_notify_adaptation_layer(stcb, error);
3524 		}
3525 		if (stcb->asoc.peer_supports_auth == 0) {
3526 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3527 			    NULL, so_locked);
3528 		}
3529 		break;
3530 	case SCTP_NOTIFY_ASSOC_DOWN:
3531 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3532 		break;
3533 	case SCTP_NOTIFY_INTERFACE_DOWN:
3534 		{
3535 			struct sctp_nets *net;
3536 
3537 			net = (struct sctp_nets *)data;
3538 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3539 			    (struct sockaddr *)&net->ro._l_addr, error);
3540 			break;
3541 		}
3542 	case SCTP_NOTIFY_INTERFACE_UP:
3543 		{
3544 			struct sctp_nets *net;
3545 
3546 			net = (struct sctp_nets *)data;
3547 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3548 			    (struct sockaddr *)&net->ro._l_addr, error);
3549 			break;
3550 		}
3551 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3552 		{
3553 			struct sctp_nets *net;
3554 
3555 			net = (struct sctp_nets *)data;
3556 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3557 			    (struct sockaddr *)&net->ro._l_addr, error);
3558 			break;
3559 		}
3560 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3561 		sctp_notify_send_failed2(stcb, error,
3562 		    (struct sctp_stream_queue_pending *)data, so_locked);
3563 		break;
3564 	case SCTP_NOTIFY_DG_FAIL:
3565 		sctp_notify_send_failed(stcb, error,
3566 		    (struct sctp_tmit_chunk *)data, so_locked);
3567 		break;
3568 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3569 		{
3570 			uint32_t val;
3571 
3572 			val = *((uint32_t *) data);
3573 
3574 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3575 			break;
3576 		}
3577 	case SCTP_NOTIFY_STRDATA_ERR:
3578 		break;
3579 	case SCTP_NOTIFY_ASSOC_ABORTED:
3580 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3581 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3582 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3583 		} else {
3584 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3585 		}
3586 		break;
3587 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3588 		break;
3589 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3590 		break;
3591 	case SCTP_NOTIFY_ASSOC_RESTART:
3592 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3593 		if (stcb->asoc.peer_supports_auth == 0) {
3594 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3595 			    NULL, so_locked);
3596 		}
3597 		break;
3598 	case SCTP_NOTIFY_HB_RESP:
3599 		break;
3600 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3601 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3602 		break;
3603 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3604 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3605 		break;
3606 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3607 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3608 		break;
3609 
3610 	case SCTP_NOTIFY_STR_RESET_SEND:
3611 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3612 		break;
3613 	case SCTP_NOTIFY_STR_RESET_RECV:
3614 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3615 		break;
3616 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3617 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3618 		break;
3619 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3620 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3621 		break;
3622 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3623 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3624 		    error);
3625 		break;
3626 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3627 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3628 		    error);
3629 		break;
3630 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3631 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3632 		    error);
3633 		break;
3634 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3635 		break;
3636 	case SCTP_NOTIFY_ASCONF_FAILED:
3637 		break;
3638 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3639 		sctp_notify_shutdown_event(stcb);
3640 		break;
3641 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3642 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3643 		    (uint16_t) (uintptr_t) data,
3644 		    so_locked);
3645 		break;
3646 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3647 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3648 		    (uint16_t) (uintptr_t) data,
3649 		    so_locked);
3650 		break;
3651 	case SCTP_NOTIFY_NO_PEER_AUTH:
3652 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3653 		    (uint16_t) (uintptr_t) data,
3654 		    so_locked);
3655 		break;
3656 	case SCTP_NOTIFY_SENDER_DRY:
3657 		sctp_notify_sender_dry_event(stcb, so_locked);
3658 		break;
3659 	default:
3660 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3661 		    __FUNCTION__, notification, notification);
3662 		break;
3663 	}			/* end switch */
3664 }
3665 
3666 void
3667 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3668 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3669     SCTP_UNUSED
3670 #endif
3671 )
3672 {
3673 	struct sctp_association *asoc;
3674 	struct sctp_stream_out *outs;
3675 	struct sctp_tmit_chunk *chk;
3676 	struct sctp_stream_queue_pending *sp;
3677 	int i;
3678 
3679 	asoc = &stcb->asoc;
3680 
3681 	if (stcb == NULL) {
3682 		return;
3683 	}
3684 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3685 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3686 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3687 		return;
3688 	}
3689 	/* now through all the gunk freeing chunks */
3690 	if (holds_lock == 0) {
3691 		SCTP_TCB_SEND_LOCK(stcb);
3692 	}
3693 	/* sent queue SHOULD be empty */
3694 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3695 		chk = TAILQ_FIRST(&asoc->sent_queue);
3696 		while (chk) {
3697 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3698 			asoc->sent_queue_cnt--;
3699 			if (chk->data != NULL) {
3700 				sctp_free_bufspace(stcb, asoc, chk, 1);
3701 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3702 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3703 				if (chk->data) {
3704 					sctp_m_freem(chk->data);
3705 					chk->data = NULL;
3706 				}
3707 			}
3708 			sctp_free_a_chunk(stcb, chk);
3709 			/* sa_ignore FREED_MEMORY */
3710 			chk = TAILQ_FIRST(&asoc->sent_queue);
3711 		}
3712 	}
3713 	/* pending send queue SHOULD be empty */
3714 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3715 		chk = TAILQ_FIRST(&asoc->send_queue);
3716 		while (chk) {
3717 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3718 			asoc->send_queue_cnt--;
3719 			if (chk->data != NULL) {
3720 				sctp_free_bufspace(stcb, asoc, chk, 1);
3721 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3722 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3723 				if (chk->data) {
3724 					sctp_m_freem(chk->data);
3725 					chk->data = NULL;
3726 				}
3727 			}
3728 			sctp_free_a_chunk(stcb, chk);
3729 			/* sa_ignore FREED_MEMORY */
3730 			chk = TAILQ_FIRST(&asoc->send_queue);
3731 		}
3732 	}
3733 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3734 		/* For each stream */
3735 		outs = &stcb->asoc.strmout[i];
3736 		/* clean up any sends there */
3737 		stcb->asoc.locked_on_sending = NULL;
3738 		sp = TAILQ_FIRST(&outs->outqueue);
3739 		while (sp) {
3740 			stcb->asoc.stream_queue_cnt--;
3741 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3742 			sctp_free_spbufspace(stcb, asoc, sp);
3743 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3744 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3745 			if (sp->data) {
3746 				sctp_m_freem(sp->data);
3747 				sp->data = NULL;
3748 			}
3749 			if (sp->net)
3750 				sctp_free_remote_addr(sp->net);
3751 			sp->net = NULL;
3752 			/* Free the chunk */
3753 			sctp_free_a_strmoq(stcb, sp);
3754 			/* sa_ignore FREED_MEMORY */
3755 			sp = TAILQ_FIRST(&outs->outqueue);
3756 		}
3757 	}
3758 
3759 	if (holds_lock == 0) {
3760 		SCTP_TCB_SEND_UNLOCK(stcb);
3761 	}
3762 }
3763 
3764 void
3765 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3766 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3767     SCTP_UNUSED
3768 #endif
3769 )
3770 {
3771 
3772 	if (stcb == NULL) {
3773 		return;
3774 	}
3775 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3776 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3777 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3778 		return;
3779 	}
3780 	/* Tell them we lost the asoc */
3781 	sctp_report_all_outbound(stcb, 1, so_locked);
3782 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3783 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3784 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3785 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3786 	}
3787 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3788 }
3789 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (if a TCB exists), send an ABORT back to the peer using the peer's
 * vtag, and free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* take the socket lock in the required order */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3828 
3829 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN tracking logs of an
 * association.  The body is compiled out unless NOSIY_PRINTS is defined.
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for "NOISY_PRINTS" --
 * confirm before relying on this guard.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* the log is a ring buffer: print the wrapped tail first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same ring-buffer layout for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3890 
3891 #endif
3892 
/*
 * Abort an existing association: mark it aborted, notify the ULP
 * (unless the socket is gone), send an ABORT chunk to the peer, update
 * the established-association statistics, and free the TCB.
 *
 * so_locked tells us whether the caller already holds the socket lock;
 * it only matters on platforms that take the socket lock here.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is captured but never read below - confirm */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* only OPEN / SHUTDOWN-RECEIVED count toward the established gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Acquire the socket lock if the caller does not already hold it;
	 * hold a refcount across the unlock/relock so the TCB cannot be
	 * freed out from under us.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3958 
/*
 * Handle an "out of the blue" packet: one for which no association
 * exists.  Walks the chunk list; chunk types that must not be answered
 * (COOKIE-ECHO, PACKET-DROPPED, ABORT, SHUTDOWN-COMPLETE) cause a
 * silent return, a SHUTDOWN-ACK is answered with SHUTDOWN-COMPLETE,
 * and anything else falls through to sending an ABORT.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/*
	 * Generate a TO address for future reference
	 *
	 * NOTE(review): the comment above looks stale - the code below
	 * frees a socket-gone endpoint rather than building an address.
	 */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
4010 
4011 /*
4012  * check the inbound datagram to make sure there is not an abort inside it,
4013  * if there is return 1, else return 0.
4014  */
4015 int
4016 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4017 {
4018 	struct sctp_chunkhdr *ch;
4019 	struct sctp_init_chunk *init_chk, chunk_buf;
4020 	int offset;
4021 	unsigned int chk_length;
4022 
4023 	offset = iphlen + sizeof(struct sctphdr);
4024 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4025 	    (uint8_t *) & chunk_buf);
4026 	while (ch != NULL) {
4027 		chk_length = ntohs(ch->chunk_length);
4028 		if (chk_length < sizeof(*ch)) {
4029 			/* packet is probably corrupt */
4030 			break;
4031 		}
4032 		/* we seem to be ok, is it an abort? */
4033 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4034 			/* yep, tell them */
4035 			return (1);
4036 		}
4037 		if (ch->chunk_type == SCTP_INITIATION) {
4038 			/* need to update the Vtag */
4039 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4040 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4041 			if (init_chk != NULL) {
4042 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4043 			}
4044 		}
4045 		/* Nope, move to the next chunk */
4046 		offset += SCTP_SIZE32(chk_length);
4047 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4048 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4049 	}
4050 	return (0);
4051 }
4052 
4053 /*
4054  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4055  * set (i.e. it's 0) so, create this function to compare link local scopes
4056  */
4057 #ifdef INET6
4058 uint32_t
4059 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4060 {
4061 	struct sockaddr_in6 a, b;
4062 
4063 	/* save copies */
4064 	a = *addr1;
4065 	b = *addr2;
4066 
4067 	if (a.sin6_scope_id == 0)
4068 		if (sa6_recoverscope(&a)) {
4069 			/* can't get scope, so can't match */
4070 			return (0);
4071 		}
4072 	if (b.sin6_scope_id == 0)
4073 		if (sa6_recoverscope(&b)) {
4074 			/* can't get scope, so can't match */
4075 			return (0);
4076 		}
4077 	if (a.sin6_scope_id != b.sin6_scope_id)
4078 		return (0);
4079 
4080 	return (1);
4081 }
4082 
4083 /*
4084  * returns a sockaddr_in6 with embedded scope recovered and removed
4085  */
4086 struct sockaddr_in6 *
4087 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4088 {
4089 	/* check and strip embedded scope junk */
4090 	if (addr->sin6_family == AF_INET6) {
4091 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4092 			if (addr->sin6_scope_id == 0) {
4093 				*store = *addr;
4094 				if (!sa6_recoverscope(store)) {
4095 					/* use the recovered scope */
4096 					addr = store;
4097 				}
4098 			} else {
4099 				/* else, return the original "to" addr */
4100 				in6_clearscope(&addr->sin6_addr);
4101 			}
4102 		}
4103 	}
4104 	return (addr);
4105 }
4106 
4107 #endif
4108 
4109 /*
4110  * are the two addresses the same?  currently a "scopeless" check returns: 1
4111  * if same, 0 if not
4112  */
4113 int
4114 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4115 {
4116 
4117 	/* must be valid */
4118 	if (sa1 == NULL || sa2 == NULL)
4119 		return (0);
4120 
4121 	/* must be the same family */
4122 	if (sa1->sa_family != sa2->sa_family)
4123 		return (0);
4124 
4125 	switch (sa1->sa_family) {
4126 #ifdef INET6
4127 	case AF_INET6:
4128 		{
4129 			/* IPv6 addresses */
4130 			struct sockaddr_in6 *sin6_1, *sin6_2;
4131 
4132 			sin6_1 = (struct sockaddr_in6 *)sa1;
4133 			sin6_2 = (struct sockaddr_in6 *)sa2;
4134 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4135 			    sin6_2));
4136 		}
4137 #endif
4138 	case AF_INET:
4139 		{
4140 			/* IPv4 addresses */
4141 			struct sockaddr_in *sin_1, *sin_2;
4142 
4143 			sin_1 = (struct sockaddr_in *)sa1;
4144 			sin_2 = (struct sockaddr_in *)sa2;
4145 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4146 		}
4147 	default:
4148 		/* we don't do these... */
4149 		return (0);
4150 	}
4151 }
4152 
4153 void
4154 sctp_print_address(struct sockaddr *sa)
4155 {
4156 #ifdef INET6
4157 	char ip6buf[INET6_ADDRSTRLEN];
4158 
4159 	ip6buf[0] = 0;
4160 #endif
4161 
4162 	switch (sa->sa_family) {
4163 #ifdef INET6
4164 	case AF_INET6:
4165 		{
4166 			struct sockaddr_in6 *sin6;
4167 
4168 			sin6 = (struct sockaddr_in6 *)sa;
4169 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4170 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4171 			    ntohs(sin6->sin6_port),
4172 			    sin6->sin6_scope_id);
4173 			break;
4174 		}
4175 #endif
4176 	case AF_INET:
4177 		{
4178 			struct sockaddr_in *sin;
4179 			unsigned char *p;
4180 
4181 			sin = (struct sockaddr_in *)sa;
4182 			p = (unsigned char *)&sin->sin_addr;
4183 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4184 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4185 			break;
4186 		}
4187 	default:
4188 		SCTP_PRINTF("?\n");
4189 		break;
4190 	}
4191 }
4192 
4193 void
4194 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4195 {
4196 	switch (iph->ip_v) {
4197 	case IPVERSION:
4198 		{
4199 			struct sockaddr_in lsa, fsa;
4200 
4201 			bzero(&lsa, sizeof(lsa));
4202 			lsa.sin_len = sizeof(lsa);
4203 			lsa.sin_family = AF_INET;
4204 			lsa.sin_addr = iph->ip_src;
4205 			lsa.sin_port = sh->src_port;
4206 			bzero(&fsa, sizeof(fsa));
4207 			fsa.sin_len = sizeof(fsa);
4208 			fsa.sin_family = AF_INET;
4209 			fsa.sin_addr = iph->ip_dst;
4210 			fsa.sin_port = sh->dest_port;
4211 			SCTP_PRINTF("src: ");
4212 			sctp_print_address((struct sockaddr *)&lsa);
4213 			SCTP_PRINTF("dest: ");
4214 			sctp_print_address((struct sockaddr *)&fsa);
4215 			break;
4216 		}
4217 #ifdef INET6
4218 	case IPV6_VERSION >> 4:
4219 		{
4220 			struct ip6_hdr *ip6;
4221 			struct sockaddr_in6 lsa6, fsa6;
4222 
4223 			ip6 = (struct ip6_hdr *)iph;
4224 			bzero(&lsa6, sizeof(lsa6));
4225 			lsa6.sin6_len = sizeof(lsa6);
4226 			lsa6.sin6_family = AF_INET6;
4227 			lsa6.sin6_addr = ip6->ip6_src;
4228 			lsa6.sin6_port = sh->src_port;
4229 			bzero(&fsa6, sizeof(fsa6));
4230 			fsa6.sin6_len = sizeof(fsa6);
4231 			fsa6.sin6_family = AF_INET6;
4232 			fsa6.sin6_addr = ip6->ip6_dst;
4233 			fsa6.sin6_port = sh->dest_port;
4234 			SCTP_PRINTF("src: ");
4235 			sctp_print_address((struct sockaddr *)&lsa6);
4236 			SCTP_PRINTF("dest: ");
4237 			sctp_print_address((struct sockaddr *)&fsa6);
4238 			break;
4239 		}
4240 #endif
4241 	default:
4242 		/* TSNH */
4243 		break;
4244 	}
4245 }
4246 
/*
 * Move every queued-to-read control structure that belongs to stcb from
 * old_inp's read queue to new_inp's, adjusting both sockets' receive-
 * buffer accounting along the way (used by peeloff/accept).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* lock out any concurrent reader of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket buffer for each mbuf moved */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket buffer for each mbuf moved */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4328 
/*
 * Place a control (message) at the end of the endpoint's read queue and
 * charge its mbuf chain to the socket receive buffer so select()/read()
 * see the data.  Zero-length mbufs are stripped first.  'end' marks the
 * message complete; 'inp_read_lock_held' tells us whether the caller
 * already owns the INP read lock; 'so_locked' matters only on platforms
 * that take the socket lock for the wakeup.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* strip zero-length mbufs; account the rest against sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake the reader: zero-copy event or a plain sorwakeup */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Hold a ref across the unlock/relock so the
				 * TCB survives; re-check the socket after
				 * reacquiring the locks.
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4437 
4438 
/*
 * Append mbuf chain m to an in-progress (partial-delivery or
 * reassembly) control already on the read queue.  When sb is non-NULL
 * the bytes are also charged to the socket receive buffer.  'end' marks
 * the message complete; ctls_cumack is the highest TSN carried by this
 * append.  Returns 0 on success, -1 if the control is missing/already
 * complete or m is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* strip zero-length mbufs and total up the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake the reader: zero-copy event or a plain sorwakeup */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * Hold a ref across the unlock/relock so the TCB
			 * survives; re-check the socket afterwards.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4580 
4581 
4582 
4583 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4584  *************ALTERNATE ROUTING CODE
4585  */
4586 
4587 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4588  *************ALTERNATE ROUTING CODE
4589  */
4590 
4591 struct mbuf *
4592 sctp_generate_invmanparam(int err)
4593 {
4594 	/* Return a MBUF with a invalid mandatory parameter */
4595 	struct mbuf *m;
4596 
4597 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4598 	if (m) {
4599 		struct sctp_paramhdr *ph;
4600 
4601 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4602 		ph = mtod(m, struct sctp_paramhdr *);
4603 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4604 		ph->param_type = htons(err);
4605 	}
4606 	return (m);
4607 }
4608 
4609 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue accounting held by chunk tp1: decrement the
 * association's queued-chunk count and total queued bytes (clamped at
 * zero) and, for TCP-model sockets, the socket send-buffer byte count.
 * Only compiled when SCTP_MBCNT_LOGGING is defined.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than letting the gauge underflow */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also track queued bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4641 
4642 #endif
4643 
4644 int
4645 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4646     int reason, int so_locked
4647 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4648     SCTP_UNUSED
4649 #endif
4650 )
4651 {
4652 	struct sctp_stream_out *strq;
4653 	struct sctp_tmit_chunk *chk = NULL;
4654 	struct sctp_stream_queue_pending *sp;
4655 	uint16_t stream = 0, seq = 0;
4656 	uint8_t foundeom = 0;
4657 	int ret_sz = 0;
4658 	int notdone;
4659 	int do_wakeup_routine = 0;
4660 
4661 	stream = tp1->rec.data.stream_number;
4662 	seq = tp1->rec.data.stream_seq;
4663 	do {
4664 		ret_sz += tp1->book_size;
4665 		if (tp1->data != NULL) {
4666 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4667 				sctp_flight_size_decrease(tp1);
4668 				sctp_total_flight_decrease(stcb, tp1);
4669 			}
4670 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4671 			stcb->asoc.peers_rwnd += tp1->send_size;
4672 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4673 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4674 			if (tp1->data) {
4675 				sctp_m_freem(tp1->data);
4676 				tp1->data = NULL;
4677 			}
4678 			do_wakeup_routine = 1;
4679 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4680 				stcb->asoc.sent_queue_cnt_removeable--;
4681 			}
4682 		}
4683 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4684 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4685 		    SCTP_DATA_NOT_FRAG) {
4686 			/* not frag'ed we ae done   */
4687 			notdone = 0;
4688 			foundeom = 1;
4689 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4690 			/* end of frag, we are done */
4691 			notdone = 0;
4692 			foundeom = 1;
4693 		} else {
4694 			/*
4695 			 * Its a begin or middle piece, we must mark all of
4696 			 * it
4697 			 */
4698 			notdone = 1;
4699 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4700 		}
4701 	} while (tp1 && notdone);
4702 	if (foundeom == 0) {
4703 		/*
4704 		 * The multi-part message was scattered across the send and
4705 		 * sent queue.
4706 		 */
4707 next_on_sent:
4708 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4709 		/*
4710 		 * recurse throught the send_queue too, starting at the
4711 		 * beginning.
4712 		 */
4713 		if ((tp1) &&
4714 		    (tp1->rec.data.stream_number == stream) &&
4715 		    (tp1->rec.data.stream_seq == seq)) {
4716 			/*
4717 			 * save to chk in case we have some on stream out
4718 			 * queue. If so and we have an un-transmitted one we
4719 			 * don't have to fudge the TSN.
4720 			 */
4721 			chk = tp1;
4722 			ret_sz += tp1->book_size;
4723 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4724 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4725 			if (tp1->data) {
4726 				sctp_m_freem(tp1->data);
4727 				tp1->data = NULL;
4728 			}
4729 			/* No flight involved here book the size to 0 */
4730 			tp1->book_size = 0;
4731 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4732 				foundeom = 1;
4733 			}
4734 			do_wakeup_routine = 1;
4735 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4736 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4737 			/*
4738 			 * on to the sent queue so we can wait for it to be
4739 			 * passed by.
4740 			 */
4741 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4742 			    sctp_next);
4743 			stcb->asoc.send_queue_cnt--;
4744 			stcb->asoc.sent_queue_cnt++;
4745 			goto next_on_sent;
4746 		}
4747 	}
4748 	if (foundeom == 0) {
4749 		/*
4750 		 * Still no eom found. That means there is stuff left on the
4751 		 * stream out queue.. yuck.
4752 		 */
4753 		strq = &stcb->asoc.strmout[stream];
4754 		SCTP_TCB_SEND_LOCK(stcb);
4755 		sp = TAILQ_FIRST(&strq->outqueue);
4756 		while (sp->strseq <= seq) {
4757 			/* Check if its our SEQ */
4758 			if (sp->strseq == seq) {
4759 				sp->discard_rest = 1;
4760 				/*
4761 				 * We may need to put a chunk on the queue
4762 				 * that holds the TSN that would have been
4763 				 * sent with the LAST bit.
4764 				 */
4765 				if (chk == NULL) {
4766 					/* Yep, we have to */
4767 					sctp_alloc_a_chunk(stcb, chk);
4768 					if (chk == NULL) {
4769 						/*
4770 						 * we are hosed. All we can
4771 						 * do is nothing.. which
4772 						 * will cause an abort if
4773 						 * the peer is paying
4774 						 * attention.
4775 						 */
4776 						goto oh_well;
4777 					}
4778 					memset(chk, 0, sizeof(*chk));
4779 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4780 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4781 					chk->asoc = &stcb->asoc;
4782 					chk->rec.data.stream_seq = sp->strseq;
4783 					chk->rec.data.stream_number = sp->stream;
4784 					chk->rec.data.payloadtype = sp->ppid;
4785 					chk->rec.data.context = sp->context;
4786 					chk->flags = sp->act_flags;
4787 					chk->whoTo = sp->net;
4788 					atomic_add_int(&chk->whoTo->ref_count, 1);
4789 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4790 					stcb->asoc.pr_sctp_cnt++;
4791 					chk->pr_sctp_on = 1;
4792 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4793 					stcb->asoc.sent_queue_cnt++;
4794 					stcb->asoc.pr_sctp_cnt++;
4795 				} else {
4796 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4797 				}
4798 		oh_well:
4799 				if (sp->data) {
4800 					/*
4801 					 * Pull any data to free up the SB
4802 					 * and allow sender to "add more"
4803 					 * whilc we will throw away :-)
4804 					 */
4805 					sctp_free_spbufspace(stcb, &stcb->asoc,
4806 					    sp);
4807 					ret_sz += sp->length;
4808 					do_wakeup_routine = 1;
4809 					sp->some_taken = 1;
4810 					sctp_m_freem(sp->data);
4811 					sp->length = 0;
4812 					sp->data = NULL;
4813 					sp->tail_mbuf = NULL;
4814 				}
4815 				break;
4816 			} else {
4817 				/* Next one please */
4818 				sp = TAILQ_NEXT(sp, next);
4819 			}
4820 		}		/* End while */
4821 		SCTP_TCB_SEND_UNLOCK(stcb);
4822 	}
4823 	if (do_wakeup_routine) {
4824 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4825 		struct socket *so;
4826 
4827 		so = SCTP_INP_SO(stcb->sctp_ep);
4828 		if (!so_locked) {
4829 			atomic_add_int(&stcb->asoc.refcnt, 1);
4830 			SCTP_TCB_UNLOCK(stcb);
4831 			SCTP_SOCKET_LOCK(so, 1);
4832 			SCTP_TCB_LOCK(stcb);
4833 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4834 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4835 				/* assoc was freed while we were unlocked */
4836 				SCTP_SOCKET_UNLOCK(so, 1);
4837 				return (ret_sz);
4838 			}
4839 		}
4840 #endif
4841 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4842 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4843 		if (!so_locked) {
4844 			SCTP_SOCKET_UNLOCK(so, 1);
4845 		}
4846 #endif
4847 	}
4848 	return (ret_sz);
4849 }
4850 
4851 /*
4852  * checks to see if the given address, sa, is one that is currently known by
4853  * the kernel note: can't distinguish the same address on multiple interfaces
4854  * and doesn't handle multiple addresses with different zone/scope id's note:
4855  * ifa_ifwithaddr() compares the entire sockaddr struct
4856  */
4857 struct sctp_ifa *
4858 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4859     int holds_lock)
4860 {
4861 	struct sctp_laddr *laddr;
4862 
4863 	if (holds_lock == 0) {
4864 		SCTP_INP_RLOCK(inp);
4865 	}
4866 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4867 		if (laddr->ifa == NULL)
4868 			continue;
4869 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4870 			continue;
4871 		if (addr->sa_family == AF_INET) {
4872 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4873 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4874 				/* found him. */
4875 				if (holds_lock == 0) {
4876 					SCTP_INP_RUNLOCK(inp);
4877 				}
4878 				return (laddr->ifa);
4879 				break;
4880 			}
4881 		}
4882 #ifdef INET6
4883 		if (addr->sa_family == AF_INET6) {
4884 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4885 			    &laddr->ifa->address.sin6)) {
4886 				/* found him. */
4887 				if (holds_lock == 0) {
4888 					SCTP_INP_RUNLOCK(inp);
4889 				}
4890 				return (laddr->ifa);
4891 				break;
4892 			}
4893 		}
4894 #endif
4895 	}
4896 	if (holds_lock == 0) {
4897 		SCTP_INP_RUNLOCK(inp);
4898 	}
4899 	return (NULL);
4900 }
4901 
/*
 * Compute the address-hash value used to pick a vrf_addr_hash bucket.
 * IPv4: the 32-bit address folded with its own upper half.  IPv6: the
 * sum of the four 32-bit words, folded the same way.  Other families
 * hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		{
			uint32_t v4;

			v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			return (v4 ^ (v4 >> 16));
		}
	case AF_INET6:
		{
			struct sockaddr_in6 *s6;
			uint32_t h;

			s6 = (struct sockaddr_in6 *)addr;
			h = (s6->sin6_addr.s6_addr32[0] +
			    s6->sin6_addr.s6_addr32[1] +
			    s6->sin6_addr.s6_addr32[2] +
			    s6->sin6_addr.s6_addr32[3]);
			return (h ^ (h >> 16));
		}
	default:
		return (0);
	}
}
4924 
4925 struct sctp_ifa *
4926 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4927 {
4928 	struct sctp_ifa *sctp_ifap;
4929 	struct sctp_vrf *vrf;
4930 	struct sctp_ifalist *hash_head;
4931 	uint32_t hash_of_addr;
4932 
4933 	if (holds_lock == 0)
4934 		SCTP_IPI_ADDR_RLOCK();
4935 
4936 	vrf = sctp_find_vrf(vrf_id);
4937 	if (vrf == NULL) {
4938 stage_right:
4939 		if (holds_lock == 0)
4940 			SCTP_IPI_ADDR_RUNLOCK();
4941 		return (NULL);
4942 	}
4943 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4944 
4945 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4946 	if (hash_head == NULL) {
4947 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4948 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4949 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4950 		sctp_print_address(addr);
4951 		SCTP_PRINTF("No such bucket for address\n");
4952 		if (holds_lock == 0)
4953 			SCTP_IPI_ADDR_RUNLOCK();
4954 
4955 		return (NULL);
4956 	}
4957 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4958 		if (sctp_ifap == NULL) {
4959 #ifdef INVARIANTS
4960 			panic("Huh LIST_FOREACH corrupt");
4961 			goto stage_right;
4962 #else
4963 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4964 			goto stage_right;
4965 #endif
4966 		}
4967 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4968 			continue;
4969 		if (addr->sa_family == AF_INET) {
4970 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4971 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4972 				/* found him. */
4973 				if (holds_lock == 0)
4974 					SCTP_IPI_ADDR_RUNLOCK();
4975 				return (sctp_ifap);
4976 				break;
4977 			}
4978 		}
4979 #ifdef INET6
4980 		if (addr->sa_family == AF_INET6) {
4981 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4982 			    &sctp_ifap->address.sin6)) {
4983 				/* found him. */
4984 				if (holds_lock == 0)
4985 					SCTP_IPI_ADDR_RUNLOCK();
4986 				return (sctp_ifap);
4987 				break;
4988 			}
4989 		}
4990 #endif
4991 	}
4992 	if (holds_lock == 0)
4993 		SCTP_IPI_ADDR_RUNLOCK();
4994 	return (NULL);
4995 }
4996 
/*
 * Called after the user has pulled data off the socket: decide whether the
 * receive window has opened enough (by at least rwnd_req) to warrant sending
 * a window-update SACK now, and if so send it and kick chunk output.
 * '*freed_so_far' is consumed (added to the per-assoc running total and then
 * reset to 0).  'hold_rlock' tells us whether the caller holds the INP read
 * lock, which must be dropped around the SACK/output work and reacquired.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the assoc so it cannot be freed while we work on it. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint; dropped at 'out'. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the per-assoc total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window opened by at least rwnd_req: send a window-update
		 * SACK.  Drop the caller's READ lock first (reacquired at
		 * 'out') since SACK/output paths must not hold it.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock; the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the caller's READ lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5078 
5079 int
5080 sctp_sorecvmsg(struct socket *so,
5081     struct uio *uio,
5082     struct mbuf **mp,
5083     struct sockaddr *from,
5084     int fromlen,
5085     int *msg_flags,
5086     struct sctp_sndrcvinfo *sinfo,
5087     int filling_sinfo)
5088 {
5089 	/*
5090 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5091 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5092 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5093 	 * On the way out we may send out any combination of:
5094 	 * MSG_NOTIFICATION MSG_EOR
5095 	 *
5096 	 */
5097 	struct sctp_inpcb *inp = NULL;
5098 	int my_len = 0;
5099 	int cp_len = 0, error = 0;
5100 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5101 	struct mbuf *m = NULL, *embuf = NULL;
5102 	struct sctp_tcb *stcb = NULL;
5103 	int wakeup_read_socket = 0;
5104 	int freecnt_applied = 0;
5105 	int out_flags = 0, in_flags = 0;
5106 	int block_allowed = 1;
5107 	uint32_t freed_so_far = 0;
5108 	uint32_t copied_so_far = 0;
5109 	int in_eeor_mode = 0;
5110 	int no_rcv_needed = 0;
5111 	uint32_t rwnd_req = 0;
5112 	int hold_sblock = 0;
5113 	int hold_rlock = 0;
5114 	int slen = 0;
5115 	uint32_t held_length = 0;
5116 	int sockbuf_lock = 0;
5117 
5118 	if (uio == NULL) {
5119 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5120 		return (EINVAL);
5121 	}
5122 	if (msg_flags) {
5123 		in_flags = *msg_flags;
5124 		if (in_flags & MSG_PEEK)
5125 			SCTP_STAT_INCR(sctps_read_peeks);
5126 	} else {
5127 		in_flags = 0;
5128 	}
5129 	slen = uio->uio_resid;
5130 
5131 	/* Pull in and set up our int flags */
5132 	if (in_flags & MSG_OOB) {
5133 		/* Out of band's NOT supported */
5134 		return (EOPNOTSUPP);
5135 	}
5136 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5137 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5138 		return (EINVAL);
5139 	}
5140 	if ((in_flags & (MSG_DONTWAIT
5141 	    | MSG_NBIO
5142 	    )) ||
5143 	    SCTP_SO_IS_NBIO(so)) {
5144 		block_allowed = 0;
5145 	}
5146 	/* setup the endpoint */
5147 	inp = (struct sctp_inpcb *)so->so_pcb;
5148 	if (inp == NULL) {
5149 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5150 		return (EFAULT);
5151 	}
5152 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5153 	/* Must be at least a MTU's worth */
5154 	if (rwnd_req < SCTP_MIN_RWND)
5155 		rwnd_req = SCTP_MIN_RWND;
5156 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5157 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5158 		sctp_misc_ints(SCTP_SORECV_ENTER,
5159 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5160 	}
5161 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5162 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5163 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5164 	}
5165 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5166 	sockbuf_lock = 1;
5167 	if (error) {
5168 		goto release_unlocked;
5169 	}
5170 restart:
5171 
5172 
5173 restart_nosblocks:
5174 	if (hold_sblock == 0) {
5175 		SOCKBUF_LOCK(&so->so_rcv);
5176 		hold_sblock = 1;
5177 	}
5178 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5179 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5180 		goto out;
5181 	}
5182 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5183 		if (so->so_error) {
5184 			error = so->so_error;
5185 			if ((in_flags & MSG_PEEK) == 0)
5186 				so->so_error = 0;
5187 			goto out;
5188 		} else {
5189 			if (so->so_rcv.sb_cc == 0) {
5190 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5191 				/* indicate EOF */
5192 				error = 0;
5193 				goto out;
5194 			}
5195 		}
5196 	}
5197 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5198 		/* we need to wait for data */
5199 		if ((so->so_rcv.sb_cc == 0) &&
5200 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5201 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5202 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5203 				/*
5204 				 * For active open side clear flags for
5205 				 * re-use passive open is blocked by
5206 				 * connect.
5207 				 */
5208 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5209 					/*
5210 					 * You were aborted, passive side
5211 					 * always hits here
5212 					 */
5213 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5214 					error = ECONNRESET;
5215 					/*
5216 					 * You get this once if you are
5217 					 * active open side
5218 					 */
5219 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5220 						/*
5221 						 * Remove flag if on the
5222 						 * active open side
5223 						 */
5224 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5225 					}
5226 				}
5227 				so->so_state &= ~(SS_ISCONNECTING |
5228 				    SS_ISDISCONNECTING |
5229 				    SS_ISCONFIRMING |
5230 				    SS_ISCONNECTED);
5231 				if (error == 0) {
5232 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5233 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5234 						error = ENOTCONN;
5235 					} else {
5236 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5237 					}
5238 				}
5239 				goto out;
5240 			}
5241 		}
5242 		error = sbwait(&so->so_rcv);
5243 		if (error) {
5244 			goto out;
5245 		}
5246 		held_length = 0;
5247 		goto restart_nosblocks;
5248 	} else if (so->so_rcv.sb_cc == 0) {
5249 		if (so->so_error) {
5250 			error = so->so_error;
5251 			if ((in_flags & MSG_PEEK) == 0)
5252 				so->so_error = 0;
5253 		} else {
5254 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5255 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5256 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5257 					/*
5258 					 * For active open side clear flags
5259 					 * for re-use passive open is
5260 					 * blocked by connect.
5261 					 */
5262 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5263 						/*
5264 						 * You were aborted, passive
5265 						 * side always hits here
5266 						 */
5267 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5268 						error = ECONNRESET;
5269 						/*
5270 						 * You get this once if you
5271 						 * are active open side
5272 						 */
5273 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5274 							/*
5275 							 * Remove flag if on
5276 							 * the active open
5277 							 * side
5278 							 */
5279 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5280 						}
5281 					}
5282 					so->so_state &= ~(SS_ISCONNECTING |
5283 					    SS_ISDISCONNECTING |
5284 					    SS_ISCONFIRMING |
5285 					    SS_ISCONNECTED);
5286 					if (error == 0) {
5287 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5288 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5289 							error = ENOTCONN;
5290 						} else {
5291 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5292 						}
5293 					}
5294 					goto out;
5295 				}
5296 			}
5297 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5298 			error = EWOULDBLOCK;
5299 		}
5300 		goto out;
5301 	}
5302 	if (hold_sblock == 1) {
5303 		SOCKBUF_UNLOCK(&so->so_rcv);
5304 		hold_sblock = 0;
5305 	}
5306 	/* we possibly have data we can read */
5307 	/* sa_ignore FREED_MEMORY */
5308 	control = TAILQ_FIRST(&inp->read_queue);
5309 	if (control == NULL) {
5310 		/*
5311 		 * This could be happening since the appender did the
5312 		 * increment but as not yet did the tailq insert onto the
5313 		 * read_queue
5314 		 */
5315 		if (hold_rlock == 0) {
5316 			SCTP_INP_READ_LOCK(inp);
5317 			hold_rlock = 1;
5318 		}
5319 		control = TAILQ_FIRST(&inp->read_queue);
5320 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5321 #ifdef INVARIANTS
5322 			panic("Huh, its non zero and nothing on control?");
5323 #endif
5324 			so->so_rcv.sb_cc = 0;
5325 		}
5326 		SCTP_INP_READ_UNLOCK(inp);
5327 		hold_rlock = 0;
5328 		goto restart;
5329 	}
5330 	if ((control->length == 0) &&
5331 	    (control->do_not_ref_stcb)) {
5332 		/*
5333 		 * Clean up code for freeing assoc that left behind a
5334 		 * pdapi.. maybe a peer in EEOR that just closed after
5335 		 * sending and never indicated a EOR.
5336 		 */
5337 		if (hold_rlock == 0) {
5338 			hold_rlock = 1;
5339 			SCTP_INP_READ_LOCK(inp);
5340 		}
5341 		control->held_length = 0;
5342 		if (control->data) {
5343 			/* Hmm there is data here .. fix */
5344 			struct mbuf *m_tmp;
5345 			int cnt = 0;
5346 
5347 			m_tmp = control->data;
5348 			while (m_tmp) {
5349 				cnt += SCTP_BUF_LEN(m_tmp);
5350 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5351 					control->tail_mbuf = m_tmp;
5352 					control->end_added = 1;
5353 				}
5354 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5355 			}
5356 			control->length = cnt;
5357 		} else {
5358 			/* remove it */
5359 			TAILQ_REMOVE(&inp->read_queue, control, next);
5360 			/* Add back any hiddend data */
5361 			sctp_free_remote_addr(control->whoFrom);
5362 			sctp_free_a_readq(stcb, control);
5363 		}
5364 		if (hold_rlock) {
5365 			hold_rlock = 0;
5366 			SCTP_INP_READ_UNLOCK(inp);
5367 		}
5368 		goto restart;
5369 	}
5370 	if ((control->length == 0) &&
5371 	    (control->end_added == 1)) {
5372 		/*
5373 		 * Do we also need to check for (control->pdapi_aborted ==
5374 		 * 1)?
5375 		 */
5376 		if (hold_rlock == 0) {
5377 			hold_rlock = 1;
5378 			SCTP_INP_READ_LOCK(inp);
5379 		}
5380 		TAILQ_REMOVE(&inp->read_queue, control, next);
5381 		if (control->data) {
5382 #ifdef INVARIANTS
5383 			panic("control->data not null but control->length == 0");
5384 #else
5385 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5386 			sctp_m_freem(control->data);
5387 			control->data = NULL;
5388 #endif
5389 		}
5390 		if (control->aux_data) {
5391 			sctp_m_free(control->aux_data);
5392 			control->aux_data = NULL;
5393 		}
5394 		sctp_free_remote_addr(control->whoFrom);
5395 		sctp_free_a_readq(stcb, control);
5396 		if (hold_rlock) {
5397 			hold_rlock = 0;
5398 			SCTP_INP_READ_UNLOCK(inp);
5399 		}
5400 		goto restart;
5401 	}
5402 	if (control->length == 0) {
5403 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5404 		    (filling_sinfo)) {
5405 			/* find a more suitable one then this */
5406 			ctl = TAILQ_NEXT(control, next);
5407 			while (ctl) {
5408 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5409 				    (ctl->some_taken ||
5410 				    (ctl->spec_flags & M_NOTIFICATION) ||
5411 				    ((ctl->do_not_ref_stcb == 0) &&
5412 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5413 				    ) {
5414 					/*-
5415 					 * If we have a different TCB next, and there is data
5416 					 * present. If we have already taken some (pdapi), OR we can
5417 					 * ref the tcb and no delivery as started on this stream, we
5418 					 * take it. Note we allow a notification on a different
5419 					 * assoc to be delivered..
5420 					 */
5421 					control = ctl;
5422 					goto found_one;
5423 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5424 					    (ctl->length) &&
5425 					    ((ctl->some_taken) ||
5426 					    ((ctl->do_not_ref_stcb == 0) &&
5427 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5428 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5429 					/*-
5430 					 * If we have the same tcb, and there is data present, and we
5431 					 * have the strm interleave feature present. Then if we have
5432 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5433 					 * not started a delivery for this stream, we can take it.
5434 					 * Note we do NOT allow a notificaiton on the same assoc to
5435 					 * be delivered.
5436 					 */
5437 					control = ctl;
5438 					goto found_one;
5439 				}
5440 				ctl = TAILQ_NEXT(ctl, next);
5441 			}
5442 		}
5443 		/*
5444 		 * if we reach here, not suitable replacement is available
5445 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5446 		 * into the our held count, and its time to sleep again.
5447 		 */
5448 		held_length = so->so_rcv.sb_cc;
5449 		control->held_length = so->so_rcv.sb_cc;
5450 		goto restart;
5451 	}
5452 	/* Clear the held length since there is something to read */
5453 	control->held_length = 0;
5454 	if (hold_rlock) {
5455 		SCTP_INP_READ_UNLOCK(inp);
5456 		hold_rlock = 0;
5457 	}
5458 found_one:
5459 	/*
5460 	 * If we reach here, control has a some data for us to read off.
5461 	 * Note that stcb COULD be NULL.
5462 	 */
5463 	control->some_taken++;
5464 	if (hold_sblock) {
5465 		SOCKBUF_UNLOCK(&so->so_rcv);
5466 		hold_sblock = 0;
5467 	}
5468 	stcb = control->stcb;
5469 	if (stcb) {
5470 		if ((control->do_not_ref_stcb == 0) &&
5471 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5472 			if (freecnt_applied == 0)
5473 				stcb = NULL;
5474 		} else if (control->do_not_ref_stcb == 0) {
5475 			/* you can't free it on me please */
5476 			/*
5477 			 * The lock on the socket buffer protects us so the
5478 			 * free code will stop. But since we used the
5479 			 * socketbuf lock and the sender uses the tcb_lock
5480 			 * to increment, we need to use the atomic add to
5481 			 * the refcnt
5482 			 */
5483 			if (freecnt_applied) {
5484 #ifdef INVARIANTS
5485 				panic("refcnt already incremented");
5486 #else
5487 				printf("refcnt already incremented?\n");
5488 #endif
5489 			} else {
5490 				atomic_add_int(&stcb->asoc.refcnt, 1);
5491 				freecnt_applied = 1;
5492 			}
5493 			/*
5494 			 * Setup to remember how much we have not yet told
5495 			 * the peer our rwnd has opened up. Note we grab the
5496 			 * value from the tcb from last time. Note too that
5497 			 * sack sending clears this when a sack is sent,
5498 			 * which is fine. Once we hit the rwnd_req, we then
5499 			 * will go to the sctp_user_rcvd() that will not
5500 			 * lock until it KNOWs it MUST send a WUP-SACK.
5501 			 */
5502 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5503 			stcb->freed_by_sorcv_sincelast = 0;
5504 		}
5505 	}
5506 	if (stcb &&
5507 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5508 	    control->do_not_ref_stcb == 0) {
5509 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5510 	}
5511 	/* First lets get off the sinfo and sockaddr info */
5512 	if ((sinfo) && filling_sinfo) {
5513 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5514 		nxt = TAILQ_NEXT(control, next);
5515 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5516 			struct sctp_extrcvinfo *s_extra;
5517 
5518 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5519 			if ((nxt) &&
5520 			    (nxt->length)) {
5521 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5522 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5523 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5524 				}
5525 				if (nxt->spec_flags & M_NOTIFICATION) {
5526 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5527 				}
5528 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5529 				s_extra->sreinfo_next_length = nxt->length;
5530 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5531 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5532 				if (nxt->tail_mbuf != NULL) {
5533 					if (nxt->end_added) {
5534 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5535 					}
5536 				}
5537 			} else {
5538 				/*
5539 				 * we explicitly 0 this, since the memcpy
5540 				 * got some other things beyond the older
5541 				 * sinfo_ that is on the control's structure
5542 				 * :-D
5543 				 */
5544 				nxt = NULL;
5545 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5546 				s_extra->sreinfo_next_aid = 0;
5547 				s_extra->sreinfo_next_length = 0;
5548 				s_extra->sreinfo_next_ppid = 0;
5549 				s_extra->sreinfo_next_stream = 0;
5550 			}
5551 		}
5552 		/*
5553 		 * update off the real current cum-ack, if we have an stcb.
5554 		 */
5555 		if ((control->do_not_ref_stcb == 0) && stcb)
5556 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5557 		/*
5558 		 * mask off the high bits, we keep the actual chunk bits in
5559 		 * there.
5560 		 */
5561 		sinfo->sinfo_flags &= 0x00ff;
5562 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5563 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5564 		}
5565 	}
5566 #ifdef SCTP_ASOCLOG_OF_TSNS
5567 	{
5568 		int index, newindex;
5569 		struct sctp_pcbtsn_rlog *entry;
5570 
5571 		do {
5572 			index = inp->readlog_index;
5573 			newindex = index + 1;
5574 			if (newindex >= SCTP_READ_LOG_SIZE) {
5575 				newindex = 0;
5576 			}
5577 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5578 		entry = &inp->readlog[index];
5579 		entry->vtag = control->sinfo_assoc_id;
5580 		entry->strm = control->sinfo_stream;
5581 		entry->seq = control->sinfo_ssn;
5582 		entry->sz = control->length;
5583 		entry->flgs = control->sinfo_flags;
5584 	}
5585 #endif
5586 	if (fromlen && from) {
5587 		struct sockaddr *to;
5588 
5589 #ifdef INET
5590 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5591 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5592 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5593 #else
5594 		/* No AF_INET use AF_INET6 */
5595 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5596 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5597 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5598 #endif
5599 
5600 		to = from;
5601 #if defined(INET) && defined(INET6)
5602 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5603 		    (to->sa_family == AF_INET) &&
5604 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5605 			struct sockaddr_in *sin;
5606 			struct sockaddr_in6 sin6;
5607 
5608 			sin = (struct sockaddr_in *)to;
5609 			bzero(&sin6, sizeof(sin6));
5610 			sin6.sin6_family = AF_INET6;
5611 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5612 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5613 			bcopy(&sin->sin_addr,
5614 			    &sin6.sin6_addr.s6_addr32[3],
5615 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5616 			sin6.sin6_port = sin->sin_port;
5617 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5618 		}
5619 #endif
5620 #if defined(INET6)
5621 		{
5622 			struct sockaddr_in6 lsa6, *to6;
5623 
5624 			to6 = (struct sockaddr_in6 *)to;
5625 			sctp_recover_scope_mac(to6, (&lsa6));
5626 		}
5627 #endif
5628 	}
5629 	/* now copy out what data we can */
5630 	if (mp == NULL) {
5631 		/* copy out each mbuf in the chain up to length */
5632 get_more_data:
5633 		m = control->data;
5634 		while (m) {
5635 			/* Move out all we can */
5636 			cp_len = (int)uio->uio_resid;
5637 			my_len = (int)SCTP_BUF_LEN(m);
5638 			if (cp_len > my_len) {
5639 				/* not enough in this buf */
5640 				cp_len = my_len;
5641 			}
5642 			if (hold_rlock) {
5643 				SCTP_INP_READ_UNLOCK(inp);
5644 				hold_rlock = 0;
5645 			}
5646 			if (cp_len > 0)
5647 				error = uiomove(mtod(m, char *), cp_len, uio);
5648 			/* re-read */
5649 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5650 				goto release;
5651 			}
5652 			if ((control->do_not_ref_stcb == 0) && stcb &&
5653 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5654 				no_rcv_needed = 1;
5655 			}
5656 			if (error) {
5657 				/* error we are out of here */
5658 				goto release;
5659 			}
5660 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5661 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5662 			    ((control->end_added == 0) ||
5663 			    (control->end_added &&
5664 			    (TAILQ_NEXT(control, next) == NULL)))
5665 			    ) {
5666 				SCTP_INP_READ_LOCK(inp);
5667 				hold_rlock = 1;
5668 			}
5669 			if (cp_len == SCTP_BUF_LEN(m)) {
5670 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5671 				    (control->end_added)) {
5672 					out_flags |= MSG_EOR;
5673 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5674 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5675 				}
5676 				if (control->spec_flags & M_NOTIFICATION) {
5677 					out_flags |= MSG_NOTIFICATION;
5678 				}
5679 				/* we ate up the mbuf */
5680 				if (in_flags & MSG_PEEK) {
5681 					/* just looking */
5682 					m = SCTP_BUF_NEXT(m);
5683 					copied_so_far += cp_len;
5684 				} else {
5685 					/* dispose of the mbuf */
5686 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5687 						sctp_sblog(&so->so_rcv,
5688 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5689 					}
5690 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5691 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5692 						sctp_sblog(&so->so_rcv,
5693 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5694 					}
5695 					embuf = m;
5696 					copied_so_far += cp_len;
5697 					freed_so_far += cp_len;
5698 					freed_so_far += MSIZE;
5699 					atomic_subtract_int(&control->length, cp_len);
5700 					control->data = sctp_m_free(m);
5701 					m = control->data;
5702 					/*
5703 					 * been through it all, must hold sb
5704 					 * lock ok to null tail
5705 					 */
5706 					if (control->data == NULL) {
5707 #ifdef INVARIANTS
5708 						if ((control->end_added == 0) ||
5709 						    (TAILQ_NEXT(control, next) == NULL)) {
5710 							/*
5711 							 * If the end is not
5712 							 * added, OR the
5713 							 * next is NOT null
5714 							 * we MUST have the
5715 							 * lock.
5716 							 */
5717 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5718 								panic("Hmm we don't own the lock?");
5719 							}
5720 						}
5721 #endif
5722 						control->tail_mbuf = NULL;
5723 #ifdef INVARIANTS
5724 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5725 							panic("end_added, nothing left and no MSG_EOR");
5726 						}
5727 #endif
5728 					}
5729 				}
5730 			} else {
5731 				/* Do we need to trim the mbuf? */
5732 				if (control->spec_flags & M_NOTIFICATION) {
5733 					out_flags |= MSG_NOTIFICATION;
5734 				}
5735 				if ((in_flags & MSG_PEEK) == 0) {
5736 					SCTP_BUF_RESV_UF(m, cp_len);
5737 					SCTP_BUF_LEN(m) -= cp_len;
5738 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5739 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5740 					}
5741 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5742 					if ((control->do_not_ref_stcb == 0) &&
5743 					    stcb) {
5744 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5745 					}
5746 					copied_so_far += cp_len;
5747 					embuf = m;
5748 					freed_so_far += cp_len;
5749 					freed_so_far += MSIZE;
5750 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5751 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5752 						    SCTP_LOG_SBRESULT, 0);
5753 					}
5754 					atomic_subtract_int(&control->length, cp_len);
5755 				} else {
5756 					copied_so_far += cp_len;
5757 				}
5758 			}
5759 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5760 				break;
5761 			}
5762 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5763 			    (control->do_not_ref_stcb == 0) &&
5764 			    (freed_so_far >= rwnd_req)) {
5765 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5766 			}
5767 		}		/* end while(m) */
5768 		/*
5769 		 * At this point we have looked at it all and we either have
5770 		 * a MSG_EOR/or read all the user wants... <OR>
5771 		 * control->length == 0.
5772 		 */
5773 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5774 			/* we are done with this control */
5775 			if (control->length == 0) {
5776 				if (control->data) {
5777 #ifdef INVARIANTS
5778 					panic("control->data not null at read eor?");
5779 #else
5780 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5781 					sctp_m_freem(control->data);
5782 					control->data = NULL;
5783 #endif
5784 				}
5785 		done_with_control:
5786 				if (TAILQ_NEXT(control, next) == NULL) {
5787 					/*
5788 					 * If we don't have a next we need a
5789 					 * lock, if there is a next
5790 					 * interrupt is filling ahead of us
5791 					 * and we don't need a lock to
5792 					 * remove this guy (which is the
5793 					 * head of the queue).
5794 					 */
5795 					if (hold_rlock == 0) {
5796 						SCTP_INP_READ_LOCK(inp);
5797 						hold_rlock = 1;
5798 					}
5799 				}
5800 				TAILQ_REMOVE(&inp->read_queue, control, next);
5801 				/* Add back any hiddend data */
5802 				if (control->held_length) {
5803 					held_length = 0;
5804 					control->held_length = 0;
5805 					wakeup_read_socket = 1;
5806 				}
5807 				if (control->aux_data) {
5808 					sctp_m_free(control->aux_data);
5809 					control->aux_data = NULL;
5810 				}
5811 				no_rcv_needed = control->do_not_ref_stcb;
5812 				sctp_free_remote_addr(control->whoFrom);
5813 				control->data = NULL;
5814 				sctp_free_a_readq(stcb, control);
5815 				control = NULL;
5816 				if ((freed_so_far >= rwnd_req) &&
5817 				    (no_rcv_needed == 0))
5818 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5819 
5820 			} else {
5821 				/*
5822 				 * The user did not read all of this
5823 				 * message, turn off the returned MSG_EOR
5824 				 * since we are leaving more behind on the
5825 				 * control to read.
5826 				 */
5827 #ifdef INVARIANTS
5828 				if (control->end_added &&
5829 				    (control->data == NULL) &&
5830 				    (control->tail_mbuf == NULL)) {
5831 					panic("Gak, control->length is corrupt?");
5832 				}
5833 #endif
5834 				no_rcv_needed = control->do_not_ref_stcb;
5835 				out_flags &= ~MSG_EOR;
5836 			}
5837 		}
5838 		if (out_flags & MSG_EOR) {
5839 			goto release;
5840 		}
5841 		if ((uio->uio_resid == 0) ||
5842 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5843 		    ) {
5844 			goto release;
5845 		}
5846 		/*
5847 		 * If I hit here the receiver wants more and this message is
5848 		 * NOT done (pd-api). So two questions. Can we block? if not
5849 		 * we are done. Did the user NOT set MSG_WAITALL?
5850 		 */
5851 		if (block_allowed == 0) {
5852 			goto release;
5853 		}
5854 		/*
5855 		 * We need to wait for more data a few things: - We don't
5856 		 * sbunlock() so we don't get someone else reading. - We
5857 		 * must be sure to account for the case where what is added
5858 		 * is NOT to our control when we wakeup.
5859 		 */
5860 
5861 		/*
5862 		 * Do we need to tell the transport a rwnd update might be
5863 		 * needed before we go to sleep?
5864 		 */
5865 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5866 		    ((freed_so_far >= rwnd_req) &&
5867 		    (control->do_not_ref_stcb == 0) &&
5868 		    (no_rcv_needed == 0))) {
5869 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5870 		}
5871 wait_some_more:
5872 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5873 			goto release;
5874 		}
5875 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5876 			goto release;
5877 
5878 		if (hold_rlock == 1) {
5879 			SCTP_INP_READ_UNLOCK(inp);
5880 			hold_rlock = 0;
5881 		}
5882 		if (hold_sblock == 0) {
5883 			SOCKBUF_LOCK(&so->so_rcv);
5884 			hold_sblock = 1;
5885 		}
5886 		if ((copied_so_far) && (control->length == 0) &&
5887 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5888 			goto release;
5889 		}
5890 		if (so->so_rcv.sb_cc <= control->held_length) {
5891 			error = sbwait(&so->so_rcv);
5892 			if (error) {
5893 				goto release;
5894 			}
5895 			control->held_length = 0;
5896 		}
5897 		if (hold_sblock) {
5898 			SOCKBUF_UNLOCK(&so->so_rcv);
5899 			hold_sblock = 0;
5900 		}
5901 		if (control->length == 0) {
5902 			/* still nothing here */
5903 			if (control->end_added == 1) {
5904 				/* he aborted, or is done i.e.did a shutdown */
5905 				out_flags |= MSG_EOR;
5906 				if (control->pdapi_aborted) {
5907 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5908 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5909 
5910 					out_flags |= MSG_TRUNC;
5911 				} else {
5912 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5913 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5914 				}
5915 				goto done_with_control;
5916 			}
5917 			if (so->so_rcv.sb_cc > held_length) {
5918 				control->held_length = so->so_rcv.sb_cc;
5919 				held_length = 0;
5920 			}
5921 			goto wait_some_more;
5922 		} else if (control->data == NULL) {
5923 			/*
5924 			 * we must re-sync since data is probably being
5925 			 * added
5926 			 */
5927 			SCTP_INP_READ_LOCK(inp);
5928 			if ((control->length > 0) && (control->data == NULL)) {
5929 				/*
5930 				 * big trouble.. we have the lock and its
5931 				 * corrupt?
5932 				 */
5933 #ifdef INVARIANTS
5934 				panic("Impossible data==NULL length !=0");
5935 #endif
5936 				out_flags |= MSG_EOR;
5937 				out_flags |= MSG_TRUNC;
5938 				control->length = 0;
5939 				SCTP_INP_READ_UNLOCK(inp);
5940 				goto done_with_control;
5941 			}
5942 			SCTP_INP_READ_UNLOCK(inp);
5943 			/* We will fall around to get more data */
5944 		}
5945 		goto get_more_data;
5946 	} else {
5947 		/*-
5948 		 * Give caller back the mbuf chain,
5949 		 * store in uio_resid the length
5950 		 */
5951 		wakeup_read_socket = 0;
5952 		if ((control->end_added == 0) ||
5953 		    (TAILQ_NEXT(control, next) == NULL)) {
5954 			/* Need to get rlock */
5955 			if (hold_rlock == 0) {
5956 				SCTP_INP_READ_LOCK(inp);
5957 				hold_rlock = 1;
5958 			}
5959 		}
5960 		if (control->end_added) {
5961 			out_flags |= MSG_EOR;
5962 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5963 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5964 		}
5965 		if (control->spec_flags & M_NOTIFICATION) {
5966 			out_flags |= MSG_NOTIFICATION;
5967 		}
5968 		uio->uio_resid = control->length;
5969 		*mp = control->data;
5970 		m = control->data;
5971 		while (m) {
5972 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5973 				sctp_sblog(&so->so_rcv,
5974 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5975 			}
5976 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5977 			freed_so_far += SCTP_BUF_LEN(m);
5978 			freed_so_far += MSIZE;
5979 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5980 				sctp_sblog(&so->so_rcv,
5981 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5982 			}
5983 			m = SCTP_BUF_NEXT(m);
5984 		}
5985 		control->data = control->tail_mbuf = NULL;
5986 		control->length = 0;
5987 		if (out_flags & MSG_EOR) {
5988 			/* Done with this control */
5989 			goto done_with_control;
5990 		}
5991 	}
5992 release:
5993 	if (hold_rlock == 1) {
5994 		SCTP_INP_READ_UNLOCK(inp);
5995 		hold_rlock = 0;
5996 	}
5997 	if (hold_sblock == 1) {
5998 		SOCKBUF_UNLOCK(&so->so_rcv);
5999 		hold_sblock = 0;
6000 	}
6001 	sbunlock(&so->so_rcv);
6002 	sockbuf_lock = 0;
6003 
6004 release_unlocked:
6005 	if (hold_sblock) {
6006 		SOCKBUF_UNLOCK(&so->so_rcv);
6007 		hold_sblock = 0;
6008 	}
6009 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6010 		if ((freed_so_far >= rwnd_req) &&
6011 		    (control && (control->do_not_ref_stcb == 0)) &&
6012 		    (no_rcv_needed == 0))
6013 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6014 	}
6015 out:
6016 	if (msg_flags) {
6017 		*msg_flags = out_flags;
6018 	}
6019 	if (((out_flags & MSG_EOR) == 0) &&
6020 	    ((in_flags & MSG_PEEK) == 0) &&
6021 	    (sinfo) &&
6022 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6023 		struct sctp_extrcvinfo *s_extra;
6024 
6025 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6026 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6027 	}
6028 	if (hold_rlock == 1) {
6029 		SCTP_INP_READ_UNLOCK(inp);
6030 		hold_rlock = 0;
6031 	}
6032 	if (hold_sblock) {
6033 		SOCKBUF_UNLOCK(&so->so_rcv);
6034 		hold_sblock = 0;
6035 	}
6036 	if (sockbuf_lock) {
6037 		sbunlock(&so->so_rcv);
6038 	}
6039 	if (freecnt_applied) {
6040 		/*
6041 		 * The lock on the socket buffer protects us so the free
6042 		 * code will stop. But since we used the socketbuf lock and
6043 		 * the sender uses the tcb_lock to increment, we need to use
6044 		 * the atomic add to the refcnt.
6045 		 */
6046 		if (stcb == NULL) {
6047 #ifdef INVARIANTS
6048 			panic("stcb for refcnt has gone NULL?");
6049 			goto stage_left;
6050 #else
6051 			goto stage_left;
6052 #endif
6053 		}
6054 		atomic_add_int(&stcb->asoc.refcnt, -1);
6055 		freecnt_applied = 0;
6056 		/* Save the value back for next time */
6057 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6058 	}
6059 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6060 		if (stcb) {
6061 			sctp_misc_ints(SCTP_SORECV_DONE,
6062 			    freed_so_far,
6063 			    ((uio) ? (slen - uio->uio_resid) : slen),
6064 			    stcb->asoc.my_rwnd,
6065 			    so->so_rcv.sb_cc);
6066 		} else {
6067 			sctp_misc_ints(SCTP_SORECV_DONE,
6068 			    freed_so_far,
6069 			    ((uio) ? (slen - uio->uio_resid) : slen),
6070 			    0,
6071 			    so->so_rcv.sb_cc);
6072 		}
6073 	}
6074 stage_left:
6075 	if (wakeup_read_socket) {
6076 		sctp_sorwakeup(inp, so);
6077 	}
6078 	return (error);
6079 }
6080 
6081 
6082 #ifdef SCTP_MBUF_LOGGING
6083 struct mbuf *
6084 sctp_m_free(struct mbuf *m)
6085 {
6086 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6087 		if (SCTP_BUF_IS_EXTENDED(m)) {
6088 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6089 		}
6090 	}
6091 	return (m_free(m));
6092 }
6093 
6094 void
6095 sctp_m_freem(struct mbuf *mb)
6096 {
6097 	while (mb != NULL)
6098 		mb = sctp_m_free(mb);
6099 }
6100 
6101 #endif
6102 
6103 int
6104 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6105 {
6106 	/*
6107 	 * Given a local address. For all associations that holds the
6108 	 * address, request a peer-set-primary.
6109 	 */
6110 	struct sctp_ifa *ifa;
6111 	struct sctp_laddr *wi;
6112 
6113 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6114 	if (ifa == NULL) {
6115 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6116 		return (EADDRNOTAVAIL);
6117 	}
6118 	/*
6119 	 * Now that we have the ifa we must awaken the iterator with this
6120 	 * message.
6121 	 */
6122 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6123 	if (wi == NULL) {
6124 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6125 		return (ENOMEM);
6126 	}
6127 	/* Now incr the count and int wi structure */
6128 	SCTP_INCR_LADDR_COUNT();
6129 	bzero(wi, sizeof(*wi));
6130 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6131 	wi->ifa = ifa;
6132 	wi->action = SCTP_SET_PRIM_ADDR;
6133 	atomic_add_int(&ifa->refcount, 1);
6134 
6135 	/* Now add it to the work queue */
6136 	SCTP_WQ_ADDR_LOCK();
6137 	/*
6138 	 * Should this really be a tailq? As it is we will process the
6139 	 * newest first :-0
6140 	 */
6141 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6142 	SCTP_WQ_ADDR_UNLOCK();
6143 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6144 	    (struct sctp_inpcb *)NULL,
6145 	    (struct sctp_tcb *)NULL,
6146 	    (struct sctp_nets *)NULL);
6147 	return (0);
6148 }
6149 
6150 
6151 int
6152 sctp_soreceive(struct socket *so,
6153     struct sockaddr **psa,
6154     struct uio *uio,
6155     struct mbuf **mp0,
6156     struct mbuf **controlp,
6157     int *flagsp)
6158 {
6159 	int error, fromlen;
6160 	uint8_t sockbuf[256];
6161 	struct sockaddr *from;
6162 	struct sctp_extrcvinfo sinfo;
6163 	int filling_sinfo = 1;
6164 	struct sctp_inpcb *inp;
6165 
6166 	inp = (struct sctp_inpcb *)so->so_pcb;
6167 	/* pickup the assoc we are reading from */
6168 	if (inp == NULL) {
6169 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6170 		return (EINVAL);
6171 	}
6172 	if ((sctp_is_feature_off(inp,
6173 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6174 	    (controlp == NULL)) {
6175 		/* user does not want the sndrcv ctl */
6176 		filling_sinfo = 0;
6177 	}
6178 	if (psa) {
6179 		from = (struct sockaddr *)sockbuf;
6180 		fromlen = sizeof(sockbuf);
6181 		from->sa_len = 0;
6182 	} else {
6183 		from = NULL;
6184 		fromlen = 0;
6185 	}
6186 
6187 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6188 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6189 	if ((controlp) && (filling_sinfo)) {
6190 		/* copy back the sinfo in a CMSG format */
6191 		if (filling_sinfo)
6192 			*controlp = sctp_build_ctl_nchunk(inp,
6193 			    (struct sctp_sndrcvinfo *)&sinfo);
6194 		else
6195 			*controlp = NULL;
6196 	}
6197 	if (psa) {
6198 		/* copy back the address info */
6199 		if (from && from->sa_len) {
6200 			*psa = sodupsockaddr(from, M_NOWAIT);
6201 		} else {
6202 			*psa = NULL;
6203 		}
6204 	}
6205 	return (error);
6206 }
6207 
6208 
6209 int
6210 sctp_l_soreceive(struct socket *so,
6211     struct sockaddr **name,
6212     struct uio *uio,
6213     char **controlp,
6214     int *controllen,
6215     int *flag)
6216 {
6217 	int error, fromlen;
6218 	uint8_t sockbuf[256];
6219 	struct sockaddr *from;
6220 	struct sctp_extrcvinfo sinfo;
6221 	int filling_sinfo = 1;
6222 	struct sctp_inpcb *inp;
6223 
6224 	inp = (struct sctp_inpcb *)so->so_pcb;
6225 	/* pickup the assoc we are reading from */
6226 	if (inp == NULL) {
6227 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6228 		return (EINVAL);
6229 	}
6230 	if ((sctp_is_feature_off(inp,
6231 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6232 	    (controlp == NULL)) {
6233 		/* user does not want the sndrcv ctl */
6234 		filling_sinfo = 0;
6235 	}
6236 	if (name) {
6237 		from = (struct sockaddr *)sockbuf;
6238 		fromlen = sizeof(sockbuf);
6239 		from->sa_len = 0;
6240 	} else {
6241 		from = NULL;
6242 		fromlen = 0;
6243 	}
6244 
6245 	error = sctp_sorecvmsg(so, uio,
6246 	    (struct mbuf **)NULL,
6247 	    from, fromlen, flag,
6248 	    (struct sctp_sndrcvinfo *)&sinfo,
6249 	    filling_sinfo);
6250 	if ((controlp) && (filling_sinfo)) {
6251 		/*
6252 		 * copy back the sinfo in a CMSG format note that the caller
6253 		 * has reponsibility for freeing the memory.
6254 		 */
6255 		if (filling_sinfo)
6256 			*controlp = sctp_build_ctl_cchunk(inp,
6257 			    controllen,
6258 			    (struct sctp_sndrcvinfo *)&sinfo);
6259 	}
6260 	if (name) {
6261 		/* copy back the address info */
6262 		if (from && from->sa_len) {
6263 			*name = sodupsockaddr(from, M_WAIT);
6264 		} else {
6265 			*name = NULL;
6266 		}
6267 	}
6268 	return (error);
6269 }
6270 
6271 
6272 
6273 
6274 
6275 
6276 
6277 int
6278 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6279     int totaddr, int *error)
6280 {
6281 	int added = 0;
6282 	int i;
6283 	struct sctp_inpcb *inp;
6284 	struct sockaddr *sa;
6285 	size_t incr = 0;
6286 
6287 	sa = addr;
6288 	inp = stcb->sctp_ep;
6289 	*error = 0;
6290 	for (i = 0; i < totaddr; i++) {
6291 		if (sa->sa_family == AF_INET) {
6292 			incr = sizeof(struct sockaddr_in);
6293 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6294 				/* assoc gone no un-lock */
6295 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6296 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6297 				*error = ENOBUFS;
6298 				goto out_now;
6299 			}
6300 			added++;
6301 		} else if (sa->sa_family == AF_INET6) {
6302 			incr = sizeof(struct sockaddr_in6);
6303 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6304 				/* assoc gone no un-lock */
6305 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6306 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6307 				*error = ENOBUFS;
6308 				goto out_now;
6309 			}
6310 			added++;
6311 		}
6312 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6313 	}
6314 out_now:
6315 	return (added);
6316 }
6317 
6318 struct sctp_tcb *
6319 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6320     int *totaddr, int *num_v4, int *num_v6, int *error,
6321     int limit, int *bad_addr)
6322 {
6323 	struct sockaddr *sa;
6324 	struct sctp_tcb *stcb = NULL;
6325 	size_t incr, at, i;
6326 
6327 	at = incr = 0;
6328 	sa = addr;
6329 	*error = *num_v6 = *num_v4 = 0;
6330 	/* account and validate addresses */
6331 	for (i = 0; i < (size_t)*totaddr; i++) {
6332 		if (sa->sa_family == AF_INET) {
6333 			(*num_v4) += 1;
6334 			incr = sizeof(struct sockaddr_in);
6335 			if (sa->sa_len != incr) {
6336 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6337 				*error = EINVAL;
6338 				*bad_addr = 1;
6339 				return (NULL);
6340 			}
6341 		} else if (sa->sa_family == AF_INET6) {
6342 			struct sockaddr_in6 *sin6;
6343 
6344 			sin6 = (struct sockaddr_in6 *)sa;
6345 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6346 				/* Must be non-mapped for connectx */
6347 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6348 				*error = EINVAL;
6349 				*bad_addr = 1;
6350 				return (NULL);
6351 			}
6352 			(*num_v6) += 1;
6353 			incr = sizeof(struct sockaddr_in6);
6354 			if (sa->sa_len != incr) {
6355 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6356 				*error = EINVAL;
6357 				*bad_addr = 1;
6358 				return (NULL);
6359 			}
6360 		} else {
6361 			*totaddr = i;
6362 			/* we are done */
6363 			break;
6364 		}
6365 		SCTP_INP_INCR_REF(inp);
6366 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6367 		if (stcb != NULL) {
6368 			/* Already have or am bring up an association */
6369 			return (stcb);
6370 		} else {
6371 			SCTP_INP_DECR_REF(inp);
6372 		}
6373 		if ((at + incr) > (size_t)limit) {
6374 			*totaddr = i;
6375 			break;
6376 		}
6377 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6378 	}
6379 	return ((struct sctp_tcb *)NULL);
6380 }
6381 
6382 /*
6383  * sctp_bindx(ADD) for one address.
6384  * assumes all arguments are valid/checked by caller.
6385  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/* Address actually handed to bind/addr-mgmt (may be the converted sin). */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* Scratch space for converting a v4-mapped v6 address to plain IPv4. */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already!  bindx makes no sense then. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/*
				 * A v4-mapped address is really IPv4; it
				 * cannot be bound on a v6-only socket.
				 */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping: continue with a plain sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind plain v4 on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* Endpoint not yet bound at all: this is a plain first bind. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* does some other endpoint already own this address:port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* free: clear the port and register the address */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6507 
6508 /*
6509  * sctp_bindx(DELETE) for one address.
6510  * assumes all arguments are valid/checked by caller.
6511  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/* Address actually handed to addr-mgmt (may be the converted sin). */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* Scratch space for converting a v4-mapped v6 address to plain IPv4. */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already!  bindx makes no sense then. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/*
				 * A v4-mapped address is really IPv4; it
				 * cannot have been bound on a v6-only socket.
				 */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping: continue with a plain sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't have bound plain v4 on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6590 
6591 /*
6592  * returns the valid local address count for an assoc, taking into account
6593  * all scoping rules
6594  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/* Scope flags copied from the association; non-zero enables a scope. */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/*
	 * Decide which address families may be counted: a PF_INET6
	 * endpoint allows v6, and also v4 unless it is v6-only; any
	 * other endpoint allows v4 only.
	 */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count only addresses explicitly
		 * bound to the endpoint (and not restricted for this
		 * association).
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6726 
6727 #if defined(SCTP_LOCAL_TRACE_BUF)
6728 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Lock-free reservation of the next slot in the circular trace
	 * buffer: re-read the shared index and retry the CAS until no
	 * other CPU has raced us.  When the index reaches
	 * SCTP_MAX_LOGGING_SIZE it restarts at 1, and the entry is
	 * written at slot 0 (saveindex is clamped below).
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/* Fill in the reserved entry; writes are not atomic w.r.t. readers. */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6754 
6755 #endif
/* We will need to add support
 * for binding the ports here so
 * that we can do UDP tunneling.
 * In the meantime, we return an
 * error for unsupported cases.
 */
6761 #include <netinet/udp.h>
6762 #include <netinet/udp_var.h>
6763 #include <sys/proc.h>
6764 #ifdef INET6
6765 #include <netinet6/sctp6_var.h>
6766 #endif
6767 
6768 static void
6769 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6770 {
6771 	struct ip *iph;
6772 	struct mbuf *sp, *last;
6773 	struct udphdr *uhdr;
6774 	uint16_t port = 0, len;
6775 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6776 
6777 	/*
6778 	 * Split out the mbuf chain. Leave the IP header in m, place the
6779 	 * rest in the sp.
6780 	 */
6781 	if ((m->m_flags & M_PKTHDR) == 0) {
6782 		/* Can't handle one that is not a pkt hdr */
6783 		goto out;
6784 	}
6785 	/* pull the src port */
6786 	iph = mtod(m, struct ip *);
6787 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6788 
6789 	port = uhdr->uh_sport;
6790 	sp = m_split(m, off, M_DONTWAIT);
6791 	if (sp == NULL) {
6792 		/* Gak, drop packet, we can't do a split */
6793 		goto out;
6794 	}
6795 	if (sp->m_pkthdr.len < header_size) {
6796 		/* Gak, packet can't have an SCTP header in it - to small */
6797 		m_freem(sp);
6798 		goto out;
6799 	}
6800 	/* ok now pull up the UDP header and SCTP header together */
6801 	sp = m_pullup(sp, header_size);
6802 	if (sp == NULL) {
6803 		/* Gak pullup failed */
6804 		goto out;
6805 	}
6806 	/* trim out the UDP header */
6807 	m_adj(sp, sizeof(struct udphdr));
6808 
6809 	/* Now reconstruct the mbuf chain */
6810 	/* 1) find last one */
6811 	last = m;
6812 	while (last->m_next != NULL) {
6813 		last = last->m_next;
6814 	}
6815 	last->m_next = sp;
6816 	m->m_pkthdr.len += sp->m_pkthdr.len;
6817 	last = m;
6818 	while (last != NULL) {
6819 		last = last->m_next;
6820 	}
6821 	/* Now its ready for sctp_input or sctp6_input */
6822 	iph = mtod(m, struct ip *);
6823 	switch (iph->ip_v) {
6824 	case IPVERSION:
6825 		{
6826 			/* its IPv4 */
6827 			len = SCTP_GET_IPV4_LENGTH(iph);
6828 			len -= sizeof(struct udphdr);
6829 			SCTP_GET_IPV4_LENGTH(iph) = len;
6830 			sctp_input_with_port(m, off, port);
6831 			break;
6832 		}
6833 #ifdef INET6
6834 	case IPV6_VERSION >> 4:
6835 		{
6836 			/* its IPv6 - NOT supported */
6837 			goto out;
6838 			break;
6839 
6840 		}
6841 #endif
6842 	default:
6843 		{
6844 			m_freem(m);
6845 			break;
6846 		}
6847 	}
6848 	return;
6849 out:
6850 	m_freem(m);
6851 }
6852 
6853 void
6854 sctp_over_udp_stop(void)
6855 {
6856 	struct socket *sop;
6857 
6858 	/*
6859 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6860 	 * for writting!
6861 	 */
6862 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6863 		/* Nothing to do */
6864 		return;
6865 	}
6866 	sop = SCTP_BASE_INFO(udp_tun_socket);
6867 	soclose(sop);
6868 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6869 }
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * Create and bind the kernel UDP socket used for SCTP-over-UDP
	 * tunneling.  This function assumes sysctl caller holds
	 * sctp_sysctl_info_lock() for writting!  Returns 0 on success or
	 * an errno (EINVAL for no port, EALREADY if already running, or
	 * a socreate/sobind/udp_set_kernel_tunneling failure).
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook to route packets to our input routine */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we cant get the port */
exit_stage_left:
		/* sctp_over_udp_stop() closes the socket and clears the global */
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6923