xref: /freebsd/sys/netinet/sctputil.c (revision a3cf0ef5a295c885c895fabfd56470c0d1db322d)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 #include <netinet/sctp_bsd_addr.h>
52 
53 
54 #ifndef KTR_SCTP
55 #define KTR_SCTP KTR_SUBSYS
56 #endif
57 
58 void
59 sctp_sblog(struct sockbuf *sb,
60     struct sctp_tcb *stcb, int from, int incr)
61 {
62 	struct sctp_cwnd_log sctp_clog;
63 
64 	sctp_clog.x.sb.stcb = stcb;
65 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
66 	if (stcb)
67 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
68 	else
69 		sctp_clog.x.sb.stcb_sbcc = 0;
70 	sctp_clog.x.sb.incr = incr;
71 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
72 	    SCTP_LOG_EVENT_SB,
73 	    from,
74 	    sctp_clog.x.misc.log1,
75 	    sctp_clog.x.misc.log2,
76 	    sctp_clog.x.misc.log3,
77 	    sctp_clog.x.misc.log4);
78 }
79 
80 void
81 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
82 {
83 	struct sctp_cwnd_log sctp_clog;
84 
85 	sctp_clog.x.close.inp = (void *)inp;
86 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
87 	if (stcb) {
88 		sctp_clog.x.close.stcb = (void *)stcb;
89 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
90 	} else {
91 		sctp_clog.x.close.stcb = 0;
92 		sctp_clog.x.close.state = 0;
93 	}
94 	sctp_clog.x.close.loc = loc;
95 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
96 	    SCTP_LOG_EVENT_CLOSE,
97 	    0,
98 	    sctp_clog.x.misc.log1,
99 	    sctp_clog.x.misc.log2,
100 	    sctp_clog.x.misc.log3,
101 	    sctp_clog.x.misc.log4);
102 }
103 
104 
105 void
106 rto_logging(struct sctp_nets *net, int from)
107 {
108 	struct sctp_cwnd_log sctp_clog;
109 
110 	memset(&sctp_clog, 0, sizeof(sctp_clog));
111 	sctp_clog.x.rto.net = (void *)net;
112 	sctp_clog.x.rto.rtt = net->prev_rtt;
113 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
114 	    SCTP_LOG_EVENT_RTT,
115 	    from,
116 	    sctp_clog.x.misc.log1,
117 	    sctp_clog.x.misc.log2,
118 	    sctp_clog.x.misc.log3,
119 	    sctp_clog.x.misc.log4);
120 
121 }
122 
123 void
124 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
125 {
126 	struct sctp_cwnd_log sctp_clog;
127 
128 	sctp_clog.x.strlog.stcb = stcb;
129 	sctp_clog.x.strlog.n_tsn = tsn;
130 	sctp_clog.x.strlog.n_sseq = sseq;
131 	sctp_clog.x.strlog.e_tsn = 0;
132 	sctp_clog.x.strlog.e_sseq = 0;
133 	sctp_clog.x.strlog.strm = stream;
134 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
135 	    SCTP_LOG_EVENT_STRM,
136 	    from,
137 	    sctp_clog.x.misc.log1,
138 	    sctp_clog.x.misc.log2,
139 	    sctp_clog.x.misc.log3,
140 	    sctp_clog.x.misc.log4);
141 
142 }
143 
144 void
145 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
146 {
147 	struct sctp_cwnd_log sctp_clog;
148 
149 	sctp_clog.x.nagle.stcb = (void *)stcb;
150 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
151 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
152 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
153 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
154 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
155 	    SCTP_LOG_EVENT_NAGLE,
156 	    action,
157 	    sctp_clog.x.misc.log1,
158 	    sctp_clog.x.misc.log2,
159 	    sctp_clog.x.misc.log3,
160 	    sctp_clog.x.misc.log4);
161 }
162 
163 
164 void
165 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
166 {
167 	struct sctp_cwnd_log sctp_clog;
168 
169 	sctp_clog.x.sack.cumack = cumack;
170 	sctp_clog.x.sack.oldcumack = old_cumack;
171 	sctp_clog.x.sack.tsn = tsn;
172 	sctp_clog.x.sack.numGaps = gaps;
173 	sctp_clog.x.sack.numDups = dups;
174 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
175 	    SCTP_LOG_EVENT_SACK,
176 	    from,
177 	    sctp_clog.x.misc.log1,
178 	    sctp_clog.x.misc.log2,
179 	    sctp_clog.x.misc.log3,
180 	    sctp_clog.x.misc.log4);
181 }
182 
183 void
184 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
185 {
186 	struct sctp_cwnd_log sctp_clog;
187 
188 	memset(&sctp_clog, 0, sizeof(sctp_clog));
189 	sctp_clog.x.map.base = map;
190 	sctp_clog.x.map.cum = cum;
191 	sctp_clog.x.map.high = high;
192 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
193 	    SCTP_LOG_EVENT_MAP,
194 	    from,
195 	    sctp_clog.x.misc.log1,
196 	    sctp_clog.x.misc.log2,
197 	    sctp_clog.x.misc.log3,
198 	    sctp_clog.x.misc.log4);
199 }
200 
201 void
202 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
203     int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 
219 }
220 
221 
222 void
223 sctp_log_mb(struct mbuf *m, int from)
224 {
225 	struct sctp_cwnd_log sctp_clog;
226 
227 	sctp_clog.x.mb.mp = m;
228 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
229 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
230 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
231 	if (SCTP_BUF_IS_EXTENDED(m)) {
232 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
233 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
234 	} else {
235 		sctp_clog.x.mb.ext = 0;
236 		sctp_clog.x.mb.refcnt = 0;
237 	}
238 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
239 	    SCTP_LOG_EVENT_MBUF,
240 	    from,
241 	    sctp_clog.x.misc.log1,
242 	    sctp_clog.x.misc.log2,
243 	    sctp_clog.x.misc.log3,
244 	    sctp_clog.x.misc.log4);
245 }
246 
247 
248 void
249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250     int from)
251 {
252 	struct sctp_cwnd_log sctp_clog;
253 
254 	if (control == NULL) {
255 		SCTP_PRINTF("Gak log of NULL?\n");
256 		return;
257 	}
258 	sctp_clog.x.strlog.stcb = control->stcb;
259 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
260 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
261 	sctp_clog.x.strlog.strm = control->sinfo_stream;
262 	if (poschk != NULL) {
263 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
264 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
265 	} else {
266 		sctp_clog.x.strlog.e_tsn = 0;
267 		sctp_clog.x.strlog.e_sseq = 0;
268 	}
269 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
270 	    SCTP_LOG_EVENT_STRM,
271 	    from,
272 	    sctp_clog.x.misc.log1,
273 	    sctp_clog.x.misc.log2,
274 	    sctp_clog.x.misc.log3,
275 	    sctp_clog.x.misc.log4);
276 
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 
313 }
314 
/*
 * Snapshot the ownership state of every SCTP-related lock for the KTR
 * trace: TCB, INP, create, global info, socket and both socket-buffer
 * locks.  'inp' and 'stcb' may each be NULL; unknown locks are logged
 * as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock is a rwlock; test write ownership. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both test
		 * so_rcv.sb_mtx — this appears intentional, since FreeBSD's
		 * SOCK_LOCK is implemented on the receive buffer's mutex,
		 * but confirm against the socket-locking macros in use.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
359 
360 void
361 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
362 {
363 	struct sctp_cwnd_log sctp_clog;
364 
365 	memset(&sctp_clog, 0, sizeof(sctp_clog));
366 	sctp_clog.x.cwnd.net = net;
367 	sctp_clog.x.cwnd.cwnd_new_value = error;
368 	sctp_clog.x.cwnd.inflight = net->flight_size;
369 	sctp_clog.x.cwnd.cwnd_augment = burst;
370 	if (stcb->asoc.send_queue_cnt > 255)
371 		sctp_clog.x.cwnd.cnt_in_send = 255;
372 	else
373 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
374 	if (stcb->asoc.stream_queue_cnt > 255)
375 		sctp_clog.x.cwnd.cnt_in_str = 255;
376 	else
377 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
378 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
379 	    SCTP_LOG_EVENT_MAXBURST,
380 	    from,
381 	    sctp_clog.x.misc.log1,
382 	    sctp_clog.x.misc.log2,
383 	    sctp_clog.x.misc.log3,
384 	    sctp_clog.x.misc.log4);
385 
386 }
387 
388 void
389 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
390 {
391 	struct sctp_cwnd_log sctp_clog;
392 
393 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
394 	sctp_clog.x.rwnd.send_size = snd_size;
395 	sctp_clog.x.rwnd.overhead = overhead;
396 	sctp_clog.x.rwnd.new_rwnd = 0;
397 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
398 	    SCTP_LOG_EVENT_RWND,
399 	    from,
400 	    sctp_clog.x.misc.log1,
401 	    sctp_clog.x.misc.log2,
402 	    sctp_clog.x.misc.log3,
403 	    sctp_clog.x.misc.log4);
404 }
405 
406 void
407 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
408 {
409 	struct sctp_cwnd_log sctp_clog;
410 
411 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
412 	sctp_clog.x.rwnd.send_size = flight_size;
413 	sctp_clog.x.rwnd.overhead = overhead;
414 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
415 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
416 	    SCTP_LOG_EVENT_RWND,
417 	    from,
418 	    sctp_clog.x.misc.log1,
419 	    sctp_clog.x.misc.log2,
420 	    sctp_clog.x.misc.log3,
421 	    sctp_clog.x.misc.log4);
422 }
423 
424 void
425 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
426 {
427 	struct sctp_cwnd_log sctp_clog;
428 
429 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
430 	sctp_clog.x.mbcnt.size_change = book;
431 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
432 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
433 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
434 	    SCTP_LOG_EVENT_MBCNT,
435 	    from,
436 	    sctp_clog.x.misc.log1,
437 	    sctp_clog.x.misc.log2,
438 	    sctp_clog.x.misc.log3,
439 	    sctp_clog.x.misc.log4);
440 
441 }
442 
443 void
444 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
445 {
446 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
447 	    SCTP_LOG_MISC_EVENT,
448 	    from,
449 	    a, b, c, d);
450 }
451 
452 void
453 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
454 {
455 	struct sctp_cwnd_log sctp_clog;
456 
457 	sctp_clog.x.wake.stcb = (void *)stcb;
458 	sctp_clog.x.wake.wake_cnt = wake_cnt;
459 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
460 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
461 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
462 
463 	if (stcb->asoc.stream_queue_cnt < 0xff)
464 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
465 	else
466 		sctp_clog.x.wake.stream_qcnt = 0xff;
467 
468 	if (stcb->asoc.chunks_on_out_queue < 0xff)
469 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
470 	else
471 		sctp_clog.x.wake.chunks_on_oque = 0xff;
472 
473 	sctp_clog.x.wake.sctpflags = 0;
474 	/* set in the defered mode stuff */
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
476 		sctp_clog.x.wake.sctpflags |= 1;
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
478 		sctp_clog.x.wake.sctpflags |= 2;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
480 		sctp_clog.x.wake.sctpflags |= 4;
481 	/* what about the sb */
482 	if (stcb->sctp_socket) {
483 		struct socket *so = stcb->sctp_socket;
484 
485 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
486 	} else {
487 		sctp_clog.x.wake.sbflags = 0xff;
488 	}
489 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
490 	    SCTP_LOG_EVENT_WAKE,
491 	    from,
492 	    sctp_clog.x.misc.log1,
493 	    sctp_clog.x.misc.log2,
494 	    sctp_clog.x.misc.log3,
495 	    sctp_clog.x.misc.log4);
496 
497 }
498 
499 void
500 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
501 {
502 	struct sctp_cwnd_log sctp_clog;
503 
504 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
505 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
506 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
507 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
508 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
509 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
510 	sctp_clog.x.blk.sndlen = sendlen;
511 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
512 	    SCTP_LOG_EVENT_BLOCK,
513 	    from,
514 	    sctp_clog.x.misc.log1,
515 	    sctp_clog.x.misc.log2,
516 	    sctp_clog.x.misc.log3,
517 	    sctp_clog.x.misc.log4);
518 
519 }
520 
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/*
	 * Stub: stat-log extraction is not implemented here; the log is
	 * expected to be read via ktrdump instead.
	 */
	(void)optval;
	(void)optsize;
	return (0);
}
527 
528 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by the auditor. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in the ring; wraps back to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
531 
532 static
533 void
534 sctp_print_audit_report(void)
535 {
536 	int i;
537 	int cnt;
538 
539 	cnt = 0;
540 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
541 		if ((sctp_audit_data[i][0] == 0xe0) &&
542 		    (sctp_audit_data[i][1] == 0x01)) {
543 			cnt = 0;
544 			SCTP_PRINTF("\n");
545 		} else if (sctp_audit_data[i][0] == 0xf0) {
546 			cnt = 0;
547 			SCTP_PRINTF("\n");
548 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			SCTP_PRINTF("\n");
551 			cnt = 0;
552 		}
553 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
554 		    (uint32_t) sctp_audit_data[i][1]);
555 		cnt++;
556 		if ((cnt % 14) == 0)
557 			SCTP_PRINTF("\n");
558 	}
559 	for (i = 0; i < sctp_audit_indx; i++) {
560 		if ((sctp_audit_data[i][0] == 0xe0) &&
561 		    (sctp_audit_data[i][1] == 0x01)) {
562 			cnt = 0;
563 			SCTP_PRINTF("\n");
564 		} else if (sctp_audit_data[i][0] == 0xf0) {
565 			cnt = 0;
566 			SCTP_PRINTF("\n");
567 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			SCTP_PRINTF("\n");
570 			cnt = 0;
571 		}
572 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
573 		    (uint32_t) sctp_audit_data[i][1]);
574 		cnt++;
575 		if ((cnt % 14) == 0)
576 			SCTP_PRINTF("\n");
577 	}
578 	SCTP_PRINTF("\n");
579 }
580 
/*
 * Audit the association's flight-size bookkeeping and record the result
 * in the audit ring. Entry tag 0xAA carries the caller id 'from';
 * 0xAF-tagged entries flag problems (0x01 no inp, 0x02 no stcb, 0xA1
 * retran count mismatch, 0xA2 flight mismatch, 0xA3 per-net flight
 * mismatch, 0xA5 flight-count mismatch). Detected inconsistencies are
 * corrected in place and a full report is printed. 'net' is currently
 * unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Mark entry into the auditor with the caller's id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: nothing to audit without an endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: nothing to audit without an association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* Record the retransmit count claimed by the association. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* Recount retransmits and in-flight bytes from the sent queue. */
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* Retran count drifted: log 0xAF/0xA1 and correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* Total flight drifted: log 0xAF/0xA2 and correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* Flight chunk count drifted: log 0xAF/0xA5 and correct. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: sum of per-destination flight sizes vs. the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* Per-net sums disagree: log 0xAF/0xA3 and rebuild them. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* At least one inconsistency was found; dump the ring. */
		sctp_print_audit_report();
	}
}
710 
711 void
712 sctp_audit_log(uint8_t ev, uint8_t fd)
713 {
714 
715 	sctp_audit_data[sctp_audit_indx][0] = ev;
716 	sctp_audit_data[sctp_audit_indx][1] = fd;
717 	sctp_audit_indx++;
718 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
719 		sctp_audit_indx = 0;
720 	}
721 }
722 
723 #endif
724 
725 /*
726  * sctp_stop_timers_for_shutdown() should be called
727  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
728  * state to make sure that all timers are stopped.
729  */
730 void
731 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
732 {
733 	struct sctp_association *asoc;
734 	struct sctp_nets *net;
735 
736 	asoc = &stcb->asoc;
737 
738 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
739 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
740 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
741 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
742 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
743 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
744 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
745 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
746 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
747 	}
748 }
749 
750 /*
751  * a list of sizes based on typical mtu's, used only if next hop size not
752  * returned.
753  */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;

	/* Values at or below the smallest table entry pass through. */
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Walk forward until the first entry that val fits into. */
	idx = 1;
	while ((idx < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t))) &&
	    (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	/* The entry just before it is the largest one below val. */
	return (sctp_mtu_sizes[idx - 1]);
}
794 
795 /*
796  * Return the smallest MTU larger than val. If there is no
797  * entry, just return val.
798  */
799 uint32_t
800 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
801 {
802 	/* select another MTU that is just bigger than this one */
803 	uint32_t i;
804 
805 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
806 		if (val < sctp_mtu_sizes[i]) {
807 			return (sctp_mtu_sizes[i]);
808 		}
809 	}
810 	return (val);
811 }
812 
813 void
814 sctp_fill_random_store(struct sctp_pcb *m)
815 {
816 	/*
817 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
818 	 * our counter. The result becomes our good random numbers and we
819 	 * then setup to give these out. Note that we do no locking to
820 	 * protect this. This is ok, since if competing folks call this we
821 	 * will get more gobbled gook in the random store which is what we
822 	 * want. There is a danger that two guys will use the same random
823 	 * numbers, but thats ok too since that is random as well :->
824 	 */
825 	m->store_at = 0;
826 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
827 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
828 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
829 	m->random_counter++;
830 }
831 
832 uint32_t
833 sctp_select_initial_TSN(struct sctp_pcb *inp)
834 {
835 	/*
836 	 * A true implementation should use random selection process to get
837 	 * the initial stream sequence number, using RFC1750 as a good
838 	 * guideline
839 	 */
840 	uint32_t x, *xp;
841 	uint8_t *p;
842 	int store_at, new_store;
843 
844 	if (inp->initial_sequence_debug != 0) {
845 		uint32_t ret;
846 
847 		ret = inp->initial_sequence_debug;
848 		inp->initial_sequence_debug++;
849 		return (ret);
850 	}
851 retry:
852 	store_at = inp->store_at;
853 	new_store = store_at + sizeof(uint32_t);
854 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
855 		new_store = 0;
856 	}
857 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
858 		goto retry;
859 	}
860 	if (new_store == 0) {
861 		/* Refill the random store */
862 		sctp_fill_random_store(inp);
863 	}
864 	p = &inp->random_store[store_at];
865 	xp = (uint32_t *) p;
866 	x = *xp;
867 	return (x);
868 }
869 
870 uint32_t
871 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
872 {
873 	uint32_t x, not_done;
874 	struct timeval now;
875 
876 	(void)SCTP_GETTIME_TIMEVAL(&now);
877 	not_done = 1;
878 	while (not_done) {
879 		x = sctp_select_initial_TSN(&inp->sctp_ep);
880 		if (x == 0) {
881 			/* we never use 0 */
882 			continue;
883 		}
884 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
885 			not_done = 0;
886 		}
887 	}
888 	return (x);
889 }
890 
891 int
892 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
893     uint32_t override_tag, uint32_t vrf_id)
894 {
895 	struct sctp_association *asoc;
896 
897 	/*
898 	 * Anything set to zero is taken care of by the allocation routine's
899 	 * bzero
900 	 */
901 
902 	/*
903 	 * Up front select what scoping to apply on addresses I tell my peer
904 	 * Not sure what to do with these right now, we will need to come up
905 	 * with a way to set them. We may need to pass them through from the
906 	 * caller in the sctp_aloc_assoc() function.
907 	 */
908 	int i;
909 
910 	asoc = &stcb->asoc;
911 	/* init all variables to a known value. */
912 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
913 	asoc->max_burst = m->sctp_ep.max_burst;
914 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
915 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
916 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
917 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
918 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
919 	asoc->sctp_frag_point = m->sctp_frag_point;
920 #ifdef INET
921 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
922 #else
923 	asoc->default_tos = 0;
924 #endif
925 
926 #ifdef INET6
927 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
928 #else
929 	asoc->default_flowlabel = 0;
930 #endif
931 	asoc->sb_send_resv = 0;
932 	if (override_tag) {
933 		asoc->my_vtag = override_tag;
934 	} else {
935 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
936 	}
937 	/* Get the nonce tags */
938 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
939 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
940 	asoc->vrf_id = vrf_id;
941 
942 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
943 		asoc->hb_is_disabled = 1;
944 	else
945 		asoc->hb_is_disabled = 0;
946 
947 #ifdef SCTP_ASOCLOG_OF_TSNS
948 	asoc->tsn_in_at = 0;
949 	asoc->tsn_out_at = 0;
950 	asoc->tsn_in_wrapped = 0;
951 	asoc->tsn_out_wrapped = 0;
952 	asoc->cumack_log_at = 0;
953 	asoc->cumack_log_atsnt = 0;
954 #endif
955 #ifdef SCTP_FS_SPEC_LOG
956 	asoc->fs_index = 0;
957 #endif
958 	asoc->refcnt = 0;
959 	asoc->assoc_up_sent = 0;
960 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
961 	    sctp_select_initial_TSN(&m->sctp_ep);
962 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
963 	/* we are optimisitic here */
964 	asoc->peer_supports_pktdrop = 1;
965 	asoc->peer_supports_nat = 0;
966 	asoc->sent_queue_retran_cnt = 0;
967 
968 	/* for CMT */
969 	asoc->last_net_cmt_send_started = NULL;
970 
971 	/* This will need to be adjusted */
972 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
973 	asoc->last_acked_seq = asoc->init_seq_number - 1;
974 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
975 	asoc->asconf_seq_in = asoc->last_acked_seq;
976 
977 	/* here we are different, we hold the next one we expect */
978 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
979 
980 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
981 	asoc->initial_rto = m->sctp_ep.initial_rto;
982 
983 	asoc->max_init_times = m->sctp_ep.max_init_times;
984 	asoc->max_send_times = m->sctp_ep.max_send_times;
985 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
986 	asoc->free_chunk_cnt = 0;
987 
988 	asoc->iam_blocking = 0;
989 	/* ECN Nonce initialization */
990 	asoc->context = m->sctp_context;
991 	asoc->def_send = m->def_send;
992 	asoc->ecn_nonce_allowed = 0;
993 	asoc->receiver_nonce_sum = 1;
994 	asoc->nonce_sum_expect_base = 1;
995 	asoc->nonce_sum_check = 1;
996 	asoc->nonce_resync_tsn = 0;
997 	asoc->nonce_wait_for_ecne = 0;
998 	asoc->nonce_wait_tsn = 0;
999 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1000 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1001 	asoc->pr_sctp_cnt = 0;
1002 	asoc->total_output_queue_size = 0;
1003 
1004 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1005 		struct in6pcb *inp6;
1006 
1007 		/* Its a V6 socket */
1008 		inp6 = (struct in6pcb *)m;
1009 		asoc->ipv6_addr_legal = 1;
1010 		/* Now look at the binding flag to see if V4 will be legal */
1011 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1012 			asoc->ipv4_addr_legal = 1;
1013 		} else {
1014 			/* V4 addresses are NOT legal on the association */
1015 			asoc->ipv4_addr_legal = 0;
1016 		}
1017 	} else {
1018 		/* Its a V4 socket, no - V6 */
1019 		asoc->ipv4_addr_legal = 1;
1020 		asoc->ipv6_addr_legal = 0;
1021 	}
1022 
1023 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1024 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1025 
1026 	asoc->smallest_mtu = m->sctp_frag_point;
1027 	asoc->minrto = m->sctp_ep.sctp_minrto;
1028 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1029 
1030 	asoc->locked_on_sending = NULL;
1031 	asoc->stream_locked_on = 0;
1032 	asoc->ecn_echo_cnt_onq = 0;
1033 	asoc->stream_locked = 0;
1034 
1035 	asoc->send_sack = 1;
1036 
1037 	LIST_INIT(&asoc->sctp_restricted_addrs);
1038 
1039 	TAILQ_INIT(&asoc->nets);
1040 	TAILQ_INIT(&asoc->pending_reply_queue);
1041 	TAILQ_INIT(&asoc->asconf_ack_sent);
1042 	/* Setup to fill the hb random cache at first HB */
1043 	asoc->hb_random_idx = 4;
1044 
1045 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1046 
1047 	/*
1048 	 * JRS - Pick the default congestion control module based on the
1049 	 * sysctl.
1050 	 */
1051 	switch (m->sctp_ep.sctp_default_cc_module) {
1052 		/* JRS - Standard TCP congestion control */
1053 	case SCTP_CC_RFC2581:
1054 		{
1055 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1056 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1064 			break;
1065 		}
1066 		/* JRS - High Speed TCP congestion control (Floyd) */
1067 	case SCTP_CC_HSTCP:
1068 		{
1069 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1070 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1078 			break;
1079 		}
1080 		/* JRS - HTCP congestion control */
1081 	case SCTP_CC_HTCP:
1082 		{
1083 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1084 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1092 			break;
1093 		}
1094 		/* JRS - By default, use RFC2581 */
1095 	default:
1096 		{
1097 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1098 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1100 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1101 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1102 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1103 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1104 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1105 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1106 			break;
1107 		}
1108 	}
1109 
1110 	/*
1111 	 * Now the stream parameters, here we allocate space for all streams
1112 	 * that we request by default.
1113 	 */
1114 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1115 	    m->sctp_ep.pre_open_stream_count;
1116 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1117 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1118 	    SCTP_M_STRMO);
1119 	if (asoc->strmout == NULL) {
1120 		/* big trouble no memory */
1121 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1122 		return (ENOMEM);
1123 	}
1124 	for (i = 0; i < asoc->streamoutcnt; i++) {
1125 		/*
1126 		 * inbound side must be set to 0xffff, also NOTE when we get
1127 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1128 		 * count (streamoutcnt) but first check if we sent to any of
1129 		 * the upper streams that were dropped (if some were). Those
1130 		 * that were dropped must be notified to the upper layer as
1131 		 * failed to send.
1132 		 */
1133 		asoc->strmout[i].next_sequence_sent = 0x0;
1134 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1135 		asoc->strmout[i].stream_no = i;
1136 		asoc->strmout[i].last_msg_incomplete = 0;
1137 		asoc->strmout[i].next_spoke.tqe_next = 0;
1138 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1139 	}
1140 	/* Now the mapping array */
1141 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1142 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1143 	    SCTP_M_MAP);
1144 	if (asoc->mapping_array == NULL) {
1145 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147 		return (ENOMEM);
1148 	}
1149 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1150 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1151 	    SCTP_M_MAP);
1152 	if (asoc->nr_mapping_array == NULL) {
1153 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1155 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1156 		return (ENOMEM);
1157 	}
1158 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1159 
1160 	/* Now the init of the other outqueues */
1161 	TAILQ_INIT(&asoc->free_chunks);
1162 	TAILQ_INIT(&asoc->out_wheel);
1163 	TAILQ_INIT(&asoc->control_send_queue);
1164 	TAILQ_INIT(&asoc->asconf_send_queue);
1165 	TAILQ_INIT(&asoc->send_queue);
1166 	TAILQ_INIT(&asoc->sent_queue);
1167 	TAILQ_INIT(&asoc->reasmqueue);
1168 	TAILQ_INIT(&asoc->resetHead);
1169 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1170 	TAILQ_INIT(&asoc->asconf_queue);
1171 	/* authentication fields */
1172 	asoc->authinfo.random = NULL;
1173 	asoc->authinfo.active_keyid = 0;
1174 	asoc->authinfo.assoc_key = NULL;
1175 	asoc->authinfo.assoc_keyid = 0;
1176 	asoc->authinfo.recv_key = NULL;
1177 	asoc->authinfo.recv_keyid = 0;
1178 	LIST_INIT(&asoc->shared_keys);
1179 	asoc->marked_retrans = 0;
1180 	asoc->timoinit = 0;
1181 	asoc->timodata = 0;
1182 	asoc->timosack = 0;
1183 	asoc->timoshutdown = 0;
1184 	asoc->timoheartbeat = 0;
1185 	asoc->timocookie = 0;
1186 	asoc->timoshutdownack = 0;
1187 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188 	asoc->discontinuity_time = asoc->start_time;
1189 	/*
1190 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1191 	 * freed later when the association is freed.
1192 	 */
1193 	return (0);
1194 }
1195 
1196 void
1197 sctp_print_mapping_array(struct sctp_association *asoc)
1198 {
1199 	unsigned int i, limit;
1200 
1201 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1202 	    asoc->mapping_array_size,
1203 	    asoc->mapping_array_base_tsn,
1204 	    asoc->cumulative_tsn,
1205 	    asoc->highest_tsn_inside_map,
1206 	    asoc->highest_tsn_inside_nr_map);
1207 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1208 		if (asoc->mapping_array[limit - 1]) {
1209 			break;
1210 		}
1211 	}
1212 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1213 	for (i = 0; i < limit; i++) {
1214 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1215 		if (((i + 1) % 16) == 0)
1216 			printf("\n");
1217 	}
1218 	if (limit % 16)
1219 		printf("\n");
1220 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1221 		if (asoc->nr_mapping_array[limit - 1]) {
1222 			break;
1223 		}
1224 	}
1225 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1226 	for (i = 0; i < limit; i++) {
1227 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1228 	}
1229 	if (limit % 16)
1230 		printf("\n");
1231 }
1232 
1233 int
1234 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1235 {
1236 	/* mapping array needs to grow */
1237 	uint8_t *new_array1, *new_array2;
1238 	uint32_t new_size;
1239 
1240 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1241 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1242 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1243 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1244 		/* can't get more, forget it */
1245 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1246 		if (new_array1) {
1247 			SCTP_FREE(new_array1, SCTP_M_MAP);
1248 		}
1249 		if (new_array2) {
1250 			SCTP_FREE(new_array2, SCTP_M_MAP);
1251 		}
1252 		return (-1);
1253 	}
1254 	memset(new_array1, 0, new_size);
1255 	memset(new_array2, 0, new_size);
1256 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1257 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1258 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1259 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1260 	asoc->mapping_array = new_array1;
1261 	asoc->nr_mapping_array = new_array2;
1262 	asoc->mapping_array_size = new_size;
1263 	return (0);
1264 }
1265 
1266 
/*
 * Walk every endpoint (inp) and association (stcb) that matches the
 * iterator's pcb_flags/pcb_features/asoc_state filters, invoking the
 * caller-supplied callbacks:
 *   function_inp      - once per matching endpoint (may request a skip),
 *   function_assoc    - once per matching association,
 *   function_inp_end  - after the last association of an endpoint,
 *   function_atend    - once when the whole iteration finishes.
 * The iterator structure itself is freed here on completion.
 *
 * Locking: runs with the INP-INFO read lock and the ITERATOR lock held;
 * both are dropped periodically (every SCTP_ITERATOR_MAX_AT_ONCE
 * associations) to let other threads make progress, after which the
 * sctp_it_ctl.iterator_flags are re-checked for stop/exit requests.
 * The exact lock/refcount ordering below is deliberate -- do not
 * reorder statements.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above on first entry. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold the stcb via refcnt (not its lock) across
			 * the lock drop so it cannot be freed under us.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire in the original order and drop the holds. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1418 
/*
 * Drain the global iterator work queue: pop each queued iterator and
 * run it via sctp_iterator_work() (which consumes/frees it).  The
 * caller must hold the iterator WQ lock; it is dropped around each
 * unit of work (and the vnet context is set for it) and re-acquired
 * before touching the queue again.  sctp_it_ctl.cur_it tracks the
 * iterator currently being serviced; iterator_running is the
 * "worker is active" flag.  Exits early if MUST_EXIT is requested.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it = NULL;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
	while (it) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* sctp_iterator_work() frees 'it' when it completes. */
		sctp_iterator_work(it);

		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			sctp_it_ctl.cur_it = NULL;
			break;
		}
		/* sa_ignore FREED_MEMORY */
		sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1447 
1448 
1449 static void
1450 sctp_handle_addr_wq(void)
1451 {
1452 	/* deal with the ADDR wq from the rtsock calls */
1453 	struct sctp_laddr *wi;
1454 	struct sctp_asconf_iterator *asc;
1455 
1456 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1457 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1458 	if (asc == NULL) {
1459 		/* Try later, no memory */
1460 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1461 		    (struct sctp_inpcb *)NULL,
1462 		    (struct sctp_tcb *)NULL,
1463 		    (struct sctp_nets *)NULL);
1464 		return;
1465 	}
1466 	LIST_INIT(&asc->list_of_work);
1467 	asc->cnt = 0;
1468 
1469 	SCTP_WQ_ADDR_LOCK();
1470 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1471 	while (wi != NULL) {
1472 		LIST_REMOVE(wi, sctp_nxt_addr);
1473 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1474 		asc->cnt++;
1475 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1476 	}
1477 	SCTP_WQ_ADDR_UNLOCK();
1478 
1479 	if (asc->cnt == 0) {
1480 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1481 	} else {
1482 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1483 		    sctp_asconf_iterator_stcb,
1484 		    NULL,	/* No ep end for boundall */
1485 		    SCTP_PCB_FLAGS_BOUNDALL,
1486 		    SCTP_PCB_ANY_FEATURES,
1487 		    SCTP_ASOC_ANY_STATE,
1488 		    (void *)asc, 0,
1489 		    sctp_asconf_iterator_end, NULL, 0);
1490 	}
1491 }
1492 
/*
 * NOTE(review): file-scope scratch variables written by
 * sctp_timeout_handler() (T3 retransmit case).  They are neither
 * 'static' nor protected by a lock, so concurrent timeouts on
 * different associations can race on them; they appear to exist as
 * debug aids only -- consider making them locals in the handler.
 */
int retcode = 0;
int cur_oerr = 0;
1495 
/*
 * Central callout handler for every SCTP timer type.  't' is the
 * struct sctp_timer embedded in the owning object; its ep/tcb/net
 * pointers identify the endpoint, association and destination the
 * timer belongs to (any of which may be NULL depending on type).
 *
 * The function first validates the timer (self pointer, type, and
 * that the inp/stcb are still alive -- taking an inp reference and a
 * temporary stcb refcount while checking), then locks the stcb and
 * dispatches on tmr->type.  The three exit labels share the cleanup:
 *   get_out     - unlock the stcb,
 *   out_decr    - additionally drop the inp reference,
 *   out_no_decr - reached when the case itself already released (or
 *                 freed) the inp/stcb, so nothing more is dropped.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/* tmr->stopped_from records how far validation got, for debugging. */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		/* Hold the inp while we work; dropped at out_decr. */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the stcb with a refcount while we validate it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Take the tcb lock, then drop the temporary refcount. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's secret-key material. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Guard expired: abort the association outright. */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1961 
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/*
	 * Arm the timer of type t_type. Each case below selects the
	 * sctp_timer structure to use (tmr) and computes its duration
	 * (to_ticks); the common code at the bottom then starts it, unless
	 * that timer is already pending. Only the address work-queue timer
	 * may be started without an endpoint.
	 */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* no RTT measured yet; fall back to initial RTO */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default, usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * Unconfirmed addresses exist: run the HB
				 * logic now and deliberately clear net so
				 * the generic (net == NULL) delay path
				 * below is taken.
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			if (stcb->asoc.hb_random_idx > 3) {
				/* jitter pool exhausted; refill 4 bytes */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 255 ms; RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				/* HB disabled and nothing left to confirm */
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here; usually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* re-uses the stream-reset timer slot */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* note: this timer requires net to be NULL */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2316 
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/*
	 * Stop the timer of type t_type, recording 'from' (the caller's
	 * location code) in the timer for debugging. Because several
	 * logical timer types share one sctp_timer structure (e.g. cookie
	 * and send on the rxt_timer), a timer whose recorded type differs
	 * from t_type is left running.
	 */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares the strreset slot).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* keep the outstanding send-timer count in sync */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2480 
2481 uint32_t
2482 sctp_calculate_len(struct mbuf *m)
2483 {
2484 	uint32_t tlen = 0;
2485 	struct mbuf *at;
2486 
2487 	at = m;
2488 	while (at) {
2489 		tlen += SCTP_BUF_LEN(at);
2490 		at = SCTP_BUF_NEXT(at);
2491 	}
2492 	return (tlen);
2493 }
2494 
2495 void
2496 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2497     struct sctp_association *asoc, uint32_t mtu)
2498 {
2499 	/*
2500 	 * Reset the P-MTU size on this association, this involves changing
2501 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2502 	 * allow the DF flag to be cleared.
2503 	 */
2504 	struct sctp_tmit_chunk *chk;
2505 	unsigned int eff_mtu, ovh;
2506 
2507 	asoc->smallest_mtu = mtu;
2508 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2509 		ovh = SCTP_MIN_OVERHEAD;
2510 	} else {
2511 		ovh = SCTP_MIN_V4_OVERHEAD;
2512 	}
2513 	eff_mtu = mtu - ovh;
2514 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2515 		if (chk->send_size > eff_mtu) {
2516 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2517 		}
2518 	}
2519 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2520 		if (chk->send_size > eff_mtu) {
2521 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2522 		}
2523 	}
2524 }
2525 
2526 
2527 /*
2528  * given an association and starting time of the current RTT period return
2529  * RTO in number of msecs net should point to the current network
2530  */
2531 uint32_t
2532 sctp_calculate_rto(struct sctp_tcb *stcb,
2533     struct sctp_association *asoc,
2534     struct sctp_nets *net,
2535     struct timeval *told,
2536     int safe)
2537 {
2538 	/*-
2539 	 * given an association and the starting time of the current RTT
2540 	 * period (in value1/value2) return RTO in number of msecs.
2541 	 */
2542 	int calc_time = 0;
2543 	int o_calctime;
2544 	uint32_t new_rto = 0;
2545 	int first_measure = 0;
2546 	struct timeval now, then, *old;
2547 
2548 	/* Copy it out for sparc64 */
2549 	if (safe == sctp_align_unsafe_makecopy) {
2550 		old = &then;
2551 		memcpy(&then, told, sizeof(struct timeval));
2552 	} else if (safe == sctp_align_safe_nocopy) {
2553 		old = told;
2554 	} else {
2555 		/* error */
2556 		SCTP_PRINTF("Huh, bad rto calc call\n");
2557 		return (0);
2558 	}
2559 	/************************/
2560 	/* 1. calculate new RTT */
2561 	/************************/
2562 	/* get the current time */
2563 	(void)SCTP_GETTIME_TIMEVAL(&now);
2564 	/* compute the RTT value */
2565 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2566 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2567 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2568 			calc_time += (((u_long)now.tv_usec -
2569 			    (u_long)old->tv_usec) / 1000);
2570 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2571 			/* Borrow 1,000ms from current calculation */
2572 			calc_time -= 1000;
2573 			/* Add in the slop over */
2574 			calc_time += ((int)now.tv_usec / 1000);
2575 			/* Add in the pre-second ms's */
2576 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2577 		}
2578 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2579 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2580 			calc_time = ((u_long)now.tv_usec -
2581 			    (u_long)old->tv_usec) / 1000;
2582 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2583 			/* impossible .. garbage in nothing out */
2584 			goto calc_rto;
2585 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2586 			/*
2587 			 * We have to have 1 usec :-D this must be the
2588 			 * loopback.
2589 			 */
2590 			calc_time = 1;
2591 		} else {
2592 			/* impossible .. garbage in nothing out */
2593 			goto calc_rto;
2594 		}
2595 	} else {
2596 		/* Clock wrapped? */
2597 		goto calc_rto;
2598 	}
2599 	/***************************/
2600 	/* 2. update RTTVAR & SRTT */
2601 	/***************************/
2602 	net->rtt = o_calctime = calc_time;
2603 	/* this is Van Jacobson's integer version */
2604 	if (net->RTO_measured) {
2605 		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
2606 								 * shift=3 */
2607 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2608 			rto_logging(net, SCTP_LOG_RTTVAR);
2609 		}
2610 		net->prev_rtt = o_calctime;
2611 		net->lastsa += calc_time;	/* add 7/8th into sa when
2612 						 * shift=3 */
2613 		if (calc_time < 0) {
2614 			calc_time = -calc_time;
2615 		}
2616 		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
2617 									 * VAR shift=2 */
2618 		net->lastsv += calc_time;
2619 		if (net->lastsv == 0) {
2620 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2621 		}
2622 	} else {
2623 		/* First RTO measurment */
2624 		net->RTO_measured = 1;
2625 		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
2626 								 * shift=3 */
2627 		net->lastsv = calc_time;
2628 		if (net->lastsv == 0) {
2629 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2630 		}
2631 		first_measure = 1;
2632 		net->prev_rtt = o_calctime;
2633 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2634 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2635 		}
2636 	}
2637 calc_rto:
2638 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2639 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2640 	    (stcb->asoc.sat_network_lockout == 0)) {
2641 		stcb->asoc.sat_network = 1;
2642 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2643 		stcb->asoc.sat_network = 0;
2644 		stcb->asoc.sat_network_lockout = 1;
2645 	}
2646 	/* bound it, per C6/C7 in Section 5.3.1 */
2647 	if (new_rto < stcb->asoc.minrto) {
2648 		new_rto = stcb->asoc.minrto;
2649 	}
2650 	if (new_rto > stcb->asoc.maxrto) {
2651 		new_rto = stcb->asoc.maxrto;
2652 	}
2653 	/* we are now returning the RTO */
2654 	return (new_rto);
2655 }
2656 
2657 /*
2658  * return a pointer to a contiguous piece of data from the given mbuf chain
2659  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2660  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2661  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2662  */
2663 caddr_t
2664 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2665 {
2666 	uint32_t count;
2667 	uint8_t *ptr;
2668 
2669 	ptr = in_ptr;
2670 	if ((off < 0) || (len <= 0))
2671 		return (NULL);
2672 
2673 	/* find the desired start location */
2674 	while ((m != NULL) && (off > 0)) {
2675 		if (off < SCTP_BUF_LEN(m))
2676 			break;
2677 		off -= SCTP_BUF_LEN(m);
2678 		m = SCTP_BUF_NEXT(m);
2679 	}
2680 	if (m == NULL)
2681 		return (NULL);
2682 
2683 	/* is the current mbuf large enough (eg. contiguous)? */
2684 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2685 		return (mtod(m, caddr_t)+off);
2686 	} else {
2687 		/* else, it spans more than one mbuf, so save a temp copy... */
2688 		while ((m != NULL) && (len > 0)) {
2689 			count = min(SCTP_BUF_LEN(m) - off, len);
2690 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2691 			len -= count;
2692 			ptr += count;
2693 			off = 0;
2694 			m = SCTP_BUF_NEXT(m);
2695 		}
2696 		if ((m == NULL) && (len > 0))
2697 			return (NULL);
2698 		else
2699 			return ((caddr_t)in_ptr);
2700 	}
2701 }
2702 
2703 
2704 
2705 struct sctp_paramhdr *
2706 sctp_get_next_param(struct mbuf *m,
2707     int offset,
2708     struct sctp_paramhdr *pull,
2709     int pull_limit)
2710 {
2711 	/* This just provides a typed signature to Peter's Pull routine */
2712 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2713 	    (uint8_t *) pull));
2714 }
2715 
2716 
2717 int
2718 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2719 {
2720 	/*
2721 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2722 	 * padlen is > 3 this routine will fail.
2723 	 */
2724 	uint8_t *dp;
2725 	int i;
2726 
2727 	if (padlen > 3) {
2728 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2729 		return (ENOBUFS);
2730 	}
2731 	if (padlen <= M_TRAILINGSPACE(m)) {
2732 		/*
2733 		 * The easy way. We hope the majority of the time we hit
2734 		 * here :)
2735 		 */
2736 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2737 		SCTP_BUF_LEN(m) += padlen;
2738 	} else {
2739 		/* Hard way we must grow the mbuf */
2740 		struct mbuf *tmp;
2741 
2742 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2743 		if (tmp == NULL) {
2744 			/* Out of space GAK! we are in big trouble. */
2745 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2746 			return (ENOSPC);
2747 		}
2748 		/* setup and insert in middle */
2749 		SCTP_BUF_LEN(tmp) = padlen;
2750 		SCTP_BUF_NEXT(tmp) = NULL;
2751 		SCTP_BUF_NEXT(m) = tmp;
2752 		dp = mtod(tmp, uint8_t *);
2753 	}
2754 	/* zero out the pad */
2755 	for (i = 0; i < padlen; i++) {
2756 		*dp = 0;
2757 		dp++;
2758 	}
2759 	return (0);
2760 }
2761 
2762 int
2763 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2764 {
2765 	/* find the last mbuf in chain and pad it */
2766 	struct mbuf *m_at;
2767 
2768 	m_at = m;
2769 	if (last_mbuf) {
2770 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2771 	} else {
2772 		while (m_at) {
2773 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2774 				return (sctp_add_pad_tombuf(m_at, padval));
2775 			}
2776 			m_at = SCTP_BUF_NEXT(m_at);
2777 		}
2778 	}
2779 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2780 	return (EFAULT);
2781 }
2782 
/*
 * Queue an SCTP_ASSOC_CHANGE notification ('event' with error code
 * 'error') on the association's socket. For one-to-one style (or
 * connected UDP-style) sockets, COMM_LOST / CANT_STR_ASSOC additionally
 * set so_error and wake any sleepers so blocked callers see the failure.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* never got past the handshake: connection refused */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock if the caller does not already
		 * hold it; the tcb lock must be dropped/reacquired around
		 * it, with a refcount hold so the tcb cannot go away.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket closed while we slept; bail */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* fill in the sctp_assoc_change notification body */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* same conditional socket-lock dance as above */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2900 
2901 static void
2902 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2903     struct sockaddr *sa, uint32_t error)
2904 {
2905 	struct mbuf *m_notify;
2906 	struct sctp_paddr_change *spc;
2907 	struct sctp_queued_to_read *control;
2908 
2909 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2910 		/* event not enabled */
2911 		return;
2912 	}
2913 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2914 	if (m_notify == NULL)
2915 		return;
2916 	SCTP_BUF_LEN(m_notify) = 0;
2917 	spc = mtod(m_notify, struct sctp_paddr_change *);
2918 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2919 	spc->spc_flags = 0;
2920 	spc->spc_length = sizeof(struct sctp_paddr_change);
2921 	switch (sa->sa_family) {
2922 	case AF_INET:
2923 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2924 		break;
2925 #ifdef INET6
2926 	case AF_INET6:
2927 		{
2928 			struct sockaddr_in6 *sin6;
2929 
2930 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2931 
2932 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2933 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2934 				if (sin6->sin6_scope_id == 0) {
2935 					/* recover scope_id for user */
2936 					(void)sa6_recoverscope(sin6);
2937 				} else {
2938 					/* clear embedded scope_id for user */
2939 					in6_clearscope(&sin6->sin6_addr);
2940 				}
2941 			}
2942 			break;
2943 		}
2944 #endif
2945 	default:
2946 		/* TSNH */
2947 		break;
2948 	}
2949 	spc->spc_state = state;
2950 	spc->spc_error = error;
2951 	spc->spc_assoc_id = sctp_get_associd(stcb);
2952 
2953 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2954 	SCTP_BUF_NEXT(m_notify) = NULL;
2955 
2956 	/* append to socket */
2957 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2958 	    0, 0, 0, 0, 0, 0,
2959 	    m_notify);
2960 	if (control == NULL) {
2961 		/* no memory */
2962 		sctp_m_freem(m_notify);
2963 		return;
2964 	}
2965 	control->length = SCTP_BUF_LEN(m_notify);
2966 	control->spec_flags = M_NOTIFICATION;
2967 	/* not that we need this */
2968 	control->tail_mbuf = m_notify;
2969 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2970 	    control,
2971 	    &stcb->sctp_socket->so_rcv, 1,
2972 	    SCTP_READ_LOCK_NOT_HELD,
2973 	    SCTP_SO_NOT_LOCKED);
2974 }
2975 
2976 
2977 static void
2978 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
2979     struct sctp_tmit_chunk *chk, int so_locked
2980 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2981     SCTP_UNUSED
2982 #endif
2983 )
2984 {
2985 	struct mbuf *m_notify;
2986 	struct sctp_send_failed *ssf;
2987 	struct sctp_queued_to_read *control;
2988 	int length;
2989 
2990 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
2991 		/* event not enabled */
2992 		return;
2993 	}
2994 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2995 	if (m_notify == NULL)
2996 		/* no space left */
2997 		return;
2998 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2999 	length -= sizeof(struct sctp_data_chunk);
3000 	SCTP_BUF_LEN(m_notify) = 0;
3001 	ssf = mtod(m_notify, struct sctp_send_failed *);
3002 	ssf->ssf_type = SCTP_SEND_FAILED;
3003 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3004 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3005 	else
3006 		ssf->ssf_flags = SCTP_DATA_SENT;
3007 	ssf->ssf_length = length;
3008 	ssf->ssf_error = error;
3009 	/* not exactly what the user sent in, but should be close :) */
3010 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3011 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3012 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
3013 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3014 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3015 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
3016 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3017 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3018 
3019 	if (chk->data) {
3020 		/*
3021 		 * trim off the sctp chunk header(it should be there)
3022 		 */
3023 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3024 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3025 			sctp_mbuf_crush(chk->data);
3026 			chk->send_size -= sizeof(struct sctp_data_chunk);
3027 		}
3028 	}
3029 	SCTP_BUF_NEXT(m_notify) = chk->data;
3030 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3031 	/* Steal off the mbuf */
3032 	chk->data = NULL;
3033 	/*
3034 	 * For this case, we check the actual socket buffer, since the assoc
3035 	 * is going away we don't want to overfill the socket buffer for a
3036 	 * non-reader
3037 	 */
3038 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3039 		sctp_m_freem(m_notify);
3040 		return;
3041 	}
3042 	/* append to socket */
3043 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3044 	    0, 0, 0, 0, 0, 0,
3045 	    m_notify);
3046 	if (control == NULL) {
3047 		/* no memory */
3048 		sctp_m_freem(m_notify);
3049 		return;
3050 	}
3051 	control->spec_flags = M_NOTIFICATION;
3052 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3053 	    control,
3054 	    &stcb->sctp_socket->so_rcv, 1,
3055 	    SCTP_READ_LOCK_NOT_HELD,
3056 	    so_locked);
3057 }
3058 
3059 
3060 static void
3061 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3062     struct sctp_stream_queue_pending *sp, int so_locked
3063 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3064     SCTP_UNUSED
3065 #endif
3066 )
3067 {
3068 	struct mbuf *m_notify;
3069 	struct sctp_send_failed *ssf;
3070 	struct sctp_queued_to_read *control;
3071 	int length;
3072 
3073 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
3074 		/* event not enabled */
3075 		return;
3076 	}
3077 	length = sizeof(struct sctp_send_failed) + sp->length;
3078 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3079 	if (m_notify == NULL)
3080 		/* no space left */
3081 		return;
3082 	SCTP_BUF_LEN(m_notify) = 0;
3083 	ssf = mtod(m_notify, struct sctp_send_failed *);
3084 	ssf->ssf_type = SCTP_SEND_FAILED;
3085 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3086 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3087 	else
3088 		ssf->ssf_flags = SCTP_DATA_SENT;
3089 	ssf->ssf_length = length;
3090 	ssf->ssf_error = error;
3091 	/* not exactly what the user sent in, but should be close :) */
3092 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3093 	ssf->ssf_info.sinfo_stream = sp->stream;
3094 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3095 	if (sp->some_taken) {
3096 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3097 	} else {
3098 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3099 	}
3100 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3101 	ssf->ssf_info.sinfo_context = sp->context;
3102 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3103 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3104 	SCTP_BUF_NEXT(m_notify) = sp->data;
3105 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3106 
3107 	/* Steal off the mbuf */
3108 	sp->data = NULL;
3109 	/*
3110 	 * For this case, we check the actual socket buffer, since the assoc
3111 	 * is going away we don't want to overfill the socket buffer for a
3112 	 * non-reader
3113 	 */
3114 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3115 		sctp_m_freem(m_notify);
3116 		return;
3117 	}
3118 	/* append to socket */
3119 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3120 	    0, 0, 0, 0, 0, 0,
3121 	    m_notify);
3122 	if (control == NULL) {
3123 		/* no memory */
3124 		sctp_m_freem(m_notify);
3125 		return;
3126 	}
3127 	control->spec_flags = M_NOTIFICATION;
3128 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3129 	    control,
3130 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3131 }
3132 
3133 
3134 
3135 static void
3136 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3137     uint32_t error)
3138 {
3139 	struct mbuf *m_notify;
3140 	struct sctp_adaptation_event *sai;
3141 	struct sctp_queued_to_read *control;
3142 
3143 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3144 		/* event not enabled */
3145 		return;
3146 	}
3147 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3148 	if (m_notify == NULL)
3149 		/* no space left */
3150 		return;
3151 	SCTP_BUF_LEN(m_notify) = 0;
3152 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3153 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3154 	sai->sai_flags = 0;
3155 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3156 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3157 	sai->sai_assoc_id = sctp_get_associd(stcb);
3158 
3159 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3160 	SCTP_BUF_NEXT(m_notify) = NULL;
3161 
3162 	/* append to socket */
3163 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3164 	    0, 0, 0, 0, 0, 0,
3165 	    m_notify);
3166 	if (control == NULL) {
3167 		/* no memory */
3168 		sctp_m_freem(m_notify);
3169 		return;
3170 	}
3171 	control->length = SCTP_BUF_LEN(m_notify);
3172 	control->spec_flags = M_NOTIFICATION;
3173 	/* not that we need this */
3174 	control->tail_mbuf = m_notify;
3175 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3176 	    control,
3177 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3178 }
3179 
3180 /* This always must be called with the read-queue LOCKED in the INP */
3181 static void
3182 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3183     uint32_t val, int so_locked
3184 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3185     SCTP_UNUSED
3186 #endif
3187 )
3188 {
3189 	struct mbuf *m_notify;
3190 	struct sctp_pdapi_event *pdapi;
3191 	struct sctp_queued_to_read *control;
3192 	struct sockbuf *sb;
3193 
3194 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3195 		/* event not enabled */
3196 		return;
3197 	}
3198 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3199 		return;
3200 	}
3201 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3202 	if (m_notify == NULL)
3203 		/* no space left */
3204 		return;
3205 	SCTP_BUF_LEN(m_notify) = 0;
3206 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3207 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3208 	pdapi->pdapi_flags = 0;
3209 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3210 	pdapi->pdapi_indication = error;
3211 	pdapi->pdapi_stream = (val >> 16);
3212 	pdapi->pdapi_seq = (val & 0x0000ffff);
3213 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3214 
3215 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3216 	SCTP_BUF_NEXT(m_notify) = NULL;
3217 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3218 	    0, 0, 0, 0, 0, 0,
3219 	    m_notify);
3220 	if (control == NULL) {
3221 		/* no memory */
3222 		sctp_m_freem(m_notify);
3223 		return;
3224 	}
3225 	control->spec_flags = M_NOTIFICATION;
3226 	control->length = SCTP_BUF_LEN(m_notify);
3227 	/* not that we need this */
3228 	control->tail_mbuf = m_notify;
3229 	control->held_length = 0;
3230 	control->length = 0;
3231 	sb = &stcb->sctp_socket->so_rcv;
3232 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3233 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3234 	}
3235 	sctp_sballoc(stcb, sb, m_notify);
3236 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3237 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3238 	}
3239 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3240 	control->end_added = 1;
3241 	if (stcb->asoc.control_pdapi)
3242 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3243 	else {
3244 		/* we really should not see this case */
3245 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3246 	}
3247 	if (stcb->sctp_ep && stcb->sctp_socket) {
3248 		/* This should always be the case */
3249 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3250 		struct socket *so;
3251 
3252 		so = SCTP_INP_SO(stcb->sctp_ep);
3253 		if (!so_locked) {
3254 			atomic_add_int(&stcb->asoc.refcnt, 1);
3255 			SCTP_TCB_UNLOCK(stcb);
3256 			SCTP_SOCKET_LOCK(so, 1);
3257 			SCTP_TCB_LOCK(stcb);
3258 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3259 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3260 				SCTP_SOCKET_UNLOCK(so, 1);
3261 				return;
3262 			}
3263 		}
3264 #endif
3265 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3266 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3267 		if (!so_locked) {
3268 			SCTP_SOCKET_UNLOCK(so, 1);
3269 		}
3270 #endif
3271 	}
3272 }
3273 
3274 static void
3275 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3276 {
3277 	struct mbuf *m_notify;
3278 	struct sctp_shutdown_event *sse;
3279 	struct sctp_queued_to_read *control;
3280 
3281 	/*
3282 	 * For TCP model AND UDP connected sockets we will send an error up
3283 	 * when an SHUTDOWN completes
3284 	 */
3285 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3286 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3287 		/* mark socket closed for read/write and wakeup! */
3288 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3289 		struct socket *so;
3290 
3291 		so = SCTP_INP_SO(stcb->sctp_ep);
3292 		atomic_add_int(&stcb->asoc.refcnt, 1);
3293 		SCTP_TCB_UNLOCK(stcb);
3294 		SCTP_SOCKET_LOCK(so, 1);
3295 		SCTP_TCB_LOCK(stcb);
3296 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3297 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3298 			SCTP_SOCKET_UNLOCK(so, 1);
3299 			return;
3300 		}
3301 #endif
3302 		socantsendmore(stcb->sctp_socket);
3303 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3304 		SCTP_SOCKET_UNLOCK(so, 1);
3305 #endif
3306 	}
3307 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3308 		/* event not enabled */
3309 		return;
3310 	}
3311 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
3312 	if (m_notify == NULL)
3313 		/* no space left */
3314 		return;
3315 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3316 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3317 	sse->sse_flags = 0;
3318 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3319 	sse->sse_assoc_id = sctp_get_associd(stcb);
3320 
3321 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3322 	SCTP_BUF_NEXT(m_notify) = NULL;
3323 
3324 	/* append to socket */
3325 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3326 	    0, 0, 0, 0, 0, 0,
3327 	    m_notify);
3328 	if (control == NULL) {
3329 		/* no memory */
3330 		sctp_m_freem(m_notify);
3331 		return;
3332 	}
3333 	control->spec_flags = M_NOTIFICATION;
3334 	control->length = SCTP_BUF_LEN(m_notify);
3335 	/* not that we need this */
3336 	control->tail_mbuf = m_notify;
3337 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3338 	    control,
3339 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3340 }
3341 
3342 static void
3343 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3344     int so_locked
3345 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3346     SCTP_UNUSED
3347 #endif
3348 )
3349 {
3350 	struct mbuf *m_notify;
3351 	struct sctp_sender_dry_event *event;
3352 	struct sctp_queued_to_read *control;
3353 
3354 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3355 		/* event not enabled */
3356 		return;
3357 	}
3358 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3359 	if (m_notify == NULL) {
3360 		/* no space left */
3361 		return;
3362 	}
3363 	SCTP_BUF_LEN(m_notify) = 0;
3364 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3365 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3366 	event->sender_dry_flags = 0;
3367 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3368 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3369 
3370 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3371 	SCTP_BUF_NEXT(m_notify) = NULL;
3372 
3373 	/* append to socket */
3374 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3375 	    0, 0, 0, 0, 0, 0, m_notify);
3376 	if (control == NULL) {
3377 		/* no memory */
3378 		sctp_m_freem(m_notify);
3379 		return;
3380 	}
3381 	control->length = SCTP_BUF_LEN(m_notify);
3382 	control->spec_flags = M_NOTIFICATION;
3383 	/* not that we need this */
3384 	control->tail_mbuf = m_notify;
3385 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3386 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3387 }
3388 
3389 
3390 static void
3391 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3392 {
3393 	struct mbuf *m_notify;
3394 	struct sctp_queued_to_read *control;
3395 	struct sctp_stream_reset_event *strreset;
3396 	int len;
3397 
3398 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3399 		/* event not enabled */
3400 		return;
3401 	}
3402 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3403 	if (m_notify == NULL)
3404 		/* no space left */
3405 		return;
3406 	SCTP_BUF_LEN(m_notify) = 0;
3407 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3408 	if (len > M_TRAILINGSPACE(m_notify)) {
3409 		/* never enough room */
3410 		sctp_m_freem(m_notify);
3411 		return;
3412 	}
3413 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3414 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3415 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3416 	strreset->strreset_length = len;
3417 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3418 	strreset->strreset_list[0] = number_entries;
3419 
3420 	SCTP_BUF_LEN(m_notify) = len;
3421 	SCTP_BUF_NEXT(m_notify) = NULL;
3422 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3423 		/* no space */
3424 		sctp_m_freem(m_notify);
3425 		return;
3426 	}
3427 	/* append to socket */
3428 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3429 	    0, 0, 0, 0, 0, 0,
3430 	    m_notify);
3431 	if (control == NULL) {
3432 		/* no memory */
3433 		sctp_m_freem(m_notify);
3434 		return;
3435 	}
3436 	control->spec_flags = M_NOTIFICATION;
3437 	control->length = SCTP_BUF_LEN(m_notify);
3438 	/* not that we need this */
3439 	control->tail_mbuf = m_notify;
3440 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3441 	    control,
3442 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3443 }
3444 
3445 
3446 static void
3447 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3448     int number_entries, uint16_t * list, int flag)
3449 {
3450 	struct mbuf *m_notify;
3451 	struct sctp_queued_to_read *control;
3452 	struct sctp_stream_reset_event *strreset;
3453 	int len;
3454 
3455 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3456 		/* event not enabled */
3457 		return;
3458 	}
3459 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3460 	if (m_notify == NULL)
3461 		/* no space left */
3462 		return;
3463 	SCTP_BUF_LEN(m_notify) = 0;
3464 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3465 	if (len > M_TRAILINGSPACE(m_notify)) {
3466 		/* never enough room */
3467 		sctp_m_freem(m_notify);
3468 		return;
3469 	}
3470 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3471 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3472 	if (number_entries == 0) {
3473 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3474 	} else {
3475 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3476 	}
3477 	strreset->strreset_length = len;
3478 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3479 	if (number_entries) {
3480 		int i;
3481 
3482 		for (i = 0; i < number_entries; i++) {
3483 			strreset->strreset_list[i] = ntohs(list[i]);
3484 		}
3485 	}
3486 	SCTP_BUF_LEN(m_notify) = len;
3487 	SCTP_BUF_NEXT(m_notify) = NULL;
3488 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3489 		/* no space */
3490 		sctp_m_freem(m_notify);
3491 		return;
3492 	}
3493 	/* append to socket */
3494 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3495 	    0, 0, 0, 0, 0, 0,
3496 	    m_notify);
3497 	if (control == NULL) {
3498 		/* no memory */
3499 		sctp_m_freem(m_notify);
3500 		return;
3501 	}
3502 	control->spec_flags = M_NOTIFICATION;
3503 	control->length = SCTP_BUF_LEN(m_notify);
3504 	/* not that we need this */
3505 	control->tail_mbuf = m_notify;
3506 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3507 	    control,
3508 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3509 }
3510 
3511 
3512 void
3513 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3514     uint32_t error, void *data, int so_locked
3515 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3516     SCTP_UNUSED
3517 #endif
3518 )
3519 {
3520 	if ((stcb == NULL) ||
3521 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3522 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3523 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3524 		/* If the socket is gone we are out of here */
3525 		return;
3526 	}
3527 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3528 		return;
3529 	}
3530 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3531 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3532 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3533 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3534 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3535 			/* Don't report these in front states */
3536 			return;
3537 		}
3538 	}
3539 	switch (notification) {
3540 	case SCTP_NOTIFY_ASSOC_UP:
3541 		if (stcb->asoc.assoc_up_sent == 0) {
3542 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3543 			stcb->asoc.assoc_up_sent = 1;
3544 		}
3545 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3546 			sctp_notify_adaptation_layer(stcb, error);
3547 		}
3548 		if (stcb->asoc.peer_supports_auth == 0) {
3549 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3550 			    NULL, so_locked);
3551 		}
3552 		break;
3553 	case SCTP_NOTIFY_ASSOC_DOWN:
3554 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3555 		break;
3556 	case SCTP_NOTIFY_INTERFACE_DOWN:
3557 		{
3558 			struct sctp_nets *net;
3559 
3560 			net = (struct sctp_nets *)data;
3561 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3562 			    (struct sockaddr *)&net->ro._l_addr, error);
3563 			break;
3564 		}
3565 	case SCTP_NOTIFY_INTERFACE_UP:
3566 		{
3567 			struct sctp_nets *net;
3568 
3569 			net = (struct sctp_nets *)data;
3570 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3571 			    (struct sockaddr *)&net->ro._l_addr, error);
3572 			break;
3573 		}
3574 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3575 		{
3576 			struct sctp_nets *net;
3577 
3578 			net = (struct sctp_nets *)data;
3579 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3580 			    (struct sockaddr *)&net->ro._l_addr, error);
3581 			break;
3582 		}
3583 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3584 		sctp_notify_send_failed2(stcb, error,
3585 		    (struct sctp_stream_queue_pending *)data, so_locked);
3586 		break;
3587 	case SCTP_NOTIFY_DG_FAIL:
3588 		sctp_notify_send_failed(stcb, error,
3589 		    (struct sctp_tmit_chunk *)data, so_locked);
3590 		break;
3591 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3592 		{
3593 			uint32_t val;
3594 
3595 			val = *((uint32_t *) data);
3596 
3597 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3598 			break;
3599 		}
3600 	case SCTP_NOTIFY_STRDATA_ERR:
3601 		break;
3602 	case SCTP_NOTIFY_ASSOC_ABORTED:
3603 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3604 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3605 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3606 		} else {
3607 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3608 		}
3609 		break;
3610 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3611 		break;
3612 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3613 		break;
3614 	case SCTP_NOTIFY_ASSOC_RESTART:
3615 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3616 		if (stcb->asoc.peer_supports_auth == 0) {
3617 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3618 			    NULL, so_locked);
3619 		}
3620 		break;
3621 	case SCTP_NOTIFY_HB_RESP:
3622 		break;
3623 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3624 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3625 		break;
3626 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3627 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3628 		break;
3629 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3630 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3631 		break;
3632 
3633 	case SCTP_NOTIFY_STR_RESET_SEND:
3634 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3635 		break;
3636 	case SCTP_NOTIFY_STR_RESET_RECV:
3637 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3638 		break;
3639 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3640 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3641 		break;
3642 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3643 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3644 		break;
3645 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3646 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3647 		    error);
3648 		break;
3649 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3650 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3651 		    error);
3652 		break;
3653 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3654 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3655 		    error);
3656 		break;
3657 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3658 		break;
3659 	case SCTP_NOTIFY_ASCONF_FAILED:
3660 		break;
3661 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3662 		sctp_notify_shutdown_event(stcb);
3663 		break;
3664 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3665 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3666 		    (uint16_t) (uintptr_t) data,
3667 		    so_locked);
3668 		break;
3669 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3670 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3671 		    (uint16_t) (uintptr_t) data,
3672 		    so_locked);
3673 		break;
3674 	case SCTP_NOTIFY_NO_PEER_AUTH:
3675 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3676 		    (uint16_t) (uintptr_t) data,
3677 		    so_locked);
3678 		break;
3679 	case SCTP_NOTIFY_SENDER_DRY:
3680 		sctp_notify_sender_dry_event(stcb, so_locked);
3681 		break;
3682 	default:
3683 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3684 		    __FUNCTION__, notification, notification);
3685 		break;
3686 	}			/* end switch */
3687 }
3688 
3689 void
3690 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3691 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3692     SCTP_UNUSED
3693 #endif
3694 )
3695 {
3696 	struct sctp_association *asoc;
3697 	struct sctp_stream_out *outs;
3698 	struct sctp_tmit_chunk *chk;
3699 	struct sctp_stream_queue_pending *sp;
3700 	int i;
3701 
3702 	asoc = &stcb->asoc;
3703 
3704 	if (stcb == NULL) {
3705 		return;
3706 	}
3707 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3708 		/* already being freed */
3709 		return;
3710 	}
3711 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3712 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3713 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3714 		return;
3715 	}
3716 	/* now through all the gunk freeing chunks */
3717 	if (holds_lock == 0) {
3718 		SCTP_TCB_SEND_LOCK(stcb);
3719 	}
3720 	/* sent queue SHOULD be empty */
3721 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3722 		chk = TAILQ_FIRST(&asoc->sent_queue);
3723 		while (chk) {
3724 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3725 			asoc->sent_queue_cnt--;
3726 			if (chk->data != NULL) {
3727 				sctp_free_bufspace(stcb, asoc, chk, 1);
3728 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3729 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3730 				if (chk->data) {
3731 					sctp_m_freem(chk->data);
3732 					chk->data = NULL;
3733 				}
3734 			}
3735 			sctp_free_a_chunk(stcb, chk);
3736 			/* sa_ignore FREED_MEMORY */
3737 			chk = TAILQ_FIRST(&asoc->sent_queue);
3738 		}
3739 	}
3740 	/* pending send queue SHOULD be empty */
3741 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3742 		chk = TAILQ_FIRST(&asoc->send_queue);
3743 		while (chk) {
3744 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3745 			asoc->send_queue_cnt--;
3746 			if (chk->data != NULL) {
3747 				sctp_free_bufspace(stcb, asoc, chk, 1);
3748 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3749 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3750 				if (chk->data) {
3751 					sctp_m_freem(chk->data);
3752 					chk->data = NULL;
3753 				}
3754 			}
3755 			sctp_free_a_chunk(stcb, chk);
3756 			/* sa_ignore FREED_MEMORY */
3757 			chk = TAILQ_FIRST(&asoc->send_queue);
3758 		}
3759 	}
3760 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3761 		/* For each stream */
3762 		outs = &stcb->asoc.strmout[i];
3763 		/* clean up any sends there */
3764 		stcb->asoc.locked_on_sending = NULL;
3765 		sp = TAILQ_FIRST(&outs->outqueue);
3766 		while (sp) {
3767 			stcb->asoc.stream_queue_cnt--;
3768 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3769 			sctp_free_spbufspace(stcb, asoc, sp);
3770 			if (sp->data) {
3771 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3772 				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3773 				if (sp->data) {
3774 					sctp_m_freem(sp->data);
3775 					sp->data = NULL;
3776 				}
3777 			}
3778 			if (sp->net) {
3779 				sctp_free_remote_addr(sp->net);
3780 				sp->net = NULL;
3781 			}
3782 			/* Free the chunk */
3783 			sctp_free_a_strmoq(stcb, sp);
3784 			/* sa_ignore FREED_MEMORY */
3785 			sp = TAILQ_FIRST(&outs->outqueue);
3786 		}
3787 	}
3788 
3789 	if (holds_lock == 0) {
3790 		SCTP_TCB_SEND_UNLOCK(stcb);
3791 	}
3792 }
3793 
3794 void
3795 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3796 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3797     SCTP_UNUSED
3798 #endif
3799 )
3800 {
3801 
3802 	if (stcb == NULL) {
3803 		return;
3804 	}
3805 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3806 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3807 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3808 		return;
3809 	}
3810 	/* Tell them we lost the asoc */
3811 	sctp_report_all_outbound(stcb, 1, so_locked);
3812 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3813 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3814 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3815 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3816 	}
3817 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3818 }
3819 
3820 void
3821 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3822     struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
3823     uint32_t vrf_id, uint16_t port)
3824 {
3825 	uint32_t vtag;
3826 
3827 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3828 	struct socket *so;
3829 
3830 #endif
3831 
3832 	vtag = 0;
3833 	if (stcb != NULL) {
3834 		/* We have a TCB to abort, send notification too */
3835 		vtag = stcb->asoc.peer_vtag;
3836 		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
3837 		/* get the assoc vrf id and table id */
3838 		vrf_id = stcb->asoc.vrf_id;
3839 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3840 	}
3841 	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
3842 	if (stcb != NULL) {
3843 		/* Ok, now lets free it */
3844 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3845 		so = SCTP_INP_SO(inp);
3846 		atomic_add_int(&stcb->asoc.refcnt, 1);
3847 		SCTP_TCB_UNLOCK(stcb);
3848 		SCTP_SOCKET_LOCK(so, 1);
3849 		SCTP_TCB_LOCK(stcb);
3850 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3851 #endif
3852 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3853 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3854 		SCTP_SOCKET_UNLOCK(so, 1);
3855 #endif
3856 	}
3857 }
3858 
3859 #ifdef SCTP_ASOCLOG_OF_TSNS
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
/*
 * Debug helper: dump the association's inbound and outbound TSN logs
 * (kept when SCTP_ASOCLOG_OF_TSNS is defined).  The logs are circular
 * buffers of SCTP_TSN_LOG_SIZE entries; when the *_wrapped flag is set
 * the oldest entries start at the current index, so those are printed
 * first, then the entries from the beginning of the array.
 *
 * NOTE(review): the guard below is spelled "NOSIY_PRINTS" - presumably
 * a typo for "NOISY_PRINTS".  The entire body is compiled out unless
 * that exact (misspelled) macro is defined; confirm which spelling any
 * build configuration actually uses before renaming it.
 */
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped: print the older half (index .. end) first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* then the newer entries from the start of the buffer */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same two-pass walk for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3920 
3921 #endif
3922 
3923 void
3924 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3925     int error, struct mbuf *op_err,
3926     int so_locked
3927 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3928     SCTP_UNUSED
3929 #endif
3930 )
3931 {
3932 	uint32_t vtag;
3933 
3934 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3935 	struct socket *so;
3936 
3937 #endif
3938 
3939 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3940 	so = SCTP_INP_SO(inp);
3941 #endif
3942 	if (stcb == NULL) {
3943 		/* Got to have a TCB */
3944 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3945 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3946 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3947 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3948 			}
3949 		}
3950 		return;
3951 	} else {
3952 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3953 	}
3954 	vtag = stcb->asoc.peer_vtag;
3955 	/* notify the ulp */
3956 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3957 		sctp_abort_notification(stcb, error, so_locked);
3958 	/* notify the peer */
3959 #if defined(SCTP_PANIC_ON_ABORT)
3960 	panic("aborting an association");
3961 #endif
3962 	sctp_send_abort_tcb(stcb, op_err, so_locked);
3963 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3964 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3965 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3966 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3967 	}
3968 	/* now free the asoc */
3969 #ifdef SCTP_ASOCLOG_OF_TSNS
3970 	sctp_print_out_track_log(stcb);
3971 #endif
3972 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3973 	if (!so_locked) {
3974 		atomic_add_int(&stcb->asoc.refcnt, 1);
3975 		SCTP_TCB_UNLOCK(stcb);
3976 		SCTP_SOCKET_LOCK(so, 1);
3977 		SCTP_TCB_LOCK(stcb);
3978 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3979 	}
3980 #endif
3981 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3982 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3983 	if (!so_locked) {
3984 		SCTP_SOCKET_UNLOCK(so, 1);
3985 	}
3986 #endif
3987 }
3988 
3989 void
3990 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3991     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3992 {
3993 	struct sctp_chunkhdr *ch, chunk_buf;
3994 	unsigned int chk_length;
3995 
3996 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3997 	/* Generate a TO address for future reference */
3998 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3999 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
4000 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4001 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4002 		}
4003 	}
4004 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4005 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4006 	while (ch != NULL) {
4007 		chk_length = ntohs(ch->chunk_length);
4008 		if (chk_length < sizeof(*ch)) {
4009 			/* break to abort land */
4010 			break;
4011 		}
4012 		switch (ch->chunk_type) {
4013 		case SCTP_COOKIE_ECHO:
4014 			/* We hit here only if the assoc is being freed */
4015 			return;
4016 		case SCTP_PACKET_DROPPED:
4017 			/* we don't respond to pkt-dropped */
4018 			return;
4019 		case SCTP_ABORT_ASSOCIATION:
4020 			/* we don't respond with an ABORT to an ABORT */
4021 			return;
4022 		case SCTP_SHUTDOWN_COMPLETE:
4023 			/*
4024 			 * we ignore it since we are not waiting for it and
4025 			 * peer is gone
4026 			 */
4027 			return;
4028 		case SCTP_SHUTDOWN_ACK:
4029 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4030 			return;
4031 		default:
4032 			break;
4033 		}
4034 		offset += SCTP_SIZE32(chk_length);
4035 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4036 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4037 	}
4038 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4039 }
4040 
4041 /*
4042  * check the inbound datagram to make sure there is not an abort inside it,
4043  * if there is return 1, else return 0.
4044  */
4045 int
4046 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4047 {
4048 	struct sctp_chunkhdr *ch;
4049 	struct sctp_init_chunk *init_chk, chunk_buf;
4050 	int offset;
4051 	unsigned int chk_length;
4052 
4053 	offset = iphlen + sizeof(struct sctphdr);
4054 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4055 	    (uint8_t *) & chunk_buf);
4056 	while (ch != NULL) {
4057 		chk_length = ntohs(ch->chunk_length);
4058 		if (chk_length < sizeof(*ch)) {
4059 			/* packet is probably corrupt */
4060 			break;
4061 		}
4062 		/* we seem to be ok, is it an abort? */
4063 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4064 			/* yep, tell them */
4065 			return (1);
4066 		}
4067 		if (ch->chunk_type == SCTP_INITIATION) {
4068 			/* need to update the Vtag */
4069 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4070 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4071 			if (init_chk != NULL) {
4072 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4073 			}
4074 		}
4075 		/* Nope, move to the next chunk */
4076 		offset += SCTP_SIZE32(chk_length);
4077 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4078 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4079 	}
4080 	return (0);
4081 }
4082 
4083 /*
4084  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4085  * set (i.e. it's 0) so, create this function to compare link local scopes
4086  */
4087 #ifdef INET6
4088 uint32_t
4089 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4090 {
4091 	struct sockaddr_in6 a, b;
4092 
4093 	/* save copies */
4094 	a = *addr1;
4095 	b = *addr2;
4096 
4097 	if (a.sin6_scope_id == 0)
4098 		if (sa6_recoverscope(&a)) {
4099 			/* can't get scope, so can't match */
4100 			return (0);
4101 		}
4102 	if (b.sin6_scope_id == 0)
4103 		if (sa6_recoverscope(&b)) {
4104 			/* can't get scope, so can't match */
4105 			return (0);
4106 		}
4107 	if (a.sin6_scope_id != b.sin6_scope_id)
4108 		return (0);
4109 
4110 	return (1);
4111 }
4112 
4113 /*
4114  * returns a sockaddr_in6 with embedded scope recovered and removed
4115  */
4116 struct sockaddr_in6 *
4117 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4118 {
4119 	/* check and strip embedded scope junk */
4120 	if (addr->sin6_family == AF_INET6) {
4121 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4122 			if (addr->sin6_scope_id == 0) {
4123 				*store = *addr;
4124 				if (!sa6_recoverscope(store)) {
4125 					/* use the recovered scope */
4126 					addr = store;
4127 				}
4128 			} else {
4129 				/* else, return the original "to" addr */
4130 				in6_clearscope(&addr->sin6_addr);
4131 			}
4132 		}
4133 	}
4134 	return (addr);
4135 }
4136 
4137 #endif
4138 
4139 /*
4140  * are the two addresses the same?  currently a "scopeless" check returns: 1
4141  * if same, 0 if not
4142  */
4143 int
4144 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4145 {
4146 
4147 	/* must be valid */
4148 	if (sa1 == NULL || sa2 == NULL)
4149 		return (0);
4150 
4151 	/* must be the same family */
4152 	if (sa1->sa_family != sa2->sa_family)
4153 		return (0);
4154 
4155 	switch (sa1->sa_family) {
4156 #ifdef INET6
4157 	case AF_INET6:
4158 		{
4159 			/* IPv6 addresses */
4160 			struct sockaddr_in6 *sin6_1, *sin6_2;
4161 
4162 			sin6_1 = (struct sockaddr_in6 *)sa1;
4163 			sin6_2 = (struct sockaddr_in6 *)sa2;
4164 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4165 			    sin6_2));
4166 		}
4167 #endif
4168 	case AF_INET:
4169 		{
4170 			/* IPv4 addresses */
4171 			struct sockaddr_in *sin_1, *sin_2;
4172 
4173 			sin_1 = (struct sockaddr_in *)sa1;
4174 			sin_2 = (struct sockaddr_in *)sa2;
4175 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4176 		}
4177 	default:
4178 		/* we don't do these... */
4179 		return (0);
4180 	}
4181 }
4182 
4183 void
4184 sctp_print_address(struct sockaddr *sa)
4185 {
4186 #ifdef INET6
4187 	char ip6buf[INET6_ADDRSTRLEN];
4188 
4189 	ip6buf[0] = 0;
4190 #endif
4191 
4192 	switch (sa->sa_family) {
4193 #ifdef INET6
4194 	case AF_INET6:
4195 		{
4196 			struct sockaddr_in6 *sin6;
4197 
4198 			sin6 = (struct sockaddr_in6 *)sa;
4199 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4200 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4201 			    ntohs(sin6->sin6_port),
4202 			    sin6->sin6_scope_id);
4203 			break;
4204 		}
4205 #endif
4206 	case AF_INET:
4207 		{
4208 			struct sockaddr_in *sin;
4209 			unsigned char *p;
4210 
4211 			sin = (struct sockaddr_in *)sa;
4212 			p = (unsigned char *)&sin->sin_addr;
4213 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4214 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4215 			break;
4216 		}
4217 	default:
4218 		SCTP_PRINTF("?\n");
4219 		break;
4220 	}
4221 }
4222 
4223 void
4224 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4225 {
4226 	switch (iph->ip_v) {
4227 	case IPVERSION:
4228 		{
4229 			struct sockaddr_in lsa, fsa;
4230 
4231 			bzero(&lsa, sizeof(lsa));
4232 			lsa.sin_len = sizeof(lsa);
4233 			lsa.sin_family = AF_INET;
4234 			lsa.sin_addr = iph->ip_src;
4235 			lsa.sin_port = sh->src_port;
4236 			bzero(&fsa, sizeof(fsa));
4237 			fsa.sin_len = sizeof(fsa);
4238 			fsa.sin_family = AF_INET;
4239 			fsa.sin_addr = iph->ip_dst;
4240 			fsa.sin_port = sh->dest_port;
4241 			SCTP_PRINTF("src: ");
4242 			sctp_print_address((struct sockaddr *)&lsa);
4243 			SCTP_PRINTF("dest: ");
4244 			sctp_print_address((struct sockaddr *)&fsa);
4245 			break;
4246 		}
4247 #ifdef INET6
4248 	case IPV6_VERSION >> 4:
4249 		{
4250 			struct ip6_hdr *ip6;
4251 			struct sockaddr_in6 lsa6, fsa6;
4252 
4253 			ip6 = (struct ip6_hdr *)iph;
4254 			bzero(&lsa6, sizeof(lsa6));
4255 			lsa6.sin6_len = sizeof(lsa6);
4256 			lsa6.sin6_family = AF_INET6;
4257 			lsa6.sin6_addr = ip6->ip6_src;
4258 			lsa6.sin6_port = sh->src_port;
4259 			bzero(&fsa6, sizeof(fsa6));
4260 			fsa6.sin6_len = sizeof(fsa6);
4261 			fsa6.sin6_family = AF_INET6;
4262 			fsa6.sin6_addr = ip6->ip6_dst;
4263 			fsa6.sin6_port = sh->dest_port;
4264 			SCTP_PRINTF("src: ");
4265 			sctp_print_address((struct sockaddr *)&lsa6);
4266 			SCTP_PRINTF("dest: ");
4267 			sctp_print_address((struct sockaddr *)&fsa6);
4268 			break;
4269 		}
4270 #endif
4271 	default:
4272 		/* TSNH */
4273 		break;
4274 	}
4275 }
4276 
/*
 * Move every read-queue entry belonging to stcb from old_inp's socket to
 * new_inp's socket (used when an association is peeled off or accepted
 * onto a new socket).  Receive-buffer byte accounting is transferred via
 * sctp_sbfree() on the old socket and sctp_sballoc() on the new one.
 * waitflags is passed through to sblock(); if the sb-lock cannot be
 * obtained the data is left on the old socket (see comment below).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* remember the next entry before unlinking this one */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release the byte accounting on the old socket */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the bytes against the new socket */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4358 
/*
 * Place a sctp_queued_to_read control at the tail of the endpoint's read
 * queue and charge its mbuf chain to the socket receive buffer so that
 * select()/read() wake up.  Zero-length mbufs are pruned from the chain
 * first.  If the whole chain collapses to nothing, the control is freed
 * and nothing is queued.
 *
 * end: non-zero marks the message complete (end_added).
 * inp_read_lock_held: non-zero when the caller already holds the INP
 * read lock.
 * so_locked: non-zero when the caller already holds the socket lock
 * (only meaningful on platforms that take the socket lock here).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* socket can no longer be read from: drop the message */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only user data (not notifications) counts as a receive */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				/* chain ended on a pruned mbuf */
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf against the receive buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Lock order is socket before TCB; hold a
				 * refcount while the TCB lock is dropped.
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket went away while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4480 
4481 
/*
 * Append mbuf chain m to an existing read-queue entry (partial delivery
 * API or reassembly-queue append), updating the control's length and, if
 * sb is non-NULL, the socket receive-buffer accounting.
 *
 * end: non-zero marks the message complete.
 * ctls_cumack: during partial delivery, the highest TSN carried by this
 * mbuf (see comment near the bottom).
 *
 * Returns 0 on success, -1 when the control is missing/already complete
 * or m is empty; returns 0 without appending when the socket can no
 * longer be read from.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: drop the lock and report failure */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* nobody can read anymore; silently succeed */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs and account the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			/* this was the pending partial-delivery message */
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock order is socket before TCB; hold a refcount
			 * while the TCB lock is dropped.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket went away while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4627 
4628 
4629 
4630 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4631  *************ALTERNATE ROUTING CODE
4632  */
4633 
4634 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4635  *************ALTERNATE ROUTING CODE
4636  */
4637 
4638 struct mbuf *
4639 sctp_generate_invmanparam(int err)
4640 {
4641 	/* Return a MBUF with a invalid mandatory parameter */
4642 	struct mbuf *m;
4643 
4644 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4645 	if (m) {
4646 		struct sctp_paramhdr *ph;
4647 
4648 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4649 		ph = mtod(m, struct sctp_paramhdr *);
4650 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4651 		ph->param_type = htons(err);
4652 	}
4653 	return (m);
4654 }
4655 
4656 #ifdef SCTP_MBCNT_LOGGING
4657 void
4658 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4659     struct sctp_tmit_chunk *tp1, int chk_cnt)
4660 {
4661 	if (tp1->data == NULL) {
4662 		return;
4663 	}
4664 	asoc->chunks_on_out_queue -= chk_cnt;
4665 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4666 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4667 		    asoc->total_output_queue_size,
4668 		    tp1->book_size,
4669 		    0,
4670 		    tp1->mbcnt);
4671 	}
4672 	if (asoc->total_output_queue_size >= tp1->book_size) {
4673 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4674 	} else {
4675 		asoc->total_output_queue_size = 0;
4676 	}
4677 
4678 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4679 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4680 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4681 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4682 		} else {
4683 			stcb->sctp_socket->so_snd.sb_cc = 0;
4684 
4685 		}
4686 	}
4687 }
4688 
4689 #endif
4690 
4691 int
4692 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4693     int reason, int so_locked
4694 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4695     SCTP_UNUSED
4696 #endif
4697 )
4698 {
4699 	struct sctp_stream_out *strq;
4700 	struct sctp_tmit_chunk *chk = NULL;
4701 	struct sctp_stream_queue_pending *sp;
4702 	uint16_t stream = 0, seq = 0;
4703 	uint8_t foundeom = 0;
4704 	int ret_sz = 0;
4705 	int notdone;
4706 	int do_wakeup_routine = 0;
4707 
4708 	stream = tp1->rec.data.stream_number;
4709 	seq = tp1->rec.data.stream_seq;
4710 	do {
4711 		ret_sz += tp1->book_size;
4712 		if (tp1->data != NULL) {
4713 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4714 				sctp_flight_size_decrease(tp1);
4715 				sctp_total_flight_decrease(stcb, tp1);
4716 			}
4717 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4718 			stcb->asoc.peers_rwnd += tp1->send_size;
4719 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4720 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4721 			if (tp1->data) {
4722 				sctp_m_freem(tp1->data);
4723 				tp1->data = NULL;
4724 			}
4725 			do_wakeup_routine = 1;
4726 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4727 				stcb->asoc.sent_queue_cnt_removeable--;
4728 			}
4729 		}
4730 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4731 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4732 		    SCTP_DATA_NOT_FRAG) {
4733 			/* not frag'ed we ae done   */
4734 			notdone = 0;
4735 			foundeom = 1;
4736 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4737 			/* end of frag, we are done */
4738 			notdone = 0;
4739 			foundeom = 1;
4740 		} else {
4741 			/*
4742 			 * Its a begin or middle piece, we must mark all of
4743 			 * it
4744 			 */
4745 			notdone = 1;
4746 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4747 		}
4748 	} while (tp1 && notdone);
4749 	if (foundeom == 0) {
4750 		/*
4751 		 * The multi-part message was scattered across the send and
4752 		 * sent queue.
4753 		 */
4754 next_on_sent:
4755 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4756 		/*
4757 		 * recurse throught the send_queue too, starting at the
4758 		 * beginning.
4759 		 */
4760 		if ((tp1) &&
4761 		    (tp1->rec.data.stream_number == stream) &&
4762 		    (tp1->rec.data.stream_seq == seq)) {
4763 			/*
4764 			 * save to chk in case we have some on stream out
4765 			 * queue. If so and we have an un-transmitted one we
4766 			 * don't have to fudge the TSN.
4767 			 */
4768 			chk = tp1;
4769 			ret_sz += tp1->book_size;
4770 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4771 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4772 			if (tp1->data) {
4773 				sctp_m_freem(tp1->data);
4774 				tp1->data = NULL;
4775 			}
4776 			/* No flight involved here book the size to 0 */
4777 			tp1->book_size = 0;
4778 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4779 				foundeom = 1;
4780 			}
4781 			do_wakeup_routine = 1;
4782 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4783 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4784 			/*
4785 			 * on to the sent queue so we can wait for it to be
4786 			 * passed by.
4787 			 */
4788 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4789 			    sctp_next);
4790 			stcb->asoc.send_queue_cnt--;
4791 			stcb->asoc.sent_queue_cnt++;
4792 			goto next_on_sent;
4793 		}
4794 	}
4795 	if (foundeom == 0) {
4796 		/*
4797 		 * Still no eom found. That means there is stuff left on the
4798 		 * stream out queue.. yuck.
4799 		 */
4800 		strq = &stcb->asoc.strmout[stream];
4801 		SCTP_TCB_SEND_LOCK(stcb);
4802 		sp = TAILQ_FIRST(&strq->outqueue);
4803 		while (sp->strseq <= seq) {
4804 			/* Check if its our SEQ */
4805 			if (sp->strseq == seq) {
4806 				sp->discard_rest = 1;
4807 				/*
4808 				 * We may need to put a chunk on the queue
4809 				 * that holds the TSN that would have been
4810 				 * sent with the LAST bit.
4811 				 */
4812 				if (chk == NULL) {
4813 					/* Yep, we have to */
4814 					sctp_alloc_a_chunk(stcb, chk);
4815 					if (chk == NULL) {
4816 						/*
4817 						 * we are hosed. All we can
4818 						 * do is nothing.. which
4819 						 * will cause an abort if
4820 						 * the peer is paying
4821 						 * attention.
4822 						 */
4823 						goto oh_well;
4824 					}
4825 					memset(chk, 0, sizeof(*chk));
4826 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4827 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4828 					chk->asoc = &stcb->asoc;
4829 					chk->rec.data.stream_seq = sp->strseq;
4830 					chk->rec.data.stream_number = sp->stream;
4831 					chk->rec.data.payloadtype = sp->ppid;
4832 					chk->rec.data.context = sp->context;
4833 					chk->flags = sp->act_flags;
4834 					if (sp->net)
4835 						chk->whoTo = sp->net;
4836 					else
4837 						chk->whoTo = stcb->asoc.primary_destination;
4838 					atomic_add_int(&chk->whoTo->ref_count, 1);
4839 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4840 					stcb->asoc.pr_sctp_cnt++;
4841 					chk->pr_sctp_on = 1;
4842 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4843 					stcb->asoc.sent_queue_cnt++;
4844 					stcb->asoc.pr_sctp_cnt++;
4845 				} else {
4846 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4847 				}
4848 		oh_well:
4849 				if (sp->data) {
4850 					/*
4851 					 * Pull any data to free up the SB
4852 					 * and allow sender to "add more"
4853 					 * whilc we will throw away :-)
4854 					 */
4855 					sctp_free_spbufspace(stcb, &stcb->asoc,
4856 					    sp);
4857 					ret_sz += sp->length;
4858 					do_wakeup_routine = 1;
4859 					sp->some_taken = 1;
4860 					sctp_m_freem(sp->data);
4861 					sp->length = 0;
4862 					sp->data = NULL;
4863 					sp->tail_mbuf = NULL;
4864 				}
4865 				break;
4866 			} else {
4867 				/* Next one please */
4868 				sp = TAILQ_NEXT(sp, next);
4869 			}
4870 		}		/* End while */
4871 		SCTP_TCB_SEND_UNLOCK(stcb);
4872 	}
4873 	if (do_wakeup_routine) {
4874 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4875 		struct socket *so;
4876 
4877 		so = SCTP_INP_SO(stcb->sctp_ep);
4878 		if (!so_locked) {
4879 			atomic_add_int(&stcb->asoc.refcnt, 1);
4880 			SCTP_TCB_UNLOCK(stcb);
4881 			SCTP_SOCKET_LOCK(so, 1);
4882 			SCTP_TCB_LOCK(stcb);
4883 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4884 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4885 				/* assoc was freed while we were unlocked */
4886 				SCTP_SOCKET_UNLOCK(so, 1);
4887 				return (ret_sz);
4888 			}
4889 		}
4890 #endif
4891 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4892 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4893 		if (!so_locked) {
4894 			SCTP_SOCKET_UNLOCK(so, 1);
4895 		}
4896 #endif
4897 	}
4898 	return (ret_sz);
4899 }
4900 
4901 /*
4902  * checks to see if the given address, sa, is one that is currently known by
4903  * the kernel note: can't distinguish the same address on multiple interfaces
4904  * and doesn't handle multiple addresses with different zone/scope id's note:
4905  * ifa_ifwithaddr() compares the entire sockaddr struct
4906  */
4907 struct sctp_ifa *
4908 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4909     int holds_lock)
4910 {
4911 	struct sctp_laddr *laddr;
4912 
4913 	if (holds_lock == 0) {
4914 		SCTP_INP_RLOCK(inp);
4915 	}
4916 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4917 		if (laddr->ifa == NULL)
4918 			continue;
4919 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4920 			continue;
4921 		if (addr->sa_family == AF_INET) {
4922 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4923 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4924 				/* found him. */
4925 				if (holds_lock == 0) {
4926 					SCTP_INP_RUNLOCK(inp);
4927 				}
4928 				return (laddr->ifa);
4929 				break;
4930 			}
4931 		}
4932 #ifdef INET6
4933 		if (addr->sa_family == AF_INET6) {
4934 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4935 			    &laddr->ifa->address.sin6)) {
4936 				/* found him. */
4937 				if (holds_lock == 0) {
4938 					SCTP_INP_RUNLOCK(inp);
4939 				}
4940 				return (laddr->ifa);
4941 				break;
4942 			}
4943 		}
4944 #endif
4945 	}
4946 	if (holds_lock == 0) {
4947 		SCTP_INP_RUNLOCK(inp);
4948 	}
4949 	return (NULL);
4950 }
4951 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Compute a 32-bit hash value for an IPv4 or IPv6 address; any
	 * other address family hashes to 0.  The high half is folded
	 * into the low half so short hash tables still see all bits.
	 */
	uint32_t h;

	switch (addr->sa_family) {
	case AF_INET:
		{
			struct sockaddr_in *v4 = (struct sockaddr_in *)addr;

			h = v4->sin_addr.s_addr;
			return (h ^ (h >> 16));
		}
	case AF_INET6:
		{
			struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)addr;
			uint32_t w[4];

			/*
			 * Read the address as four native 32-bit words
			 * (same bytes the s6_addr32 overlay exposes) and
			 * sum them.
			 */
			memcpy(w, &v6->sin6_addr, sizeof(w));
			h = w[0] + w[1] + w[2] + w[3];
			return (h ^ (h >> 16));
		}
	default:
		return (0);
	}
}
4974 
4975 struct sctp_ifa *
4976 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4977 {
4978 	struct sctp_ifa *sctp_ifap;
4979 	struct sctp_vrf *vrf;
4980 	struct sctp_ifalist *hash_head;
4981 	uint32_t hash_of_addr;
4982 
4983 	if (holds_lock == 0)
4984 		SCTP_IPI_ADDR_RLOCK();
4985 
4986 	vrf = sctp_find_vrf(vrf_id);
4987 	if (vrf == NULL) {
4988 stage_right:
4989 		if (holds_lock == 0)
4990 			SCTP_IPI_ADDR_RUNLOCK();
4991 		return (NULL);
4992 	}
4993 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4994 
4995 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4996 	if (hash_head == NULL) {
4997 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4998 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4999 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5000 		sctp_print_address(addr);
5001 		SCTP_PRINTF("No such bucket for address\n");
5002 		if (holds_lock == 0)
5003 			SCTP_IPI_ADDR_RUNLOCK();
5004 
5005 		return (NULL);
5006 	}
5007 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5008 		if (sctp_ifap == NULL) {
5009 #ifdef INVARIANTS
5010 			panic("Huh LIST_FOREACH corrupt");
5011 			goto stage_right;
5012 #else
5013 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5014 			goto stage_right;
5015 #endif
5016 		}
5017 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5018 			continue;
5019 		if (addr->sa_family == AF_INET) {
5020 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5021 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5022 				/* found him. */
5023 				if (holds_lock == 0)
5024 					SCTP_IPI_ADDR_RUNLOCK();
5025 				return (sctp_ifap);
5026 				break;
5027 			}
5028 		}
5029 #ifdef INET6
5030 		if (addr->sa_family == AF_INET6) {
5031 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5032 			    &sctp_ifap->address.sin6)) {
5033 				/* found him. */
5034 				if (holds_lock == 0)
5035 					SCTP_IPI_ADDR_RUNLOCK();
5036 				return (sctp_ifap);
5037 				break;
5038 			}
5039 		}
5040 #endif
5041 	}
5042 	if (holds_lock == 0)
5043 		SCTP_IPI_ADDR_RUNLOCK();
5044 	return (NULL);
5045 }
5046 
/*
 * Called after the user has consumed data from the receive path to decide
 * whether the peer should be told about the newly opened receive window
 * (by sending a window-update SACK).  '*freed_so_far' is the byte count
 * the caller has released since the last call; it is folded into the
 * association's running total and reset to 0.  'hold_rlock' indicates the
 * caller holds the INP read-queue lock, which is dropped around the SACK
 * send and re-acquired before returning.  'rwnd_req' is the growth
 * threshold below which no update is sent.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: the association is going away, no update. */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is being torn down; nothing to report. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed-byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* The caller's count is now accounted for; reset it. */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough to be worth telling the peer. */
		if (hold_rlock) {
			/* Drop the read-queue lock before sending a SACK. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* Re-check under the TCB lock; no reports here. */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Not enough growth yet; remember how much is pending. */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5128 
5129 int
5130 sctp_sorecvmsg(struct socket *so,
5131     struct uio *uio,
5132     struct mbuf **mp,
5133     struct sockaddr *from,
5134     int fromlen,
5135     int *msg_flags,
5136     struct sctp_sndrcvinfo *sinfo,
5137     int filling_sinfo)
5138 {
5139 	/*
5140 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5141 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5142 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5143 	 * On the way out we may send out any combination of:
5144 	 * MSG_NOTIFICATION MSG_EOR
5145 	 *
5146 	 */
5147 	struct sctp_inpcb *inp = NULL;
5148 	int my_len = 0;
5149 	int cp_len = 0, error = 0;
5150 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5151 	struct mbuf *m = NULL;
5152 	struct sctp_tcb *stcb = NULL;
5153 	int wakeup_read_socket = 0;
5154 	int freecnt_applied = 0;
5155 	int out_flags = 0, in_flags = 0;
5156 	int block_allowed = 1;
5157 	uint32_t freed_so_far = 0;
5158 	uint32_t copied_so_far = 0;
5159 	int in_eeor_mode = 0;
5160 	int no_rcv_needed = 0;
5161 	uint32_t rwnd_req = 0;
5162 	int hold_sblock = 0;
5163 	int hold_rlock = 0;
5164 	int slen = 0;
5165 	uint32_t held_length = 0;
5166 	int sockbuf_lock = 0;
5167 
5168 	if (uio == NULL) {
5169 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5170 		return (EINVAL);
5171 	}
5172 	if (msg_flags) {
5173 		in_flags = *msg_flags;
5174 		if (in_flags & MSG_PEEK)
5175 			SCTP_STAT_INCR(sctps_read_peeks);
5176 	} else {
5177 		in_flags = 0;
5178 	}
5179 	slen = uio->uio_resid;
5180 
5181 	/* Pull in and set up our int flags */
5182 	if (in_flags & MSG_OOB) {
5183 		/* Out of band's NOT supported */
5184 		return (EOPNOTSUPP);
5185 	}
5186 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5187 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5188 		return (EINVAL);
5189 	}
5190 	if ((in_flags & (MSG_DONTWAIT
5191 	    | MSG_NBIO
5192 	    )) ||
5193 	    SCTP_SO_IS_NBIO(so)) {
5194 		block_allowed = 0;
5195 	}
5196 	/* setup the endpoint */
5197 	inp = (struct sctp_inpcb *)so->so_pcb;
5198 	if (inp == NULL) {
5199 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5200 		return (EFAULT);
5201 	}
5202 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5203 	/* Must be at least a MTU's worth */
5204 	if (rwnd_req < SCTP_MIN_RWND)
5205 		rwnd_req = SCTP_MIN_RWND;
5206 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5207 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5208 		sctp_misc_ints(SCTP_SORECV_ENTER,
5209 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5210 	}
5211 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5212 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5213 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5214 	}
5215 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5216 	sockbuf_lock = 1;
5217 	if (error) {
5218 		goto release_unlocked;
5219 	}
5220 restart:
5221 
5222 
5223 restart_nosblocks:
5224 	if (hold_sblock == 0) {
5225 		SOCKBUF_LOCK(&so->so_rcv);
5226 		hold_sblock = 1;
5227 	}
5228 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5229 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5230 		goto out;
5231 	}
5232 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5233 		if (so->so_error) {
5234 			error = so->so_error;
5235 			if ((in_flags & MSG_PEEK) == 0)
5236 				so->so_error = 0;
5237 			goto out;
5238 		} else {
5239 			if (so->so_rcv.sb_cc == 0) {
5240 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5241 				/* indicate EOF */
5242 				error = 0;
5243 				goto out;
5244 			}
5245 		}
5246 	}
5247 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5248 		/* we need to wait for data */
5249 		if ((so->so_rcv.sb_cc == 0) &&
5250 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5251 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5252 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5253 				/*
5254 				 * For active open side clear flags for
5255 				 * re-use passive open is blocked by
5256 				 * connect.
5257 				 */
5258 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5259 					/*
5260 					 * You were aborted, passive side
5261 					 * always hits here
5262 					 */
5263 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5264 					error = ECONNRESET;
5265 					/*
5266 					 * You get this once if you are
5267 					 * active open side
5268 					 */
5269 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5270 						/*
5271 						 * Remove flag if on the
5272 						 * active open side
5273 						 */
5274 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5275 					}
5276 				}
5277 				so->so_state &= ~(SS_ISCONNECTING |
5278 				    SS_ISDISCONNECTING |
5279 				    SS_ISCONFIRMING |
5280 				    SS_ISCONNECTED);
5281 				if (error == 0) {
5282 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5283 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5284 						error = ENOTCONN;
5285 					} else {
5286 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5287 					}
5288 				}
5289 				goto out;
5290 			}
5291 		}
5292 		error = sbwait(&so->so_rcv);
5293 		if (error) {
5294 			goto out;
5295 		}
5296 		held_length = 0;
5297 		goto restart_nosblocks;
5298 	} else if (so->so_rcv.sb_cc == 0) {
5299 		if (so->so_error) {
5300 			error = so->so_error;
5301 			if ((in_flags & MSG_PEEK) == 0)
5302 				so->so_error = 0;
5303 		} else {
5304 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5305 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5306 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5307 					/*
5308 					 * For active open side clear flags
5309 					 * for re-use passive open is
5310 					 * blocked by connect.
5311 					 */
5312 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5313 						/*
5314 						 * You were aborted, passive
5315 						 * side always hits here
5316 						 */
5317 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5318 						error = ECONNRESET;
5319 						/*
5320 						 * You get this once if you
5321 						 * are active open side
5322 						 */
5323 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5324 							/*
5325 							 * Remove flag if on
5326 							 * the active open
5327 							 * side
5328 							 */
5329 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5330 						}
5331 					}
5332 					so->so_state &= ~(SS_ISCONNECTING |
5333 					    SS_ISDISCONNECTING |
5334 					    SS_ISCONFIRMING |
5335 					    SS_ISCONNECTED);
5336 					if (error == 0) {
5337 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5338 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5339 							error = ENOTCONN;
5340 						} else {
5341 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5342 						}
5343 					}
5344 					goto out;
5345 				}
5346 			}
5347 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5348 			error = EWOULDBLOCK;
5349 		}
5350 		goto out;
5351 	}
5352 	if (hold_sblock == 1) {
5353 		SOCKBUF_UNLOCK(&so->so_rcv);
5354 		hold_sblock = 0;
5355 	}
5356 	/* we possibly have data we can read */
5357 	/* sa_ignore FREED_MEMORY */
5358 	control = TAILQ_FIRST(&inp->read_queue);
5359 	if (control == NULL) {
5360 		/*
5361 		 * This could be happening since the appender did the
5362 		 * increment but as not yet did the tailq insert onto the
5363 		 * read_queue
5364 		 */
5365 		if (hold_rlock == 0) {
5366 			SCTP_INP_READ_LOCK(inp);
5367 			hold_rlock = 1;
5368 		}
5369 		control = TAILQ_FIRST(&inp->read_queue);
5370 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5371 #ifdef INVARIANTS
5372 			panic("Huh, its non zero and nothing on control?");
5373 #endif
5374 			so->so_rcv.sb_cc = 0;
5375 		}
5376 		SCTP_INP_READ_UNLOCK(inp);
5377 		hold_rlock = 0;
5378 		goto restart;
5379 	}
5380 	if ((control->length == 0) &&
5381 	    (control->do_not_ref_stcb)) {
5382 		/*
5383 		 * Clean up code for freeing assoc that left behind a
5384 		 * pdapi.. maybe a peer in EEOR that just closed after
5385 		 * sending and never indicated a EOR.
5386 		 */
5387 		if (hold_rlock == 0) {
5388 			hold_rlock = 1;
5389 			SCTP_INP_READ_LOCK(inp);
5390 		}
5391 		control->held_length = 0;
5392 		if (control->data) {
5393 			/* Hmm there is data here .. fix */
5394 			struct mbuf *m_tmp;
5395 			int cnt = 0;
5396 
5397 			m_tmp = control->data;
5398 			while (m_tmp) {
5399 				cnt += SCTP_BUF_LEN(m_tmp);
5400 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5401 					control->tail_mbuf = m_tmp;
5402 					control->end_added = 1;
5403 				}
5404 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5405 			}
5406 			control->length = cnt;
5407 		} else {
5408 			/* remove it */
5409 			TAILQ_REMOVE(&inp->read_queue, control, next);
5410 			/* Add back any hiddend data */
5411 			sctp_free_remote_addr(control->whoFrom);
5412 			sctp_free_a_readq(stcb, control);
5413 		}
5414 		if (hold_rlock) {
5415 			hold_rlock = 0;
5416 			SCTP_INP_READ_UNLOCK(inp);
5417 		}
5418 		goto restart;
5419 	}
5420 	if ((control->length == 0) &&
5421 	    (control->end_added == 1)) {
5422 		/*
5423 		 * Do we also need to check for (control->pdapi_aborted ==
5424 		 * 1)?
5425 		 */
5426 		if (hold_rlock == 0) {
5427 			hold_rlock = 1;
5428 			SCTP_INP_READ_LOCK(inp);
5429 		}
5430 		TAILQ_REMOVE(&inp->read_queue, control, next);
5431 		if (control->data) {
5432 #ifdef INVARIANTS
5433 			panic("control->data not null but control->length == 0");
5434 #else
5435 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5436 			sctp_m_freem(control->data);
5437 			control->data = NULL;
5438 #endif
5439 		}
5440 		if (control->aux_data) {
5441 			sctp_m_free(control->aux_data);
5442 			control->aux_data = NULL;
5443 		}
5444 		sctp_free_remote_addr(control->whoFrom);
5445 		sctp_free_a_readq(stcb, control);
5446 		if (hold_rlock) {
5447 			hold_rlock = 0;
5448 			SCTP_INP_READ_UNLOCK(inp);
5449 		}
5450 		goto restart;
5451 	}
5452 	if (control->length == 0) {
5453 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5454 		    (filling_sinfo)) {
5455 			/* find a more suitable one then this */
5456 			ctl = TAILQ_NEXT(control, next);
5457 			while (ctl) {
5458 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5459 				    (ctl->some_taken ||
5460 				    (ctl->spec_flags & M_NOTIFICATION) ||
5461 				    ((ctl->do_not_ref_stcb == 0) &&
5462 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5463 				    ) {
5464 					/*-
5465 					 * If we have a different TCB next, and there is data
5466 					 * present. If we have already taken some (pdapi), OR we can
5467 					 * ref the tcb and no delivery as started on this stream, we
5468 					 * take it. Note we allow a notification on a different
5469 					 * assoc to be delivered..
5470 					 */
5471 					control = ctl;
5472 					goto found_one;
5473 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5474 					    (ctl->length) &&
5475 					    ((ctl->some_taken) ||
5476 					    ((ctl->do_not_ref_stcb == 0) &&
5477 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5478 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5479 					/*-
5480 					 * If we have the same tcb, and there is data present, and we
5481 					 * have the strm interleave feature present. Then if we have
5482 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5483 					 * not started a delivery for this stream, we can take it.
5484 					 * Note we do NOT allow a notificaiton on the same assoc to
5485 					 * be delivered.
5486 					 */
5487 					control = ctl;
5488 					goto found_one;
5489 				}
5490 				ctl = TAILQ_NEXT(ctl, next);
5491 			}
5492 		}
5493 		/*
5494 		 * if we reach here, not suitable replacement is available
5495 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5496 		 * into the our held count, and its time to sleep again.
5497 		 */
5498 		held_length = so->so_rcv.sb_cc;
5499 		control->held_length = so->so_rcv.sb_cc;
5500 		goto restart;
5501 	}
5502 	/* Clear the held length since there is something to read */
5503 	control->held_length = 0;
5504 	if (hold_rlock) {
5505 		SCTP_INP_READ_UNLOCK(inp);
5506 		hold_rlock = 0;
5507 	}
5508 found_one:
5509 	/*
5510 	 * If we reach here, control has a some data for us to read off.
5511 	 * Note that stcb COULD be NULL.
5512 	 */
5513 	control->some_taken++;
5514 	if (hold_sblock) {
5515 		SOCKBUF_UNLOCK(&so->so_rcv);
5516 		hold_sblock = 0;
5517 	}
5518 	stcb = control->stcb;
5519 	if (stcb) {
5520 		if ((control->do_not_ref_stcb == 0) &&
5521 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5522 			if (freecnt_applied == 0)
5523 				stcb = NULL;
5524 		} else if (control->do_not_ref_stcb == 0) {
5525 			/* you can't free it on me please */
5526 			/*
5527 			 * The lock on the socket buffer protects us so the
5528 			 * free code will stop. But since we used the
5529 			 * socketbuf lock and the sender uses the tcb_lock
5530 			 * to increment, we need to use the atomic add to
5531 			 * the refcnt
5532 			 */
5533 			if (freecnt_applied) {
5534 #ifdef INVARIANTS
5535 				panic("refcnt already incremented");
5536 #else
5537 				printf("refcnt already incremented?\n");
5538 #endif
5539 			} else {
5540 				atomic_add_int(&stcb->asoc.refcnt, 1);
5541 				freecnt_applied = 1;
5542 			}
5543 			/*
5544 			 * Setup to remember how much we have not yet told
5545 			 * the peer our rwnd has opened up. Note we grab the
5546 			 * value from the tcb from last time. Note too that
5547 			 * sack sending clears this when a sack is sent,
5548 			 * which is fine. Once we hit the rwnd_req, we then
5549 			 * will go to the sctp_user_rcvd() that will not
5550 			 * lock until it KNOWs it MUST send a WUP-SACK.
5551 			 */
5552 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5553 			stcb->freed_by_sorcv_sincelast = 0;
5554 		}
5555 	}
5556 	if (stcb &&
5557 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5558 	    control->do_not_ref_stcb == 0) {
5559 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5560 	}
5561 	/* First lets get off the sinfo and sockaddr info */
5562 	if ((sinfo) && filling_sinfo) {
5563 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5564 		nxt = TAILQ_NEXT(control, next);
5565 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5566 			struct sctp_extrcvinfo *s_extra;
5567 
5568 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5569 			if ((nxt) &&
5570 			    (nxt->length)) {
5571 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5572 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5573 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5574 				}
5575 				if (nxt->spec_flags & M_NOTIFICATION) {
5576 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5577 				}
5578 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5579 				s_extra->sreinfo_next_length = nxt->length;
5580 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5581 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5582 				if (nxt->tail_mbuf != NULL) {
5583 					if (nxt->end_added) {
5584 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5585 					}
5586 				}
5587 			} else {
5588 				/*
5589 				 * we explicitly 0 this, since the memcpy
5590 				 * got some other things beyond the older
5591 				 * sinfo_ that is on the control's structure
5592 				 * :-D
5593 				 */
5594 				nxt = NULL;
5595 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5596 				s_extra->sreinfo_next_aid = 0;
5597 				s_extra->sreinfo_next_length = 0;
5598 				s_extra->sreinfo_next_ppid = 0;
5599 				s_extra->sreinfo_next_stream = 0;
5600 			}
5601 		}
5602 		/*
5603 		 * update off the real current cum-ack, if we have an stcb.
5604 		 */
5605 		if ((control->do_not_ref_stcb == 0) && stcb)
5606 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5607 		/*
5608 		 * mask off the high bits, we keep the actual chunk bits in
5609 		 * there.
5610 		 */
5611 		sinfo->sinfo_flags &= 0x00ff;
5612 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5613 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5614 		}
5615 	}
5616 #ifdef SCTP_ASOCLOG_OF_TSNS
5617 	{
5618 		int index, newindex;
5619 		struct sctp_pcbtsn_rlog *entry;
5620 
5621 		do {
5622 			index = inp->readlog_index;
5623 			newindex = index + 1;
5624 			if (newindex >= SCTP_READ_LOG_SIZE) {
5625 				newindex = 0;
5626 			}
5627 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5628 		entry = &inp->readlog[index];
5629 		entry->vtag = control->sinfo_assoc_id;
5630 		entry->strm = control->sinfo_stream;
5631 		entry->seq = control->sinfo_ssn;
5632 		entry->sz = control->length;
5633 		entry->flgs = control->sinfo_flags;
5634 	}
5635 #endif
5636 	if (fromlen && from) {
5637 		struct sockaddr *to;
5638 
5639 #ifdef INET
5640 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5641 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5642 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5643 #else
5644 		/* No AF_INET use AF_INET6 */
5645 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5646 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5647 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5648 #endif
5649 
5650 		to = from;
5651 #if defined(INET) && defined(INET6)
5652 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5653 		    (to->sa_family == AF_INET) &&
5654 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5655 			struct sockaddr_in *sin;
5656 			struct sockaddr_in6 sin6;
5657 
5658 			sin = (struct sockaddr_in *)to;
5659 			bzero(&sin6, sizeof(sin6));
5660 			sin6.sin6_family = AF_INET6;
5661 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5662 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5663 			bcopy(&sin->sin_addr,
5664 			    &sin6.sin6_addr.s6_addr32[3],
5665 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5666 			sin6.sin6_port = sin->sin_port;
5667 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5668 		}
5669 #endif
5670 #if defined(INET6)
5671 		{
5672 			struct sockaddr_in6 lsa6, *to6;
5673 
5674 			to6 = (struct sockaddr_in6 *)to;
5675 			sctp_recover_scope_mac(to6, (&lsa6));
5676 		}
5677 #endif
5678 	}
5679 	/* now copy out what data we can */
5680 	if (mp == NULL) {
5681 		/* copy out each mbuf in the chain up to length */
5682 get_more_data:
5683 		m = control->data;
5684 		while (m) {
5685 			/* Move out all we can */
5686 			cp_len = (int)uio->uio_resid;
5687 			my_len = (int)SCTP_BUF_LEN(m);
5688 			if (cp_len > my_len) {
5689 				/* not enough in this buf */
5690 				cp_len = my_len;
5691 			}
5692 			if (hold_rlock) {
5693 				SCTP_INP_READ_UNLOCK(inp);
5694 				hold_rlock = 0;
5695 			}
5696 			if (cp_len > 0)
5697 				error = uiomove(mtod(m, char *), cp_len, uio);
5698 			/* re-read */
5699 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5700 				goto release;
5701 			}
5702 			if ((control->do_not_ref_stcb == 0) && stcb &&
5703 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5704 				no_rcv_needed = 1;
5705 			}
5706 			if (error) {
5707 				/* error we are out of here */
5708 				goto release;
5709 			}
5710 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5711 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5712 			    ((control->end_added == 0) ||
5713 			    (control->end_added &&
5714 			    (TAILQ_NEXT(control, next) == NULL)))
5715 			    ) {
5716 				SCTP_INP_READ_LOCK(inp);
5717 				hold_rlock = 1;
5718 			}
5719 			if (cp_len == SCTP_BUF_LEN(m)) {
5720 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5721 				    (control->end_added)) {
5722 					out_flags |= MSG_EOR;
5723 					if ((control->do_not_ref_stcb == 0) &&
5724 					    (control->stcb != NULL) &&
5725 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5726 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5727 				}
5728 				if (control->spec_flags & M_NOTIFICATION) {
5729 					out_flags |= MSG_NOTIFICATION;
5730 				}
5731 				/* we ate up the mbuf */
5732 				if (in_flags & MSG_PEEK) {
5733 					/* just looking */
5734 					m = SCTP_BUF_NEXT(m);
5735 					copied_so_far += cp_len;
5736 				} else {
5737 					/* dispose of the mbuf */
5738 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5739 						sctp_sblog(&so->so_rcv,
5740 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5741 					}
5742 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5743 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5744 						sctp_sblog(&so->so_rcv,
5745 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5746 					}
5747 					copied_so_far += cp_len;
5748 					freed_so_far += cp_len;
5749 					freed_so_far += MSIZE;
5750 					atomic_subtract_int(&control->length, cp_len);
5751 					control->data = sctp_m_free(m);
5752 					m = control->data;
5753 					/*
5754 					 * been through it all, must hold sb
5755 					 * lock ok to null tail
5756 					 */
5757 					if (control->data == NULL) {
5758 #ifdef INVARIANTS
5759 						if ((control->end_added == 0) ||
5760 						    (TAILQ_NEXT(control, next) == NULL)) {
5761 							/*
5762 							 * If the end is not
5763 							 * added, OR the
5764 							 * next is NOT null
5765 							 * we MUST have the
5766 							 * lock.
5767 							 */
5768 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5769 								panic("Hmm we don't own the lock?");
5770 							}
5771 						}
5772 #endif
5773 						control->tail_mbuf = NULL;
5774 #ifdef INVARIANTS
5775 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5776 							panic("end_added, nothing left and no MSG_EOR");
5777 						}
5778 #endif
5779 					}
5780 				}
5781 			} else {
5782 				/* Do we need to trim the mbuf? */
5783 				if (control->spec_flags & M_NOTIFICATION) {
5784 					out_flags |= MSG_NOTIFICATION;
5785 				}
5786 				if ((in_flags & MSG_PEEK) == 0) {
5787 					SCTP_BUF_RESV_UF(m, cp_len);
5788 					SCTP_BUF_LEN(m) -= cp_len;
5789 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5790 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5791 					}
5792 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5793 					if ((control->do_not_ref_stcb == 0) &&
5794 					    stcb) {
5795 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5796 					}
5797 					copied_so_far += cp_len;
5798 					freed_so_far += cp_len;
5799 					freed_so_far += MSIZE;
5800 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5801 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5802 						    SCTP_LOG_SBRESULT, 0);
5803 					}
5804 					atomic_subtract_int(&control->length, cp_len);
5805 				} else {
5806 					copied_so_far += cp_len;
5807 				}
5808 			}
5809 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5810 				break;
5811 			}
5812 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5813 			    (control->do_not_ref_stcb == 0) &&
5814 			    (freed_so_far >= rwnd_req)) {
5815 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5816 			}
5817 		}		/* end while(m) */
5818 		/*
5819 		 * At this point we have looked at it all and we either have
5820 		 * a MSG_EOR/or read all the user wants... <OR>
5821 		 * control->length == 0.
5822 		 */
5823 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5824 			/* we are done with this control */
5825 			if (control->length == 0) {
5826 				if (control->data) {
5827 #ifdef INVARIANTS
5828 					panic("control->data not null at read eor?");
5829 #else
5830 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5831 					sctp_m_freem(control->data);
5832 					control->data = NULL;
5833 #endif
5834 				}
5835 		done_with_control:
5836 				if (TAILQ_NEXT(control, next) == NULL) {
5837 					/*
5838 					 * If we don't have a next we need a
5839 					 * lock, if there is a next
5840 					 * interrupt is filling ahead of us
5841 					 * and we don't need a lock to
5842 					 * remove this guy (which is the
5843 					 * head of the queue).
5844 					 */
5845 					if (hold_rlock == 0) {
5846 						SCTP_INP_READ_LOCK(inp);
5847 						hold_rlock = 1;
5848 					}
5849 				}
5850 				TAILQ_REMOVE(&inp->read_queue, control, next);
5851 				/* Add back any hiddend data */
5852 				if (control->held_length) {
5853 					held_length = 0;
5854 					control->held_length = 0;
5855 					wakeup_read_socket = 1;
5856 				}
5857 				if (control->aux_data) {
5858 					sctp_m_free(control->aux_data);
5859 					control->aux_data = NULL;
5860 				}
5861 				no_rcv_needed = control->do_not_ref_stcb;
5862 				sctp_free_remote_addr(control->whoFrom);
5863 				control->data = NULL;
5864 				sctp_free_a_readq(stcb, control);
5865 				control = NULL;
5866 				if ((freed_so_far >= rwnd_req) &&
5867 				    (no_rcv_needed == 0))
5868 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5869 
5870 			} else {
5871 				/*
5872 				 * The user did not read all of this
5873 				 * message, turn off the returned MSG_EOR
5874 				 * since we are leaving more behind on the
5875 				 * control to read.
5876 				 */
5877 #ifdef INVARIANTS
5878 				if (control->end_added &&
5879 				    (control->data == NULL) &&
5880 				    (control->tail_mbuf == NULL)) {
5881 					panic("Gak, control->length is corrupt?");
5882 				}
5883 #endif
5884 				no_rcv_needed = control->do_not_ref_stcb;
5885 				out_flags &= ~MSG_EOR;
5886 			}
5887 		}
5888 		if (out_flags & MSG_EOR) {
5889 			goto release;
5890 		}
5891 		if ((uio->uio_resid == 0) ||
5892 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5893 		    ) {
5894 			goto release;
5895 		}
5896 		/*
5897 		 * If I hit here the receiver wants more and this message is
5898 		 * NOT done (pd-api). So two questions. Can we block? if not
5899 		 * we are done. Did the user NOT set MSG_WAITALL?
5900 		 */
5901 		if (block_allowed == 0) {
5902 			goto release;
5903 		}
5904 		/*
5905 		 * We need to wait for more data a few things: - We don't
5906 		 * sbunlock() so we don't get someone else reading. - We
5907 		 * must be sure to account for the case where what is added
5908 		 * is NOT to our control when we wakeup.
5909 		 */
5910 
5911 		/*
5912 		 * Do we need to tell the transport a rwnd update might be
5913 		 * needed before we go to sleep?
5914 		 */
5915 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5916 		    ((freed_so_far >= rwnd_req) &&
5917 		    (control->do_not_ref_stcb == 0) &&
5918 		    (no_rcv_needed == 0))) {
5919 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5920 		}
5921 wait_some_more:
5922 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5923 			goto release;
5924 		}
5925 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5926 			goto release;
5927 
5928 		if (hold_rlock == 1) {
5929 			SCTP_INP_READ_UNLOCK(inp);
5930 			hold_rlock = 0;
5931 		}
5932 		if (hold_sblock == 0) {
5933 			SOCKBUF_LOCK(&so->so_rcv);
5934 			hold_sblock = 1;
5935 		}
5936 		if ((copied_so_far) && (control->length == 0) &&
5937 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5938 			goto release;
5939 		}
5940 		if (so->so_rcv.sb_cc <= control->held_length) {
5941 			error = sbwait(&so->so_rcv);
5942 			if (error) {
5943 				goto release;
5944 			}
5945 			control->held_length = 0;
5946 		}
5947 		if (hold_sblock) {
5948 			SOCKBUF_UNLOCK(&so->so_rcv);
5949 			hold_sblock = 0;
5950 		}
5951 		if (control->length == 0) {
5952 			/* still nothing here */
5953 			if (control->end_added == 1) {
5954 				/* he aborted, or is done i.e.did a shutdown */
5955 				out_flags |= MSG_EOR;
5956 				if (control->pdapi_aborted) {
5957 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5958 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5959 
5960 					out_flags |= MSG_TRUNC;
5961 				} else {
5962 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5963 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5964 				}
5965 				goto done_with_control;
5966 			}
5967 			if (so->so_rcv.sb_cc > held_length) {
5968 				control->held_length = so->so_rcv.sb_cc;
5969 				held_length = 0;
5970 			}
5971 			goto wait_some_more;
5972 		} else if (control->data == NULL) {
5973 			/*
5974 			 * we must re-sync since data is probably being
5975 			 * added
5976 			 */
5977 			SCTP_INP_READ_LOCK(inp);
5978 			if ((control->length > 0) && (control->data == NULL)) {
5979 				/*
5980 				 * big trouble.. we have the lock and its
5981 				 * corrupt?
5982 				 */
5983 #ifdef INVARIANTS
5984 				panic("Impossible data==NULL length !=0");
5985 #endif
5986 				out_flags |= MSG_EOR;
5987 				out_flags |= MSG_TRUNC;
5988 				control->length = 0;
5989 				SCTP_INP_READ_UNLOCK(inp);
5990 				goto done_with_control;
5991 			}
5992 			SCTP_INP_READ_UNLOCK(inp);
5993 			/* We will fall around to get more data */
5994 		}
5995 		goto get_more_data;
5996 	} else {
5997 		/*-
5998 		 * Give caller back the mbuf chain,
5999 		 * store in uio_resid the length
6000 		 */
6001 		wakeup_read_socket = 0;
6002 		if ((control->end_added == 0) ||
6003 		    (TAILQ_NEXT(control, next) == NULL)) {
6004 			/* Need to get rlock */
6005 			if (hold_rlock == 0) {
6006 				SCTP_INP_READ_LOCK(inp);
6007 				hold_rlock = 1;
6008 			}
6009 		}
6010 		if (control->end_added) {
6011 			out_flags |= MSG_EOR;
6012 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6013 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6014 		}
6015 		if (control->spec_flags & M_NOTIFICATION) {
6016 			out_flags |= MSG_NOTIFICATION;
6017 		}
6018 		uio->uio_resid = control->length;
6019 		*mp = control->data;
6020 		m = control->data;
6021 		while (m) {
6022 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6023 				sctp_sblog(&so->so_rcv,
6024 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6025 			}
6026 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6027 			freed_so_far += SCTP_BUF_LEN(m);
6028 			freed_so_far += MSIZE;
6029 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6030 				sctp_sblog(&so->so_rcv,
6031 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6032 			}
6033 			m = SCTP_BUF_NEXT(m);
6034 		}
6035 		control->data = control->tail_mbuf = NULL;
6036 		control->length = 0;
6037 		if (out_flags & MSG_EOR) {
6038 			/* Done with this control */
6039 			goto done_with_control;
6040 		}
6041 	}
6042 release:
6043 	if (hold_rlock == 1) {
6044 		SCTP_INP_READ_UNLOCK(inp);
6045 		hold_rlock = 0;
6046 	}
6047 	if (hold_sblock == 1) {
6048 		SOCKBUF_UNLOCK(&so->so_rcv);
6049 		hold_sblock = 0;
6050 	}
6051 	sbunlock(&so->so_rcv);
6052 	sockbuf_lock = 0;
6053 
6054 release_unlocked:
6055 	if (hold_sblock) {
6056 		SOCKBUF_UNLOCK(&so->so_rcv);
6057 		hold_sblock = 0;
6058 	}
6059 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6060 		if ((freed_so_far >= rwnd_req) &&
6061 		    (control && (control->do_not_ref_stcb == 0)) &&
6062 		    (no_rcv_needed == 0))
6063 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6064 	}
6065 out:
6066 	if (msg_flags) {
6067 		*msg_flags = out_flags;
6068 	}
6069 	if (((out_flags & MSG_EOR) == 0) &&
6070 	    ((in_flags & MSG_PEEK) == 0) &&
6071 	    (sinfo) &&
6072 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6073 		struct sctp_extrcvinfo *s_extra;
6074 
6075 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6076 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6077 	}
6078 	if (hold_rlock == 1) {
6079 		SCTP_INP_READ_UNLOCK(inp);
6080 		hold_rlock = 0;
6081 	}
6082 	if (hold_sblock) {
6083 		SOCKBUF_UNLOCK(&so->so_rcv);
6084 		hold_sblock = 0;
6085 	}
6086 	if (sockbuf_lock) {
6087 		sbunlock(&so->so_rcv);
6088 	}
6089 	if (freecnt_applied) {
6090 		/*
6091 		 * The lock on the socket buffer protects us so the free
6092 		 * code will stop. But since we used the socketbuf lock and
6093 		 * the sender uses the tcb_lock to increment, we need to use
6094 		 * the atomic add to the refcnt.
6095 		 */
6096 		if (stcb == NULL) {
6097 #ifdef INVARIANTS
6098 			panic("stcb for refcnt has gone NULL?");
6099 			goto stage_left;
6100 #else
6101 			goto stage_left;
6102 #endif
6103 		}
6104 		atomic_add_int(&stcb->asoc.refcnt, -1);
6105 		freecnt_applied = 0;
6106 		/* Save the value back for next time */
6107 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6108 	}
6109 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6110 		if (stcb) {
6111 			sctp_misc_ints(SCTP_SORECV_DONE,
6112 			    freed_so_far,
6113 			    ((uio) ? (slen - uio->uio_resid) : slen),
6114 			    stcb->asoc.my_rwnd,
6115 			    so->so_rcv.sb_cc);
6116 		} else {
6117 			sctp_misc_ints(SCTP_SORECV_DONE,
6118 			    freed_so_far,
6119 			    ((uio) ? (slen - uio->uio_resid) : slen),
6120 			    0,
6121 			    so->so_rcv.sb_cc);
6122 		}
6123 	}
6124 stage_left:
6125 	if (wakeup_read_socket) {
6126 		sctp_sorwakeup(inp, so);
6127 	}
6128 	return (error);
6129 }
6130 
6131 
6132 #ifdef SCTP_MBUF_LOGGING
6133 struct mbuf *
6134 sctp_m_free(struct mbuf *m)
6135 {
6136 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6137 		if (SCTP_BUF_IS_EXTENDED(m)) {
6138 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6139 		}
6140 	}
6141 	return (m_free(m));
6142 }
6143 
6144 void
6145 sctp_m_freem(struct mbuf *mb)
6146 {
6147 	while (mb != NULL)
6148 		mb = sctp_m_free(mb);
6149 }
6150 
6151 #endif
6152 
6153 int
6154 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6155 {
6156 	/*
6157 	 * Given a local address. For all associations that holds the
6158 	 * address, request a peer-set-primary.
6159 	 */
6160 	struct sctp_ifa *ifa;
6161 	struct sctp_laddr *wi;
6162 
6163 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6164 	if (ifa == NULL) {
6165 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6166 		return (EADDRNOTAVAIL);
6167 	}
6168 	/*
6169 	 * Now that we have the ifa we must awaken the iterator with this
6170 	 * message.
6171 	 */
6172 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6173 	if (wi == NULL) {
6174 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6175 		return (ENOMEM);
6176 	}
6177 	/* Now incr the count and int wi structure */
6178 	SCTP_INCR_LADDR_COUNT();
6179 	bzero(wi, sizeof(*wi));
6180 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6181 	wi->ifa = ifa;
6182 	wi->action = SCTP_SET_PRIM_ADDR;
6183 	atomic_add_int(&ifa->refcount, 1);
6184 
6185 	/* Now add it to the work queue */
6186 	SCTP_WQ_ADDR_LOCK();
6187 	/*
6188 	 * Should this really be a tailq? As it is we will process the
6189 	 * newest first :-0
6190 	 */
6191 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6192 	SCTP_WQ_ADDR_UNLOCK();
6193 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6194 	    (struct sctp_inpcb *)NULL,
6195 	    (struct sctp_tcb *)NULL,
6196 	    (struct sctp_nets *)NULL);
6197 	return (0);
6198 }
6199 
6200 
6201 int
6202 sctp_soreceive(struct socket *so,
6203     struct sockaddr **psa,
6204     struct uio *uio,
6205     struct mbuf **mp0,
6206     struct mbuf **controlp,
6207     int *flagsp)
6208 {
6209 	int error, fromlen;
6210 	uint8_t sockbuf[256];
6211 	struct sockaddr *from;
6212 	struct sctp_extrcvinfo sinfo;
6213 	int filling_sinfo = 1;
6214 	struct sctp_inpcb *inp;
6215 
6216 	inp = (struct sctp_inpcb *)so->so_pcb;
6217 	/* pickup the assoc we are reading from */
6218 	if (inp == NULL) {
6219 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6220 		return (EINVAL);
6221 	}
6222 	if ((sctp_is_feature_off(inp,
6223 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6224 	    (controlp == NULL)) {
6225 		/* user does not want the sndrcv ctl */
6226 		filling_sinfo = 0;
6227 	}
6228 	if (psa) {
6229 		from = (struct sockaddr *)sockbuf;
6230 		fromlen = sizeof(sockbuf);
6231 		from->sa_len = 0;
6232 	} else {
6233 		from = NULL;
6234 		fromlen = 0;
6235 	}
6236 
6237 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6238 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6239 	if ((controlp) && (filling_sinfo)) {
6240 		/* copy back the sinfo in a CMSG format */
6241 		if (filling_sinfo)
6242 			*controlp = sctp_build_ctl_nchunk(inp,
6243 			    (struct sctp_sndrcvinfo *)&sinfo);
6244 		else
6245 			*controlp = NULL;
6246 	}
6247 	if (psa) {
6248 		/* copy back the address info */
6249 		if (from && from->sa_len) {
6250 			*psa = sodupsockaddr(from, M_NOWAIT);
6251 		} else {
6252 			*psa = NULL;
6253 		}
6254 	}
6255 	return (error);
6256 }
6257 
6258 
6259 int
6260 sctp_l_soreceive(struct socket *so,
6261     struct sockaddr **name,
6262     struct uio *uio,
6263     char **controlp,
6264     int *controllen,
6265     int *flag)
6266 {
6267 	int error, fromlen;
6268 	uint8_t sockbuf[256];
6269 	struct sockaddr *from;
6270 	struct sctp_extrcvinfo sinfo;
6271 	int filling_sinfo = 1;
6272 	struct sctp_inpcb *inp;
6273 
6274 	inp = (struct sctp_inpcb *)so->so_pcb;
6275 	/* pickup the assoc we are reading from */
6276 	if (inp == NULL) {
6277 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6278 		return (EINVAL);
6279 	}
6280 	if ((sctp_is_feature_off(inp,
6281 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6282 	    (controlp == NULL)) {
6283 		/* user does not want the sndrcv ctl */
6284 		filling_sinfo = 0;
6285 	}
6286 	if (name) {
6287 		from = (struct sockaddr *)sockbuf;
6288 		fromlen = sizeof(sockbuf);
6289 		from->sa_len = 0;
6290 	} else {
6291 		from = NULL;
6292 		fromlen = 0;
6293 	}
6294 
6295 	error = sctp_sorecvmsg(so, uio,
6296 	    (struct mbuf **)NULL,
6297 	    from, fromlen, flag,
6298 	    (struct sctp_sndrcvinfo *)&sinfo,
6299 	    filling_sinfo);
6300 	if ((controlp) && (filling_sinfo)) {
6301 		/*
6302 		 * copy back the sinfo in a CMSG format note that the caller
6303 		 * has reponsibility for freeing the memory.
6304 		 */
6305 		if (filling_sinfo)
6306 			*controlp = sctp_build_ctl_cchunk(inp,
6307 			    controllen,
6308 			    (struct sctp_sndrcvinfo *)&sinfo);
6309 	}
6310 	if (name) {
6311 		/* copy back the address info */
6312 		if (from && from->sa_len) {
6313 			*name = sodupsockaddr(from, M_WAIT);
6314 		} else {
6315 			*name = NULL;
6316 		}
6317 	}
6318 	return (error);
6319 }
6320 
6321 
6322 
6323 
6324 
6325 
6326 
/*
 * Add "totaddr" packed sockaddrs starting at "addr" to an existing
 * association as confirmed remote addresses.  Returns the number of
 * addresses successfully added; on failure the association has been
 * freed and *error is set to ENOBUFS.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): an unrecognized sa_family leaves "incr" at
		 * its previous value (0 on the first iteration), so the walk
		 * would not advance past such an entry -- confirm callers
		 * only pass validated AF_INET/AF_INET6 lists (see
		 * sctp_connectx_helper_find()).
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6367 
6368 struct sctp_tcb *
6369 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6370     int *totaddr, int *num_v4, int *num_v6, int *error,
6371     int limit, int *bad_addr)
6372 {
6373 	struct sockaddr *sa;
6374 	struct sctp_tcb *stcb = NULL;
6375 	size_t incr, at, i;
6376 
6377 	at = incr = 0;
6378 	sa = addr;
6379 	*error = *num_v6 = *num_v4 = 0;
6380 	/* account and validate addresses */
6381 	for (i = 0; i < (size_t)*totaddr; i++) {
6382 		if (sa->sa_family == AF_INET) {
6383 			(*num_v4) += 1;
6384 			incr = sizeof(struct sockaddr_in);
6385 			if (sa->sa_len != incr) {
6386 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6387 				*error = EINVAL;
6388 				*bad_addr = 1;
6389 				return (NULL);
6390 			}
6391 		} else if (sa->sa_family == AF_INET6) {
6392 			struct sockaddr_in6 *sin6;
6393 
6394 			sin6 = (struct sockaddr_in6 *)sa;
6395 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6396 				/* Must be non-mapped for connectx */
6397 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6398 				*error = EINVAL;
6399 				*bad_addr = 1;
6400 				return (NULL);
6401 			}
6402 			(*num_v6) += 1;
6403 			incr = sizeof(struct sockaddr_in6);
6404 			if (sa->sa_len != incr) {
6405 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6406 				*error = EINVAL;
6407 				*bad_addr = 1;
6408 				return (NULL);
6409 			}
6410 		} else {
6411 			*totaddr = i;
6412 			/* we are done */
6413 			break;
6414 		}
6415 		SCTP_INP_INCR_REF(inp);
6416 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6417 		if (stcb != NULL) {
6418 			/* Already have or am bring up an association */
6419 			return (stcb);
6420 		} else {
6421 			SCTP_INP_DECR_REF(inp);
6422 		}
6423 		if ((at + incr) > (size_t)limit) {
6424 			*totaddr = i;
6425 			break;
6426 		}
6427 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6428 	}
6429 	return ((struct sctp_tcb *)NULL);
6430 }
6431 
6432 /*
6433  * sctp_bindx(ADD) for one address.
6434  * assumes all arguments are valid/checked by caller.
6435  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx makes no sense on a bound-all endpoint */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* validate the caller-supplied length before any use */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * unpack the v4-mapped address into the on-stack
			 * sin and work with that instead
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* first bind on this endpoint: go through a full bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address free: clear port and add it to this ep */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* some other endpoint owns this address/port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6557 
6558 /*
6559  * sctp_bindx(DELETE) for one address.
6560  * assumes all arguments are valid/checked by caller.
6561  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx makes no sense on a bound-all endpoint */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* validate the caller-supplied length before any use */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * unpack the v4-mapped address into the on-stack
			 * sin and work with that instead
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6640 
6641 /*
6642  * returns the valid local address count for an assoc, taking into account
6643  * all scoping rules
6644  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		/* v6 socket: v6 always legal, v4 only if not v6-only */
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the endpoint's explicit,
		 * unrestricted addresses
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6776 
6777 #if defined(SCTP_LOCAL_TRACE_BUF)
6778 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Lock-free slot reservation: atomically advance the global log
	 * index, retrying until the CAS succeeds.  The published index
	 * wraps from SCTP_MAX_LOGGING_SIZE back to 1; a saved value at the
	 * maximum maps to slot 0 below, so every slot is reused in turn.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/*
	 * Fill in the reserved entry; the CAS above hands each concurrent
	 * caller a distinct slot, so no lock is held while writing.
	 */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6804 
6805 #endif
/* We will need to add support
 * to bind the ports and such here
 * so we can do UDP tunneling. In
 * the meantime, we return an error.
 */
6811 #include <netinet/udp.h>
6812 #include <netinet/udp_var.h>
6813 #include <sys/proc.h>
6814 #ifdef INET6
6815 #include <netinet6/sctp6_var.h>
6816 #endif
6817 
6818 static void
6819 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6820 {
6821 	struct ip *iph;
6822 	struct mbuf *sp, *last;
6823 	struct udphdr *uhdr;
6824 	uint16_t port = 0, len;
6825 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6826 
6827 	/*
6828 	 * Split out the mbuf chain. Leave the IP header in m, place the
6829 	 * rest in the sp.
6830 	 */
6831 	if ((m->m_flags & M_PKTHDR) == 0) {
6832 		/* Can't handle one that is not a pkt hdr */
6833 		goto out;
6834 	}
6835 	/* pull the src port */
6836 	iph = mtod(m, struct ip *);
6837 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6838 
6839 	port = uhdr->uh_sport;
6840 	sp = m_split(m, off, M_DONTWAIT);
6841 	if (sp == NULL) {
6842 		/* Gak, drop packet, we can't do a split */
6843 		goto out;
6844 	}
6845 	if (sp->m_pkthdr.len < header_size) {
6846 		/* Gak, packet can't have an SCTP header in it - to small */
6847 		m_freem(sp);
6848 		goto out;
6849 	}
6850 	/* ok now pull up the UDP header and SCTP header together */
6851 	sp = m_pullup(sp, header_size);
6852 	if (sp == NULL) {
6853 		/* Gak pullup failed */
6854 		goto out;
6855 	}
6856 	/* trim out the UDP header */
6857 	m_adj(sp, sizeof(struct udphdr));
6858 
6859 	/* Now reconstruct the mbuf chain */
6860 	/* 1) find last one */
6861 	last = m;
6862 	while (last->m_next != NULL) {
6863 		last = last->m_next;
6864 	}
6865 	last->m_next = sp;
6866 	m->m_pkthdr.len += sp->m_pkthdr.len;
6867 	last = m;
6868 	while (last != NULL) {
6869 		last = last->m_next;
6870 	}
6871 	/* Now its ready for sctp_input or sctp6_input */
6872 	iph = mtod(m, struct ip *);
6873 	switch (iph->ip_v) {
6874 	case IPVERSION:
6875 		{
6876 			/* its IPv4 */
6877 			len = SCTP_GET_IPV4_LENGTH(iph);
6878 			len -= sizeof(struct udphdr);
6879 			SCTP_GET_IPV4_LENGTH(iph) = len;
6880 			sctp_input_with_port(m, off, port);
6881 			break;
6882 		}
6883 #ifdef INET6
6884 	case IPV6_VERSION >> 4:
6885 		{
6886 			/* its IPv6 - NOT supported */
6887 			goto out;
6888 			break;
6889 
6890 		}
6891 #endif
6892 	default:
6893 		{
6894 			m_freem(m);
6895 			break;
6896 		}
6897 	}
6898 	return;
6899 out:
6900 	m_freem(m);
6901 }
6902 
6903 void
6904 sctp_over_udp_stop(void)
6905 {
6906 	struct socket *sop;
6907 
6908 	/*
6909 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6910 	 * for writting!
6911 	 */
6912 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6913 		/* Nothing to do */
6914 		return;
6915 	}
6916 	sop = SCTP_BASE_INFO(udp_tun_socket);
6917 	soclose(sop);
6918 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6919 }
/*
 * Create and bind the kernel UDP socket used to receive SCTP-over-UDP
 * encapsulated packets, wiring its input to
 * sctp_recv_udp_tunneled_packet().  Returns 0 or an errno (EINVAL if no
 * tunneling port is configured, EALREADY if already running).
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	/* publish first so sctp_over_udp_stop() can clean up on failure */
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6973