xref: /freebsd/sys/netinet/sctp_usrreq.c (revision 5a0bba9007c527b18db7f9b64f06b486cda4fe9d)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart, rrs@lakerest.net and
4  *                          Michael Tuexen, tuexen@fh-muenster.de
5  *                          All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * a) Redistributions of source code must retain the above copyright notice,
11  *   this list of conditions and the following disclaimer.
12  *
13  * b) Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *   the documentation and/or other materials provided with the distribution.
16  *
17  * c) Neither the name of Cisco Systems, Inc. nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $	 */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_pcb.h>
41 #include <netinet/sctp_header.h>
42 #include <netinet/sctp_var.h>
43 #if defined(INET6)
44 #endif
45 #include <netinet/sctp_sysctl.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctputil.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_timer.h>
52 #include <netinet/sctp_auth.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/udp.h>
55 
56 
57 
58 extern struct sctp_cc_functions sctp_cc_functions[];
59 extern struct sctp_ss_functions sctp_ss_functions[];
60 
61 void
62 sctp_init(void)
63 {
64 	u_long sb_max_adj;
65 
66 	/* Initialize and modify the sysctled variables */
67 	sctp_init_sysctls();
68 	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
69 		SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
70 	/*
71 	 * Allow a user to take no more than 1/2 the number of clusters (or
72 	 * SB_MAX, whichever is smaller) for the send window.
73 	 */
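	/*
	 * sb_max_adj scales SB_MAX the same way the socket buffer code
	 * does: of every (MSIZE + MCLBYTES) bytes allocated, only MCLBYTES
	 * carry payload.
	 */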
74 	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
75 	SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
76 	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
77 	/*
78 	 * Now for the recv window: should we take the same amount, or
79 	 * should we use 1/2 of SB_MAX in the min above instead?  For now
80 	 * we just copy the send window value.
81 	 */
82 	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
83 
84 	SCTP_BASE_VAR(first_time) = 0;
85 	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
86 	sctp_pcb_init();
87 #if defined(SCTP_PACKET_LOGGING)
88 	SCTP_BASE_VAR(packet_log_writers) = 0;
89 	SCTP_BASE_VAR(packet_log_end) = 0;
90 	bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
91 #endif
92 
93 
94 }
95 
96 void
97 sctp_finish(void)
98 {
99 	sctp_pcb_finish();
100 }
101 
102 
103 
104 void
105 sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
106     struct sctp_tcb *stcb,
107     struct sctp_nets *net,
108     uint16_t nxtsz)
109 {
110 	struct sctp_tmit_chunk *chk;
111 	uint16_t overhead;
112 
113 	/* Adjust the association's smallest MTU as well */
114 	stcb->asoc.smallest_mtu = nxtsz;
115 	/* Compute the per-packet overhead (IP header plus any required AUTH chunk) */
116 	overhead = IP_HDR_SIZE;
117 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
118 		overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
119 	}
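	/*
	 * Walk the send and sent queues: any chunk that no longer fits in
	 * the new MTU (once overhead is added) may now be IP-fragmented;
	 * chunks already sent are additionally marked for retransmission.
	 */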
120 	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
121 		if ((chk->send_size + overhead) > nxtsz) {
122 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
123 		}
124 	}
125 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
126 		if ((chk->send_size + overhead) > nxtsz) {
127 			/*
128 			 * For this chunk we also mark for immediate resend
129 			 * since we sent too big a chunk
130 			 */
131 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
132 			if (chk->sent < SCTP_DATAGRAM_RESEND) {
133 				sctp_flight_size_decrease(chk);
134 				sctp_total_flight_decrease(stcb, chk);
135 			}
136 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
137 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
138 			}
139 			chk->sent = SCTP_DATAGRAM_RESEND;
140 			chk->rec.data.doing_fast_retransmit = 0;
141 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
142 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
143 				    chk->whoTo->flight_size,
144 				    chk->book_size,
145 				    (uintptr_t) chk->whoTo,
146 				    chk->rec.data.TSN_seq);
147 			}
148 			/* Clear any timing so NO RTT measurement is done on this chunk */
149 			chk->do_rtt = 0;
150 		}
151 	}
152 }
153 
154 static void
155 sctp_notify_mbuf(struct sctp_inpcb *inp,
156     struct sctp_tcb *stcb,
157     struct sctp_nets *net,
158     struct ip *ip,
159     struct sctphdr *sh)
160 {
161 	struct icmp *icmph;
162 	int totsz, tmr_stopped = 0;
163 	uint16_t nxtsz;
164 
165 	/* protection */
166 	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
167 	    (ip == NULL) || (sh == NULL)) {
168 		if (stcb != NULL) {
169 			SCTP_TCB_UNLOCK(stcb);
170 		}
171 		return;
172 	}
173 	/* First job is to verify the vtag matches what I would send */
174 	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
175 		SCTP_TCB_UNLOCK(stcb);
176 		return;
177 	}
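	/* Back up from the embedded (inner) IP header to the enclosing ICMP header. */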
178 	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
179 	    sizeof(struct ip)));
180 	if (icmph->icmp_type != ICMP_UNREACH) {
181 		/* We only care about unreachable */
182 		SCTP_TCB_UNLOCK(stcb);
183 		return;
184 	}
185 	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
186 		/* not an unreachable message due to fragmentation */
187 		SCTP_TCB_UNLOCK(stcb);
188 		return;
189 	}
190 	totsz = ip->ip_len;
191 
192 	nxtsz = ntohs(icmph->icmp_nextmtu);
193 	if (nxtsz == 0) {
194 		/*
195 		 * Old-style router that does not tell us what the next MTU
196 		 * size is.  Rats, we will have to guess (in an educated
197 		 * fashion, of course).
198 		 */
199 		nxtsz = sctp_get_prev_mtu(totsz);
200 	}
201 	/* Stop any PMTU timer */
202 	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
203 		tmr_stopped = 1;
204 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
205 		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
206 	}
207 	/* Adjust destination size limit */
208 	if (net->mtu > nxtsz) {
209 		net->mtu = nxtsz;
210 		if (net->port) {
211 			net->mtu -= sizeof(struct udphdr);
212 		}
213 	}
214 	/* now what about the ep? */
215 	if (stcb->asoc.smallest_mtu > nxtsz) {
216 		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
217 	}
218 	if (tmr_stopped)
219 		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
220 
221 	SCTP_TCB_UNLOCK(stcb);
222 }
223 
224 
225 void
226 sctp_notify(struct sctp_inpcb *inp,
227     struct ip *ip,
228     struct sctphdr *sh,
229     struct sockaddr *to,
230     struct sctp_tcb *stcb,
231     struct sctp_nets *net)
232 {
233 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
234 	struct socket *so;
235 
236 #endif
237 	/* protection */
238 	int reason;
239 	struct icmp *icmph;
240 
241 
242 	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
243 	    (sh == NULL) || (to == NULL)) {
244 		if (stcb)
245 			SCTP_TCB_UNLOCK(stcb);
246 		return;
247 	}
248 	/* First job is to verify the vtag matches what I would send */
249 	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
250 		SCTP_TCB_UNLOCK(stcb);
251 		return;
252 	}
253 	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
254 	    sizeof(struct ip)));
255 	if (icmph->icmp_type != ICMP_UNREACH) {
256 		/* We only care about unreachable */
257 		SCTP_TCB_UNLOCK(stcb);
258 		return;
259 	}
260 	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
261 	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
262 	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
263 	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
264 	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
265 	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
266 	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
267 	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
268 
269 		/*
270 		 * Hmm, reachability problems we must examine closely.  If it
271 		 * is not reachable, we may have lost a network.  Or there may
272 		 * be NO protocol named SCTP at the other end; in that case we
273 		 * consider it an OOTB abort.
274 		 */
275 		if (net->dest_state & SCTP_ADDR_REACHABLE) {
276 			/* Ok that destination is NOT reachable */
277 			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
278 			    net->error_count,
279 			    net->failure_threshold,
280 			    net);
281 
282 			net->dest_state &= ~SCTP_ADDR_REACHABLE;
283 			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
284 			/*
285 			 * JRS 5/14/07 - If a destination is unreachable,
286 			 * the PF bit is turned off.  This allows an
287 			 * unambiguous use of the PF bit for destinations
288 			 * that are reachable but potentially failed. If the
289 			 * destination is set to the unreachable state, also
290 			 * set the destination to the PF state.
291 			 */
292 			/*
293 			 * Add debug message here if destination is not in
294 			 * PF state.
295 			 */
296 			/* Stop any running T3 timers here? */
297 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
298 			    (stcb->asoc.sctp_cmt_pf > 0)) {
299 				net->dest_state &= ~SCTP_ADDR_PF;
300 				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
301 				    net);
302 			}
303 			net->error_count = net->failure_threshold + 1;
304 			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
305 			    stcb, SCTP_FAILED_THRESHOLD,
306 			    (void *)net, SCTP_SO_NOT_LOCKED);
307 		}
308 		SCTP_TCB_UNLOCK(stcb);
309 	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
310 	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
311 		/*
312 		 * Here the peer is either playing tricks on us (including
313 		 * using an address that belongs to someone who does not
314 		 * support SCTP), OR it was a userland implementation that
315 		 * shut down and is now dead.  In either case treat it like
316 		 * an OOTB abort with no TCB.
317 		 */
318 		reason = SCTP_PEER_FAULTY;
319 		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
320 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
321 		so = SCTP_INP_SO(inp);
322 		atomic_add_int(&stcb->asoc.refcnt, 1);
323 		SCTP_TCB_UNLOCK(stcb);
324 		SCTP_SOCKET_LOCK(so, 1);
325 		SCTP_TCB_LOCK(stcb);
326 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
327 #endif
328 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
329 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
330 		SCTP_SOCKET_UNLOCK(so, 1);
331 		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
332 #endif
333 		/* no need to unlock here, since the TCB is gone */
334 	} else {
335 		SCTP_TCB_UNLOCK(stcb);
336 	}
337 }
338 
339 void
340 sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
341 {
345 	struct ip *ip = vip;
346 	struct sctphdr *sh;
347 	uint32_t vrf_id;
348 
349 	/* FIX, for non-bsd is this right? */
350 	vrf_id = SCTP_DEFAULT_VRFID;
351 	if (sa->sa_family != AF_INET ||
352 	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
353 		return;
354 	}
355 	if (PRC_IS_REDIRECT(cmd)) {
356 		ip = 0;
357 	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
358 		return;
359 	}
360 	if (ip) {
361 		struct sctp_inpcb *inp = NULL;
362 		struct sctp_tcb *stcb = NULL;
363 		struct sctp_nets *net = NULL;
364 		struct sockaddr_in to, from;
365 
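		/* The SCTP common header immediately follows the returned IP header. */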
366 		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
367 		bzero(&to, sizeof(to));
368 		bzero(&from, sizeof(from));
369 		from.sin_family = to.sin_family = AF_INET;
370 		from.sin_len = to.sin_len = sizeof(to);
371 		from.sin_port = sh->src_port;
372 		from.sin_addr = ip->ip_src;
373 		to.sin_port = sh->dest_port;
374 		to.sin_addr = ip->ip_dst;
375 
376 		/*
377 		 * 'to' holds the dest of the packet that failed to be sent.
378 		 * 'from' holds our local endpoint address. Thus we reverse
379 		 * the to and the from in the lookup.
380 		 */
381 		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
382 		    (struct sockaddr *)&to,
383 		    &inp, &net, 1, vrf_id);
384 		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
385 			if (cmd != PRC_MSGSIZE) {
386 				sctp_notify(inp, ip, sh,
387 				    (struct sockaddr *)&to, stcb,
388 				    net);
389 			} else {
390 				/* handle possible ICMP size messages */
391 				sctp_notify_mbuf(inp, stcb, net, ip, sh);
392 			}
393 		} else {
394 			if ((stcb == NULL) && (inp != NULL)) {
395 				/* reduce ref-count */
396 				SCTP_INP_WLOCK(inp);
397 				SCTP_INP_DECR_REF(inp);
398 				SCTP_INP_WUNLOCK(inp);
399 			}
400 			if (stcb) {
401 				SCTP_TCB_UNLOCK(stcb);
402 			}
403 		}
404 	}
405 	return;
406 }
407 
408 static int
409 sctp_getcred(SYSCTL_HANDLER_ARGS)
410 {
411 	struct xucred xuc;
412 	struct sockaddr_in addrs[2];
413 	struct sctp_inpcb *inp;
414 	struct sctp_nets *net;
415 	struct sctp_tcb *stcb;
416 	int error;
417 	uint32_t vrf_id;
418 
419 	/* FIX, for non-bsd is this right? */
420 	vrf_id = SCTP_DEFAULT_VRFID;
421 
422 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
423 
424 	if (error)
425 		return (error);
426 
427 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
428 	if (error)
429 		return (error);
430 
431 	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
432 	    sintosa(&addrs[1]),
433 	    &inp, &net, 1, vrf_id);
434 	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
435 		if ((inp != NULL) && (stcb == NULL)) {
436 			/* reduce ref-count */
437 			SCTP_INP_WLOCK(inp);
438 			SCTP_INP_DECR_REF(inp);
439 			goto cred_can_cont;
440 		}
441 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
442 		error = ENOENT;
443 		goto out;
444 	}
445 	SCTP_TCB_UNLOCK(stcb);
446 	/*
447 	 * We use the write lock here only because we need it in the error
448 	 * leg.  If we used RLOCK, then we would have to
449 	 * wlock/decr/unlock/rlock, which in theory could create a hole.
450 	 * Better to use the stronger wlock.
451 	 */
452 	SCTP_INP_WLOCK(inp);
453 cred_can_cont:
454 	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
455 	if (error) {
456 		SCTP_INP_WUNLOCK(inp);
457 		goto out;
458 	}
459 	cru2x(inp->sctp_socket->so_cred, &xuc);
460 	SCTP_INP_WUNLOCK(inp);
461 	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
462 out:
463 	return (error);
464 }
465 
466 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
467     0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
468 
469 
470 static void
471 sctp_abort(struct socket *so)
472 {
473 	struct sctp_inpcb *inp;
474 	uint32_t flags;
475 
476 	inp = (struct sctp_inpcb *)so->so_pcb;
477 	if (inp == 0) {
478 		return;
479 	}
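	/*
	 * Atomically mark the endpoint as SOCKET_GONE.  If another thread
	 * changed sctp_flags under us, re-read them and retry unless the
	 * socket has already been marked gone.
	 */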
480 sctp_must_try_again:
481 	flags = inp->sctp_flags;
482 #ifdef SCTP_LOG_CLOSING
483 	sctp_log_closing(inp, NULL, 17);
484 #endif
485 	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
486 	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
487 #ifdef SCTP_LOG_CLOSING
488 		sctp_log_closing(inp, NULL, 16);
489 #endif
490 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
491 		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
492 		SOCK_LOCK(so);
493 		SCTP_SB_CLEAR(so->so_snd);
494 		/*
495 		 * same for the rcv ones, they are only here for the
496 		 * accounting/select.
497 		 */
498 		SCTP_SB_CLEAR(so->so_rcv);
499 
500 		/* Now null out the reference, we are completely detached. */
501 		so->so_pcb = NULL;
502 		SOCK_UNLOCK(so);
503 	} else {
504 		flags = inp->sctp_flags;
505 		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
506 			goto sctp_must_try_again;
507 		}
508 	}
509 	return;
510 }
511 
512 static int
513 sctp_attach(struct socket *so, int proto, struct thread *p)
514 {
515 	struct sctp_inpcb *inp;
516 	struct inpcb *ip_inp;
517 	int error;
518 	uint32_t vrf_id = SCTP_DEFAULT_VRFID;
519 
520 #ifdef IPSEC
521 	uint32_t flags;
522 
523 #endif
524 
525 	inp = (struct sctp_inpcb *)so->so_pcb;
526 	if (inp != 0) {
527 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
528 		return EINVAL;
529 	}
530 	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
531 		error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
532 		if (error) {
533 			return error;
534 		}
535 	}
536 	error = sctp_inpcb_alloc(so, vrf_id);
537 	if (error) {
538 		return error;
539 	}
540 	inp = (struct sctp_inpcb *)so->so_pcb;
541 	SCTP_INP_WLOCK(inp);
542 	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
543 	ip_inp = &inp->ip_inp.inp;
544 	ip_inp->inp_vflag |= INP_IPV4;
545 	ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
546 #ifdef IPSEC
547 	error = ipsec_init_policy(so, &ip_inp->inp_sp);
548 #ifdef SCTP_LOG_CLOSING
549 	sctp_log_closing(inp, NULL, 17);
550 #endif
551 	if (error != 0) {
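		/*
		 * Policy allocation failed; tear down the inpcb we just
		 * allocated.  The cmpset loop below mirrors the close path
		 * so we do not race with a concurrent close marking the
		 * socket gone.
		 */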
552 try_again:
553 		flags = inp->sctp_flags;
554 		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
555 		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
556 #ifdef SCTP_LOG_CLOSING
557 			sctp_log_closing(inp, NULL, 15);
558 #endif
559 			SCTP_INP_WUNLOCK(inp);
560 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
561 			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
562 		} else {
563 			flags = inp->sctp_flags;
564 			if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
565 				goto try_again;
566 			} else {
567 				SCTP_INP_WUNLOCK(inp);
568 			}
569 		}
570 		return error;
571 	}
572 #endif				/* IPSEC */
573 	SCTP_INP_WUNLOCK(inp);
574 	return 0;
575 }
576 
577 static int
578 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
579 {
580 	struct sctp_inpcb *inp = NULL;
581 	int error;
582 
583 #ifdef INET6
584 	if (addr && addr->sa_family != AF_INET) {
585 		/* must be a v4 address! */
586 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
587 		return EINVAL;
588 	}
589 #endif				/* INET6 */
590 	if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
591 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
592 		return EINVAL;
593 	}
594 	inp = (struct sctp_inpcb *)so->so_pcb;
595 	if (inp == 0) {
596 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
597 		return EINVAL;
598 	}
599 	error = sctp_inpcb_bind(so, addr, NULL, p);
600 	return error;
601 }
602 
603 void
604 sctp_close(struct socket *so)
605 {
606 	struct sctp_inpcb *inp;
607 	uint32_t flags;
608 
609 	inp = (struct sctp_inpcb *)so->so_pcb;
610 	if (inp == 0)
611 		return;
612 
613 	/*
614 	 * Inform all the lower layer associations that we are done.
615 	 */
616 sctp_must_try_again:
617 	flags = inp->sctp_flags;
618 #ifdef SCTP_LOG_CLOSING
619 	sctp_log_closing(inp, NULL, 17);
620 #endif
621 	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
622 	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
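		/*
		 * SO_LINGER with a zero linger time, or unread data left in
		 * the receive buffer, forces an ABORT of the association;
		 * otherwise we do a graceful close.
		 */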
623 		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
624 		    (so->so_rcv.sb_cc > 0)) {
625 #ifdef SCTP_LOG_CLOSING
626 			sctp_log_closing(inp, NULL, 13);
627 #endif
628 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
629 			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
630 		} else {
631 #ifdef SCTP_LOG_CLOSING
632 			sctp_log_closing(inp, NULL, 14);
633 #endif
634 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
635 			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
636 		}
637 		/*
638 		 * The socket is now detached, no matter what the state of
639 		 * the SCTP association.
640 		 */
641 		SOCK_LOCK(so);
642 		SCTP_SB_CLEAR(so->so_snd);
643 		/*
644 		 * same for the rcv ones, they are only here for the
645 		 * accounting/select.
646 		 */
647 		SCTP_SB_CLEAR(so->so_rcv);
648 
649 		/* Now null out the reference, we are completely detached. */
650 		so->so_pcb = NULL;
651 		SOCK_UNLOCK(so);
652 	} else {
653 		flags = inp->sctp_flags;
654 		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
655 			goto sctp_must_try_again;
656 		}
657 	}
658 	return;
659 }
660 
661 
662 int
663 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
664     struct mbuf *control, struct thread *p);
665 
666 
667 int
668 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
669     struct mbuf *control, struct thread *p)
670 {
671 	struct sctp_inpcb *inp;
672 	int error;
673 
674 	inp = (struct sctp_inpcb *)so->so_pcb;
675 	if (inp == 0) {
676 		if (control) {
677 			sctp_m_freem(control);
678 			control = NULL;
679 		}
680 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
681 		sctp_m_freem(m);
682 		return EINVAL;
683 	}
684 	/* Got to have a destination (to) address if we are NOT a connected socket */
685 	if ((addr == NULL) &&
686 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
687 	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
688 	    ) {
689 		goto connected_type;
690 	} else if (addr == NULL) {
691 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
692 		error = EDESTADDRREQ;
693 		sctp_m_freem(m);
694 		if (control) {
695 			sctp_m_freem(control);
696 			control = NULL;
697 		}
698 		return (error);
699 	}
700 #ifdef INET6
701 	if (addr->sa_family != AF_INET) {
702 		/* must be a v4 address! */
703 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
704 		sctp_m_freem(m);
705 		if (control) {
706 			sctp_m_freem(control);
707 			control = NULL;
708 		}
709 		error = EDESTADDRREQ;
710 		return EDESTADDRREQ;
711 	}
712 #endif				/* INET6 */
713 connected_type:
714 	/* now what about control */
715 	if (control) {
716 		if (inp->control) {
717 			SCTP_PRINTF("huh? control set?\n");
718 			sctp_m_freem(inp->control);
719 			inp->control = NULL;
720 		}
721 		inp->control = control;
722 	}
723 	/* Place the data */
724 	if (inp->pkt) {
725 		SCTP_BUF_NEXT(inp->pkt_last) = m;
726 		inp->pkt_last = m;
727 	} else {
728 		inp->pkt_last = inp->pkt = m;
729 	}
730 	/* FreeBSD uses a flag passed */
731 	if ((flags & PRUS_MORETOCOME) == 0) {
734 		/*
735 		 * Note that with the current version this code will only be
736 		 * used by OpenBSD; NetBSD, FreeBSD, and MacOS have methods
737 		 * for re-defining sosend to use sctp_sosend.  One can
738 		 * optionally switch back to this code (by changing back the
739 		 * definitions), but this is not advisable.  This code is,
740 		 * however, used by FreeBSD when sending a file with sendfile().
741 		 */
742 		int ret;
743 
744 		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
745 		inp->pkt = NULL;
746 		inp->control = NULL;
747 		return (ret);
748 	} else {
749 		return (0);
750 	}
751 }
752 
753 int
754 sctp_disconnect(struct socket *so)
755 {
756 	struct sctp_inpcb *inp;
757 
758 	inp = (struct sctp_inpcb *)so->so_pcb;
759 	if (inp == NULL) {
760 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
761 		return (ENOTCONN);
762 	}
763 	SCTP_INP_RLOCK(inp);
764 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
765 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
766 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
767 			/* No connection */
768 			SCTP_INP_RUNLOCK(inp);
769 			return (0);
770 		} else {
771 			struct sctp_association *asoc;
772 			struct sctp_tcb *stcb;
773 
774 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
775 			if (stcb == NULL) {
776 				SCTP_INP_RUNLOCK(inp);
777 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
778 				return (EINVAL);
779 			}
780 			SCTP_TCB_LOCK(stcb);
781 			asoc = &stcb->asoc;
782 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
783 				/* We are about to be freed, out of here */
784 				SCTP_TCB_UNLOCK(stcb);
785 				SCTP_INP_RUNLOCK(inp);
786 				return (0);
787 			}
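			/*
			 * SO_LINGER with a zero linger time, or unread data
			 * in the receive buffer, means the association is
			 * aborted rather than shut down gracefully.
			 */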
788 			if (((so->so_options & SO_LINGER) &&
789 			    (so->so_linger == 0)) ||
790 			    (so->so_rcv.sb_cc > 0)) {
791 				if (SCTP_GET_STATE(asoc) !=
792 				    SCTP_STATE_COOKIE_WAIT) {
793 					/* Left with Data unread */
794 					struct mbuf *err;
795 
796 					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
797 					if (err) {
798 						/*
799 						 * Fill in the user
800 						 * initiated abort
801 						 */
802 						struct sctp_paramhdr *ph;
803 
804 						ph = mtod(err, struct sctp_paramhdr *);
805 						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
806 						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
807 						ph->param_length = htons(SCTP_BUF_LEN(err));
808 					}
809 #if defined(SCTP_PANIC_ON_ABORT)
810 					panic("disconnect does an abort");
811 #endif
812 					sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
813 					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
814 				}
815 				SCTP_INP_RUNLOCK(inp);
816 				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
817 				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
818 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
819 				}
820 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
821 				/* No TCB unlock needed, the assoc is gone */
822 				return (0);
823 			}
824 			if (TAILQ_EMPTY(&asoc->send_queue) &&
825 			    TAILQ_EMPTY(&asoc->sent_queue) &&
826 			    (asoc->stream_queue_cnt == 0)) {
827 				/* there is nothing queued to send, so done */
828 				if (asoc->locked_on_sending) {
829 					goto abort_anyway;
830 				}
831 				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
832 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
833 					/* only send SHUTDOWN 1st time thru */
834 					sctp_stop_timers_for_shutdown(stcb);
835 					sctp_send_shutdown(stcb,
836 					    stcb->asoc.primary_destination);
837 					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
838 					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
839 					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
840 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
841 					}
842 					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
843 					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
844 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
845 					    stcb->sctp_ep, stcb,
846 					    asoc->primary_destination);
847 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
848 					    stcb->sctp_ep, stcb,
849 					    asoc->primary_destination);
850 				}
851 			} else {
852 				/*
853 				 * we still have (or just got) data to send,
854 				 * so set SHUTDOWN_PENDING
855 				 */
856 				/*
857 				 * XXX sockets draft says that SCTP_EOF
858 				 * should be sent with no data. currently,
859 				 * we will allow user data to be sent first
860 				 * and move to SHUTDOWN-PENDING
861 				 */
862 				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
863 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
864 				    asoc->primary_destination);
865 				if (asoc->locked_on_sending) {
866 					/* Locked to send out the data */
867 					struct sctp_stream_queue_pending *sp;
868 
869 					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
870 					if (sp == NULL) {
871 						SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
872 						    asoc->locked_on_sending->stream_no);
873 					} else {
874 						if ((sp->length == 0) && (sp->msg_is_complete == 0))
875 							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
876 					}
877 				}
878 				if (TAILQ_EMPTY(&asoc->send_queue) &&
879 				    TAILQ_EMPTY(&asoc->sent_queue) &&
880 				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
881 					struct mbuf *op_err;
882 
883 			abort_anyway:
884 					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
885 					    0, M_DONTWAIT, 1, MT_DATA);
886 					if (op_err) {
887 						/*
888 						 * Fill in the user
889 						 * initiated abort
890 						 */
891 						struct sctp_paramhdr *ph;
892 						uint32_t *ippp;
893 
894 						SCTP_BUF_LEN(op_err) =
895 						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
896 						ph = mtod(op_err,
897 						    struct sctp_paramhdr *);
898 						ph->param_type = htons(
899 						    SCTP_CAUSE_USER_INITIATED_ABT);
900 						ph->param_length = htons(SCTP_BUF_LEN(op_err));
901 						ippp = (uint32_t *) (ph + 1);
902 						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
903 					}
904 #if defined(SCTP_PANIC_ON_ABORT)
905 					panic("disconnect does an abort");
906 #endif
907 
908 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
909 					sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
910 					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
911 					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
912 					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
913 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
914 					}
915 					SCTP_INP_RUNLOCK(inp);
916 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
917 					return (0);
918 				} else {
919 					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
920 				}
921 			}
922 			soisdisconnecting(so);
923 			SCTP_TCB_UNLOCK(stcb);
924 			SCTP_INP_RUNLOCK(inp);
925 			return (0);
926 		}
927 		/* not reached */
928 	} else {
929 		/* UDP model does not support this */
930 		SCTP_INP_RUNLOCK(inp);
931 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
932 		return EOPNOTSUPP;
933 	}
934 }
935 
936 int
937 sctp_flush(struct socket *so, int how)
938 {
939 	/*
940 	 * We will just clear out the values and let a subsequent close
941 	 * clear out the data, if any.  Note that if the user did a
942 	 * shutdown(SHUT_RD) they will not be able to read the data; the
943 	 * socket will block that from happening.
944 	 */
945 	struct sctp_inpcb *inp;
946 
947 	inp = (struct sctp_inpcb *)so->so_pcb;
948 	if (inp == NULL) {
949 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
950 		return EINVAL;
951 	}
952 	SCTP_INP_RLOCK(inp);
953 	/* For the 1 to many model this does nothing */
954 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
955 		SCTP_INP_RUNLOCK(inp);
956 		return (0);
957 	}
958 	SCTP_INP_RUNLOCK(inp);
959 	if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
960 		/*
961 		 * First make sure the sb will be happy, we don't use these
962 		 * except maybe the count
963 		 */
964 		SCTP_INP_WLOCK(inp);
965 		SCTP_INP_READ_LOCK(inp);
966 		inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
967 		SCTP_INP_READ_UNLOCK(inp);
968 		SCTP_INP_WUNLOCK(inp);
969 		so->so_rcv.sb_cc = 0;
970 		so->so_rcv.sb_mbcnt = 0;
971 		so->so_rcv.sb_mb = NULL;
972 	}
973 	if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
974 		/*
975 		 * First make sure the sb will be happy, we don't use these
976 		 * except maybe the count
977 		 */
978 		so->so_snd.sb_cc = 0;
979 		so->so_snd.sb_mbcnt = 0;
980 		so->so_snd.sb_mb = NULL;
981 
982 	}
983 	return (0);
984 }
985 
986 int
987 sctp_shutdown(struct socket *so)
988 {
989 	struct sctp_inpcb *inp;
990 
991 	inp = (struct sctp_inpcb *)so->so_pcb;
992 	if (inp == 0) {
993 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
994 		return EINVAL;
995 	}
996 	SCTP_INP_RLOCK(inp);
997 	/* For the UDP model this is an invalid call */
998 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
999 		/* Restore the flags that the soshutdown took away. */
1000 		SOCKBUF_LOCK(&so->so_rcv);
1001 		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
1002 		SOCKBUF_UNLOCK(&so->so_rcv);
1003 		/* This proc will wake up for read and do nothing (I hope) */
1004 		SCTP_INP_RUNLOCK(inp);
1005 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1006 		return (EOPNOTSUPP);
1007 	}
1008 	/*
1009 	 * OK, if we reach here it's the TCP model and it is either a SHUT_WR
1010 	 * or a SHUT_RDWR.  This means we put the shutdown flag against it.
1011 	 */
1012 	{
1013 		struct sctp_tcb *stcb;
1014 		struct sctp_association *asoc;
1015 
1016 		if ((so->so_state &
1017 		    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
1018 			SCTP_INP_RUNLOCK(inp);
1019 			return (ENOTCONN);
1020 		}
1021 		socantsendmore(so);
1022 
1023 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1024 		if (stcb == NULL) {
1025 			/*
1026 			 * Ok we hit the case that the shutdown call was
1027 			 * made after an abort or something. Nothing to do
1028 			 * now.
1029 			 */
1030 			SCTP_INP_RUNLOCK(inp);
1031 			return (0);
1032 		}
1033 		SCTP_TCB_LOCK(stcb);
1034 		asoc = &stcb->asoc;
1035 		if (TAILQ_EMPTY(&asoc->send_queue) &&
1036 		    TAILQ_EMPTY(&asoc->sent_queue) &&
1037 		    (asoc->stream_queue_cnt == 0)) {
1038 			if (asoc->locked_on_sending) {
1039 				goto abort_anyway;
1040 			}
1041 			/* there is nothing queued to send, so I'm done... */
1042 			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1043 				/* only send SHUTDOWN the first time through */
1044 				sctp_stop_timers_for_shutdown(stcb);
1045 				sctp_send_shutdown(stcb,
1046 				    stcb->asoc.primary_destination);
1047 				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
1048 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1049 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1050 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1051 				}
1052 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
1053 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1054 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1055 				    stcb->sctp_ep, stcb,
1056 				    asoc->primary_destination);
1057 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1058 				    stcb->sctp_ep, stcb,
1059 				    asoc->primary_destination);
1060 			}
1061 		} else {
1062 			/*
1063 			 * we still have (or just got) data to send, so set
1064 			 * SHUTDOWN_PENDING
1065 			 */
1066 			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1067 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
1068 			    asoc->primary_destination);
1069 
1070 			if (asoc->locked_on_sending) {
1071 				/* Locked to send out the data */
1072 				struct sctp_stream_queue_pending *sp;
1073 
1074 				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
1075 				if (sp == NULL) {
1076 					SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
1077 					    asoc->locked_on_sending->stream_no);
1078 				} else {
1079 					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
1080 						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
1081 					}
1082 				}
1083 			}
1084 			if (TAILQ_EMPTY(&asoc->send_queue) &&
1085 			    TAILQ_EMPTY(&asoc->sent_queue) &&
1086 			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
1087 				struct mbuf *op_err;
1088 
1089 		abort_anyway:
1090 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1091 				    0, M_DONTWAIT, 1, MT_DATA);
1092 				if (op_err) {
1093 					/* Fill in the user initiated abort */
1094 					struct sctp_paramhdr *ph;
1095 					uint32_t *ippp;
1096 
1097 					SCTP_BUF_LEN(op_err) =
1098 					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
1099 					ph = mtod(op_err,
1100 					    struct sctp_paramhdr *);
1101 					ph->param_type = htons(
1102 					    SCTP_CAUSE_USER_INITIATED_ABT);
1103 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
1104 					ippp = (uint32_t *) (ph + 1);
1105 					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1106 				}
1107 #if defined(SCTP_PANIC_ON_ABORT)
1108 				panic("shutdown does an abort");
1109 #endif
1110 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
1111 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1112 				    SCTP_RESPONSE_TO_USER_REQ,
1113 				    op_err, SCTP_SO_LOCKED);
1114 				goto skip_unlock;
1115 			} else {
1116 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
1117 			}
1118 		}
1119 		SCTP_TCB_UNLOCK(stcb);
1120 	}
1121 skip_unlock:
1122 	SCTP_INP_RUNLOCK(inp);
1123 	return 0;
1124 }
1125 
1126 /*
1127  * Copies a "user"-presentable address and removes embedded scope, etc.
1128  * Returns 0 on success, 1 on error.
1129  */
1130 static uint32_t
1131 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1132 {
1133 #ifdef INET6
1134 	struct sockaddr_in6 lsa6;
1135 
1136 	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1137 	    &lsa6);
1138 #endif
1139 	memcpy(ss, sa, sa->sa_len);
1140 	return (0);
1141 }
1142 
1143 
1144 
1145 /*
1146  * NOTE: assumes addr lock is held
1147  */
1148 static size_t
1149 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1150     struct sctp_tcb *stcb,
1151     size_t limit,
1152     struct sockaddr_storage *sas,
1153     uint32_t vrf_id)
1154 {
1155 	struct sctp_ifn *sctp_ifn;
1156 	struct sctp_ifa *sctp_ifa;
1157 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1158 	size_t actual;
1159 	int ipv4_addr_legal, ipv6_addr_legal;
1160 	struct sctp_vrf *vrf;
1161 
1162 	actual = 0;
1163 	if (limit <= 0)
1164 		return (actual);
1165 
1166 	if (stcb) {
1167 		/* Turn on all the appropriate scope */
1168 		loopback_scope = stcb->asoc.loopback_scope;
1169 		ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1170 		local_scope = stcb->asoc.local_scope;
1171 		site_scope = stcb->asoc.site_scope;
1172 	} else {
1173 		/* Turn on ALL scope, since we look at the EP */
1174 		loopback_scope = ipv4_local_scope = local_scope =
1175 		    site_scope = 1;
1176 	}
1177 	ipv4_addr_legal = ipv6_addr_legal = 0;
1178 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1179 		ipv6_addr_legal = 1;
1180 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1181 			ipv4_addr_legal = 1;
1182 		}
1183 	} else {
1184 		ipv4_addr_legal = 1;
1185 	}
1186 	vrf = sctp_find_vrf(vrf_id);
1187 	if (vrf == NULL) {
1188 		return (0);
1189 	}
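	/*
	 * For a bound-all endpoint, walk every address of every ifn in the
	 * VRF and filter by scope; for a subset-bound endpoint, walk the
	 * endpoint's own address list instead.
	 */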
1190 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1191 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1192 			if ((loopback_scope == 0) &&
1193 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1194 				/* Skip loopback if loopback_scope not set */
1195 				continue;
1196 			}
1197 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1198 				if (stcb) {
1199 					/*
1200 					 * For the BOUND-ALL case, the list
1201 					 * associated with a TCB is always
1202 					 * considered a reverse list, i.e.
1203 					 * it lists addresses that are NOT
1204 					 * part of the association.  If this
1205 					 * is one of those we must skip it.
1206 					 */
1207 					if (sctp_is_addr_restricted(stcb,
1208 					    sctp_ifa)) {
1209 						continue;
1210 					}
1211 				}
1212 				switch (sctp_ifa->address.sa.sa_family) {
1213 				case AF_INET:
1214 					if (ipv4_addr_legal) {
1215 						struct sockaddr_in *sin;
1216 
1217 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1218 						if (sin->sin_addr.s_addr == 0) {
1219 							/* we skip unspecified addresses */
1224 							continue;
1225 						}
1226 						if ((ipv4_local_scope == 0) &&
1227 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1228 							continue;
1229 						}
1230 #ifdef INET6
1231 						if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
1232 							in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1233 							((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1234 							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1235 							actual += sizeof(struct sockaddr_in6);
1236 						} else {
1237 #endif
1238 							memcpy(sas, sin, sizeof(*sin));
1239 							((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1240 							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1241 							actual += sizeof(*sin);
1242 #ifdef INET6
1243 						}
1244 #endif
1245 						if (actual >= limit) {
1246 							return (actual);
1247 						}
1248 					} else {
1249 						continue;
1250 					}
1251 					break;
1252 #ifdef INET6
1253 				case AF_INET6:
1254 					if (ipv6_addr_legal) {
1255 						struct sockaddr_in6 *sin6;
1256 
1257 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1258 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1259 							/* we skip unspecified addresses */
1264 							continue;
1265 						}
1266 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1267 							if (local_scope == 0)
1268 								continue;
1269 							if (sin6->sin6_scope_id == 0) {
1270 								if (sa6_recoverscope(sin6) != 0)
1271 									/* bad link-local address */
1285 									continue;
1286 							}
1287 						}
1288 						if ((site_scope == 0) &&
1289 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1290 							continue;
1291 						}
1292 						memcpy(sas, sin6, sizeof(*sin6));
1293 						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1294 						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1295 						actual += sizeof(*sin6);
1296 						if (actual >= limit) {
1297 							return (actual);
1298 						}
1299 					} else {
1300 						continue;
1301 					}
1302 					break;
1303 #endif
1304 				default:
1305 					/* TSNH */
1306 					break;
1307 				}
1308 			}
1309 		}
1310 	} else {
1311 		struct sctp_laddr *laddr;
1312 
1313 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1314 			if (stcb) {
1315 				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1316 					continue;
1317 				}
1318 			}
1319 			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1320 				continue;
1321 
1322 			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1323 			sas = (struct sockaddr_storage *)((caddr_t)sas +
1324 			    laddr->ifa->address.sa.sa_len);
1325 			actual += laddr->ifa->address.sa.sa_len;
1326 			if (actual >= limit) {
1327 				return (actual);
1328 			}
1329 		}
1330 	}
1331 	return (actual);
1332 }
1333 
1334 static size_t
1335 sctp_fill_up_addresses(struct sctp_inpcb *inp,
1336     struct sctp_tcb *stcb,
1337     size_t limit,
1338     struct sockaddr_storage *sas)
1339 {
1340 	size_t size = 0;
1341 
1342 	SCTP_IPI_ADDR_RLOCK();
1343 	/* fill up addresses for the endpoint's default vrf */
1344 	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1345 	    inp->def_vrf_id);
1346 	SCTP_IPI_ADDR_RUNLOCK();
1347 	return (size);
1348 }
1349 
1350 /*
1351  * NOTE: assumes addr lock is held
1352  */
1353 static int
1354 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1355 {
1356 	int cnt = 0;
1357 	struct sctp_vrf *vrf = NULL;
1358 
1359 	/*
1360 	 * In both the sub-set bound and bound_all cases we return the MAXIMUM
1361 	 * number of addresses that you COULD get. In reality the sub-set
1362 	 * bound may have an exclusion list for a given TCB OR in the
1363 	 * bound-all case a TCB may NOT include the loopback or other
1364 	 * addresses as well.
1365 	 */
1366 	vrf = sctp_find_vrf(vrf_id);
1367 	if (vrf == NULL) {
1368 		return (0);
1369 	}
1370 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1371 		struct sctp_ifn *sctp_ifn;
1372 		struct sctp_ifa *sctp_ifa;
1373 
1374 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1375 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1376 				/* Count them if they are the right type */
1377 				if (sctp_ifa->address.sa.sa_family == AF_INET) {
1378 					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1379 						cnt += sizeof(struct sockaddr_in6);
1380 					else
1381 						cnt += sizeof(struct sockaddr_in);
1382 
1383 				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1384 					cnt += sizeof(struct sockaddr_in6);
1385 			}
1386 		}
1387 	} else {
1388 		struct sctp_laddr *laddr;
1389 
1390 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1391 			if (laddr->ifa->address.sa.sa_family == AF_INET) {
1392 				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1393 					cnt += sizeof(struct sockaddr_in6);
1394 				else
1395 					cnt += sizeof(struct sockaddr_in);
1396 
1397 			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1398 				cnt += sizeof(struct sockaddr_in6);
1399 		}
1400 	}
1401 	return (cnt);
1402 }
1403 
1404 static int
1405 sctp_count_max_addresses(struct sctp_inpcb *inp)
1406 {
1407 	int cnt = 0;
1408 
1409 	SCTP_IPI_ADDR_RLOCK();
1410 	/* count addresses for the endpoint's default VRF */
1411 	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1412 	SCTP_IPI_ADDR_RUNLOCK();
1413 	return (cnt);
1414 }
1415 
1416 static int
1417 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1418     size_t optsize, void *p, int delay)
1419 {
1420 	int error = 0;
1421 	int creat_lock_on = 0;
1422 	struct sctp_tcb *stcb = NULL;
1423 	struct sockaddr *sa;
1424 	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1425 	int added = 0;
1426 	uint32_t vrf_id;
1427 	int bad_addresses = 0;
1428 	sctp_assoc_t *a_id;
1429 
1430 	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1431 
1432 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1433 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1434 		/* We are already connected AND the TCP model */
1435 		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
1436 		return (EADDRINUSE);
1437 	}
1438 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
1439 	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
1440 		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1441 		return (EINVAL);
1442 	}
1443 	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1444 		SCTP_INP_RLOCK(inp);
1445 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1446 		SCTP_INP_RUNLOCK(inp);
1447 	}
1448 	if (stcb) {
1449 		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1450 		return (EALREADY);
1451 	}
1452 	SCTP_INP_INCR_REF(inp);
1453 	SCTP_ASOC_CREATE_LOCK(inp);
1454 	creat_lock_on = 1;
1455 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1456 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1457 		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
1458 		error = EFAULT;
1459 		goto out_now;
1460 	}
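	/*
	 * The connectx option buffer starts with an int holding the number
	 * of addresses, immediately followed by the packed sockaddr list.
	 */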
1461 	totaddrp = (int *)optval;
1462 	totaddr = *totaddrp;
1463 	sa = (struct sockaddr *)(totaddrp + 1);
1464 	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
1465 	if ((stcb != NULL) || bad_addresses) {
1466 		/* Already have, or are bringing up, an association */
1467 		SCTP_ASOC_CREATE_UNLOCK(inp);
1468 		creat_lock_on = 0;
1469 		if (stcb)
1470 			SCTP_TCB_UNLOCK(stcb);
1471 		if (bad_addresses == 0) {
1472 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1473 			error = EALREADY;
1474 		}
1475 		goto out_now;
1476 	}
1477 #ifdef INET6
1478 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1479 	    (num_v6 > 0)) {
1480 		error = EINVAL;
1481 		goto out_now;
1482 	}
1483 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1484 	    (num_v4 > 0)) {
1485 		struct in6pcb *inp6;
1486 
1487 		inp6 = (struct in6pcb *)inp;
1488 		if (SCTP_IPV6_V6ONLY(inp6)) {
1489 			/*
1490 			 * if IPV6_V6ONLY flag, ignore connections destined
1491 			 * to a v4 addr or v4-mapped addr
1492 			 */
1493 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1494 			error = EINVAL;
1495 			goto out_now;
1496 		}
1497 	}
1498 #endif				/* INET6 */
1499 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1500 	    SCTP_PCB_FLAGS_UNBOUND) {
1501 		/* Bind an ephemeral port */
1502 		error = sctp_inpcb_bind(so, NULL, NULL, p);
1503 		if (error) {
1504 			goto out_now;
1505 		}
1506 	}
1507 	/* FIX ME: do we want to pass in a vrf on the connect call? */
1508 	vrf_id = inp->def_vrf_id;
1509 
1510 
1511 	/* We are GOOD to go */
1512 	stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
1513 	    (struct thread *)p
1514 	    );
1515 	if (stcb == NULL) {
1516 		/* Gak! no memory */
1517 		goto out_now;
1518 	}
1519 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
1520 	/* move to second address */
1521 	if (sa->sa_family == AF_INET)
1522 		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1523 	else
1524 		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1525 
1526 	error = 0;
1527 	added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1528 	/* Fill in the return id */
1529 	if (error) {
1530 		(void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1531 		goto out_now;
1532 	}
1533 	a_id = (sctp_assoc_t *) optval;
1534 	*a_id = sctp_get_associd(stcb);
1535 
1536 	/* initialize authentication parameters for the assoc */
1537 	sctp_initialize_auth_params(inp, stcb);
1538 
1539 	if (delay) {
1540 		/* doing delayed connection */
1541 		stcb->asoc.delayed_connection = 1;
1542 		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1543 	} else {
1544 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1545 		sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
1546 	}
1547 	SCTP_TCB_UNLOCK(stcb);
1548 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1549 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1550 		/* Set the connected flag so we can queue data */
1551 		soisconnecting(so);
1552 	}
1553 out_now:
1554 	if (creat_lock_on) {
1555 		SCTP_ASOC_CREATE_UNLOCK(inp);
1556 	}
1557 	SCTP_INP_DECR_REF(inp);
1558 	return error;
1559 }
1560 
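/*
 * Resolve the TCB a socket option applies to: for one-to-one style sockets
 * take the endpoint's single association, otherwise look it up by assoc_id.
 * On success stcb is returned locked; if an assoc_id lookup fails, the
 * enclosing switch case is aborted with ENOENT.
 */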
1561 #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1562 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1563 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1564 		SCTP_INP_RLOCK(inp); \
1565 		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1566 		if (stcb) { \
1567 			SCTP_TCB_LOCK(stcb); \
1568                 } \
1569 		SCTP_INP_RUNLOCK(inp); \
1570 	} else if (assoc_id != 0) { \
1571 		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1572 		if (stcb == NULL) { \
1573 		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
1574 			error = ENOENT; \
1575 			break; \
1576 		} \
1577 	} else { \
1578 		stcb = NULL; \
1579         } \
1580   }
1581 
1582 
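/*
 * Verify that the caller's option buffer is at least sizeof(type) and cast
 * it to the requested pointer type; otherwise abort the enclosing switch
 * case with EINVAL.
 */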
1583 #define SCTP_CHECK_AND_CAST(destp, srcp, type, size)  {\
1584 	if (size < sizeof(type)) { \
1585 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
1586 		error = EINVAL; \
1587 		break; \
1588 	} else { \
1589 		destp = (type *)srcp; \
1590 	} \
1591       }
1592 
1593 static int
1594 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1595     void *p)
1596 {
1597 	struct sctp_inpcb *inp = NULL;
1598 	int error, val = 0;
1599 	struct sctp_tcb *stcb = NULL;
1600 
1601 	if (optval == NULL) {
1602 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1603 		return (EINVAL);
1604 	}
1605 	inp = (struct sctp_inpcb *)so->so_pcb;
1606 	if (inp == 0) {
1607 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1608 		return EINVAL;
1609 	}
1610 	error = 0;
1611 
1612 	switch (optname) {
1613 	case SCTP_NODELAY:
1614 	case SCTP_AUTOCLOSE:
1615 	case SCTP_EXPLICIT_EOR:
1616 	case SCTP_AUTO_ASCONF:
1617 	case SCTP_DISABLE_FRAGMENTS:
1618 	case SCTP_I_WANT_MAPPED_V4_ADDR:
1619 	case SCTP_USE_EXT_RCVINFO:
1620 		SCTP_INP_RLOCK(inp);
1621 		switch (optname) {
1622 		case SCTP_DISABLE_FRAGMENTS:
1623 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1624 			break;
1625 		case SCTP_I_WANT_MAPPED_V4_ADDR:
1626 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1627 			break;
1628 		case SCTP_AUTO_ASCONF:
1629 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1630 				/* only valid for bound all sockets */
1631 				val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1632 			} else {
1633 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1634 				error = EINVAL;
1635 				goto flags_out;
1636 			}
1637 			break;
1638 		case SCTP_EXPLICIT_EOR:
1639 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1640 			break;
1641 		case SCTP_NODELAY:
1642 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1643 			break;
1644 		case SCTP_USE_EXT_RCVINFO:
1645 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1646 			break;
1647 		case SCTP_AUTOCLOSE:
1648 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1649 				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1650 			else
1651 				val = 0;
1652 			break;
1653 
1654 		default:
1655 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1656 			error = ENOPROTOOPT;
1657 		}		/* end switch (sopt->sopt_name) */
1658 		if (optname != SCTP_AUTOCLOSE) {
1659 			/* make it an "on/off" value */
1660 			val = (val != 0);
1661 		}
1662 		if (*optsize < sizeof(val)) {
1663 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1664 			error = EINVAL;
1665 		}
1666 flags_out:
1667 		SCTP_INP_RUNLOCK(inp);
1668 		if (error == 0) {
1669 			/* return the option value */
1670 			*(int *)optval = val;
1671 			*optsize = sizeof(val);
1672 		}
1673 		break;
1674 	case SCTP_GET_PACKET_LOG:
1675 		{
1676 #ifdef  SCTP_PACKET_LOGGING
1677 			uint8_t *target;
1678 			int ret;
1679 
1680 			SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
1681 			ret = sctp_copy_out_packet_log(target, (int)*optsize);
1682 			*optsize = ret;
1683 #else
1684 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1685 			error = EOPNOTSUPP;
1686 #endif
1687 			break;
1688 		}
1689 	case SCTP_REUSE_PORT:
1690 		{
1691 			uint32_t *value;
1692 
1693 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
1694 				/* Can't do this for a one-to-many socket */
1695 				error = EINVAL;
1696 				break;
1697 			}
1698 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1699 			*value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
1700 			*optsize = sizeof(uint32_t);
1701 		}
1702 		break;
1703 	case SCTP_PARTIAL_DELIVERY_POINT:
1704 		{
1705 			uint32_t *value;
1706 
1707 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1708 			*value = inp->partial_delivery_point;
1709 			*optsize = sizeof(uint32_t);
1710 		}
1711 		break;
1712 	case SCTP_FRAGMENT_INTERLEAVE:
1713 		{
1714 			uint32_t *value;
1715 
1716 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
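			/*
			 * Two feature flags map onto three levels: with
			 * FRAG_INTERLEAVE off the level is 0, with it on the
			 * level is 1, and with INTERLEAVE_STRMS also on the
			 * level is 2.
			 */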
1717 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1718 				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1719 					*value = SCTP_FRAG_LEVEL_2;
1720 				} else {
1721 					*value = SCTP_FRAG_LEVEL_1;
1722 				}
1723 			} else {
1724 				*value = SCTP_FRAG_LEVEL_0;
1725 			}
1726 			*optsize = sizeof(uint32_t);
1727 		}
1728 		break;
1729 	case SCTP_CMT_ON_OFF:
1730 		{
1731 			struct sctp_assoc_value *av;
1732 
1733 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1734 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1735 			if (stcb) {
1736 				av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1737 				SCTP_TCB_UNLOCK(stcb);
1738 			} else {
1739 				SCTP_INP_RLOCK(inp);
1740 				av->assoc_value = inp->sctp_cmt_on_off;
1741 				SCTP_INP_RUNLOCK(inp);
1742 			}
1743 			*optsize = sizeof(*av);
1744 		}
1745 		break;
1746 		/* JRS - Get socket option for pluggable congestion control */
1747 	case SCTP_PLUGGABLE_CC:
1748 		{
1749 			struct sctp_assoc_value *av;
1750 
1751 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1752 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1753 			if (stcb) {
1754 				av->assoc_value = stcb->asoc.congestion_control_module;
1755 				SCTP_TCB_UNLOCK(stcb);
1756 			} else {
1757 				av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
1758 			}
1759 			*optsize = sizeof(*av);
1760 		}
1761 		break;
1762 		/* RS - Get socket option for pluggable stream scheduling */
1763 	case SCTP_PLUGGABLE_SS:
1764 		{
1765 			struct sctp_assoc_value *av;
1766 
1767 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1768 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1769 			if (stcb) {
1770 				av->assoc_value = stcb->asoc.stream_scheduling_module;
1771 				SCTP_TCB_UNLOCK(stcb);
1772 			} else {
1773 				av->assoc_value = inp->sctp_ep.sctp_default_ss_module;
1774 			}
1775 			*optsize = sizeof(*av);
1776 		}
1777 		break;
1778 	case SCTP_SS_VALUE:
1779 		{
1780 			struct sctp_stream_value *av;
1781 
1782 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, *optsize);
1783 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1784 			if (stcb) {
1785 				if (stcb->asoc.ss_functions.sctp_ss_get_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id],
1786 				    &av->stream_value) < 0) {
1787 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1788 					error = EINVAL;
1789 				} else {
1790 					*optsize = sizeof(*av);
1791 				}
1792 				SCTP_TCB_UNLOCK(stcb);
1793 			} else {
1794 				/*
1795 				 * Can't get stream value without
1796 				 * association
1797 				 */
1798 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1799 				error = EINVAL;
1800 			}
1801 		}
1802 		break;
1803 	case SCTP_GET_ADDR_LEN:
1804 		{
1805 			struct sctp_assoc_value *av;
1806 
1807 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1808 			error = EINVAL;
1809 #ifdef INET
1810 			if (av->assoc_value == AF_INET) {
1811 				av->assoc_value = sizeof(struct sockaddr_in);
1812 				error = 0;
1813 			}
1814 #endif
1815 #ifdef INET6
1816 			if (av->assoc_value == AF_INET6) {
1817 				av->assoc_value = sizeof(struct sockaddr_in6);
1818 				error = 0;
1819 			}
1820 #endif
1821 			if (error) {
1822 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1823 			}
1824 			*optsize = sizeof(*av);
1825 		}
1826 		break;
1827 	case SCTP_GET_ASSOC_NUMBER:
1828 		{
1829 			uint32_t *value, cnt;
1830 
1831 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1832 			cnt = 0;
1833 			SCTP_INP_RLOCK(inp);
1834 			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1835 				cnt++;
1836 			}
1837 			SCTP_INP_RUNLOCK(inp);
1838 			*value = cnt;
1839 			*optsize = sizeof(uint32_t);
1840 		}
1841 		break;
1842 
1843 	case SCTP_GET_ASSOC_ID_LIST:
1844 		{
1845 			struct sctp_assoc_ids *ids;
1846 			unsigned int at, limit;
1847 
1848 			SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1849 			at = 0;
1850 			limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
1851 			SCTP_INP_RLOCK(inp);
1852 			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1853 				if (at < limit) {
1854 					ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
1855 				} else {
1856 					error = EINVAL;
1857 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1858 					break;
1859 				}
1860 			}
1861 			SCTP_INP_RUNLOCK(inp);
1862 			ids->gaids_number_of_ids = at;
1863 			*optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
1864 		}
1865 		break;
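		/*
		 * Illustrative userland sketch: the two options above are
		 * typically used together, first querying the association
		 * count and then sizing the id list from it.  "fd" is assumed
		 * to be a one-to-many SCTP socket; includes and error
		 * handling are omitted.
		 *
		 *	uint32_t cnt;
		 *	socklen_t len = sizeof(cnt);
		 *	struct sctp_assoc_ids *ids;
		 *
		 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &cnt, &len);
		 *	len = sizeof(*ids) + cnt * sizeof(sctp_assoc_t);
		 *	ids = malloc(len);
		 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len);
		 */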
1866 	case SCTP_CONTEXT:
1867 		{
1868 			struct sctp_assoc_value *av;
1869 
1870 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1871 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1872 
1873 			if (stcb) {
1874 				av->assoc_value = stcb->asoc.context;
1875 				SCTP_TCB_UNLOCK(stcb);
1876 			} else {
1877 				SCTP_INP_RLOCK(inp);
1878 				av->assoc_value = inp->sctp_context;
1879 				SCTP_INP_RUNLOCK(inp);
1880 			}
1881 			*optsize = sizeof(*av);
1882 		}
1883 		break;
1884 	case SCTP_VRF_ID:
1885 		{
1886 			uint32_t *default_vrfid;
1887 
1888 			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
1889 			*default_vrfid = inp->def_vrf_id;
1890 			break;
1891 		}
1892 	case SCTP_GET_ASOC_VRF:
1893 		{
1894 			struct sctp_assoc_value *id;
1895 
1896 			SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1897 			SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1898 			if (stcb == NULL) {
1899 				error = EINVAL;
1900 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1901 				break;
1902 			}
1903 			id->assoc_value = stcb->asoc.vrf_id;
1904 			break;
1905 		}
1906 	case SCTP_GET_VRF_IDS:
1907 		{
1908 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1909 			error = EOPNOTSUPP;
1910 			break;
1911 		}
1912 	case SCTP_GET_NONCE_VALUES:
1913 		{
1914 			struct sctp_get_nonce_values *gnv;
1915 
1916 			SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1917 			SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1918 
1919 			if (stcb) {
1920 				gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1921 				gnv->gn_local_tag = stcb->asoc.my_vtag;
1922 				SCTP_TCB_UNLOCK(stcb);
1923 			} else {
1924 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1925 				error = ENOTCONN;
1926 			}
1927 			*optsize = sizeof(*gnv);
1928 		}
1929 		break;
1930 	case SCTP_DELAYED_SACK:
1931 		{
1932 			struct sctp_sack_info *sack;
1933 
1934 			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
1935 			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
1936 			if (stcb) {
1937 				sack->sack_delay = stcb->asoc.delayed_ack;
1938 				sack->sack_freq = stcb->asoc.sack_freq;
1939 				SCTP_TCB_UNLOCK(stcb);
1940 			} else {
1941 				SCTP_INP_RLOCK(inp);
1942 				sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1943 				sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
1944 				SCTP_INP_RUNLOCK(inp);
1945 			}
1946 			*optsize = sizeof(*sack);
1947 		}
1948 		break;
1949 
1950 	case SCTP_GET_SNDBUF_USE:
1951 		{
1952 			struct sctp_sockstat *ss;
1953 
1954 			SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1955 			SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1956 
1957 			if (stcb) {
1958 				ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1959 				ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1960 				    stcb->asoc.size_on_all_streams);
1961 				SCTP_TCB_UNLOCK(stcb);
1962 			} else {
1963 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1964 				error = ENOTCONN;
1965 			}
1966 			*optsize = sizeof(struct sctp_sockstat);
1967 		}
1968 		break;
1969 	case SCTP_MAX_BURST:
1970 		{
1971 			struct sctp_assoc_value *av;
1972 
1973 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1974 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1975 
1976 			if (stcb) {
1977 				av->assoc_value = stcb->asoc.max_burst;
1978 				SCTP_TCB_UNLOCK(stcb);
1979 			} else {
1980 				SCTP_INP_RLOCK(inp);
1981 				av->assoc_value = inp->sctp_ep.max_burst;
1982 				SCTP_INP_RUNLOCK(inp);
1983 			}
1984 			*optsize = sizeof(struct sctp_assoc_value);
1985 
1986 		}
1987 		break;
1988 	case SCTP_MAXSEG:
1989 		{
1990 			struct sctp_assoc_value *av;
1991 			int ovh;
1992 
1993 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1994 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1995 
1996 			if (stcb) {
1997 				av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1998 				SCTP_TCB_UNLOCK(stcb);
1999 			} else {
2000 				SCTP_INP_RLOCK(inp);
2001 				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2002 					ovh = SCTP_MED_OVERHEAD;
2003 				} else {
2004 					ovh = SCTP_MED_V4_OVERHEAD;
2005 				}
2006 				if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
2007 					av->assoc_value = 0;
2008 				else
2009 					av->assoc_value = inp->sctp_frag_point - ovh;
2010 				SCTP_INP_RUNLOCK(inp);
2011 			}
2012 			*optsize = sizeof(struct sctp_assoc_value);
2013 		}
2014 		break;
2015 	case SCTP_GET_STAT_LOG:
2016 		error = sctp_fill_stat_log(optval, optsize);
2017 		break;
2018 	case SCTP_EVENTS:
2019 		{
2020 			struct sctp_event_subscribe *events;
2021 
2022 			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
2023 			memset(events, 0, sizeof(*events));
2024 			SCTP_INP_RLOCK(inp);
2025 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
2026 				events->sctp_data_io_event = 1;
2027 
2028 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
2029 				events->sctp_association_event = 1;
2030 
2031 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
2032 				events->sctp_address_event = 1;
2033 
2034 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2035 				events->sctp_send_failure_event = 1;
2036 
2037 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
2038 				events->sctp_peer_error_event = 1;
2039 
2040 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
2041 				events->sctp_shutdown_event = 1;
2042 
2043 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
2044 				events->sctp_partial_delivery_event = 1;
2045 
2046 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
2047 				events->sctp_adaptation_layer_event = 1;
2048 
2049 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
2050 				events->sctp_authentication_event = 1;
2051 
2052 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
2053 				events->sctp_sender_dry_event = 1;
2054 
2055 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2056 				events->sctp_stream_reset_event = 1;
2057 			SCTP_INP_RUNLOCK(inp);
2058 			*optsize = sizeof(struct sctp_event_subscribe);
2059 		}
2060 		break;
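		/*
		 * Illustrative userland sketch: reading the current event
		 * subscriptions; the same structure is handed to
		 * setsockopt(2) to change them.  "fd" is an assumption.
		 *
		 *	struct sctp_event_subscribe ev;
		 *	socklen_t len = sizeof(ev);
		 *
		 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, &len) == 0 &&
		 *	    ev.sctp_data_io_event)
		 *		printf("sctp_sndrcvinfo is delivered with each message\n");
		 */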
2061 
2062 	case SCTP_ADAPTATION_LAYER:
2063 		{
2064 			uint32_t *value;
2065 
2066 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2067 
2068 			SCTP_INP_RLOCK(inp);
2069 			*value = inp->sctp_ep.adaptation_layer_indicator;
2070 			SCTP_INP_RUNLOCK(inp);
2071 			*optsize = sizeof(uint32_t);
2072 		}
2073 		break;
2074 	case SCTP_SET_INITIAL_DBG_SEQ:
2075 		{
2076 			uint32_t *value;
2077 
2078 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2079 			SCTP_INP_RLOCK(inp);
2080 			*value = inp->sctp_ep.initial_sequence_debug;
2081 			SCTP_INP_RUNLOCK(inp);
2082 			*optsize = sizeof(uint32_t);
2083 		}
2084 		break;
2085 	case SCTP_GET_LOCAL_ADDR_SIZE:
2086 		{
2087 			uint32_t *value;
2088 
2089 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2090 			SCTP_INP_RLOCK(inp);
2091 			*value = sctp_count_max_addresses(inp);
2092 			SCTP_INP_RUNLOCK(inp);
2093 			*optsize = sizeof(uint32_t);
2094 		}
2095 		break;
2096 	case SCTP_GET_REMOTE_ADDR_SIZE:
2097 		{
2098 			uint32_t *value;
2099 			size_t size;
2100 			struct sctp_nets *net;
2101 
2102 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2103 			/* FIXME MT: change to sctp_assoc_value? */
2104 			SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
2105 
2106 			if (stcb) {
2107 				size = 0;
2108 				/* Count the sizes */
2109 				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2110 					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2111 					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2112 						size += sizeof(struct sockaddr_in6);
2113 					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2114 						size += sizeof(struct sockaddr_in);
2115 					} else {
2116 						/* huh */
2117 						break;
2118 					}
2119 				}
2120 				SCTP_TCB_UNLOCK(stcb);
2121 				*value = (uint32_t) size;
2122 			} else {
2123 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2124 				error = ENOTCONN;
2125 			}
2126 			*optsize = sizeof(uint32_t);
2127 		}
2128 		break;
2129 	case SCTP_GET_PEER_ADDRESSES:
2130 		/*
2131 		 * Get the address information; an array is passed in
2132 		 * to fill up and we pack it.
2133 		 */
2134 		{
2135 			size_t cpsz, left;
2136 			struct sockaddr_storage *sas;
2137 			struct sctp_nets *net;
2138 			struct sctp_getaddresses *saddr;
2139 
2140 			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2141 			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2142 
2143 			if (stcb) {
2144 				left = (*optsize) - sizeof(struct sctp_getaddresses);
2145 				*optsize = sizeof(struct sctp_getaddresses);
2146 				sas = (struct sockaddr_storage *)&saddr->addr[0];
2147 
2148 				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2149 					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2150 					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2151 						cpsz = sizeof(struct sockaddr_in6);
2152 					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2153 						cpsz = sizeof(struct sockaddr_in);
2154 					} else {
2155 						/* huh */
2156 						break;
2157 					}
2158 					if (left < cpsz) {
2159 						/* not enough room. */
2160 						break;
2161 					}
2162 #ifdef INET6
2163 					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
2164 					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2165 						/* Must map the address */
2166 						in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2167 						    (struct sockaddr_in6 *)sas);
2168 					} else {
2169 #endif
2170 						memcpy(sas, &net->ro._l_addr, cpsz);
2171 #ifdef INET6
2172 					}
2173 #endif
2174 					((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2175 
2176 					sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2177 					left -= cpsz;
2178 					*optsize += cpsz;
2179 				}
2180 				SCTP_TCB_UNLOCK(stcb);
2181 			} else {
2182 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2183 				error = ENOENT;
2184 			}
2185 		}
2186 		break;
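		/*
		 * Illustrative userland sketch: applications usually reach
		 * SCTP_GET_REMOTE_ADDR_SIZE and SCTP_GET_PEER_ADDRESSES
		 * through the sctp_getpaddrs(3) wrapper, which sizes and
		 * unpacks the sockaddr array for them.  "fd" and "assoc_id"
		 * are assumptions.
		 *
		 *	struct sockaddr *addrs;
		 *	int n;
		 *
		 *	n = sctp_getpaddrs(fd, assoc_id, &addrs);
		 *	if (n > 0) {
		 *		... walk the n packed sockaddrs ...
		 *		sctp_freepaddrs(addrs);
		 *	}
		 */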
2187 	case SCTP_GET_LOCAL_ADDRESSES:
2188 		{
2189 			size_t limit, actual;
2190 			struct sockaddr_storage *sas;
2191 			struct sctp_getaddresses *saddr;
2192 
2193 			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2194 			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2195 
2196 			sas = (struct sockaddr_storage *)&saddr->addr[0];
2197 			limit = *optsize - sizeof(sctp_assoc_t);
2198 			actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2199 			if (stcb) {
2200 				SCTP_TCB_UNLOCK(stcb);
2201 			}
2202 			*optsize = sizeof(struct sockaddr_storage) + actual;
2203 		}
2204 		break;
2205 	case SCTP_PEER_ADDR_PARAMS:
2206 		{
2207 			struct sctp_paddrparams *paddrp;
2208 			struct sctp_nets *net;
2209 
2210 			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
2211 			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
2212 
2213 			net = NULL;
2214 			if (stcb) {
2215 				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2216 			} else {
2217 				/*
2218 				 * We increment the refcount here since
2219 				 * sctp_findassociation_ep_addr() will do a
2220 				 * decrement if it finds the stcb, as long as
2221 				 * the locked tcb (last argument) is NOT a
2222 				 * TCB, i.e. NULL.
2223 				 */
2224 				SCTP_INP_INCR_REF(inp);
2225 				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
2226 				if (stcb == NULL) {
2227 					SCTP_INP_DECR_REF(inp);
2228 				}
2229 			}
2230 			if (stcb && (net == NULL)) {
2231 				struct sockaddr *sa;
2232 
2233 				sa = (struct sockaddr *)&paddrp->spp_address;
2234 				if (sa->sa_family == AF_INET) {
2235 					struct sockaddr_in *sin;
2236 
2237 					sin = (struct sockaddr_in *)sa;
2238 					if (sin->sin_addr.s_addr) {
2239 						error = EINVAL;
2240 						SCTP_TCB_UNLOCK(stcb);
2241 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2242 						break;
2243 					}
2244 				} else if (sa->sa_family == AF_INET6) {
2245 					struct sockaddr_in6 *sin6;
2246 
2247 					sin6 = (struct sockaddr_in6 *)sa;
2248 					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2249 						error = EINVAL;
2250 						SCTP_TCB_UNLOCK(stcb);
2251 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2252 						break;
2253 					}
2254 				} else {
2255 					error = EAFNOSUPPORT;
2256 					SCTP_TCB_UNLOCK(stcb);
2257 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2258 					break;
2259 				}
2260 			}
2261 			if (stcb) {
2262 				/* Applies to the specific association */
2263 				paddrp->spp_flags = 0;
2264 				if (net) {
2265 					int ovh;
2266 
2267 					if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2268 						ovh = SCTP_MED_OVERHEAD;
2269 					} else {
2270 						ovh = SCTP_MED_V4_OVERHEAD;
2271 					}
2272 
2273 
2274 					paddrp->spp_pathmaxrxt = net->failure_threshold;
2275 					paddrp->spp_pathmtu = net->mtu - ovh;
2276 					/* get flags for HB */
2277 					if (net->dest_state & SCTP_ADDR_NOHB)
2278 						paddrp->spp_flags |= SPP_HB_DISABLE;
2279 					else
2280 						paddrp->spp_flags |= SPP_HB_ENABLE;
2281 					/* get flags for PMTU */
2282 					if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2283 						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2284 					} else {
2285 						paddrp->spp_flags |= SPP_PMTUD_DISABLE;
2286 					}
2287 #ifdef INET
2288 					if (net->ro._l_addr.sin.sin_family == AF_INET) {
2289 						paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
2290 						paddrp->spp_flags |= SPP_IPV4_TOS;
2291 					}
2292 #endif
2293 #ifdef INET6
2294 					if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
2295 						paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
2296 						paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2297 					}
2298 #endif
2299 				} else {
2300 					/*
2301 					 * No destination so return default
2302 					 * value
2303 					 */
2304 					int cnt = 0;
2305 
2306 					paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2307 					paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
2308 #ifdef INET
2309 					paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
2310 					paddrp->spp_flags |= SPP_IPV4_TOS;
2311 #endif
2312 #ifdef INET6
2313 					paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2314 					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2315 #endif
2316 					/* default settings should be these */
2317 					if (stcb->asoc.hb_is_disabled == 0) {
2318 						paddrp->spp_flags |= SPP_HB_ENABLE;
2319 					} else {
2320 						paddrp->spp_flags |= SPP_HB_DISABLE;
2321 					}
2322 					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2323 						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2324 							cnt++;
2325 						}
2326 					}
2327 					if (cnt) {
2328 						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2329 					}
2330 				}
2331 				paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2332 				paddrp->spp_assoc_id = sctp_get_associd(stcb);
2333 				SCTP_TCB_UNLOCK(stcb);
2334 			} else {
2335 				/* Use endpoint defaults */
2336 				SCTP_INP_RLOCK(inp);
2337 				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2338 				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2339 				paddrp->spp_assoc_id = (sctp_assoc_t) 0;
2340 				/* default behavior, no stcb */
2341 				paddrp->spp_flags = SPP_PMTUD_ENABLE;
2342 				/* get inp's default */
2343 #ifdef INET
2344 				paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2345 				paddrp->spp_flags |= SPP_IPV4_TOS;
2346 #endif
2347 #ifdef INET6
2348 				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2349 					paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2350 					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2351 				}
2352 #endif
2353 				/* can't return this */
2354 				paddrp->spp_pathmtu = 0;
2355 
2356 
2357 				if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
2358 					paddrp->spp_flags |= SPP_HB_ENABLE;
2359 				} else {
2360 					paddrp->spp_flags |= SPP_HB_DISABLE;
2361 				}
2362 				SCTP_INP_RUNLOCK(inp);
2363 			}
2364 			*optsize = sizeof(struct sctp_paddrparams);
2365 		}
2366 		break;
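		/*
		 * Illustrative userland sketch: querying heartbeat and PMTU
		 * discovery settings for one peer address.  Passing the
		 * wildcard address of the proper family returns the
		 * association (or, without an association, the endpoint)
		 * defaults instead, as coded above.  "fd" and "peer" are
		 * assumptions.
		 *
		 *	struct sctp_paddrparams spp;
		 *	socklen_t len = sizeof(spp);
		 *
		 *	memset(&spp, 0, sizeof(spp));
		 *	memcpy(&spp.spp_address, &peer, peer.ss_len);
		 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
		 *	    &spp, &len) == 0 && (spp.spp_flags & SPP_HB_ENABLE))
		 *		printf("heartbeats on, interval %u ms\n",
		 *		    spp.spp_hbinterval);
		 */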
2367 	case SCTP_GET_PEER_ADDR_INFO:
2368 		{
2369 			struct sctp_paddrinfo *paddri;
2370 			struct sctp_nets *net;
2371 
2372 			SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2373 			SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2374 
2375 			net = NULL;
2376 			if (stcb) {
2377 				net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2378 			} else {
2379 				/*
2380 				 * We increment the refcount here since
2381 				 * sctp_findassociation_ep_addr() will do a
2382 				 * decrement if it finds the stcb, as long as
2383 				 * the locked tcb (last argument) is NOT a
2384 				 * TCB, i.e. NULL.
2385 				 */
2386 				SCTP_INP_INCR_REF(inp);
2387 				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2388 				if (stcb == NULL) {
2389 					SCTP_INP_DECR_REF(inp);
2390 				}
2391 			}
2392 
2393 			if ((stcb) && (net)) {
2394 				if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
2395 					/* It's unconfirmed */
2396 					paddri->spinfo_state = SCTP_UNCONFIRMED;
2397 				} else if (net->dest_state & SCTP_ADDR_REACHABLE) {
2398 					/* It's active */
2399 					paddri->spinfo_state = SCTP_ACTIVE;
2400 				} else {
2401 					/* It's inactive */
2402 					paddri->spinfo_state = SCTP_INACTIVE;
2403 				}
2404 				paddri->spinfo_cwnd = net->cwnd;
2405 				paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2406 				paddri->spinfo_rto = net->RTO;
2407 				paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2408 				SCTP_TCB_UNLOCK(stcb);
2409 			} else {
2410 				if (stcb) {
2411 					SCTP_TCB_UNLOCK(stcb);
2412 				}
2413 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2414 				error = ENOENT;
2415 			}
2416 			*optsize = sizeof(struct sctp_paddrinfo);
2417 		}
2418 		break;
2419 	case SCTP_PCB_STATUS:
2420 		{
2421 			struct sctp_pcbinfo *spcb;
2422 
2423 			SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2424 			sctp_fill_pcbinfo(spcb);
2425 			*optsize = sizeof(struct sctp_pcbinfo);
2426 		}
2427 		break;
2428 
2429 	case SCTP_STATUS:
2430 		{
2431 			struct sctp_nets *net;
2432 			struct sctp_status *sstat;
2433 
2434 			SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2435 			SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2436 
2437 			if (stcb == NULL) {
2438 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2439 				error = EINVAL;
2440 				break;
2441 			}
2442 			/*
2443 			 * I think passing the state is fine since
2444 			 * sctp_constants.h will be available to
2445 			 * userland.
2446 			 */
2447 			sstat->sstat_state = stcb->asoc.state;
2448 			sstat->sstat_assoc_id = sctp_get_associd(stcb);
2449 			sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2450 			sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2451 			/*
2452 			 * We can't include chunks that have been passed to
2453 			 * the socket layer. Only things in queue.
2454 			 */
2455 			sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2456 			    stcb->asoc.cnt_on_all_streams);
2457 
2458 
2459 			sstat->sstat_instrms = stcb->asoc.streamincnt;
2460 			sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2461 			sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2462 			memcpy(&sstat->sstat_primary.spinfo_address,
2463 			    &stcb->asoc.primary_destination->ro._l_addr,
2464 			    ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2465 			net = stcb->asoc.primary_destination;
2466 			((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2467 			/*
2468 			 * Again the user can get info from sctp_constants.h
2469 			 * for what the state of the network is.
2470 			 */
2471 			if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
2472 				/* It's unconfirmed */
2473 				sstat->sstat_primary.spinfo_state = SCTP_UNCONFIRMED;
2474 			} else if (net->dest_state & SCTP_ADDR_REACHABLE) {
2475 				/* It's active */
2476 				sstat->sstat_primary.spinfo_state = SCTP_ACTIVE;
2477 			} else {
2478 				/* It's inactive */
2479 				sstat->sstat_primary.spinfo_state = SCTP_INACTIVE;
2480 			}
2481 			sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2482 			sstat->sstat_primary.spinfo_srtt = net->lastsa;
2483 			sstat->sstat_primary.spinfo_rto = net->RTO;
2484 			sstat->sstat_primary.spinfo_mtu = net->mtu;
2485 			sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2486 			SCTP_TCB_UNLOCK(stcb);
2487 			*optsize = sizeof(*sstat);
2488 		}
2489 		break;
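		/*
		 * Illustrative userland sketch: fetching the association
		 * status.  On a one-to-one style socket sstat_assoc_id can be
		 * left at zero; on a one-to-many socket it has to name the
		 * association.  "fd" is an assumption.
		 *
		 *	struct sctp_status st;
		 *	socklen_t len = sizeof(st);
		 *
		 *	memset(&st, 0, sizeof(st));
		 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
		 *		printf("%d unacked, %d in / %d out streams\n",
		 *		    st.sstat_unackdata, st.sstat_instrms,
		 *		    st.sstat_outstrms);
		 */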
2490 	case SCTP_RTOINFO:
2491 		{
2492 			struct sctp_rtoinfo *srto;
2493 
2494 			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2495 			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2496 
2497 			if (stcb) {
2498 				srto->srto_initial = stcb->asoc.initial_rto;
2499 				srto->srto_max = stcb->asoc.maxrto;
2500 				srto->srto_min = stcb->asoc.minrto;
2501 				SCTP_TCB_UNLOCK(stcb);
2502 			} else {
2503 				SCTP_INP_RLOCK(inp);
2504 				srto->srto_initial = inp->sctp_ep.initial_rto;
2505 				srto->srto_max = inp->sctp_ep.sctp_maxrto;
2506 				srto->srto_min = inp->sctp_ep.sctp_minrto;
2507 				SCTP_INP_RUNLOCK(inp);
2508 			}
2509 			*optsize = sizeof(*srto);
2510 		}
2511 		break;
2512 	case SCTP_TIMEOUTS:
2513 		{
2514 			struct sctp_timeouts *stimo;
2515 
2516 			SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize);
2517 			SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id);
2518 
2519 			if (stcb) {
2520 				stimo->stimo_init = stcb->asoc.timoinit;
2521 				stimo->stimo_data = stcb->asoc.timodata;
2522 				stimo->stimo_sack = stcb->asoc.timosack;
2523 				stimo->stimo_shutdown = stcb->asoc.timoshutdown;
2524 				stimo->stimo_heartbeat = stcb->asoc.timoheartbeat;
2525 				stimo->stimo_cookie = stcb->asoc.timocookie;
2526 				stimo->stimo_shutdownack = stcb->asoc.timoshutdownack;
2527 				SCTP_TCB_UNLOCK(stcb);
2528 			} else {
2529 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2530 				error = EINVAL;
2531 			}
2532 			*optsize = sizeof(*stimo);
2533 		}
2534 		break;
2535 	case SCTP_ASSOCINFO:
2536 		{
2537 			struct sctp_assocparams *sasoc;
2538 			uint32_t oldval;
2539 
2540 			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2541 			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2542 
2543 			if (stcb) {
2544 				oldval = sasoc->sasoc_cookie_life;
2545 				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2546 				sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2547 				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2548 				sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2549 				sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2550 				SCTP_TCB_UNLOCK(stcb);
2551 			} else {
2552 				SCTP_INP_RLOCK(inp);
2553 				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2554 				sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2555 				sasoc->sasoc_number_peer_destinations = 0;
2556 				sasoc->sasoc_peer_rwnd = 0;
2557 				sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2558 				SCTP_INP_RUNLOCK(inp);
2559 			}
2560 			*optsize = sizeof(*sasoc);
2561 		}
2562 		break;
2563 	case SCTP_DEFAULT_SEND_PARAM:
2564 		{
2565 			struct sctp_sndrcvinfo *s_info;
2566 
2567 			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2568 			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2569 
2570 			if (stcb) {
2571 				memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
2572 				SCTP_TCB_UNLOCK(stcb);
2573 			} else {
2574 				SCTP_INP_RLOCK(inp);
2575 				memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
2576 				SCTP_INP_RUNLOCK(inp);
2577 			}
2578 			*optsize = sizeof(*s_info);
2579 		}
2580 		break;
2581 	case SCTP_INITMSG:
2582 		{
2583 			struct sctp_initmsg *sinit;
2584 
2585 			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2586 			SCTP_INP_RLOCK(inp);
2587 			sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2588 			sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2589 			sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2590 			sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2591 			SCTP_INP_RUNLOCK(inp);
2592 			*optsize = sizeof(*sinit);
2593 		}
2594 		break;
2595 	case SCTP_PRIMARY_ADDR:
2596 		/* we allow a "get" operation on this */
2597 		{
2598 			struct sctp_setprim *ssp;
2599 
2600 			SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2601 			SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2602 
2603 			if (stcb) {
2604 				/* simply copy out the sockaddr_storage... */
2605 				int len;
2606 
2607 				len = *optsize;
2608 				if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
2609 					len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
2610 
2611 				memcpy(&ssp->ssp_addr,
2612 				    &stcb->asoc.primary_destination->ro._l_addr,
2613 				    len);
2614 				SCTP_TCB_UNLOCK(stcb);
2615 			} else {
2616 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2617 				error = EINVAL;
2618 			}
2619 			*optsize = sizeof(*ssp);
2620 		}
2621 		break;
2622 
2623 	case SCTP_HMAC_IDENT:
2624 		{
2625 			struct sctp_hmacalgo *shmac;
2626 			sctp_hmaclist_t *hmaclist;
2627 			uint32_t size;
2628 			int i;
2629 
2630 			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2631 
2632 			SCTP_INP_RLOCK(inp);
2633 			hmaclist = inp->sctp_ep.local_hmacs;
2634 			if (hmaclist == NULL) {
2635 				/* no HMACs to return */
2636 				*optsize = sizeof(*shmac);
2637 				SCTP_INP_RUNLOCK(inp);
2638 				break;
2639 			}
2640 			/* is there room for all of the hmac ids? */
2641 			size = sizeof(*shmac) + (hmaclist->num_algo *
2642 			    sizeof(shmac->shmac_idents[0]));
2643 			if ((size_t)(*optsize) < size) {
2644 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2645 				error = EINVAL;
2646 				SCTP_INP_RUNLOCK(inp);
2647 				break;
2648 			}
2649 			/* copy in the list */
2650 			shmac->shmac_number_of_idents = hmaclist->num_algo;
2651 			for (i = 0; i < hmaclist->num_algo; i++) {
2652 				shmac->shmac_idents[i] = hmaclist->hmac[i];
2653 			}
2654 			SCTP_INP_RUNLOCK(inp);
2655 			*optsize = size;
2656 			break;
2657 		}
2658 	case SCTP_AUTH_ACTIVE_KEY:
2659 		{
2660 			struct sctp_authkeyid *scact;
2661 
2662 			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2663 			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2664 
2665 			if (stcb) {
2666 				/* get the active key on the assoc */
2667 				scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
2668 				SCTP_TCB_UNLOCK(stcb);
2669 			} else {
2670 				/* get the endpoint active key */
2671 				SCTP_INP_RLOCK(inp);
2672 				scact->scact_keynumber = inp->sctp_ep.default_keyid;
2673 				SCTP_INP_RUNLOCK(inp);
2674 			}
2675 			*optsize = sizeof(*scact);
2676 			break;
2677 		}
2678 	case SCTP_LOCAL_AUTH_CHUNKS:
2679 		{
2680 			struct sctp_authchunks *sac;
2681 			sctp_auth_chklist_t *chklist = NULL;
2682 			size_t size = 0;
2683 
2684 			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2685 			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2686 
2687 			if (stcb) {
2688 				/* get off the assoc */
2689 				chklist = stcb->asoc.local_auth_chunks;
2690 				/* is there enough space? */
2691 				size = sctp_auth_get_chklist_size(chklist);
2692 				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2693 					error = EINVAL;
2694 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2695 				} else {
2696 					/* copy in the chunks */
2697 					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2698 				}
2699 				SCTP_TCB_UNLOCK(stcb);
2700 			} else {
2701 				/* get off the endpoint */
2702 				SCTP_INP_RLOCK(inp);
2703 				chklist = inp->sctp_ep.local_auth_chunks;
2704 				/* is there enough space? */
2705 				size = sctp_auth_get_chklist_size(chklist);
2706 				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2707 					error = EINVAL;
2708 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2709 				} else {
2710 					/* copy in the chunks */
2711 					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2712 				}
2713 				SCTP_INP_RUNLOCK(inp);
2714 			}
2715 			*optsize = sizeof(struct sctp_authchunks) + size;
2716 			break;
2717 		}
2718 	case SCTP_PEER_AUTH_CHUNKS:
2719 		{
2720 			struct sctp_authchunks *sac;
2721 			sctp_auth_chklist_t *chklist = NULL;
2722 			size_t size = 0;
2723 
2724 			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2725 			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2726 
2727 			if (stcb) {
2728 				/* get off the assoc */
2729 				chklist = stcb->asoc.peer_auth_chunks;
2730 				/* is there enough space? */
2731 				size = sctp_auth_get_chklist_size(chklist);
2732 				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2733 					error = EINVAL;
2734 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2735 				} else {
2736 					/* copy in the chunks */
2737 					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2738 				}
2739 				SCTP_TCB_UNLOCK(stcb);
2740 			} else {
2741 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2742 				error = ENOENT;
2743 			}
2744 			*optsize = sizeof(struct sctp_authchunks) + size;
2745 			break;
2746 		}
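		/*
		 * Illustrative userland sketch: both auth-chunk options above
		 * return a variable sized list, so the caller has to leave
		 * room behind the fixed header for the serialized chunk
		 * types, matching the size checks above.  The 256-byte pad
		 * and "fd" are assumptions.
		 *
		 *	struct sctp_authchunks *ac;
		 *	socklen_t len = sizeof(*ac) + 256;
		 *
		 *	ac = malloc(len);
		 *	memset(ac, 0, len);
		 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_AUTH_CHUNKS,
		 *	    ac, &len) == 0)
		 *		printf("%u bytes of chunk list returned\n",
		 *		    (unsigned)(len - sizeof(*ac)));
		 */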
2747 
2748 
2749 	default:
2750 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2751 		error = ENOPROTOOPT;
2752 		*optsize = 0;
2753 		break;
2754 	}			/* end switch (sopt->sopt_name) */
2755 	return (error);
2756 }
2757 
2758 static int
2759 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2760     void *p)
2761 {
2762 	int error, set_opt;
2763 	uint32_t *mopt;
2764 	struct sctp_tcb *stcb = NULL;
2765 	struct sctp_inpcb *inp = NULL;
2766 	uint32_t vrf_id;
2767 
2768 	if (optval == NULL) {
2769 		SCTP_PRINTF("optval is NULL\n");
2770 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2771 		return (EINVAL);
2772 	}
2773 	inp = (struct sctp_inpcb *)so->so_pcb;
2774 	if (inp == NULL) {
2775 		SCTP_PRINTF("inp is NULL?\n");
2776 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2777 		return (EINVAL);
2778 	}
2779 	vrf_id = inp->def_vrf_id;
2780 
2781 	error = 0;
2782 	switch (optname) {
2783 	case SCTP_NODELAY:
2784 	case SCTP_AUTOCLOSE:
2785 	case SCTP_AUTO_ASCONF:
2786 	case SCTP_EXPLICIT_EOR:
2787 	case SCTP_DISABLE_FRAGMENTS:
2788 	case SCTP_USE_EXT_RCVINFO:
2789 	case SCTP_I_WANT_MAPPED_V4_ADDR:
2790 		/* copy in the option value */
2791 		SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2792 		set_opt = 0;
2793 		if (error)
2794 			break;
2795 		switch (optname) {
2796 		case SCTP_DISABLE_FRAGMENTS:
2797 			set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2798 			break;
2799 		case SCTP_AUTO_ASCONF:
2800 			/*
2801 			 * NOTE: we don't really support this flag
2802 			 */
2803 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2804 				/* only valid for bound all sockets */
2805 				set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2806 			} else {
2807 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2808 				return (EINVAL);
2809 			}
2810 			break;
2811 		case SCTP_EXPLICIT_EOR:
2812 			set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2813 			break;
2814 		case SCTP_USE_EXT_RCVINFO:
2815 			set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2816 			break;
2817 		case SCTP_I_WANT_MAPPED_V4_ADDR:
2818 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2819 				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2820 			} else {
2821 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2822 				return (EINVAL);
2823 			}
2824 			break;
2825 		case SCTP_NODELAY:
2826 			set_opt = SCTP_PCB_FLAGS_NODELAY;
2827 			break;
2828 		case SCTP_AUTOCLOSE:
2829 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2830 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2831 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2832 				return (EINVAL);
2833 			}
2834 			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2835 			/*
2836 			 * The value is given in seconds and stored in ticks;
2837 			 * it does not affect existing associations, only new ones.
2838 			 */
2839 			inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2840 			break;
2841 		}
2842 		SCTP_INP_WLOCK(inp);
2843 		if (*mopt != 0) {
2844 			sctp_feature_on(inp, set_opt);
2845 		} else {
2846 			sctp_feature_off(inp, set_opt);
2847 		}
2848 		SCTP_INP_WUNLOCK(inp);
2849 		break;
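		/*
		 * Illustrative userland sketch: each of the options above
		 * takes a single integer.  For SCTP_AUTOCLOSE (one-to-many
		 * sockets only, as checked above) the value is the idle time
		 * in seconds and zero disables it; for the others any
		 * non-zero value turns the feature on.  "fd" is an
		 * assumption.
		 *
		 *	int secs = 30;
		 *	int on = 1;
		 *
		 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
		 *	setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
		 */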
2850 	case SCTP_REUSE_PORT:
2851 		{
2852 			SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2853 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
2854 				/* Can't set it after we are bound */
2855 				error = EINVAL;
2856 				break;
2857 			}
2858 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2859 				/* Can't do this for a 1-m socket */
2860 				error = EINVAL;
2861 				break;
2862 			}
2863 			if (*mopt != 0)
2864 				sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
2865 			else
2866 				sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
2867 		}
2868 		break;
2869 	case SCTP_PARTIAL_DELIVERY_POINT:
2870 		{
2871 			uint32_t *value;
2872 
2873 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2874 			if (*value > SCTP_SB_LIMIT_RCV(so)) {
2875 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2876 				error = EINVAL;
2877 				break;
2878 			}
2879 			inp->partial_delivery_point = *value;
2880 		}
2881 		break;
2882 	case SCTP_FRAGMENT_INTERLEAVE:
2883 		/* not yet until we re-write sctp_recvmsg() */
2884 		{
2885 			uint32_t *level;
2886 
2887 			SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2888 			if (*level == SCTP_FRAG_LEVEL_2) {
2889 				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2890 				sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2891 			} else if (*level == SCTP_FRAG_LEVEL_1) {
2892 				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2893 				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2894 			} else if (*level == SCTP_FRAG_LEVEL_0) {
2895 				sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2896 				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2897 
2898 			} else {
2899 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2900 				error = EINVAL;
2901 			}
2902 		}
2903 		break;
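		/*
		 * Illustrative userland sketch: selecting one of the three
		 * interleave levels handled above.  "fd" is an assumption.
		 *
		 *	uint32_t level = SCTP_FRAG_LEVEL_1;
		 *
		 *	setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		 *	    &level, sizeof(level));
		 */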
2904 	case SCTP_CMT_ON_OFF:
2905 		if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
2906 			struct sctp_assoc_value *av;
2907 
2908 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2909 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2910 			if (stcb) {
2911 				stcb->asoc.sctp_cmt_on_off = av->assoc_value;
2912 				if (stcb->asoc.sctp_cmt_on_off > 2) {
2913 					stcb->asoc.sctp_cmt_on_off = 2;
2914 				}
2915 				SCTP_TCB_UNLOCK(stcb);
2916 			} else {
2917 				SCTP_INP_WLOCK(inp);
2918 				inp->sctp_cmt_on_off = av->assoc_value;
2919 				if (inp->sctp_cmt_on_off > 2) {
2920 					inp->sctp_cmt_on_off = 2;
2921 				}
2922 				SCTP_INP_WUNLOCK(inp);
2923 			}
2924 		} else {
2925 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2926 			error = ENOPROTOOPT;
2927 		}
2928 		break;
2929 		/* JRS - Set socket option for pluggable congestion control */
2930 	case SCTP_PLUGGABLE_CC:
2931 		{
2932 			struct sctp_assoc_value *av;
2933 
2934 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2935 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2936 			if (stcb) {
2937 				switch (av->assoc_value) {
2938 				case SCTP_CC_RFC2581:
2939 				case SCTP_CC_HSTCP:
2940 				case SCTP_CC_HTCP:
2941 					stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value];
2942 					stcb->asoc.congestion_control_module = av->assoc_value;
2943 					break;
2944 				default:
2945 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2946 					error = EINVAL;
2947 					break;
2948 				}
2949 				SCTP_TCB_UNLOCK(stcb);
2950 			} else {
2951 				switch (av->assoc_value) {
2952 				case SCTP_CC_RFC2581:
2953 				case SCTP_CC_HSTCP:
2954 				case SCTP_CC_HTCP:
2955 					SCTP_INP_WLOCK(inp);
2956 					inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
2957 					SCTP_INP_WUNLOCK(inp);
2958 					break;
2959 				default:
2960 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2961 					error = EINVAL;
2962 					break;
2963 				}
2964 			}
2965 		}
2966 		break;
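		/*
		 * Illustrative userland sketch: selecting one of the
		 * congestion control modules accepted above, either for a
		 * single association (assoc_id set) or as the endpoint
		 * default (assoc_id left at zero).  "fd" is an assumption.
		 *
		 *	struct sctp_assoc_value av;
		 *
		 *	memset(&av, 0, sizeof(av));
		 *	av.assoc_value = SCTP_CC_HSTCP;
		 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC,
		 *	    &av, sizeof(av));
		 */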
2967 		/* RS - Set socket option for pluggable stream scheduling */
2968 	case SCTP_PLUGGABLE_SS:
2969 		{
2970 			struct sctp_assoc_value *av;
2971 
2972 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2973 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2974 			if (stcb) {
2975 				switch (av->assoc_value) {
2976 				case SCTP_SS_DEFAULT:
2977 				case SCTP_SS_ROUND_ROBIN:
2978 				case SCTP_SS_ROUND_ROBIN_PACKET:
2979 				case SCTP_SS_PRIORITY:
2980 				case SCTP_SS_FAIR_BANDWITH:
2981 				case SCTP_SS_FIRST_COME:
2982 					stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1);
2983 					stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value];
2984 					stcb->asoc.stream_scheduling_module = av->assoc_value;
2985 					stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
2986 					break;
2987 				default:
2988 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2989 					error = EINVAL;
2990 					break;
2991 				}
2992 				SCTP_TCB_UNLOCK(stcb);
2993 			} else {
2994 				switch (av->assoc_value) {
2995 				case SCTP_SS_DEFAULT:
2996 				case SCTP_SS_ROUND_ROBIN:
2997 				case SCTP_SS_ROUND_ROBIN_PACKET:
2998 				case SCTP_SS_PRIORITY:
2999 				case SCTP_SS_FAIR_BANDWITH:
3000 				case SCTP_SS_FIRST_COME:
3001 					SCTP_INP_WLOCK(inp);
3002 					inp->sctp_ep.sctp_default_ss_module = av->assoc_value;
3003 					SCTP_INP_WUNLOCK(inp);
3004 					break;
3005 				default:
3006 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3007 					error = EINVAL;
3008 					break;
3009 				}
3010 			}
3011 		}
3012 		break;
3013 	case SCTP_SS_VALUE:
3014 		{
3015 			struct sctp_stream_value *av;
3016 
3017 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, optsize);
3018 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3019 			if (stcb) {
3020 				if (stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id],
3021 				    av->stream_value) < 0) {
3022 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3023 					error = EINVAL;
3024 				}
3025 				SCTP_TCB_UNLOCK(stcb);
3026 			} else {
3027 				/*
3028 				 * Can't set stream value without
3029 				 * association
3030 				 */
3031 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3032 				error = EINVAL;
3033 			}
3034 		}
3035 		break;
3036 	case SCTP_CLR_STAT_LOG:
3037 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
3038 		error = EOPNOTSUPP;
3039 		break;
3040 	case SCTP_CONTEXT:
3041 		{
3042 			struct sctp_assoc_value *av;
3043 
3044 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3045 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3046 
3047 			if (stcb) {
3048 				stcb->asoc.context = av->assoc_value;
3049 				SCTP_TCB_UNLOCK(stcb);
3050 			} else {
3051 				SCTP_INP_WLOCK(inp);
3052 				inp->sctp_context = av->assoc_value;
3053 				SCTP_INP_WUNLOCK(inp);
3054 			}
3055 		}
3056 		break;
3057 	case SCTP_VRF_ID:
3058 		{
3059 			uint32_t *default_vrfid;
3060 
3061 			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
3062 			if (*default_vrfid > SCTP_MAX_VRF_ID) {
3063 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3064 				error = EINVAL;
3065 				break;
3066 			}
3067 			inp->def_vrf_id = *default_vrfid;
3068 			break;
3069 		}
3070 	case SCTP_DEL_VRF_ID:
3071 		{
3072 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
3073 			error = EOPNOTSUPP;
3074 			break;
3075 		}
3076 	case SCTP_ADD_VRF_ID:
3077 		{
3078 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
3079 			error = EOPNOTSUPP;
3080 			break;
3081 		}
3082 	case SCTP_DELAYED_SACK:
3083 		{
3084 			struct sctp_sack_info *sack;
3085 
3086 			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
3087 			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
3088 			if (sack->sack_delay) {
3089 				if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
3090 					sack->sack_delay = SCTP_MAX_SACK_DELAY;
3091 			}
3092 			if (stcb) {
3093 				if (sack->sack_delay) {
3094 					if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
3095 						sack->sack_delay = TICKS_TO_MSEC(1);
3096 					}
3097 					stcb->asoc.delayed_ack = sack->sack_delay;
3098 				}
3099 				if (sack->sack_freq) {
3100 					stcb->asoc.sack_freq = sack->sack_freq;
3101 				}
3102 				SCTP_TCB_UNLOCK(stcb);
3103 			} else {
3104 				SCTP_INP_WLOCK(inp);
3105 				if (sack->sack_delay) {
3106 					if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
3107 						sack->sack_delay = TICKS_TO_MSEC(1);
3108 					}
3109 					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
3110 				}
3111 				if (sack->sack_freq) {
3112 					inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
3113 				}
3114 				SCTP_INP_WUNLOCK(inp);
3115 			}
3116 			break;
3117 		}
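		/*
		 * Illustrative userland sketch: sack_delay is in
		 * milliseconds; a zero sack_delay or sack_freq leaves the
		 * corresponding setting untouched, as coded above, so only
		 * the fields of interest need to be filled in.  "fd" is an
		 * assumption.
		 *
		 *	struct sctp_sack_info si;
		 *
		 *	memset(&si, 0, sizeof(si));
		 *	si.sack_delay = 100;
		 *	si.sack_freq = 2;
		 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
		 *	    &si, sizeof(si));
		 */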
3118 	case SCTP_AUTH_CHUNK:
3119 		{
3120 			struct sctp_authchunk *sauth;
3121 
3122 			SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
3123 
3124 			SCTP_INP_WLOCK(inp);
3125 			if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
3126 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3127 				error = EINVAL;
3128 			}
3129 			SCTP_INP_WUNLOCK(inp);
3130 			break;
3131 		}
3132 	case SCTP_AUTH_KEY:
3133 		{
3134 			struct sctp_authkey *sca;
3135 			struct sctp_keyhead *shared_keys;
3136 			sctp_sharedkey_t *shared_key;
3137 			sctp_key_t *key = NULL;
3138 			size_t size;
3139 
3140 			SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
3141 			SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
3142 			size = optsize - sizeof(*sca);
3143 
3144 			if (stcb) {
3145 				/* set it on the assoc */
3146 				shared_keys = &stcb->asoc.shared_keys;
3147 				/* clear the cached keys for this key id */
3148 				sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
3149 				/*
3150 				 * create the new shared key and
3151 				 * insert/replace it
3152 				 */
3153 				if (size > 0) {
3154 					key = sctp_set_key(sca->sca_key, (uint32_t) size);
3155 					if (key == NULL) {
3156 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3157 						error = ENOMEM;
3158 						SCTP_TCB_UNLOCK(stcb);
3159 						break;
3160 					}
3161 				}
3162 				shared_key = sctp_alloc_sharedkey();
3163 				if (shared_key == NULL) {
3164 					sctp_free_key(key);
3165 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3166 					error = ENOMEM;
3167 					SCTP_TCB_UNLOCK(stcb);
3168 					break;
3169 				}
3170 				shared_key->key = key;
3171 				shared_key->keyid = sca->sca_keynumber;
3172 				error = sctp_insert_sharedkey(shared_keys, shared_key);
3173 				SCTP_TCB_UNLOCK(stcb);
3174 			} else {
3175 				/* set it on the endpoint */
3176 				SCTP_INP_WLOCK(inp);
3177 				shared_keys = &inp->sctp_ep.shared_keys;
3178 				/*
3179 				 * clear the cached keys on all assocs for
3180 				 * this key id
3181 				 */
3182 				sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
3183 				/*
3184 				 * create the new shared key and
3185 				 * insert/replace it
3186 				 */
3187 				if (size > 0) {
3188 					key = sctp_set_key(sca->sca_key, (uint32_t) size);
3189 					if (key == NULL) {
3190 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3191 						error = ENOMEM;
3192 						SCTP_INP_WUNLOCK(inp);
3193 						break;
3194 					}
3195 				}
3196 				shared_key = sctp_alloc_sharedkey();
3197 				if (shared_key == NULL) {
3198 					sctp_free_key(key);
3199 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3200 					error = ENOMEM;
3201 					SCTP_INP_WUNLOCK(inp);
3202 					break;
3203 				}
3204 				shared_key->key = key;
3205 				shared_key->keyid = sca->sca_keynumber;
3206 				error = sctp_insert_sharedkey(shared_keys, shared_key);
3207 				SCTP_INP_WUNLOCK(inp);
3208 			}
3209 			break;
3210 		}
3211 	case SCTP_HMAC_IDENT:
3212 		{
3213 			struct sctp_hmacalgo *shmac;
3214 			sctp_hmaclist_t *hmaclist;
3215 			uint16_t hmacid;
3216 			uint32_t i;
3217 
3218 			size_t found;
3219 
3220 			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
3221 			if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) {
3222 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3223 				error = EINVAL;
3224 				break;
3225 			}
3226 			hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents);
3227 			if (hmaclist == NULL) {
3228 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3229 				error = ENOMEM;
3230 				break;
3231 			}
3232 			for (i = 0; i < shmac->shmac_number_of_idents; i++) {
3233 				hmacid = shmac->shmac_idents[i];
3234 				if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
3235 					/* invalid HMACs were found */
3236 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3237 					error = EINVAL;
3238 					sctp_free_hmaclist(hmaclist);
3239 					goto sctp_set_hmac_done;
3240 				}
3241 			}
3242 			found = 0;
3243 			for (i = 0; i < hmaclist->num_algo; i++) {
3244 				if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
3245 					/* already in list */
3246 					found = 1;
3247 				}
3248 			}
3249 			if (!found) {
3250 				sctp_free_hmaclist(hmaclist);
3251 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3252 				error = EINVAL;
3253 				break;
3254 			}
3255 			/* set it on the endpoint */
3256 			SCTP_INP_WLOCK(inp);
3257 			if (inp->sctp_ep.local_hmacs)
3258 				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3259 			inp->sctp_ep.local_hmacs = hmaclist;
3260 			SCTP_INP_WUNLOCK(inp);
3261 	sctp_set_hmac_done:
3262 			break;
3263 		}
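		/*
		 * Illustrative userland sketch: installing an HMAC preference
		 * list.  SHA-1 has to appear somewhere in the list or the
		 * code above rejects it with EINVAL.  The single-entry list
		 * and "fd" are assumptions.
		 *
		 *	struct sctp_hmacalgo *algo;
		 *	socklen_t len = sizeof(*algo) + sizeof(uint16_t);
		 *
		 *	algo = malloc(len);
		 *	memset(algo, 0, len);
		 *	algo->shmac_number_of_idents = 1;
		 *	algo->shmac_idents[0] = SCTP_AUTH_HMAC_ID_SHA1;
		 *	setsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, len);
		 */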
3264 	case SCTP_AUTH_ACTIVE_KEY:
3265 		{
3266 			struct sctp_authkeyid *scact;
3267 
3268 			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid,
3269 			    optsize);
3270 			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
3271 
3272 			/* set the active key on the right place */
3273 			if (stcb) {
3274 				/* set the active key on the assoc */
3275 				if (sctp_auth_setactivekey(stcb,
3276 				    scact->scact_keynumber)) {
3277 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3278 					    SCTP_FROM_SCTP_USRREQ,
3279 					    EINVAL);
3280 					error = EINVAL;
3281 				}
3282 				SCTP_TCB_UNLOCK(stcb);
3283 			} else {
3284 				/* set the active key on the endpoint */
3285 				SCTP_INP_WLOCK(inp);
3286 				if (sctp_auth_setactivekey_ep(inp,
3287 				    scact->scact_keynumber)) {
3288 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3289 					    SCTP_FROM_SCTP_USRREQ,
3290 					    EINVAL);
3291 					error = EINVAL;
3292 				}
3293 				SCTP_INP_WUNLOCK(inp);
3294 			}
3295 			break;
3296 		}
3297 	case SCTP_AUTH_DELETE_KEY:
3298 		{
3299 			struct sctp_authkeyid *scdel;
3300 
3301 			SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid,
3302 			    optsize);
3303 			SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
3304 
3305 			/* delete the key from the right place */
3306 			if (stcb) {
3307 				if (sctp_delete_sharedkey(stcb,
3308 				    scdel->scact_keynumber)) {
3309 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3310 					    SCTP_FROM_SCTP_USRREQ,
3311 					    EINVAL);
3312 					error = EINVAL;
3313 				}
3314 				SCTP_TCB_UNLOCK(stcb);
3315 			} else {
3316 				SCTP_INP_WLOCK(inp);
3317 				if (sctp_delete_sharedkey_ep(inp,
3318 				    scdel->scact_keynumber)) {
3319 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3320 					    SCTP_FROM_SCTP_USRREQ,
3321 					    EINVAL);
3322 					error = EINVAL;
3323 				}
3324 				SCTP_INP_WUNLOCK(inp);
3325 			}
3326 			break;
3327 		}
3328 	case SCTP_AUTH_DEACTIVATE_KEY:
3329 		{
3330 			struct sctp_authkeyid *keyid;
3331 
3332 			SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid,
3333 			    optsize);
3334 			SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
3335 
3336 			/* deactivate the key from the right place */
3337 			if (stcb) {
3338 				if (sctp_deact_sharedkey(stcb,
3339 				    keyid->scact_keynumber)) {
3340 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3341 					    SCTP_FROM_SCTP_USRREQ,
3342 					    EINVAL);
3343 					error = EINVAL;
3344 				}
3345 				SCTP_TCB_UNLOCK(stcb);
3346 			} else {
3347 				SCTP_INP_WLOCK(inp);
3348 				if (sctp_deact_sharedkey_ep(inp,
3349 				    keyid->scact_keynumber)) {
3350 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3351 					    SCTP_FROM_SCTP_USRREQ,
3352 					    EINVAL);
3353 					error = EINVAL;
3354 				}
3355 				SCTP_INP_WUNLOCK(inp);
3356 			}
3357 			break;
3358 		}
3359 
3360 	case SCTP_RESET_STREAMS:
3361 		{
3362 			struct sctp_stream_reset *strrst;
3363 			uint8_t send_in = 0, send_tsn = 0, send_out = 0,
3364 			        addstream = 0;
3365 			uint16_t addstrmcnt = 0;
3366 			int i;
3367 
3368 			SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
3369 			SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
3370 
3371 			if (stcb == NULL) {
3372 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3373 				error = ENOENT;
3374 				break;
3375 			}
3376 			if (stcb->asoc.peer_supports_strreset == 0) {
3377 				/*
3378 				 * The peer does not support it; we return
3379 				 * "protocol not supported" since this applies
3380 				 * to this feature and this peer, not to the
3381 				 * socket request in general.
3382 				 */
3383 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT);
3384 				error = EPROTONOSUPPORT;
3385 				SCTP_TCB_UNLOCK(stcb);
3386 				break;
3387 			}
3388 			if (stcb->asoc.stream_reset_outstanding) {
3389 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3390 				error = EALREADY;
3391 				SCTP_TCB_UNLOCK(stcb);
3392 				break;
3393 			}
3394 			if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
3395 				send_in = 1;
3396 			} else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
3397 				send_out = 1;
3398 			} else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
3399 				send_in = 1;
3400 				send_out = 1;
3401 			} else if (strrst->strrst_flags == SCTP_RESET_TSN) {
3402 				send_tsn = 1;
3403 			} else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) {
3404 				if (send_tsn ||
3405 				    send_in ||
3406 				    send_out) {
3407 					/* We can't do that and add streams */
3408 					error = EINVAL;
3409 					goto skip_stuff;
3410 				}
3411 				if (stcb->asoc.stream_reset_outstanding) {
3412 					error = EBUSY;
3413 					goto skip_stuff;
3414 				}
3415 				addstream = 1;
3416 				/* We allocate here */
3417 				addstrmcnt = strrst->strrst_num_streams;
3418 				if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) {
3419 					/* You can't have more than 64k */
3420 					error = EINVAL;
3421 					goto skip_stuff;
3422 				}
3423 				if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) {
3424 					/* Need to allocate more */
3425 					struct sctp_stream_out *oldstream;
3426 					struct sctp_stream_queue_pending *sp,
3427 					                         *nsp;
3428 
3429 					oldstream = stcb->asoc.strmout;
3430 					/* get some more */
3431 					SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
3432 					    ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)),
3433 					    SCTP_M_STRMO);
3434 					if (stcb->asoc.strmout == NULL) {
3435 						stcb->asoc.strmout = oldstream;
3436 						error = ENOMEM;
3437 						goto skip_stuff;
3438 					}
3439 					/*
3440 					 * OK, now we proceed with copying
3441 					 * the old outbound streams and
3442 					 * initializing the new ones.
3443 					 */
3444 					SCTP_TCB_SEND_LOCK(stcb);
3445 					stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
3446 					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3447 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3448 						stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent;
3449 						stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
3450 						stcb->asoc.strmout[i].stream_no = i;
3451 						stcb->asoc.ss_functions.sctp_ss_init_stream(&oldstream[i]);
3452 						/*
3453 						 * now anything on those
3454 						 * queues?
3455 						 */
3456 						TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
3457 							TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
3458 							TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
3459 						}
3460 						/*
3461 						 * Now move assoc pointers
3462 						 * too
3463 						 */
3464 						if (stcb->asoc.last_out_stream == &oldstream[i]) {
3465 							stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
3466 						}
3467 						if (stcb->asoc.locked_on_sending == &oldstream[i]) {
3468 							stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
3469 						}
3470 					}
3471 					/* now the new streams */
3472 					stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
3473 					for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) {
3474 						stcb->asoc.strmout[i].next_sequence_sent = 0x0;
3475 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3476 						stcb->asoc.strmout[i].stream_no = i;
3477 						stcb->asoc.strmout[i].last_msg_incomplete = 0;
3478 						stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i]);
3479 					}
3480 					stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt;
3481 					SCTP_FREE(oldstream, SCTP_M_STRMO);
3482 				}
3483 				SCTP_TCB_SEND_UNLOCK(stcb);
3484 				goto skip_stuff;
3485 			} else {
3486 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3487 				error = EINVAL;
3488 				SCTP_TCB_UNLOCK(stcb);
3489 				break;
3490 			}
3491 			for (i = 0; i < strrst->strrst_num_streams; i++) {
3492 				if ((send_in) &&
3494 				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
3495 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3496 					error = EINVAL;
3497 					goto get_out;
3498 				}
3499 				if ((send_out) &&
3500 				    (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
3501 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3502 					error = EINVAL;
3503 					goto get_out;
3504 				}
3505 			}
3506 	skip_stuff:
3507 			if (error) {
3508 		get_out:
3509 				SCTP_TCB_UNLOCK(stcb);
3510 				break;
3511 			}
3512 			error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
3513 			    strrst->strrst_list,
3514 			    send_out, (stcb->asoc.str_reset_seq_in - 3),
3515 			    send_in, send_tsn, addstream, addstrmcnt);
3516 
3517 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
3518 			SCTP_TCB_UNLOCK(stcb);
3519 		}
3520 		break;
3521 
3522 	case SCTP_CONNECT_X:
3523 		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3524 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3525 			error = EINVAL;
3526 			break;
3527 		}
3528 		error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
3529 		break;
3530 
3531 	case SCTP_CONNECT_X_DELAYED:
3532 		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3533 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3534 			error = EINVAL;
3535 			break;
3536 		}
3537 		error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
3538 		break;
3539 
3540 	case SCTP_CONNECT_X_COMPLETE:
3541 		{
3542 			struct sockaddr *sa;
3543 			struct sctp_nets *net;
3544 
3545 			/* FIXME MT: check correct? */
3546 			SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
3547 
3548 			/* find tcb */
3549 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3550 				SCTP_INP_RLOCK(inp);
3551 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
3552 				if (stcb) {
3553 					SCTP_TCB_LOCK(stcb);
3554 					net = sctp_findnet(stcb, sa);
3555 				}
3556 				SCTP_INP_RUNLOCK(inp);
3557 			} else {
3558 				/*
3559 				 * We increment here since
3560 				 * sctp_findassociation_ep_addr() will do a
3561 				 * decrement if it finds the stcb, as long as
3562 				 * the locked tcb (last argument) is NOT a
3563 				 * TCB, i.e. NULL.
3564 				 */
3565 				SCTP_INP_INCR_REF(inp);
3566 				stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
3567 				if (stcb == NULL) {
3568 					SCTP_INP_DECR_REF(inp);
3569 				}
3570 			}
3571 
3572 			if (stcb == NULL) {
3573 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3574 				error = ENOENT;
3575 				break;
3576 			}
3577 			if (stcb->asoc.delayed_connection == 1) {
3578 				stcb->asoc.delayed_connection = 0;
3579 				(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3580 				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
3581 				    stcb->asoc.primary_destination,
3582 				    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
3583 				sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
3584 			} else {
3585 				/*
3586 				 * already expired or did not use delayed
3587 				 * connectx
3588 				 */
3589 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3590 				error = EALREADY;
3591 			}
3592 			SCTP_TCB_UNLOCK(stcb);
3593 		}
3594 		break;
3595 	case SCTP_MAX_BURST:
3596 		{
3597 			struct sctp_assoc_value *av;
3598 
3599 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3600 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3601 
3602 			if (stcb) {
3603 				stcb->asoc.max_burst = av->assoc_value;
3604 				SCTP_TCB_UNLOCK(stcb);
3605 			} else {
3606 				SCTP_INP_WLOCK(inp);
3607 				inp->sctp_ep.max_burst = av->assoc_value;
3608 				SCTP_INP_WUNLOCK(inp);
3609 			}
3610 		}
3611 		break;
3612 	case SCTP_MAXSEG:
3613 		{
3614 			struct sctp_assoc_value *av;
3615 			int ovh;
3616 
3617 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3618 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3619 
3620 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3621 				ovh = SCTP_MED_OVERHEAD;
3622 			} else {
3623 				ovh = SCTP_MED_V4_OVERHEAD;
3624 			}
3625 			if (stcb) {
3626 				if (av->assoc_value) {
3627 					stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
3628 				} else {
3629 					stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3630 				}
3631 				SCTP_TCB_UNLOCK(stcb);
3632 			} else {
3633 				SCTP_INP_WLOCK(inp);
3634 				/*
3635 				 * FIXME MT: I think this is not in tune
3636 				 * with the API ID
3637 				 */
3638 				if (av->assoc_value) {
3639 					inp->sctp_frag_point = (av->assoc_value + ovh);
3640 				} else {
3641 					inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3642 				}
3643 				SCTP_INP_WUNLOCK(inp);
3644 			}
3645 		}
3646 		break;
3647 	case SCTP_EVENTS:
3648 		{
3649 			struct sctp_event_subscribe *events;
3650 
3651 			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
3652 
3653 			SCTP_INP_WLOCK(inp);
3654 			if (events->sctp_data_io_event) {
3655 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3656 			} else {
3657 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3658 			}
3659 
3660 			if (events->sctp_association_event) {
3661 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3662 			} else {
3663 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3664 			}
3665 
3666 			if (events->sctp_address_event) {
3667 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3668 			} else {
3669 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3670 			}
3671 
3672 			if (events->sctp_send_failure_event) {
3673 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3674 			} else {
3675 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3676 			}
3677 
3678 			if (events->sctp_peer_error_event) {
3679 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3680 			} else {
3681 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3682 			}
3683 
3684 			if (events->sctp_shutdown_event) {
3685 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3686 			} else {
3687 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3688 			}
3689 
3690 			if (events->sctp_partial_delivery_event) {
3691 				sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3692 			} else {
3693 				sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3694 			}
3695 
3696 			if (events->sctp_adaptation_layer_event) {
3697 				sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3698 			} else {
3699 				sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3700 			}
3701 
3702 			if (events->sctp_authentication_event) {
3703 				sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3704 			} else {
3705 				sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3706 			}
3707 
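			/*
			 * Enabling dry events on a 1-to-1 style socket that
			 * already has nothing queued delivers the
			 * SCTP_SENDER_DRY notification right away.
			 */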
3708 			if (events->sctp_sender_dry_event) {
3709 				sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
3710 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3711 				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3712 					stcb = LIST_FIRST(&inp->sctp_asoc_list);
3713 					if (stcb) {
3714 						SCTP_TCB_LOCK(stcb);
3715 					}
3716 					if (stcb &&
3717 					    TAILQ_EMPTY(&stcb->asoc.send_queue) &&
3718 					    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
3719 					    (stcb->asoc.stream_queue_cnt == 0)) {
3720 						sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
3721 					}
3722 					if (stcb) {
3723 						SCTP_TCB_UNLOCK(stcb);
3724 					}
3725 				}
3726 			} else {
3727 				sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
3728 			}
3729 
3730 			if (events->sctp_stream_reset_event) {
3731 				sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3732 			} else {
3733 				sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3734 			}
3735 			SCTP_INP_WUNLOCK(inp);
3736 		}
3737 		break;
3738 
3739 	case SCTP_ADAPTATION_LAYER:
3740 		{
3741 			struct sctp_setadaptation *adap_bits;
3742 
3743 			SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
3744 			SCTP_INP_WLOCK(inp);
3745 			inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
3746 			SCTP_INP_WUNLOCK(inp);
3747 		}
3748 		break;
3749 #ifdef SCTP_DEBUG
3750 	case SCTP_SET_INITIAL_DBG_SEQ:
3751 		{
3752 			uint32_t *vvv;
3753 
3754 			SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3755 			SCTP_INP_WLOCK(inp);
3756 			inp->sctp_ep.initial_sequence_debug = *vvv;
3757 			SCTP_INP_WUNLOCK(inp);
3758 		}
3759 		break;
3760 #endif
3761 	case SCTP_DEFAULT_SEND_PARAM:
3762 		{
3763 			struct sctp_sndrcvinfo *s_info;
3764 
3765 			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3766 			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3767 
3768 			if (stcb) {
3769 				if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
3770 					memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
3771 				} else {
3772 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3773 					error = EINVAL;
3774 				}
3775 				SCTP_TCB_UNLOCK(stcb);
3776 			} else {
3777 				SCTP_INP_WLOCK(inp);
3778 				memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
3779 				SCTP_INP_WUNLOCK(inp);
3780 			}
3781 		}
3782 		break;
3783 	case SCTP_PEER_ADDR_PARAMS:
3784 	/* Applies to the specific association */
3785 		{
3786 			struct sctp_paddrparams *paddrp;
3787 			struct sctp_nets *net;
3788 
3789 			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3790 			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3791 			net = NULL;
3792 			if (stcb) {
3793 				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3794 			} else {
3795 				/*
3796 				 * We increment here since
3797 				 * sctp_findassociation_ep_addr() will do a
3798 				 * decrement if it finds the stcb, as long as
3799 				 * the locked tcb (last argument) is NOT a
3800 				 * TCB, i.e. NULL.
3801 				 */
3802 				SCTP_INP_INCR_REF(inp);
3803 				stcb = sctp_findassociation_ep_addr(&inp,
3804 				    (struct sockaddr *)&paddrp->spp_address,
3805 				    &net, NULL, NULL);
3806 				if (stcb == NULL) {
3807 					SCTP_INP_DECR_REF(inp);
3808 				}
3809 			}
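			/*
			 * An stcb with no matching net means the caller gave
			 * a wildcard address; only the unspecified address is
			 * accepted here, anything else is rejected.
			 */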
3810 			if (stcb && (net == NULL)) {
3811 				struct sockaddr *sa;
3812 
3813 				sa = (struct sockaddr *)&paddrp->spp_address;
3814 				if (sa->sa_family == AF_INET) {
3815 					struct sockaddr_in *sin;
3816 
3817 					sin = (struct sockaddr_in *)sa;
3818 					if (sin->sin_addr.s_addr) {
3819 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3820 						SCTP_TCB_UNLOCK(stcb);
3821 						error = EINVAL;
3822 						break;
3823 					}
3824 				} else if (sa->sa_family == AF_INET6) {
3825 					struct sockaddr_in6 *sin6;
3826 
3827 					sin6 = (struct sockaddr_in6 *)sa;
3828 					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
3829 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3830 						SCTP_TCB_UNLOCK(stcb);
3831 						error = EINVAL;
3832 						break;
3833 					}
3834 				} else {
3835 					error = EAFNOSUPPORT;
3836 					SCTP_TCB_UNLOCK(stcb);
3837 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
3838 					break;
3839 				}
3840 			}
3841 			/* sanity checks */
3842 			if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
3843 				if (stcb)
3844 					SCTP_TCB_UNLOCK(stcb);
3845 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3846 				return (EINVAL);
3847 			}
3848 			if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
3849 				if (stcb)
3850 					SCTP_TCB_UNLOCK(stcb);
3851 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3852 				return (EINVAL);
3853 			}
3854 			if (stcb) {
3855 				/************************TCB SPECIFIC SET ******************/
3856 				/*
3857 				 * Do we change the timer for HB? We run
3858 				 * only one.
3859 				 */
3860 				int ovh = 0;
3861 
3862 				if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3863 					ovh = SCTP_MED_OVERHEAD;
3864 				} else {
3865 					ovh = SCTP_MED_V4_OVERHEAD;
3866 				}
3867 
3868 				if (paddrp->spp_hbinterval)
3869 					stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3870 				else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3871 					stcb->asoc.heart_beat_delay = 0;
3872 
3873 				/* network sets ? */
3874 				if (net) {
3875 					/************************NET SPECIFIC SET ******************/
3876 					if (paddrp->spp_flags & SPP_HB_DEMAND) {
3877 						/* on demand HB */
3878 						if (sctp_send_hb(stcb, 1, net) < 0) {
3879 							/* asoc destroyed */
3880 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3881 							error = EINVAL;
3882 							break;
3883 						}
3884 					}
3885 					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3886 						net->dest_state |= SCTP_ADDR_NOHB;
3887 					}
3888 					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3889 						net->dest_state &= ~SCTP_ADDR_NOHB;
3890 					}
3891 					if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3892 						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3893 							sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3894 							    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3895 						}
3896 						if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3897 							net->mtu = paddrp->spp_pathmtu + ovh;
3898 							if (net->mtu < stcb->asoc.smallest_mtu) {
3899 								sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3900 							}
3901 						}
3902 					}
3903 					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3904 						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3905 							sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3906 						}
3907 					}
3908 					if (paddrp->spp_pathmaxrxt)
3909 						net->failure_threshold = paddrp->spp_pathmaxrxt;
3910 #ifdef INET
3911 					if (paddrp->spp_flags & SPP_IPV4_TOS) {
3912 						if (net->ro._l_addr.sin.sin_family == AF_INET) {
3913 							net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3914 						}
3915 					}
3916 #endif
3917 #ifdef INET6
3918 					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3919 						if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3920 							net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3921 						}
3922 					}
3923 #endif
3924 				} else {
3925 					/************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3926 					if (paddrp->spp_pathmaxrxt)
3927 						stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3928 
3929 					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3930 						/* Turn back on the timer */
3931 						stcb->asoc.hb_is_disabled = 0;
3932 						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3933 					}
3934 					if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3935 						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3936 							if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3937 								sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3938 								    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3939 							}
3940 							if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3941 								net->mtu = paddrp->spp_pathmtu + ovh;
3942 								if (net->mtu < stcb->asoc.smallest_mtu) {
3943 									sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3944 								}
3945 							}
3946 						}
3947 					}
3948 					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3949 						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3950 							if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3951 								sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3952 							}
3953 						}
3954 					}
3955 					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3956 						int cnt_of_unconf = 0;
3957 						struct sctp_nets *lnet;
3958 
3959 						stcb->asoc.hb_is_disabled = 1;
3960 						TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3961 							if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3962 								cnt_of_unconf++;
3963 							}
3964 						}
3965 						/*
3966 						 * stop the timer ONLY if we
3967 						 * have no unconfirmed
3968 						 * addresses
3969 						 */
3970 						if (cnt_of_unconf == 0) {
3971 							TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3972 								sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
3973 								    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3974 							}
3975 						}
3976 					}
3977 					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3978 						/* start up the timer. */
3979 						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3980 							sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3981 						}
3982 					}
3983 #ifdef INET
3984 					if (paddrp->spp_flags & SPP_IPV4_TOS)
3985 						stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3986 #endif
3987 #ifdef INET6
3988 					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3989 						stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3990 #endif
3991 
3992 				}
3993 				SCTP_TCB_UNLOCK(stcb);
3994 			} else {
3995 				/************************NO TCB, SET TO default stuff ******************/
3996 				SCTP_INP_WLOCK(inp);
3997 				/*
3998 				 * The TOS/FLOWLABEL defaults are set via
3999 				 * the options on the socket.
4000 				 */
4001 				if (paddrp->spp_pathmaxrxt) {
4002 					inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
4003 				}
4004 				if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
4005 					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
4006 				else if (paddrp->spp_hbinterval) {
4007 					if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
4008 						paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
4009 					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
4010 				}
4011 				if (paddrp->spp_flags & SPP_HB_ENABLE) {
4012 					sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
4013 
4014 				} else if (paddrp->spp_flags & SPP_HB_DISABLE) {
4015 					sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
4016 				}
4017 				SCTP_INP_WUNLOCK(inp);
4018 			}
4019 		}
4020 		break;
4021 	case SCTP_RTOINFO:
4022 		{
4023 			struct sctp_rtoinfo *srto;
4024 			uint32_t new_init, new_min, new_max;
4025 
4026 			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
4027 			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
4028 
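			/*
			 * A zero field keeps the current value; the triple is
			 * only applied if min <= initial <= max holds.
			 */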
4029 			if (stcb) {
4030 				if (srto->srto_initial)
4031 					new_init = srto->srto_initial;
4032 				else
4033 					new_init = stcb->asoc.initial_rto;
4034 				if (srto->srto_max)
4035 					new_max = srto->srto_max;
4036 				else
4037 					new_max = stcb->asoc.maxrto;
4038 				if (srto->srto_min)
4039 					new_min = srto->srto_min;
4040 				else
4041 					new_min = stcb->asoc.minrto;
4042 				if ((new_min <= new_init) && (new_init <= new_max)) {
4043 					stcb->asoc.initial_rto = new_init;
4044 					stcb->asoc.maxrto = new_max;
4045 					stcb->asoc.minrto = new_min;
4046 				} else {
4047 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4048 					error = EINVAL;
4049 				}
4050 				SCTP_TCB_UNLOCK(stcb);
4051 			} else {
4052 				SCTP_INP_WLOCK(inp);
4053 				if (srto->srto_initial)
4054 					new_init = srto->srto_initial;
4055 				else
4056 					new_init = inp->sctp_ep.initial_rto;
4057 				if (srto->srto_max)
4058 					new_max = srto->srto_max;
4059 				else
4060 					new_max = inp->sctp_ep.sctp_maxrto;
4061 				if (srto->srto_min)
4062 					new_min = srto->srto_min;
4063 				else
4064 					new_min = inp->sctp_ep.sctp_minrto;
4065 				if ((new_min <= new_init) && (new_init <= new_max)) {
4066 					inp->sctp_ep.initial_rto = new_init;
4067 					inp->sctp_ep.sctp_maxrto = new_max;
4068 					inp->sctp_ep.sctp_minrto = new_min;
4069 				} else {
4070 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4071 					error = EINVAL;
4072 				}
4073 				SCTP_INP_WUNLOCK(inp);
4074 			}
4075 		}
4076 		break;
4077 	case SCTP_ASSOCINFO:
4078 		{
4079 			struct sctp_assocparams *sasoc;
4080 
4081 			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
4082 			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
4083 			if (sasoc->sasoc_cookie_life) {
4084 				/* boundary check the cookie life */
4085 				if (sasoc->sasoc_cookie_life < 1000)
4086 					sasoc->sasoc_cookie_life = 1000;
4087 				if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
4088 					sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
4089 				}
4090 			}
4091 			if (stcb) {
4092 				if (sasoc->sasoc_asocmaxrxt)
4093 					stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
4094 				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
4095 				sasoc->sasoc_peer_rwnd = 0;
4096 				sasoc->sasoc_local_rwnd = 0;
4097 				if (sasoc->sasoc_cookie_life) {
4098 					stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4099 				}
4100 				SCTP_TCB_UNLOCK(stcb);
4101 			} else {
4102 				SCTP_INP_WLOCK(inp);
4103 				if (sasoc->sasoc_asocmaxrxt)
4104 					inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
4105 				sasoc->sasoc_number_peer_destinations = 0;
4106 				sasoc->sasoc_peer_rwnd = 0;
4107 				sasoc->sasoc_local_rwnd = 0;
4108 				if (sasoc->sasoc_cookie_life) {
4109 					inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4110 				}
4111 				SCTP_INP_WUNLOCK(inp);
4112 			}
4113 		}
4114 		break;
4115 	case SCTP_INITMSG:
4116 		{
4117 			struct sctp_initmsg *sinit;
4118 
4119 			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
4120 			SCTP_INP_WLOCK(inp);
4121 			if (sinit->sinit_num_ostreams)
4122 				inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
4123 
4124 			if (sinit->sinit_max_instreams)
4125 				inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
4126 
4127 			if (sinit->sinit_max_attempts)
4128 				inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
4129 
4130 			if (sinit->sinit_max_init_timeo)
4131 				inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
4132 			SCTP_INP_WUNLOCK(inp);
4133 		}
4134 		break;
4135 	case SCTP_PRIMARY_ADDR:
4136 		{
4137 			struct sctp_setprim *spa;
4138 			struct sctp_nets *net, *lnet;
4139 
4140 			SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
4141 			SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
4142 
4143 			net = NULL;
4144 			if (stcb) {
4145 				net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
4146 			} else {
4147 				/*
4148 				 * We increment here since
4149 				 * sctp_findassociation_ep_addr() will do a
4150 				 * decrement if it finds the stcb, as long as
4151 				 * the locked tcb (last argument) is NOT a
4152 				 * TCB, i.e. NULL.
4153 				 */
4154 				SCTP_INP_INCR_REF(inp);
4155 				stcb = sctp_findassociation_ep_addr(&inp,
4156 				    (struct sockaddr *)&spa->ssp_addr,
4157 				    &net, NULL, NULL);
4158 				if (stcb == NULL) {
4159 					SCTP_INP_DECR_REF(inp);
4160 				}
4161 			}
4162 
4163 			if ((stcb) && (net)) {
4164 				if ((net != stcb->asoc.primary_destination) &&
4165 				    (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
4166 					/* Ok we need to set it */
4167 					lnet = stcb->asoc.primary_destination;
4168 					if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
4169 						if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
4170 							net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
4171 						}
4172 						net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
4173 					}
4174 				}
4175 			} else {
4176 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4177 				error = EINVAL;
4178 			}
4179 			if (stcb) {
4180 				SCTP_TCB_UNLOCK(stcb);
4181 			}
4182 		}
4183 		break;
4184 	case SCTP_SET_DYNAMIC_PRIMARY:
4185 		{
4186 			union sctp_sockstore *ss;
4187 
4188 			error = priv_check(curthread,
4189 			    PRIV_NETINET_RESERVEDPORT);
4190 			if (error)
4191 				break;
4192 
4193 			SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
4194 			/* SUPER USER CHECK? */
4195 			error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
4196 		}
4197 		break;
4198 	case SCTP_SET_PEER_PRIMARY_ADDR:
4199 		{
4200 			struct sctp_setpeerprim *sspp;
4201 
4202 			SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
4203 			SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
4204 			if (stcb != NULL) {
4205 				struct sctp_ifa *ifa;
4206 
4207 				ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
4208 				    stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
4209 				if (ifa == NULL) {
4210 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4211 					error = EINVAL;
4212 					goto out_of_it;
4213 				}
4214 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4215 					/*
4216 					 * Must validate that the ifa found
4217 					 * is part of our endpoint.
4218 					 */
4219 					struct sctp_laddr *laddr;
4220 					int found = 0;
4221 
4222 					LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4223 						if (laddr->ifa == NULL) {
4224 							SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
4225 							    __FUNCTION__);
4226 							continue;
4227 						}
4228 						if (laddr->ifa == ifa) {
4229 							found = 1;
4230 							break;
4231 						}
4232 					}
4233 					if (!found) {
4234 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4235 						error = EINVAL;
4236 						goto out_of_it;
4237 					}
4238 				}
4239 				if (sctp_set_primary_ip_address_sa(stcb,
4240 				    (struct sockaddr *)&sspp->sspp_addr) != 0) {
4241 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4242 					error = EINVAL;
4243 				}
4244 		out_of_it:
4245 				SCTP_TCB_UNLOCK(stcb);
4246 			} else {
4247 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4248 				error = EINVAL;
4249 			}
4250 
4251 		}
4252 		break;
4253 	case SCTP_BINDX_ADD_ADDR:
4254 		{
4255 			struct sctp_getaddresses *addrs;
4256 			size_t sz;
4257 			struct thread *td;
4258 
4259 			td = (struct thread *)p;
4260 			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
4261 			    optsize);
4262 			if (addrs->addr->sa_family == AF_INET) {
4263 				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4264 				if (optsize < sz) {
4265 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4266 					error = EINVAL;
4267 					break;
4268 				}
4269 				if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4270 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4271 					break;
4272 				}
4273 #ifdef INET6
4274 			} else if (addrs->addr->sa_family == AF_INET6) {
4275 				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4276 				if (optsize < sz) {
4277 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4278 					error = EINVAL;
4279 					break;
4280 				}
4281 				if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4282 				    (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4283 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4284 					break;
4285 				}
4286 #endif
4287 			} else {
4288 				error = EAFNOSUPPORT;
4289 				break;
4290 			}
4291 			sctp_bindx_add_address(so, inp, addrs->addr,
4292 			    addrs->sget_assoc_id, vrf_id,
4293 			    &error, p);
4294 		}
4295 		break;
4296 	case SCTP_BINDX_REM_ADDR:
4297 		{
4298 			struct sctp_getaddresses *addrs;
4299 			size_t sz;
4300 			struct thread *td;
4301 
4302 			td = (struct thread *)p;
4303 
4304 			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
4305 			if (addrs->addr->sa_family == AF_INET) {
4306 				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4307 				if (optsize < sz) {
4308 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4309 					error = EINVAL;
4310 					break;
4311 				}
4312 				if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4313 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4314 					break;
4315 				}
4316 #ifdef INET6
4317 			} else if (addrs->addr->sa_family == AF_INET6) {
4318 				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4319 				if (optsize < sz) {
4320 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4321 					error = EINVAL;
4322 					break;
4323 				}
4324 				if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4325 				    (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4326 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4327 					break;
4328 				}
4329 #endif
4330 			} else {
4331 				error = EAFNOSUPPORT;
4332 				break;
4333 			}
4334 			sctp_bindx_delete_address(so, inp, addrs->addr,
4335 			    addrs->sget_assoc_id, vrf_id,
4336 			    &error);
4337 		}
4338 		break;
4339 	default:
4340 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
4341 		error = ENOPROTOOPT;
4342 		break;
4343 	}			/* end switch (opt) */
4344 	return (error);
4345 }
4346 
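/*
 * Socket option entry point: copy the option value in, dispatch to
 * sctp_setopt() or sctp_getopt(), and copy any result back out.
 * Options at a level other than IPPROTO_SCTP are handed to
 * ip_ctloutput()/ip6_ctloutput().
 */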
4347 int
4348 sctp_ctloutput(struct socket *so, struct sockopt *sopt)
4349 {
4350 	void *optval = NULL;
4351 	size_t optsize = 0;
4352 	struct sctp_inpcb *inp;
4353 	void *p;
4354 	int error = 0;
4355 
4356 	inp = (struct sctp_inpcb *)so->so_pcb;
4357 	if (inp == 0) {
4358 		/* I made the same as TCP since we are not set up? */
4359 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4360 		return (ECONNRESET);
4361 	}
4362 	if (sopt->sopt_level != IPPROTO_SCTP) {
4363 		/* wrong proto level... send back up to IP */
4364 #ifdef INET6
4365 		if (INP_CHECK_SOCKAF(so, AF_INET6))
4366 			error = ip6_ctloutput(so, sopt);
4367 		else
4368 #endif				/* INET6 */
4369 			error = ip_ctloutput(so, sopt);
4370 		return (error);
4371 	}
4372 	optsize = sopt->sopt_valsize;
4373 	if (optsize) {
4374 		SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
4375 		if (optval == NULL) {
4376 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
4377 			return (ENOBUFS);
4378 		}
4379 		error = sooptcopyin(sopt, optval, optsize, optsize);
4380 		if (error) {
4381 			SCTP_FREE(optval, SCTP_M_SOCKOPT);
4382 			goto out;
4383 		}
4384 	}
4385 	p = (void *)sopt->sopt_td;
4386 	if (sopt->sopt_dir == SOPT_SET) {
4387 		error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
4388 	} else if (sopt->sopt_dir == SOPT_GET) {
4389 		error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
4390 	} else {
4391 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4392 		error = EINVAL;
4393 	}
4394 	if ((error == 0) && (optval != NULL)) {
4395 		error = sooptcopyout(sopt, optval, optsize);
4396 		SCTP_FREE(optval, SCTP_M_SOCKOPT);
4397 	} else if (optval != NULL) {
4398 		SCTP_FREE(optval, SCTP_M_SOCKOPT);
4399 	}
4400 out:
4401 	return (error);
4402 }
4403 
4404 
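/*
 * connect(2) handler: validate the peer address, bind an ephemeral port if
 * the endpoint is still unbound, then allocate the association and send the
 * INIT chunk.
 */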
4405 static int
4406 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
4407 {
4408 	int error = 0;
4409 	int create_lock_on = 0;
4410 	uint32_t vrf_id;
4411 	struct sctp_inpcb *inp;
4412 	struct sctp_tcb *stcb = NULL;
4413 
4414 	inp = (struct sctp_inpcb *)so->so_pcb;
4415 	if (inp == 0) {
4416 		/* I made the same as TCP since we are not set up? */
4417 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4418 		return (ECONNRESET);
4419 	}
4420 	if (addr == NULL) {
4421 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4422 		return EINVAL;
4423 	}
4424 #ifdef INET6
4425 	if (addr->sa_family == AF_INET6) {
4426 		struct sockaddr_in6 *sin6p;
4427 
4428 		if (addr->sa_len != sizeof(struct sockaddr_in6)) {
4429 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4430 			return (EINVAL);
4431 		}
4432 		sin6p = (struct sockaddr_in6 *)addr;
4433 		if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
4434 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4435 			return (error);
4436 		}
4437 	} else
4438 #endif
4439 	if (addr->sa_family == AF_INET) {
4440 		struct sockaddr_in *sinp;
4441 
4442 		if (addr->sa_len != sizeof(struct sockaddr_in)) {
4443 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4444 			return (EINVAL);
4445 		}
4446 		sinp = (struct sockaddr_in *)addr;
4447 		if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
4448 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4449 			return (error);
4450 		}
4451 	} else {
4452 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
4453 		return (EAFNOSUPPORT);
4454 	}
4455 	SCTP_INP_INCR_REF(inp);
4456 	SCTP_ASOC_CREATE_LOCK(inp);
4457 	create_lock_on = 1;
4458 
4459 
4460 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4461 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4462 		/* Should I really unlock ? */
4463 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
4464 		error = EFAULT;
4465 		goto out_now;
4466 	}
4467 #ifdef INET6
4468 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
4469 	    (addr->sa_family == AF_INET6)) {
4470 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4471 		error = EINVAL;
4472 		goto out_now;
4473 	}
4474 #endif				/* INET6 */
4475 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
4476 	    SCTP_PCB_FLAGS_UNBOUND) {
4477 		/* Bind an ephemeral port */
4478 		error = sctp_inpcb_bind(so, NULL, NULL, p);
4479 		if (error) {
4480 			goto out_now;
4481 		}
4482 	}
4483 	/* Now do we connect? */
4484 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
4485 	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
4486 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4487 		error = EINVAL;
4488 		goto out_now;
4489 	}
4490 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4491 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4492 		/* We are already connected AND the TCP model */
4493 		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4494 		error = EADDRINUSE;
4495 		goto out_now;
4496 	}
4497 	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4498 		SCTP_INP_RLOCK(inp);
4499 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
4500 		SCTP_INP_RUNLOCK(inp);
4501 	} else {
4502 		/*
4503 		 * We increment here since sctp_findassociation_ep_addr()
4504 		 * will do a decrement if it finds the stcb as long as the
4505 		 * locked tcb (last argument) is NOT a TCB, i.e. NULL.
4506 		 */
4507 		SCTP_INP_INCR_REF(inp);
4508 		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
4509 		if (stcb == NULL) {
4510 			SCTP_INP_DECR_REF(inp);
4511 		} else {
4512 			SCTP_TCB_UNLOCK(stcb);
4513 		}
4514 	}
4515 	if (stcb != NULL) {
4516 		/* Already have, or am bringing up, an association */
4517 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
4518 		error = EALREADY;
4519 		goto out_now;
4520 	}
4521 	vrf_id = inp->def_vrf_id;
4522 	/* We are GOOD to go */
4523 	stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p);
4524 	if (stcb == NULL) {
4525 		/* Gak! no memory */
4526 		goto out_now;
4527 	}
4528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
4529 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
4530 		/* Set the connected flag so we can queue data */
4531 		SOCKBUF_LOCK(&so->so_rcv);
4532 		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
4533 		SOCKBUF_UNLOCK(&so->so_rcv);
4534 		SOCKBUF_LOCK(&so->so_snd);
4535 		so->so_snd.sb_state &= ~SBS_CANTSENDMORE;
4536 		SOCKBUF_UNLOCK(&so->so_snd);
4537 		SOCK_LOCK(so);
4538 		so->so_state &= ~SS_ISDISCONNECTING;
4539 		SOCK_UNLOCK(so);
4540 		soisconnecting(so);
4541 	}
4542 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
4543 	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
4544 
4545 	/* initialize authentication parameters for the assoc */
4546 	sctp_initialize_auth_params(inp, stcb);
4547 
4548 	sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
4549 	SCTP_TCB_UNLOCK(stcb);
4550 out_now:
4551 	if (create_lock_on) {
4552 		SCTP_ASOC_CREATE_UNLOCK(inp);
4553 	}
4554 	SCTP_INP_DECR_REF(inp);
4555 	return error;
4556 }
4557 
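/*
 * listen(2) handler. When SCTP_PCB_FLAGS_PORTREUSE is enabled only one
 * listener per port is allowed, and the listening inp may have to be
 * swapped out of the TCP pool into the main slot.
 */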
4558 int
4559 sctp_listen(struct socket *so, int backlog, struct thread *p)
4560 {
4561 	/*
4562 	 * Note this module depends on the protocol processing being called
4563 	 * AFTER any socket level flags and backlog are applied to the
4564 	 * socket. The traditional way that the socket flags are applied is
4565 	 * AFTER protocol processing. We have made a change to the
4566 	 * sys/kern/uipc_socket.c module to reverse this, but this MUST be in
4567 	 * place if the socket API for SCTP is to work properly.
4568 	 */
4569 
4570 	int error = 0;
4571 	struct sctp_inpcb *inp;
4572 
4573 	inp = (struct sctp_inpcb *)so->so_pcb;
4574 	if (inp == 0) {
4575 		/* I made the same as TCP since we are not set up? */
4576 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4577 		return (ECONNRESET);
4578 	}
4579 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
4580 		/* See if we have a listener */
4581 		struct sctp_inpcb *tinp;
4582 		union sctp_sockstore store, *sp;
4583 
4584 		sp = &store;
4585 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4586 			/* not bound all */
4587 			struct sctp_laddr *laddr;
4588 
4589 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4590 				memcpy(&store, &laddr->ifa->address, sizeof(store));
4591 				sp->sin.sin_port = inp->sctp_lport;
4592 				tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4593 				if (tinp && (tinp != inp) &&
4594 				    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4595 				    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4596 				    (tinp->sctp_socket->so_qlimit)) {
4597 					/*
4598 					 * we have a listener already and
4599 					 * it is not this inp.
4600 					 */
4601 					SCTP_INP_DECR_REF(tinp);
4602 					return (EADDRINUSE);
4603 				} else if (tinp) {
4604 					SCTP_INP_DECR_REF(tinp);
4605 				}
4606 			}
4607 		} else {
4608 			/* Setup a local addr bound all */
4609 			memset(&store, 0, sizeof(store));
4610 			store.sin.sin_port = inp->sctp_lport;
4611 #ifdef INET6
4612 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4613 				store.sa.sa_family = AF_INET6;
4614 				store.sa.sa_len = sizeof(struct sockaddr_in6);
4615 			}
4616 #endif
4617 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
4618 				store.sa.sa_family = AF_INET;
4619 				store.sa.sa_len = sizeof(struct sockaddr_in);
4620 			}
4621 			tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4622 			if (tinp && (tinp != inp) &&
4623 			    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4624 			    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4625 			    (tinp->sctp_socket->so_qlimit)) {
4626 				/*
4627 				 * we have a listener already and it is not
4628 				 * this inp.
4629 				 */
4630 				SCTP_INP_DECR_REF(tinp);
4631 				return (EADDRINUSE);
4632 			} else if (tinp) {
4633 				SCTP_INP_DECR_REF(tinp);
4634 			}
4635 		}
4636 	}
4637 	SCTP_INP_RLOCK(inp);
4638 #ifdef SCTP_LOCK_LOGGING
4639 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
4640 		sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
4641 	}
4642 #endif
4643 	SOCK_LOCK(so);
4644 	error = solisten_proto_check(so);
4645 	if (error) {
4646 		SOCK_UNLOCK(so);
4647 		SCTP_INP_RUNLOCK(inp);
4648 		return (error);
4649 	}
4650 	if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
4651 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4652 		/*
4653 		 * The unlucky case: we are in the TCP pool with this guy,
4654 		 * someone else is in the main inp slot, so we must move
4655 		 * this guy (the listener) to the main slot and then move
4656 		 * the guy that was the listener to the TCP pool.
4657 		 */
4658 		if (sctp_swap_inpcb_for_listen(inp)) {
4659 			goto in_use;
4660 		}
4661 	}
4662 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4663 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4664 		/* We are already connected AND the TCP model */
4665 in_use:
4666 		SCTP_INP_RUNLOCK(inp);
4667 		SOCK_UNLOCK(so);
4668 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4669 		return (EADDRINUSE);
4670 	}
4671 	SCTP_INP_RUNLOCK(inp);
4672 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4673 		/* We must do a bind. */
4674 		SOCK_UNLOCK(so);
4675 		if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
4676 			/* bind error, probably perm */
4677 			return (error);
4678 		}
4679 		SOCK_LOCK(so);
4680 	}
4681 	/* It appears for 7.0 and on, we must always call this. */
4682 	solisten_proto(so, backlog);
4683 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4684 		/* remove the ACCEPTCONN flag for one-to-many sockets */
4685 		so->so_options &= ~SO_ACCEPTCONN;
4686 	}
4687 	if (backlog == 0) {
4688 		/* turning off listen */
4689 		so->so_options &= ~SO_ACCEPTCONN;
4690 	}
4691 	SOCK_UNLOCK(so);
4692 	return (error);
4693 }
4694 
4695 static int sctp_defered_wakeup_cnt = 0;
4696 
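/*
 * accept(2) handler: hand back the primary destination address of the
 * accepted association and process any socket wakeups that were deferred
 * while SCTP_PCB_FLAGS_DONT_WAKE was set.
 */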
4697 int
4698 sctp_accept(struct socket *so, struct sockaddr **addr)
4699 {
4700 	struct sctp_tcb *stcb;
4701 	struct sctp_inpcb *inp;
4702 	union sctp_sockstore store;
4703 
4704 #ifdef INET6
4705 	int error;
4706 
4707 #endif
4708 	inp = (struct sctp_inpcb *)so->so_pcb;
4709 
4710 	if (inp == 0) {
4711 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4712 		return (ECONNRESET);
4713 	}
4714 	SCTP_INP_RLOCK(inp);
4715 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4716 		SCTP_INP_RUNLOCK(inp);
4717 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
4718 		return (EOPNOTSUPP);
4719 	}
4720 	if (so->so_state & SS_ISDISCONNECTED) {
4721 		SCTP_INP_RUNLOCK(inp);
4722 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
4723 		return (ECONNABORTED);
4724 	}
4725 	stcb = LIST_FIRST(&inp->sctp_asoc_list);
4726 	if (stcb == NULL) {
4727 		SCTP_INP_RUNLOCK(inp);
4728 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4729 		return (ECONNRESET);
4730 	}
4731 	SCTP_TCB_LOCK(stcb);
4732 	SCTP_INP_RUNLOCK(inp);
4733 	store = stcb->asoc.primary_destination->ro._l_addr;
4734 	stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
4735 	SCTP_TCB_UNLOCK(stcb);
4736 	switch (store.sa.sa_family) {
4737 	case AF_INET:
4738 		{
4739 			struct sockaddr_in *sin;
4740 
4741 			SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4742 			if (sin == NULL)
4743 				return (ENOMEM);
4744 			sin->sin_family = AF_INET;
4745 			sin->sin_len = sizeof(*sin);
4746 			sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
4747 			sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
4748 			*addr = (struct sockaddr *)sin;
4749 			break;
4750 		}
4751 #ifdef INET6
4752 	case AF_INET6:
4753 		{
4754 			struct sockaddr_in6 *sin6;
4755 
4756 			SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
4757 			if (sin6 == NULL)
4758 				return (ENOMEM);
4759 			sin6->sin6_family = AF_INET6;
4760 			sin6->sin6_len = sizeof(*sin6);
4761 			sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
4762 
4763 			sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
4764 			if ((error = sa6_recoverscope(sin6)) != 0) {
4765 				SCTP_FREE_SONAME(sin6);
4766 				return (error);
4767 			}
4768 			*addr = (struct sockaddr *)sin6;
4769 			break;
4770 		}
4771 #endif
4772 	default:
4773 		/* TSNH */
4774 		break;
4775 	}
4776 	/* Wake any delayed sleep action */
4777 	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4778 		SCTP_INP_WLOCK(inp);
4779 		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4780 		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4781 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4782 			SCTP_INP_WUNLOCK(inp);
4783 			SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
4784 			if (sowriteable(inp->sctp_socket)) {
4785 				sowwakeup_locked(inp->sctp_socket);
4786 			} else {
4787 				SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
4788 			}
4789 			SCTP_INP_WLOCK(inp);
4790 		}
4791 		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4792 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4793 			SCTP_INP_WUNLOCK(inp);
4794 			SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
4795 			if (soreadable(inp->sctp_socket)) {
4796 				sctp_defered_wakeup_cnt++;
4797 				sorwakeup_locked(inp->sctp_socket);
4798 			} else {
4799 				SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
4800 			}
4801 			SCTP_INP_WLOCK(inp);
4802 		}
4803 		SCTP_INP_WUNLOCK(inp);
4804 	}
4805 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4806 		SCTP_TCB_LOCK(stcb);
4807 		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
4808 	}
4809 	return (0);
4810 }
4811 
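/*
 * getsockname(2) handler for IPv4: report the bound port together with the
 * first bound IPv4 address, the source address selected for the primary
 * path, or 0.0.0.0 for an unconnected bound-all endpoint.
 */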
4812 int
4813 sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4814 {
4815 	struct sockaddr_in *sin;
4816 	uint32_t vrf_id;
4817 	struct sctp_inpcb *inp;
4818 	struct sctp_ifa *sctp_ifa;
4819 
4820 	/*
4821 	 * Do the malloc first in case it blocks.
4822 	 */
4823 	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4824 	if (sin == NULL)
4825 		return (ENOMEM);
4826 	sin->sin_family = AF_INET;
4827 	sin->sin_len = sizeof(*sin);
4828 	inp = (struct sctp_inpcb *)so->so_pcb;
4829 	if (!inp) {
4830 		SCTP_FREE_SONAME(sin);
4831 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4832 		return ECONNRESET;
4833 	}
4834 	SCTP_INP_RLOCK(inp);
4835 	sin->sin_port = inp->sctp_lport;
4836 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4837 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4838 			struct sctp_tcb *stcb;
4839 			struct sockaddr_in *sin_a;
4840 			struct sctp_nets *net;
4841 			int fnd;
4842 
4843 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
4844 			if (stcb == NULL) {
4845 				goto notConn;
4846 			}
4847 			fnd = 0;
4848 			sin_a = NULL;
4849 			SCTP_TCB_LOCK(stcb);
4850 			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4851 				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4852 				if (sin_a == NULL)
4853 					/* this will make coverity happy */
4854 					continue;
4855 
4856 				if (sin_a->sin_family == AF_INET) {
4857 					fnd = 1;
4858 					break;
4859 				}
4860 			}
4861 			if ((!fnd) || (sin_a == NULL)) {
4862 				/* punt */
4863 				SCTP_TCB_UNLOCK(stcb);
4864 				goto notConn;
4865 			}
4866 			vrf_id = inp->def_vrf_id;
4867 			sctp_ifa = sctp_source_address_selection(inp,
4868 			    stcb,
4869 			    (sctp_route_t *) & net->ro,
4870 			    net, 0, vrf_id);
4871 			if (sctp_ifa) {
4872 				sin->sin_addr = sctp_ifa->address.sin.sin_addr;
4873 				sctp_free_ifa(sctp_ifa);
4874 			}
4875 			SCTP_TCB_UNLOCK(stcb);
4876 		} else {
4877 			/* For the bound all case you get back 0 */
4878 	notConn:
4879 			sin->sin_addr.s_addr = 0;
4880 		}
4881 
4882 	} else {
4883 		/* Take the first IPv4 address in the list */
4884 		struct sctp_laddr *laddr;
4885 		int fnd = 0;
4886 
4887 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4888 			if (laddr->ifa->address.sa.sa_family == AF_INET) {
4889 				struct sockaddr_in *sin_a;
4890 
4891 				sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
4892 				sin->sin_addr = sin_a->sin_addr;
4893 				fnd = 1;
4894 				break;
4895 			}
4896 		}
4897 		if (!fnd) {
4898 			SCTP_FREE_SONAME(sin);
4899 			SCTP_INP_RUNLOCK(inp);
4900 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4901 			return ENOENT;
4902 		}
4903 	}
4904 	SCTP_INP_RUNLOCK(inp);
4905 	(*addr) = (struct sockaddr *)sin;
4906 	return (0);
4907 }
4908 
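/*
 * getpeername(2) handler for IPv4: return the first IPv4 address of the
 * peer for a connected one-to-one style socket.
 */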
4909 int
4910 sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4911 {
4912 	struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4913 	int fnd;
4914 	struct sockaddr_in *sin_a;
4915 	struct sctp_inpcb *inp;
4916 	struct sctp_tcb *stcb;
4917 	struct sctp_nets *net;
4918 
4919 	/* Do the malloc first in case it blocks. */
4920 	inp = (struct sctp_inpcb *)so->so_pcb;
4921 	if ((inp == NULL) ||
4922 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4923 		/* UDP type and listeners will drop out here */
4924 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
4925 		return (ENOTCONN);
4926 	}
4927 	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4928 	if (sin == NULL)
4929 		return (ENOMEM);
4930 	sin->sin_family = AF_INET;
4931 	sin->sin_len = sizeof(*sin);
4932 
4933 	/* We must recapture in case we blocked */
4934 	inp = (struct sctp_inpcb *)so->so_pcb;
4935 	if (!inp) {
4936 		SCTP_FREE_SONAME(sin);
4937 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4938 		return ECONNRESET;
4939 	}
4940 	SCTP_INP_RLOCK(inp);
4941 	stcb = LIST_FIRST(&inp->sctp_asoc_list);
4942 	if (stcb) {
4943 		SCTP_TCB_LOCK(stcb);
4944 	}
4945 	SCTP_INP_RUNLOCK(inp);
4946 	if (stcb == NULL) {
4947 		SCTP_FREE_SONAME(sin);
4948 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4949 		return ECONNRESET;
4950 	}
4951 	fnd = 0;
4952 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4953 		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4954 		if (sin_a->sin_family == AF_INET) {
4955 			fnd = 1;
4956 			sin->sin_port = stcb->rport;
4957 			sin->sin_addr = sin_a->sin_addr;
4958 			break;
4959 		}
4960 	}
4961 	SCTP_TCB_UNLOCK(stcb);
4962 	if (!fnd) {
4963 		/* No IPv4 address */
4964 		SCTP_FREE_SONAME(sin);
4965 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4966 		return ENOENT;
4967 	}
4968 	(*addr) = (struct sockaddr *)sin;
4969 	return (0);
4970 }
4971 
4972 struct pr_usrreqs sctp_usrreqs = {
4973 	.pru_abort = sctp_abort,
4974 	.pru_accept = sctp_accept,
4975 	.pru_attach = sctp_attach,
4976 	.pru_bind = sctp_bind,
4977 	.pru_connect = sctp_connect,
4978 	.pru_control = in_control,
4979 	.pru_close = sctp_close,
4980 	.pru_detach = sctp_close,
4981 	.pru_sopoll = sopoll_generic,
4982 	.pru_flush = sctp_flush,
4983 	.pru_disconnect = sctp_disconnect,
4984 	.pru_listen = sctp_listen,
4985 	.pru_peeraddr = sctp_peeraddr,
4986 	.pru_send = sctp_sendm,
4987 	.pru_shutdown = sctp_shutdown,
4988 	.pru_sockaddr = sctp_ingetaddr,
4989 	.pru_sosend = sctp_sosend,
4990 	.pru_soreceive = sctp_soreceive
4991 };
4992