xref: /freebsd/sys/netinet/sctp_usrreq.c (revision ea60845d09e6bcb73038858af28bd191219b610b)
1 /*-
2  * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 #include <netinet/sctp_os.h>
36 #include <sys/proc.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctp_header.h>
39 #include <netinet/sctp_var.h>
40 #if defined(INET6)
41 #include <netinet6/sctp6_var.h>
42 #endif
43 #include <netinet/sctp_sysctl.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_asconf.h>
47 #include <netinet/sctputil.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_auth.h>
51 
52 
53 
54 void
55 sctp_init(void)
56 {
57 	/* Init the SCTP pcb in sctp_pcb.c */
58 	u_long sb_max_adj;
59 
60 	sctp_pcb_init();
61 
62 	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
63 		sctp_max_chunks_on_queue = (nmbclusters / 8);
64 	/*
65 	 * Allow a user to take no more than 1/2 the number of clusters or
66 	 * the SB_MAX whichever is smaller for the send window.
67 	 */
68 	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
69 	sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
70 	    ((nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
71 	/*
72 	 * Now for the recv window, should we take the same amount? or
73 	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
74 	 * now I will just copy.
75 	 */
76 	sctp_recvspace = sctp_sendspace;
77 
78 
79 }
80 
81 
82 
83 /*
84  * cleanup of the sctppcbinfo structure.
85  * Assumes that the sctppcbinfo lock is held.
86  */
void
sctp_pcbinfo_cleanup(void)
{
	/*
	 * Free each hash table that sctp_pcb_init() allocated; the NULL
	 * checks make this safe to call on a partially initialized
	 * sctppcbinfo.
	 */
	/* free the hash tables */
	if (sctppcbinfo.sctp_asochash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark);
	if (sctppcbinfo.sctp_ephash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark);
	if (sctppcbinfo.sctp_tcpephash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark);
	if (sctppcbinfo.sctp_restarthash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark);
}
100 
101 
102 static void
103 sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
104     struct sctp_tcb *stcb,
105     struct sctp_nets *net,
106     uint16_t nxtsz)
107 {
108 	struct sctp_tmit_chunk *chk;
109 
110 	/* Adjust that too */
111 	stcb->asoc.smallest_mtu = nxtsz;
112 	/* now off to subtract IP_DF flag if needed */
113 
114 	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
115 		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
116 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
117 		}
118 	}
119 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
120 		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
121 			/*
122 			 * For this guy we also mark for immediate resend
123 			 * since we sent to big of chunk
124 			 */
125 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
126 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
127 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
128 			}
129 			chk->sent = SCTP_DATAGRAM_RESEND;
130 			chk->rec.data.doing_fast_retransmit = 0;
131 #ifdef SCTP_FLIGHT_LOGGING
132 			sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
133 			    chk->whoTo->flight_size,
134 			    chk->book_size,
135 			    (uintptr_t) chk->whoTo,
136 			    chk->rec.data.TSN_seq);
137 #endif
138 			/* Clear any time so NO RTT is being done */
139 			chk->do_rtt = 0;
140 			sctp_flight_size_decrease(chk);
141 			sctp_total_flight_decrease(stcb, chk);
142 		}
143 	}
144 }
145 
/*
 * Handle an ICMP "fragmentation needed" indication (path-MTU discovery)
 * for an association.  Called from sctp_ctlinput() with the TCB locked;
 * always returns with the TCB unlocked.
 */
static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if (stcb != NULL)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/*
	 * Back up from the quoted inner IP header to the enclosing ICMP
	 * header; struct icmp embeds the offending IP header after its
	 * leading fields.
	 */
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not a unreachable message due to frag. */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	totsz = ip->ip_len;

	/*
	 * The next-hop MTU from the router overlays icmp_seq in the ICMP
	 * header union (RFC 1191).
	 */
	nxtsz = ntohs(icmph->icmp_seq);
	if (nxtsz == 0) {
		/*
		 * old type router that does not tell us what the next size
		 * mtu is. Rats we will have to guess (in a educated fashion
		 * of course)
		 */
		nxtsz = find_next_best_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		/* Remember it was running so we can restart it below. */
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
		/* Shrink the association-wide MTU and re-flag queued chunks. */
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}
211 
212 
/*
 * Deliver an ICMP-derived errno to the matching association.  Called from
 * sctp_ctlinput() with the TCB locked.  On the reachability paths this
 * drops the TCB lock before returning; on the OOTB-abort path the TCB is
 * freed (lock consumed by sctp_free_assoc()).
 *
 * NOTE(review): unlike sctp_notify_mbuf(), the initial guard returns
 * without unlocking stcb; the only caller passes non-NULL arguments, so
 * the guard should never fire — confirm before relying on it elsewhere.
 */
void
sctp_notify(struct sctp_inpcb *inp,
    int error,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		return;
	}
	/* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */

	if ((error == EHOSTUNREACH) ||	/* Host is not reachable */
	    (error == EHOSTDOWN) ||	/* Host is down */
	    (error == ECONNREFUSED) ||	/* Host refused the connection, (not
					 * an abort?) */
	    (error == ENOPROTOOPT)	/* SCTP is not present on host */
	    ) {
		/*
		 * Hmm reachablity problems we must examine closely. If its
		 * not reachable, we may have lost a network. Or if there is
		 * NO protocol at the other end named SCTP. well we consider
		 * it a OOTB abort.
		 */
		if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				/* Ok that destination is NOT reachable */
				printf("ICMP (thresh %d/%d) takes interface %p down\n",
				    net->error_count,
				    net->failure_threshold,
				    net);

				/*
				 * Mark the path down and push its error
				 * count past the failure threshold, then
				 * tell the ULP the interface went down.
				 */
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				net->error_count = net->failure_threshold + 1;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb, SCTP_FAILED_THRESHOLD,
				    (void *)net);
			}
			if (stcb)
				SCTP_TCB_UNLOCK(stcb);
		} else {
			/*
			 * Here the peer is either playing tricks on us,
			 * including an address that belongs to someone who
			 * does not support SCTP OR was a userland
			 * implementation that shutdown and now is dead. In
			 * either case treat it like a OOTB abort with no
			 * TCB
			 */
			sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
			sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
			/* no need to unlock here, since the TCB is gone */
		}
	} else {
		/* Send all others to the app */
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);


		/* Report the errno on the socket and wake any writers. */
		if (inp->sctp_socket) {
#ifdef SCTP_LOCK_LOGGING
			sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCK);
#endif
			SOCK_LOCK(inp->sctp_socket);
			inp->sctp_socket->so_error = error;
			sctp_sowwakeup(inp, inp->sctp_socket);
			SOCK_UNLOCK(inp->sctp_socket);
		}
	}
}
291 
292 void
293 sctp_ctlinput(cmd, sa, vip)
294 	int cmd;
295 	struct sockaddr *sa;
296 	void *vip;
297 {
298 	struct ip *ip = vip;
299 	struct sctphdr *sh;
300 	uint32_t vrf_id;
301 
302 	/* FIX, for non-bsd is this right? */
303 	vrf_id = SCTP_DEFAULT_VRFID;
304 	if (sa->sa_family != AF_INET ||
305 	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
306 		return;
307 	}
308 	if (PRC_IS_REDIRECT(cmd)) {
309 		ip = 0;
310 	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
311 		return;
312 	}
313 	if (ip) {
314 		struct sctp_inpcb *inp = NULL;
315 		struct sctp_tcb *stcb = NULL;
316 		struct sctp_nets *net = NULL;
317 		struct sockaddr_in to, from;
318 
319 		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
320 		bzero(&to, sizeof(to));
321 		bzero(&from, sizeof(from));
322 		from.sin_family = to.sin_family = AF_INET;
323 		from.sin_len = to.sin_len = sizeof(to);
324 		from.sin_port = sh->src_port;
325 		from.sin_addr = ip->ip_src;
326 		to.sin_port = sh->dest_port;
327 		to.sin_addr = ip->ip_dst;
328 
329 		/*
330 		 * 'to' holds the dest of the packet that failed to be sent.
331 		 * 'from' holds our local endpoint address. Thus we reverse
332 		 * the to and the from in the lookup.
333 		 */
334 		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
335 		    (struct sockaddr *)&to,
336 		    &inp, &net, 1, vrf_id);
337 		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
338 			if (cmd != PRC_MSGSIZE) {
339 				int cm;
340 
341 				if (cmd == PRC_HOSTDEAD) {
342 					cm = EHOSTUNREACH;
343 				} else {
344 					cm = inetctlerrmap[cmd];
345 				}
346 				sctp_notify(inp, cm, sh,
347 				    (struct sockaddr *)&to, stcb,
348 				    net);
349 			} else {
350 				/* handle possible ICMP size messages */
351 				sctp_notify_mbuf(inp, stcb, net, ip, sh);
352 			}
353 		} else {
354 			if ((stcb == NULL) && (inp != NULL)) {
355 				/* reduce ref-count */
356 				SCTP_INP_WLOCK(inp);
357 				SCTP_INP_DECR_REF(inp);
358 				SCTP_INP_WUNLOCK(inp);
359 			}
360 		}
361 	}
362 	return;
363 }
364 
/*
 * Sysctl handler backing net.inet.sctp.getcred: given a (local, peer)
 * sockaddr_in pair, find the owning association and copy out the socket
 * owner's credentials as a struct xucred.
 */
static int
sctp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct sctp_inpcb *inp;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;
	int error;
	uint32_t vrf_id;


	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;

	/*
	 * XXXRW: Other instances of getcred use SUSER_ALLOWJAIL, as socket
	 * visibility is scoped using cr_canseesocket(), which it is not
	 * here.
	 */
	error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED,
	    SUSER_ALLOWJAIL);
	if (error)
		return (error);

	/* Copy in the two endpoint addresses from userland. */
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);

	/* Lookup returns with the TCB locked and a ref held on the inp. */
	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
	    sintosa(&addrs[1]),
	    &inp, &net, 1, vrf_id);
	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
		if ((inp != NULL) && (stcb == NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			/* inp is still valid; continue with it write-locked */
			goto cred_can_cont;
		}
		error = ENOENT;
		goto out;
	}
	SCTP_TCB_UNLOCK(stcb);
	/*
	 * We use the write lock here, only since in the error leg we need
	 * it. If we used RLOCK, then we would have to
	 * wlock/decr/unlock/rlock. Which in theory could create a hole.
	 * Better to use higher wlock.
	 */
	SCTP_INP_WLOCK(inp);
cred_can_cont:
	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
	if (error) {
		SCTP_INP_WUNLOCK(inp);
		goto out;
	}
	/* Convert the socket's ucred into the exportable xucred form. */
	cru2x(inp->sctp_socket->so_cred, &xuc);
	SCTP_INP_WUNLOCK(inp);
	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
out:
	return (error);
}
427 
/*
 * Register net.inet.sctp.getcred: opaque sysctl that maps a pair of
 * sockaddr_in's to the owning socket's credentials (see sctp_getcred()).
 */
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
430 
431 
/*
 * pru_abort entry point: forcibly detach the SCTP PCB from the socket.
 * Uses an atomic compare-and-set on sctp_flags so that exactly one
 * thread wins the right to free the inpcb; losers retry until the
 * SOCKET_GONE flag is observed set by someone else.
 */
static void
sctp_abort(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return;

sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
		sctp_log_closing(inp, NULL, 16);
#endif
		/* We won the race: free immediately (abort semantics). */
		sctp_inpcb_free(inp, 1, 0);
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		/* Lost the cmpset race: retry unless someone else is freeing. */
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}
472 
/*
 * pru_attach entry point: allocate and initialize an SCTP inpcb for a
 * newly created IPv4 SCTP socket, reserving default send/receive buffer
 * space and (optionally) an IPsec policy.
 *
 * NOTE(review): on the IPSEC error path below, the function returns
 * while SCTP_INP_WLOCK(inp) appears to still be held (no WUNLOCK before
 * the return; sctp_inpcb_free() may or may not consume it) — verify the
 * lock discipline against sctp_inpcb_free().
 */
static int
sctp_attach(struct socket *so, int proto, struct thread *p)
{
	struct sctp_inpcb *inp;
	struct inpcb *ip_inp;
	int error;

#ifdef IPSEC
	uint32_t flags;

#endif
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp != 0) {
		/* Already attached. */
		return EINVAL;
	}
	/* Reserve default socket-buffer space (set up in sctp_init()). */
	error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace);
	if (error) {
		return error;
	}
	error = sctp_inpcb_alloc(so);
	if (error) {
		return error;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	SCTP_INP_WLOCK(inp);

	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
	ip_inp = &inp->ip_inp.inp;
	ip_inp->inp_vflag |= INP_IPV4;
	ip_inp->inp_ip_ttl = ip_defttl;

#ifdef IPSEC
	error = ipsec_init_pcbpolicy(so, &ip_inp->inp_sp);
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (error != 0) {
		/* Tear the freshly allocated inpcb back down. */
		flags = inp->sctp_flags;
		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 15);
#endif
			sctp_inpcb_free(inp, 1, 0);
		}
		return error;
	}
#endif				/* IPSEC */
	SCTP_INP_WUNLOCK(inp);
	return 0;
}
524 
525 static int
526 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
527 {
528 	struct sctp_inpcb *inp;
529 	int error;
530 
531 #ifdef INET6
532 	if (addr && addr->sa_family != AF_INET)
533 		/* must be a v4 address! */
534 		return EINVAL;
535 #endif				/* INET6 */
536 
537 	inp = (struct sctp_inpcb *)so->so_pcb;
538 	if (inp == 0)
539 		return EINVAL;
540 
541 	error = sctp_inpcb_bind(so, addr, p);
542 	return error;
543 }
544 
/*
 * pru_close entry point: detach the socket from its SCTP inpcb.  Like
 * sctp_abort(), an atomic compare-and-set on sctp_flags elects exactly
 * one thread to free the PCB.  SO_LINGER with zero timeout, or unread
 * receive data, forces an immediate (abortive) teardown; otherwise a
 * graceful shutdown is initiated.
 */
static void
sctp_close(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return;

	/*
	 * Inform all the lower layer assoc that we are done.
	 */
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
		/* Linger-zero or unread data => abortive close. */
		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
		    (so->so_rcv.sb_cc > 0)) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 13);
#endif
			sctp_inpcb_free(inp, 1, 1);
		} else {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 14);
#endif
			sctp_inpcb_free(inp, 0, 1);
		}
		/*
		 * The socket is now detached, no matter what the state of
		 * the SCTP association.
		 */
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		/* Lost the cmpset race: retry unless another thread is freeing. */
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}
600 
601 
602 int
603 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
604     struct mbuf *control, struct thread *p);
605 
606 
607 int
608 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
609     struct mbuf *control, struct thread *p)
610 {
611 	struct sctp_inpcb *inp;
612 	int error;
613 
614 	inp = (struct sctp_inpcb *)so->so_pcb;
615 	if (inp == 0) {
616 		if (control) {
617 			sctp_m_freem(control);
618 			control = NULL;
619 		}
620 		sctp_m_freem(m);
621 		return EINVAL;
622 	}
623 	/* Got to have an to address if we are NOT a connected socket */
624 	if ((addr == NULL) &&
625 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
626 	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
627 	    ) {
628 		goto connected_type;
629 	} else if (addr == NULL) {
630 		error = EDESTADDRREQ;
631 		sctp_m_freem(m);
632 		if (control) {
633 			sctp_m_freem(control);
634 			control = NULL;
635 		}
636 		return (error);
637 	}
638 #ifdef INET6
639 	if (addr->sa_family != AF_INET) {
640 		/* must be a v4 address! */
641 		sctp_m_freem(m);
642 		if (control) {
643 			sctp_m_freem(control);
644 			control = NULL;
645 		}
646 		error = EDESTADDRREQ;
647 		return EINVAL;
648 	}
649 #endif				/* INET6 */
650 connected_type:
651 	/* now what about control */
652 	if (control) {
653 		if (inp->control) {
654 			printf("huh? control set?\n");
655 			sctp_m_freem(inp->control);
656 			inp->control = NULL;
657 		}
658 		inp->control = control;
659 	}
660 	/* Place the data */
661 	if (inp->pkt) {
662 		SCTP_BUF_NEXT(inp->pkt_last) = m;
663 		inp->pkt_last = m;
664 	} else {
665 		inp->pkt_last = inp->pkt = m;
666 	}
667 	if (
668 	/* FreeBSD uses a flag passed */
669 	    ((flags & PRUS_MORETOCOME) == 0)
670 	    ) {
671 		/*
672 		 * note with the current version this code will only be used
673 		 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
674 		 * re-defining sosend to use the sctp_sosend. One can
675 		 * optionally switch back to this code (by changing back the
676 		 * definitions) but this is not advisable. This code is used
677 		 * by FreeBSD when sending a file with sendfile() though.
678 		 */
679 		int ret;
680 
681 		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
682 		inp->pkt = NULL;
683 		inp->control = NULL;
684 		return (ret);
685 	} else {
686 		return (0);
687 	}
688 }
689 
690 static int
691 sctp_disconnect(struct socket *so)
692 {
693 	struct sctp_inpcb *inp;
694 
695 	inp = (struct sctp_inpcb *)so->so_pcb;
696 	if (inp == NULL) {
697 		return (ENOTCONN);
698 	}
699 	SCTP_INP_RLOCK(inp);
700 	if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
701 		if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) {
702 			/* No connection */
703 			SCTP_INP_RUNLOCK(inp);
704 			return (0);
705 		} else {
706 			struct sctp_association *asoc;
707 			struct sctp_tcb *stcb;
708 
709 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
710 			if (stcb == NULL) {
711 				SCTP_INP_RUNLOCK(inp);
712 				return (EINVAL);
713 			}
714 			SCTP_TCB_LOCK(stcb);
715 			asoc = &stcb->asoc;
716 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
717 				/* We are about to be freed, out of here */
718 				SCTP_TCB_UNLOCK(stcb);
719 				SCTP_INP_RUNLOCK(inp);
720 				return (0);
721 			}
722 			if (((so->so_options & SO_LINGER) &&
723 			    (so->so_linger == 0)) ||
724 			    (so->so_rcv.sb_cc > 0)) {
725 				if (SCTP_GET_STATE(asoc) !=
726 				    SCTP_STATE_COOKIE_WAIT) {
727 					/* Left with Data unread */
728 					struct mbuf *err;
729 
730 					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
731 					if (err) {
732 						/*
733 						 * Fill in the user
734 						 * initiated abort
735 						 */
736 						struct sctp_paramhdr *ph;
737 
738 						ph = mtod(err, struct sctp_paramhdr *);
739 						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
740 						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
741 						ph->param_length = htons(SCTP_BUF_LEN(err));
742 					}
743 					sctp_send_abort_tcb(stcb, err);
744 					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
745 				}
746 				SCTP_INP_RUNLOCK(inp);
747 				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
748 				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
749 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
750 				}
751 				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
752 				/* No unlock tcb assoc is gone */
753 				return (0);
754 			}
755 			if (TAILQ_EMPTY(&asoc->send_queue) &&
756 			    TAILQ_EMPTY(&asoc->sent_queue) &&
757 			    (asoc->stream_queue_cnt == 0)) {
758 				/* there is nothing queued to send, so done */
759 				if (asoc->locked_on_sending) {
760 					goto abort_anyway;
761 				}
762 				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
763 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
764 					/* only send SHUTDOWN 1st time thru */
765 					sctp_stop_timers_for_shutdown(stcb);
766 					sctp_send_shutdown(stcb,
767 					    stcb->asoc.primary_destination);
768 					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
769 					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
770 					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
771 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
772 					}
773 					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
774 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
775 					    stcb->sctp_ep, stcb,
776 					    asoc->primary_destination);
777 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
778 					    stcb->sctp_ep, stcb,
779 					    asoc->primary_destination);
780 				}
781 			} else {
782 				/*
783 				 * we still got (or just got) data to send,
784 				 * so set SHUTDOWN_PENDING
785 				 */
786 				/*
787 				 * XXX sockets draft says that SCTP_EOF
788 				 * should be sent with no data. currently,
789 				 * we will allow user data to be sent first
790 				 * and move to SHUTDOWN-PENDING
791 				 */
792 				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
793 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
794 				    asoc->primary_destination);
795 				if (asoc->locked_on_sending) {
796 					/* Locked to send out the data */
797 					struct sctp_stream_queue_pending *sp;
798 
799 					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
800 					if (sp == NULL) {
801 						printf("Error, sp is NULL, locked on sending is non-null strm:%d\n",
802 						    asoc->locked_on_sending->stream_no);
803 					} else {
804 						if ((sp->length == 0) && (sp->msg_is_complete == 0))
805 							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
806 					}
807 				}
808 				if (TAILQ_EMPTY(&asoc->send_queue) &&
809 				    TAILQ_EMPTY(&asoc->sent_queue) &&
810 				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
811 					struct mbuf *op_err;
812 
813 			abort_anyway:
814 					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
815 					    0, M_DONTWAIT, 1, MT_DATA);
816 					if (op_err) {
817 						/*
818 						 * Fill in the user
819 						 * initiated abort
820 						 */
821 						struct sctp_paramhdr *ph;
822 						uint32_t *ippp;
823 
824 						SCTP_BUF_LEN(op_err) =
825 						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
826 						ph = mtod(op_err,
827 						    struct sctp_paramhdr *);
828 						ph->param_type = htons(
829 						    SCTP_CAUSE_USER_INITIATED_ABT);
830 						ph->param_length = htons(SCTP_BUF_LEN(op_err));
831 						ippp = (uint32_t *) (ph + 1);
832 						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
833 					}
834 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
835 					sctp_send_abort_tcb(stcb, op_err);
836 					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
837 					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
838 					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
839 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
840 					}
841 					SCTP_INP_RUNLOCK(inp);
842 					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
843 					return (0);
844 				}
845 			}
846 			SCTP_TCB_UNLOCK(stcb);
847 			SCTP_INP_RUNLOCK(inp);
848 			return (0);
849 		}
850 		/* not reached */
851 		printf("Not reached reached?\n");
852 	} else {
853 		/* UDP model does not support this */
854 		SCTP_INP_RUNLOCK(inp);
855 		return EOPNOTSUPP;
856 	}
857 }
858 
/*
 * pru_shutdown entry point.  Only meaningful for TCP-model sockets:
 * SHUT_WR/SHUT_RDWR maps onto the SCTP SHUTDOWN sequence.  If all send
 * queues are empty a SHUTDOWN chunk goes out immediately; otherwise the
 * association is marked SHUTDOWN_PENDING (or aborted if a partial
 * message can never complete).  UDP-model sockets get EOPNOTSUPP.
 */
int
sctp_shutdown(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		return EINVAL;
	}
	SCTP_INP_RLOCK(inp);
	/* For UDP model this is a invalid call */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		/* Restore the flags that the soshutdown took away. */
		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
		/* This proc will wakeup for read and do nothing (I hope) */
		SCTP_INP_RUNLOCK(inp);
		return (EOPNOTSUPP);
	}
	/*
	 * Ok if we reach here its the TCP model and it is either a SHUT_WR
	 * or SHUT_RDWR. This means we put the shutdown flag against it.
	 */
	{
		struct sctp_tcb *stcb;
		struct sctp_association *asoc;

		socantsendmore(so);

		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			/*
			 * Ok we hit the case that the shutdown call was
			 * made after an abort or something. Nothing to do
			 * now.
			 */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		SCTP_TCB_LOCK(stcb);
		asoc = &stcb->asoc;
		if (TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->sent_queue) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->locked_on_sending) {
				/* A partially sent message blocks shutdown. */
				goto abort_anyway;
			}
			/* there is nothing queued to send, so I'm done... */
			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
				/* only send SHUTDOWN the first time through */
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
			}
		} else {
			/*
			 * we still got (or just got) data to send, so set
			 * SHUTDOWN_PENDING
			 */
			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
			    asoc->primary_destination);

			if (asoc->locked_on_sending) {
				/* Locked to send out the data */
				struct sctp_stream_queue_pending *sp;

				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
				if (sp == NULL) {
					printf("Error, sp is NULL, locked on sending is non-null strm:%d\n",
					    asoc->locked_on_sending->stream_no);
				} else {
					/* Empty, incomplete head: can never finish. */
					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				struct mbuf *op_err;

		abort_anyway:
				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (op_err) {
					/* Fill in the user initiated abort */
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(op_err) =
					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
					ph = mtod(op_err,
					    struct sctp_paramhdr *);
					ph->param_type = htons(
					    SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(op_err));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
				/* Abort consumes the TCB; skip the unlock below. */
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_RESPONSE_TO_USER_REQ,
				    op_err);
				goto skip_unlock;
			}
		}
		SCTP_TCB_UNLOCK(stcb);
	}
skip_unlock:
	SCTP_INP_RUNLOCK(inp);
	return 0;
}
983 
984 /*
985  * copies a "user" presentable address and removes embedded scope, etc.
986  * returns 0 on success, 1 on error
987  */
988 static uint32_t
989 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
990 {
991 	struct sockaddr_in6 lsa6;
992 
993 	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
994 	    &lsa6);
995 	memcpy(ss, sa, sa->sa_len);
996 	return (0);
997 }
998 
999 
1000 
1001 static size_t
1002 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1003     struct sctp_tcb *stcb,
1004     size_t limit,
1005     struct sockaddr_storage *sas,
1006     uint32_t vrf_id)
1007 {
1008 	struct sctp_ifn *sctp_ifn;
1009 	struct sctp_ifa *sctp_ifa;
1010 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1011 	size_t actual;
1012 	int ipv4_addr_legal, ipv6_addr_legal;
1013 	struct sctp_vrf *vrf;
1014 
1015 	actual = 0;
1016 	if (limit <= 0)
1017 		return (actual);
1018 
1019 	if (stcb) {
1020 		/* Turn on all the appropriate scope */
1021 		loopback_scope = stcb->asoc.loopback_scope;
1022 		ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1023 		local_scope = stcb->asoc.local_scope;
1024 		site_scope = stcb->asoc.site_scope;
1025 	} else {
1026 		/* Turn on ALL scope, since we look at the EP */
1027 		loopback_scope = ipv4_local_scope = local_scope =
1028 		    site_scope = 1;
1029 	}
1030 	ipv4_addr_legal = ipv6_addr_legal = 0;
1031 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1032 		ipv6_addr_legal = 1;
1033 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1034 			ipv4_addr_legal = 1;
1035 		}
1036 	} else {
1037 		ipv4_addr_legal = 1;
1038 	}
1039 	vrf = sctp_find_vrf(vrf_id);
1040 	if (vrf == NULL) {
1041 		return (0);
1042 	}
1043 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1044 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1045 			if ((loopback_scope == 0) &&
1046 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1047 				/* Skip loopback if loopback_scope not set */
1048 				continue;
1049 			}
1050 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1051 				if (stcb) {
1052 					/*
1053 					 * For the BOUND-ALL case, the list
1054 					 * associated with a TCB is Always
1055 					 * considered a reverse list.. i.e.
1056 					 * it lists addresses that are NOT
1057 					 * part of the association. If this
1058 					 * is one of those we must skip it.
1059 					 */
1060 					if (sctp_is_addr_restricted(stcb,
1061 					    sctp_ifa)) {
1062 						continue;
1063 					}
1064 				}
1065 				if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
1066 				    (ipv4_addr_legal)) {
1067 					struct sockaddr_in *sin;
1068 
1069 					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1070 					if (sin->sin_addr.s_addr == 0) {
1071 						/*
1072 						 * we skip unspecifed
1073 						 * addresses
1074 						 */
1075 						continue;
1076 					}
1077 					if ((ipv4_local_scope == 0) &&
1078 					    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1079 						continue;
1080 					}
1081 					if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
1082 						in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1083 						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1084 						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1085 						actual += sizeof(sizeof(struct sockaddr_in6));
1086 					} else {
1087 						memcpy(sas, sin, sizeof(*sin));
1088 						((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1089 						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1090 						actual += sizeof(*sin);
1091 					}
1092 					if (actual >= limit) {
1093 						return (actual);
1094 					}
1095 				} else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
1096 				    (ipv6_addr_legal)) {
1097 					struct sockaddr_in6 *sin6;
1098 
1099 					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1100 					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1101 						/*
1102 						 * we skip unspecifed
1103 						 * addresses
1104 						 */
1105 						continue;
1106 					}
1107 					if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1108 						if (local_scope == 0)
1109 							continue;
1110 						if (sin6->sin6_scope_id == 0) {
1111 							if (sa6_recoverscope(sin6) != 0)
1112 								/*
1113 								 * bad link
1114 								 * local
1115 								 * address
1116 								 */
1117 								continue;
1118 						}
1119 					}
1120 					if ((site_scope == 0) &&
1121 					    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1122 						continue;
1123 					}
1124 					memcpy(sas, sin6, sizeof(*sin6));
1125 					((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1126 					sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1127 					actual += sizeof(*sin6);
1128 					if (actual >= limit) {
1129 						return (actual);
1130 					}
1131 				}
1132 			}
1133 		}
1134 	} else {
1135 		struct sctp_laddr *laddr;
1136 
1137 		/* The list is a NEGATIVE list */
1138 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1139 			if (stcb) {
1140 				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1141 					continue;
1142 				}
1143 			}
1144 			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1145 				continue;
1146 
1147 			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1148 			sas = (struct sockaddr_storage *)((caddr_t)sas +
1149 			    laddr->ifa->address.sa.sa_len);
1150 			actual += laddr->ifa->address.sa.sa_len;
1151 			if (actual >= limit) {
1152 				return (actual);
1153 			}
1154 		}
1155 	}
1156 	return (actual);
1157 }
1158 
1159 static size_t
1160 sctp_fill_up_addresses(struct sctp_inpcb *inp,
1161     struct sctp_tcb *stcb,
1162     size_t limit,
1163     struct sockaddr_storage *sas)
1164 {
1165 	size_t size = 0;
1166 
1167 	/* fill up addresses for the endpoint's default vrf */
1168 	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1169 	    inp->def_vrf_id);
1170 	return (size);
1171 }
1172 
1173 static int
1174 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1175 {
1176 	int cnt = 0;
1177 	struct sctp_vrf *vrf = NULL;
1178 
1179 	/*
1180 	 * In both sub-set bound an bound_all cases we return the MAXIMUM
1181 	 * number of addresses that you COULD get. In reality the sub-set
1182 	 * bound may have an exclusion list for a given TCB OR in the
1183 	 * bound-all case a TCB may NOT include the loopback or other
1184 	 * addresses as well.
1185 	 */
1186 	vrf = sctp_find_vrf(vrf_id);
1187 	if (vrf == NULL) {
1188 		return (0);
1189 	}
1190 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1191 		struct sctp_ifn *sctp_ifn;
1192 		struct sctp_ifa *sctp_ifa;
1193 
1194 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1195 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1196 				/* Count them if they are the right type */
1197 				if (sctp_ifa->address.sa.sa_family == AF_INET) {
1198 					if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1199 						cnt += sizeof(struct sockaddr_in6);
1200 					else
1201 						cnt += sizeof(struct sockaddr_in);
1202 
1203 				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1204 					cnt += sizeof(struct sockaddr_in6);
1205 			}
1206 		}
1207 	} else {
1208 		struct sctp_laddr *laddr;
1209 
1210 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1211 			if (laddr->ifa->address.sa.sa_family == AF_INET) {
1212 				if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1213 					cnt += sizeof(struct sockaddr_in6);
1214 				else
1215 					cnt += sizeof(struct sockaddr_in);
1216 
1217 			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1218 				cnt += sizeof(struct sockaddr_in6);
1219 		}
1220 	}
1221 	return (cnt);
1222 }
1223 
1224 static int
1225 sctp_count_max_addresses(struct sctp_inpcb *inp)
1226 {
1227 	int cnt = 0;
1228 
1229 	/* count addresses for the endpoint's default VRF */
1230 	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1231 	return (cnt);
1232 }
1233 
1234 
1235 static int
1236 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1237     size_t optsize, void *p, int delay)
1238 {
1239 	int error = 0;
1240 	int creat_lock_on = 0;
1241 	struct sctp_tcb *stcb = NULL;
1242 	struct sockaddr *sa;
1243 	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr, i;
1244 	size_t incr, at;
1245 	uint32_t vrf_id;
1246 	sctp_assoc_t *a_id;
1247 
1248 #ifdef SCTP_DEBUG
1249 	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1250 		printf("Connectx called\n");
1251 	}
1252 #endif				/* SCTP_DEBUG */
1253 
1254 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1255 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1256 		/* We are already connected AND the TCP model */
1257 		return (EADDRINUSE);
1258 	}
1259 	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1260 		return (EINVAL);
1261 	}
1262 	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1263 		SCTP_INP_RLOCK(inp);
1264 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1265 		SCTP_INP_RUNLOCK(inp);
1266 	}
1267 	if (stcb) {
1268 		return (EALREADY);
1269 	}
1270 	SCTP_INP_INCR_REF(inp);
1271 	SCTP_ASOC_CREATE_LOCK(inp);
1272 	creat_lock_on = 1;
1273 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1274 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1275 		error = EFAULT;
1276 		goto out_now;
1277 	}
1278 	totaddrp = (int *)optval;
1279 	totaddr = *totaddrp;
1280 	sa = (struct sockaddr *)(totaddrp + 1);
1281 	at = incr = 0;
1282 	/* account and validate addresses */
1283 	for (i = 0; i < totaddr; i++) {
1284 		if (sa->sa_family == AF_INET) {
1285 			num_v4++;
1286 			incr = sizeof(struct sockaddr_in);
1287 		} else if (sa->sa_family == AF_INET6) {
1288 			struct sockaddr_in6 *sin6;
1289 
1290 			sin6 = (struct sockaddr_in6 *)sa;
1291 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
1292 				/* Must be non-mapped for connectx */
1293 				error = EINVAL;
1294 				goto out_now;
1295 			}
1296 			num_v6++;
1297 			incr = sizeof(struct sockaddr_in6);
1298 		} else {
1299 			totaddr = i;
1300 			break;
1301 		}
1302 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
1303 		if (stcb != NULL) {
1304 			/* Already have or am bring up an association */
1305 			SCTP_ASOC_CREATE_UNLOCK(inp);
1306 			creat_lock_on = 0;
1307 			SCTP_TCB_UNLOCK(stcb);
1308 			error = EALREADY;
1309 			goto out_now;
1310 		}
1311 		if ((at + incr) > optsize) {
1312 			totaddr = i;
1313 			break;
1314 		}
1315 		sa = (struct sockaddr *)((caddr_t)sa + incr);
1316 	}
1317 	sa = (struct sockaddr *)(totaddrp + 1);
1318 #ifdef INET6
1319 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1320 	    (num_v6 > 0)) {
1321 		error = EINVAL;
1322 		goto out_now;
1323 	}
1324 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1325 	    (num_v4 > 0)) {
1326 		struct in6pcb *inp6;
1327 
1328 		inp6 = (struct in6pcb *)inp;
1329 		if (SCTP_IPV6_V6ONLY(inp6)) {
1330 			/*
1331 			 * if IPV6_V6ONLY flag, ignore connections destined
1332 			 * to a v4 addr or v4-mapped addr
1333 			 */
1334 			error = EINVAL;
1335 			goto out_now;
1336 		}
1337 	}
1338 #endif				/* INET6 */
1339 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1340 	    SCTP_PCB_FLAGS_UNBOUND) {
1341 		/* Bind a ephemeral port */
1342 		error = sctp_inpcb_bind(so, NULL, p);
1343 		if (error) {
1344 			goto out_now;
1345 		}
1346 	}
1347 	/* FIX ME: do we want to pass in a vrf on the connect call? */
1348 	vrf_id = inp->def_vrf_id;
1349 
1350 	/* We are GOOD to go */
1351 	stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id);
1352 	if (stcb == NULL) {
1353 		/* Gak! no memory */
1354 		goto out_now;
1355 	}
1356 	/* move to second address */
1357 	if (sa->sa_family == AF_INET)
1358 		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1359 	else
1360 		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1361 
1362 	for (i = 1; i < totaddr; i++) {
1363 		if (sa->sa_family == AF_INET) {
1364 			incr = sizeof(struct sockaddr_in);
1365 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
1366 				/* assoc gone no un-lock */
1367 				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
1368 				error = ENOBUFS;
1369 				goto out_now;
1370 			}
1371 		} else if (sa->sa_family == AF_INET6) {
1372 			incr = sizeof(struct sockaddr_in6);
1373 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
1374 				/* assoc gone no un-lock */
1375 				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
1376 				error = ENOBUFS;
1377 				goto out_now;
1378 			}
1379 		}
1380 		sa = (struct sockaddr *)((caddr_t)sa + incr);
1381 	}
1382 	stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
1383 	/* Fill in the return id */
1384 	a_id = (sctp_assoc_t *) optval;
1385 	*a_id = sctp_get_associd(stcb);
1386 
1387 	/* initialize authentication parameters for the assoc */
1388 	sctp_initialize_auth_params(inp, stcb);
1389 
1390 	if (delay) {
1391 		/* doing delayed connection */
1392 		stcb->asoc.delayed_connection = 1;
1393 		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1394 	} else {
1395 		SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1396 		sctp_send_initiate(inp, stcb);
1397 	}
1398 	SCTP_TCB_UNLOCK(stcb);
1399 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1400 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1401 		/* Set the connected flag so we can queue data */
1402 		soisconnecting(so);
1403 	}
1404 out_now:
1405 	if (creat_lock_on)
1406 		SCTP_ASOC_CREATE_UNLOCK(inp);
1407 	SCTP_INP_DECR_REF(inp);
1408 	return error;
1409 }
1410 
/*
 * Resolve the stcb a socket-option handler operates on:
 *  - On a connected (one-to-one style) socket, take the first (only)
 *    association on the endpoint and return it TCB-locked.
 *  - Otherwise, a non-zero assoc_id is looked up via
 *    sctp_findassociation_ep_asocid() (also returned locked); if no
 *    match is found this sets error = ENOENT and executes 'break'.
 *  - assoc_id == 0 on a non-connected socket yields stcb = NULL.
 * NOTE: usable only inside a switch case (it relies on 'break') and
 * requires 'error' to be in scope; the caller must unlock any stcb
 * returned.
 */
#define SCTP_FIND_STCB(inp, stcb, assoc_id) \
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { \
		SCTP_INP_RLOCK(inp); \
		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
		if (stcb) \
			SCTP_TCB_LOCK(stcb); \
		SCTP_INP_RUNLOCK(inp); \
	} else if (assoc_id != 0) { \
		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
		if (stcb == NULL) { \
			error = ENOENT; \
			break; \
		} \
	} else { \
		stcb = NULL; \
	}
1427 
/*
 * Validate that the user-supplied option buffer 'srcp' is at least
 * sizeof(type) bytes long ('size') before casting it to 'destp'.
 * On a short buffer this sets error = EINVAL and executes 'break',
 * so it is usable only inside a switch case with 'error' in scope.
 */
#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) \
	if (size < sizeof(type)) { \
		error = EINVAL; \
		break; \
	} else { \
		destp = (type *)srcp; \
	}
1435 
1436 static int
1437 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1438     void *p)
1439 {
1440 	struct sctp_inpcb *inp;
1441 	int error, val = 0;
1442 	struct sctp_tcb *stcb = NULL;
1443 
1444 	if (optval == NULL) {
1445 		return (EINVAL);
1446 	}
1447 	inp = (struct sctp_inpcb *)so->so_pcb;
1448 	if (inp == 0)
1449 		return EINVAL;
1450 	error = 0;
1451 
1452 	switch (optname) {
1453 	case SCTP_NODELAY:
1454 	case SCTP_AUTOCLOSE:
1455 	case SCTP_EXPLICIT_EOR:
1456 	case SCTP_AUTO_ASCONF:
1457 	case SCTP_DISABLE_FRAGMENTS:
1458 	case SCTP_I_WANT_MAPPED_V4_ADDR:
1459 	case SCTP_USE_EXT_RCVINFO:
1460 		SCTP_INP_RLOCK(inp);
1461 		switch (optname) {
1462 		case SCTP_DISABLE_FRAGMENTS:
1463 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1464 			break;
1465 		case SCTP_I_WANT_MAPPED_V4_ADDR:
1466 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1467 			break;
1468 		case SCTP_AUTO_ASCONF:
1469 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1470 			break;
1471 		case SCTP_EXPLICIT_EOR:
1472 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1473 			break;
1474 		case SCTP_NODELAY:
1475 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1476 			break;
1477 		case SCTP_USE_EXT_RCVINFO:
1478 			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1479 			break;
1480 		case SCTP_AUTOCLOSE:
1481 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1482 				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1483 			else
1484 				val = 0;
1485 			break;
1486 
1487 		default:
1488 			error = ENOPROTOOPT;
1489 		}		/* end switch (sopt->sopt_name) */
1490 		if (optname != SCTP_AUTOCLOSE) {
1491 			/* make it an "on/off" value */
1492 			val = (val != 0);
1493 		}
1494 		if (*optsize < sizeof(val)) {
1495 			error = EINVAL;
1496 		}
1497 		SCTP_INP_RUNLOCK(inp);
1498 		if (error == 0) {
1499 			/* return the option value */
1500 			*(int *)optval = val;
1501 			*optsize = sizeof(val);
1502 		}
1503 		break;
1504 
1505 	case SCTP_PARTIAL_DELIVERY_POINT:
1506 		{
1507 			uint32_t *value;
1508 
1509 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1510 			*value = inp->partial_delivery_point;
1511 			*optsize = sizeof(uint32_t);
1512 		}
1513 		break;
1514 	case SCTP_FRAGMENT_INTERLEAVE:
1515 		{
1516 			uint32_t *value;
1517 
1518 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1519 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1520 				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1521 					*value = SCTP_FRAG_LEVEL_2;
1522 				} else {
1523 					*value = SCTP_FRAG_LEVEL_1;
1524 				}
1525 			} else {
1526 				*value = SCTP_FRAG_LEVEL_0;
1527 			}
1528 			*optsize = sizeof(uint32_t);
1529 		}
1530 		break;
1531 	case SCTP_CMT_ON_OFF:
1532 		{
1533 			struct sctp_assoc_value *av;
1534 
1535 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1536 			if (sctp_cmt_on_off) {
1537 				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1538 				if (stcb) {
1539 					av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1540 					SCTP_TCB_UNLOCK(stcb);
1541 
1542 				} else {
1543 					error = ENOTCONN;
1544 				}
1545 			} else {
1546 				error = ENOPROTOOPT;
1547 			}
1548 			*optsize = sizeof(*av);
1549 		}
1550 		break;
1551 	case SCTP_GET_ADDR_LEN:
1552 		{
1553 			struct sctp_assoc_value *av;
1554 
1555 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1556 			error = EINVAL;
1557 #ifdef INET
1558 			if (av->assoc_value == AF_INET) {
1559 				av->assoc_value = sizeof(struct sockaddr_in);
1560 				error = 0;
1561 			}
1562 #endif
1563 #ifdef INET6
1564 			if (av->assoc_value == AF_INET6) {
1565 				av->assoc_value = sizeof(struct sockaddr_in6);
1566 				error = 0;
1567 			}
1568 #endif
1569 			*optsize = sizeof(*av);
1570 		}
1571 		break;
1572 	case SCTP_GET_ASOC_ID_LIST:
1573 		{
1574 			struct sctp_assoc_ids *ids;
1575 			int cnt, at;
1576 			uint16_t orig;
1577 
1578 			SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1579 			cnt = 0;
1580 			SCTP_INP_RLOCK(inp);
1581 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
1582 			if (stcb == NULL) {
1583 		none_out_now:
1584 				ids->asls_numb_present = 0;
1585 				ids->asls_more_to_get = 0;
1586 				SCTP_INP_RUNLOCK(inp);
1587 				break;
1588 			}
1589 			orig = ids->asls_assoc_start;
1590 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
1591 			while (orig) {
1592 				stcb = LIST_NEXT(stcb, sctp_tcblist);
1593 				orig--;
1594 				cnt--;
1595 				if (stcb == NULL)
1596 					goto none_out_now;
1597 			}
1598 			if (stcb == NULL)
1599 				goto none_out_now;
1600 
1601 			at = 0;
1602 			ids->asls_numb_present = 0;
1603 			ids->asls_more_to_get = 1;
1604 			while (at < MAX_ASOC_IDS_RET) {
1605 				ids->asls_assoc_id[at] = sctp_get_associd(stcb);
1606 				at++;
1607 				ids->asls_numb_present++;
1608 				stcb = LIST_NEXT(stcb, sctp_tcblist);
1609 				if (stcb == NULL) {
1610 					ids->asls_more_to_get = 0;
1611 					break;
1612 				}
1613 			}
1614 			SCTP_INP_RUNLOCK(inp);
1615 		}
1616 		break;
1617 	case SCTP_CONTEXT:
1618 		{
1619 			struct sctp_assoc_value *av;
1620 
1621 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1622 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1623 
1624 			if (stcb) {
1625 				av->assoc_value = stcb->asoc.context;
1626 				SCTP_TCB_UNLOCK(stcb);
1627 			} else {
1628 				SCTP_INP_RLOCK(inp);
1629 				av->assoc_value = inp->sctp_context;
1630 				SCTP_INP_RUNLOCK(inp);
1631 			}
1632 			*optsize = sizeof(*av);
1633 		}
1634 		break;
1635 	case SCTP_VRF_ID:
1636 		{
1637 			uint32_t *vrf_id;
1638 
1639 			SCTP_CHECK_AND_CAST(vrf_id, optval, uint32_t, *optsize);
1640 			*vrf_id = inp->def_vrf_id;
1641 			break;
1642 		}
1643 	case SCTP_GET_ASOC_VRF:
1644 		{
1645 			struct sctp_assoc_value *id;
1646 
1647 			SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1648 			SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1649 			if (stcb == NULL) {
1650 				error = EINVAL;
1651 				break;
1652 			}
1653 			id->assoc_value = stcb->asoc.vrf_id;
1654 			break;
1655 		}
1656 	case SCTP_GET_VRF_IDS:
1657 		{
1658 			error = EOPNOTSUPP;
1659 			break;
1660 		}
1661 	case SCTP_GET_NONCE_VALUES:
1662 		{
1663 			struct sctp_get_nonce_values *gnv;
1664 
1665 			SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1666 			SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1667 
1668 			if (stcb) {
1669 				gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1670 				gnv->gn_local_tag = stcb->asoc.my_vtag;
1671 				SCTP_TCB_UNLOCK(stcb);
1672 			} else {
1673 				error = ENOTCONN;
1674 			}
1675 			*optsize = sizeof(*gnv);
1676 		}
1677 		break;
1678 	case SCTP_DELAYED_ACK_TIME:
1679 		{
1680 			struct sctp_assoc_value *tm;
1681 
1682 			SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, *optsize);
1683 			SCTP_FIND_STCB(inp, stcb, tm->assoc_id);
1684 
1685 			if (stcb) {
1686 				tm->assoc_value = stcb->asoc.delayed_ack;
1687 				SCTP_TCB_UNLOCK(stcb);
1688 			} else {
1689 				SCTP_INP_RLOCK(inp);
1690 				tm->assoc_value = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1691 				SCTP_INP_RUNLOCK(inp);
1692 			}
1693 			*optsize = sizeof(*tm);
1694 		}
1695 		break;
1696 
1697 	case SCTP_GET_SNDBUF_USE:
1698 		{
1699 			struct sctp_sockstat *ss;
1700 
1701 			SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1702 			SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1703 
1704 			if (stcb) {
1705 				ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1706 				ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1707 				    stcb->asoc.size_on_all_streams);
1708 				SCTP_TCB_UNLOCK(stcb);
1709 			} else {
1710 				error = ENOTCONN;
1711 			}
1712 			*optsize = sizeof(struct sctp_sockstat);
1713 		}
1714 		break;
1715 	case SCTP_MAXBURST:
1716 		{
1717 			uint8_t *value;
1718 
1719 			SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
1720 
1721 			SCTP_INP_RLOCK(inp);
1722 			*value = inp->sctp_ep.max_burst;
1723 			SCTP_INP_RUNLOCK(inp);
1724 			*optsize = sizeof(uint8_t);
1725 		}
1726 		break;
1727 	case SCTP_MAXSEG:
1728 		{
1729 			struct sctp_assoc_value *av;
1730 			int ovh;
1731 
1732 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1733 			if (av->assoc_id) {
1734 				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1735 			} else {
1736 				stcb = NULL;
1737 			}
1738 
1739 			if (stcb) {
1740 				av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1741 				SCTP_TCB_UNLOCK(stcb);
1742 			} else {
1743 				SCTP_INP_RLOCK(inp);
1744 				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1745 					ovh = SCTP_MED_OVERHEAD;
1746 				} else {
1747 					ovh = SCTP_MED_V4_OVERHEAD;
1748 				}
1749 				av->assoc_value = inp->sctp_frag_point - ovh;
1750 				SCTP_INP_RUNLOCK(inp);
1751 			}
1752 			*optsize = sizeof(struct sctp_assoc_value);
1753 		}
1754 		break;
1755 	case SCTP_GET_STAT_LOG:
1756 #ifdef SCTP_STAT_LOGGING
1757 		error = sctp_fill_stat_log(optval, optsize);
1758 #else
1759 		error = EOPNOTSUPP;
1760 #endif
1761 		break;
1762 	case SCTP_EVENTS:
1763 		{
1764 			struct sctp_event_subscribe *events;
1765 
1766 			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1767 			memset(events, 0, sizeof(*events));
1768 			SCTP_INP_RLOCK(inp);
1769 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1770 				events->sctp_data_io_event = 1;
1771 
1772 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1773 				events->sctp_association_event = 1;
1774 
1775 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1776 				events->sctp_address_event = 1;
1777 
1778 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1779 				events->sctp_send_failure_event = 1;
1780 
1781 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1782 				events->sctp_peer_error_event = 1;
1783 
1784 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1785 				events->sctp_shutdown_event = 1;
1786 
1787 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
1788 				events->sctp_partial_delivery_event = 1;
1789 
1790 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
1791 				events->sctp_adaptation_layer_event = 1;
1792 
1793 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
1794 				events->sctp_authentication_event = 1;
1795 
1796 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
1797 				events->sctp_stream_reset_events = 1;
1798 			SCTP_INP_RUNLOCK(inp);
1799 			*optsize = sizeof(struct sctp_event_subscribe);
1800 		}
1801 		break;
1802 
1803 	case SCTP_ADAPTATION_LAYER:
1804 		{
1805 			uint32_t *value;
1806 
1807 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1808 
1809 			SCTP_INP_RLOCK(inp);
1810 			*value = inp->sctp_ep.adaptation_layer_indicator;
1811 			SCTP_INP_RUNLOCK(inp);
1812 			*optsize = sizeof(uint32_t);
1813 		}
1814 		break;
1815 	case SCTP_SET_INITIAL_DBG_SEQ:
1816 		{
1817 			uint32_t *value;
1818 
1819 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1820 			SCTP_INP_RLOCK(inp);
1821 			*value = inp->sctp_ep.initial_sequence_debug;
1822 			SCTP_INP_RUNLOCK(inp);
1823 			*optsize = sizeof(uint32_t);
1824 		}
1825 		break;
1826 	case SCTP_GET_LOCAL_ADDR_SIZE:
1827 		{
1828 			uint32_t *value;
1829 
1830 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1831 			SCTP_INP_RLOCK(inp);
1832 			*value = sctp_count_max_addresses(inp);
1833 			SCTP_INP_RUNLOCK(inp);
1834 			*optsize = sizeof(uint32_t);
1835 		}
1836 		break;
1837 	case SCTP_GET_REMOTE_ADDR_SIZE:
1838 		{
1839 			uint32_t *value;
1840 			size_t size;
1841 			struct sctp_nets *net;
1842 
1843 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1844 			/* FIXME MT: change to sctp_assoc_value? */
1845 			SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
1846 
1847 			if (stcb) {
1848 				size = 0;
1849 				/* Count the sizes */
1850 				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1851 					if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
1852 					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
1853 						size += sizeof(struct sockaddr_in6);
1854 					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
1855 						size += sizeof(struct sockaddr_in);
1856 					} else {
1857 						/* huh */
1858 						break;
1859 					}
1860 				}
1861 				SCTP_TCB_UNLOCK(stcb);
1862 				*value = (uint32_t) size;
1863 			} else {
1864 				error = ENOTCONN;
1865 			}
1866 			*optsize = sizeof(uint32_t);
1867 		}
1868 		break;
1869 	case SCTP_GET_PEER_ADDRESSES:
1870 		/*
1871 		 * Get the address information, an array is passed in to
1872 		 * fill up we pack it.
1873 		 */
1874 		{
1875 			size_t cpsz, left;
1876 			struct sockaddr_storage *sas;
1877 			struct sctp_nets *net;
1878 			struct sctp_getaddresses *saddr;
1879 
1880 			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
1881 			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
1882 
1883 			if (stcb) {
1884 				left = (*optsize) - sizeof(struct sctp_getaddresses);
1885 				*optsize = sizeof(struct sctp_getaddresses);
1886 				sas = (struct sockaddr_storage *)&saddr->addr[0];
1887 
1888 				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1889 					if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
1890 					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
1891 						cpsz = sizeof(struct sockaddr_in6);
1892 					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
1893 						cpsz = sizeof(struct sockaddr_in);
1894 					} else {
1895 						/* huh */
1896 						break;
1897 					}
1898 					if (left < cpsz) {
1899 						/* not enough room. */
1900 						break;
1901 					}
1902 					if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1903 					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
1904 						/* Must map the address */
1905 						in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
1906 						    (struct sockaddr_in6 *)sas);
1907 					} else {
1908 						memcpy(sas, &net->ro._l_addr, cpsz);
1909 					}
1910 					((struct sockaddr_in *)sas)->sin_port = stcb->rport;
1911 
1912 					sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
1913 					left -= cpsz;
1914 					*optsize += cpsz;
1915 				}
1916 				SCTP_TCB_UNLOCK(stcb);
1917 			} else {
1918 				error = ENOENT;
1919 			}
1920 		}
1921 		break;
1922 	case SCTP_GET_LOCAL_ADDRESSES:
1923 		{
1924 			size_t limit, actual;
1925 			struct sockaddr_storage *sas;
1926 			struct sctp_getaddresses *saddr;
1927 
1928 			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
1929 			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
1930 
1931 			sas = (struct sockaddr_storage *)&saddr->addr[0];
1932 			limit = *optsize - sizeof(sctp_assoc_t);
1933 			actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
1934 			if (stcb)
1935 				SCTP_TCB_UNLOCK(stcb);
1936 			*optsize = sizeof(struct sockaddr_storage) + actual;
1937 		}
1938 		break;
1939 	case SCTP_PEER_ADDR_PARAMS:
1940 		{
1941 			struct sctp_paddrparams *paddrp;
1942 			struct sctp_nets *net;
1943 
1944 			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
1945 			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
1946 
1947 			net = NULL;
1948 			if (stcb) {
1949 				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
1950 			} else {
1951 				/*
1952 				 * We increment here since
1953 				 * sctp_findassociation_ep_addr() wil do a
1954 				 * decrement if it finds the stcb as long as
1955 				 * the locked tcb (last argument) is NOT a
1956 				 * TCB.. aka NULL.
1957 				 */
1958 				SCTP_INP_INCR_REF(inp);
1959 				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
1960 				if (stcb == NULL) {
1961 					SCTP_INP_DECR_REF(inp);
1962 				}
1963 			}
1964 
1965 			if (stcb) {
1966 				/* Applys to the specific association */
1967 				paddrp->spp_flags = 0;
1968 				if (net) {
1969 					paddrp->spp_pathmaxrxt = net->failure_threshold;
1970 					paddrp->spp_pathmtu = net->mtu;
1971 					/* get flags for HB */
1972 					if (net->dest_state & SCTP_ADDR_NOHB)
1973 						paddrp->spp_flags |= SPP_HB_DISABLE;
1974 					else
1975 						paddrp->spp_flags |= SPP_HB_ENABLE;
1976 					/* get flags for PMTU */
1977 					if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
1978 						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
1979 					} else {
1980 						paddrp->spp_flags |= SPP_PMTUD_DISABLE;
1981 					}
1982 #ifdef INET
1983 					if (net->ro._l_addr.sin.sin_family == AF_INET) {
1984 						paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
1985 						paddrp->spp_flags |= SPP_IPV4_TOS;
1986 					}
1987 #endif
1988 #ifdef INET6
1989 					if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
1990 						paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
1991 						paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
1992 					}
1993 #endif
1994 				} else {
1995 					/*
1996 					 * No destination so return default
1997 					 * value
1998 					 */
1999 					paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2000 					paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
2001 #ifdef INET
2002 					paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
2003 					paddrp->spp_flags |= SPP_IPV4_TOS;
2004 #endif
2005 #ifdef INET6
2006 					paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2007 					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2008 #endif
2009 					/* default settings should be these */
2010 					if (sctp_is_hb_timer_running(stcb)) {
2011 						paddrp->spp_flags |= SPP_HB_ENABLE;
2012 					}
2013 				}
2014 				paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2015 				paddrp->spp_assoc_id = sctp_get_associd(stcb);
2016 				SCTP_TCB_UNLOCK(stcb);
2017 			} else {
2018 				/* Use endpoint defaults */
2019 				SCTP_INP_RLOCK(inp);
2020 				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2021 				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2022 				paddrp->spp_assoc_id = (sctp_assoc_t) 0;
2023 				/* get inp's default */
2024 #ifdef INET
2025 				paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2026 				paddrp->spp_flags |= SPP_IPV4_TOS;
2027 #endif
2028 #ifdef INET6
2029 				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2030 					paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2031 					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2032 				}
2033 #endif
2034 				/* can't return this */
2035 				paddrp->spp_pathmaxrxt = 0;
2036 				paddrp->spp_pathmtu = 0;
2037 				/* default behavior, no stcb */
2038 				paddrp->spp_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE;
2039 
2040 				SCTP_INP_RUNLOCK(inp);
2041 			}
2042 			*optsize = sizeof(struct sctp_paddrparams);
2043 		}
2044 		break;
2045 	case SCTP_GET_PEER_ADDR_INFO:
2046 		{
2047 			struct sctp_paddrinfo *paddri;
2048 			struct sctp_nets *net;
2049 
2050 			SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2051 			SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2052 
2053 			net = NULL;
2054 			if (stcb) {
2055 				net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2056 			} else {
2057 				/*
2058 				 * We increment here since
2059 				 * sctp_findassociation_ep_addr() wil do a
2060 				 * decrement if it finds the stcb as long as
2061 				 * the locked tcb (last argument) is NOT a
2062 				 * TCB.. aka NULL.
2063 				 */
2064 				SCTP_INP_INCR_REF(inp);
2065 				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2066 				if (stcb == NULL) {
2067 					SCTP_INP_DECR_REF(inp);
2068 				}
2069 			}
2070 
2071 			if ((stcb) && (net)) {
2072 				paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2073 				paddri->spinfo_cwnd = net->cwnd;
2074 				paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2075 				paddri->spinfo_rto = net->RTO;
2076 				paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2077 				SCTP_TCB_UNLOCK(stcb);
2078 			} else {
2079 				if (stcb) {
2080 					SCTP_TCB_UNLOCK(stcb);
2081 				}
2082 				error = ENOENT;
2083 			}
2084 			*optsize = sizeof(struct sctp_paddrinfo);
2085 		}
2086 		break;
2087 	case SCTP_PCB_STATUS:
2088 		{
2089 			struct sctp_pcbinfo *spcb;
2090 
2091 			SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2092 			sctp_fill_pcbinfo(spcb);
2093 			*optsize = sizeof(struct sctp_pcbinfo);
2094 		}
2095 		break;
2096 
2097 	case SCTP_STATUS:
2098 		{
2099 			struct sctp_nets *net;
2100 			struct sctp_status *sstat;
2101 
2102 			SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2103 			SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2104 
2105 			if (stcb == NULL) {
2106 				error = EINVAL;
2107 				break;
2108 			}
2109 			/*
2110 			 * I think passing the state is fine since
2111 			 * sctp_constants.h will be available to the user
2112 			 * land.
2113 			 */
2114 			sstat->sstat_state = stcb->asoc.state;
2115 			sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2116 			sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2117 			/*
2118 			 * We can't include chunks that have been passed to
2119 			 * the socket layer. Only things in queue.
2120 			 */
2121 			sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2122 			    stcb->asoc.cnt_on_all_streams);
2123 
2124 
2125 			sstat->sstat_instrms = stcb->asoc.streamincnt;
2126 			sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2127 			sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2128 			memcpy(&sstat->sstat_primary.spinfo_address,
2129 			    &stcb->asoc.primary_destination->ro._l_addr,
2130 			    ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2131 			net = stcb->asoc.primary_destination;
2132 			((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2133 			/*
2134 			 * Again the user can get info from sctp_constants.h
2135 			 * for what the state of the network is.
2136 			 */
2137 			sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2138 			sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2139 			sstat->sstat_primary.spinfo_srtt = net->lastsa;
2140 			sstat->sstat_primary.spinfo_rto = net->RTO;
2141 			sstat->sstat_primary.spinfo_mtu = net->mtu;
2142 			sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2143 			SCTP_TCB_UNLOCK(stcb);
2144 			*optsize = sizeof(*sstat);
2145 		}
2146 		break;
2147 	case SCTP_RTOINFO:
2148 		{
2149 			struct sctp_rtoinfo *srto;
2150 
2151 			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2152 			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2153 
2154 			if (stcb) {
2155 				srto->srto_initial = stcb->asoc.initial_rto;
2156 				srto->srto_max = stcb->asoc.maxrto;
2157 				srto->srto_min = stcb->asoc.minrto;
2158 				SCTP_TCB_UNLOCK(stcb);
2159 			} else {
2160 				SCTP_INP_RLOCK(inp);
2161 				srto->srto_initial = inp->sctp_ep.initial_rto;
2162 				srto->srto_max = inp->sctp_ep.sctp_maxrto;
2163 				srto->srto_min = inp->sctp_ep.sctp_minrto;
2164 				SCTP_INP_RUNLOCK(inp);
2165 			}
2166 			*optsize = sizeof(*srto);
2167 		}
2168 		break;
2169 	case SCTP_ASSOCINFO:
2170 		{
2171 			struct sctp_assocparams *sasoc;
2172 
2173 			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2174 			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2175 
2176 			if (stcb) {
2177 				sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2178 				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2179 				sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2180 				sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2181 				sasoc->sasoc_cookie_life = stcb->asoc.cookie_life;
2182 				sasoc->sasoc_sack_delay = stcb->asoc.delayed_ack;
2183 				sasoc->sasoc_sack_freq = stcb->asoc.sack_freq;
2184 				SCTP_TCB_UNLOCK(stcb);
2185 			} else {
2186 				SCTP_INP_RLOCK(inp);
2187 				sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2188 				sasoc->sasoc_number_peer_destinations = 0;
2189 				sasoc->sasoc_peer_rwnd = 0;
2190 				sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2191 				sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life;
2192 				sasoc->sasoc_sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
2193 				sasoc->sasoc_sack_freq = inp->sctp_ep.sctp_sack_freq;
2194 				SCTP_INP_RUNLOCK(inp);
2195 			}
2196 			*optsize = sizeof(*sasoc);
2197 		}
2198 		break;
2199 	case SCTP_DEFAULT_SEND_PARAM:
2200 		{
2201 			struct sctp_sndrcvinfo *s_info;
2202 
2203 			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2204 			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2205 
2206 			if (stcb) {
2207 				*s_info = stcb->asoc.def_send;
2208 				SCTP_TCB_UNLOCK(stcb);
2209 			} else {
2210 				SCTP_INP_RLOCK(inp);
2211 				*s_info = inp->def_send;
2212 				SCTP_INP_RUNLOCK(inp);
2213 			}
2214 			*optsize = sizeof(*s_info);
2215 		}
2216 		break;
2217 	case SCTP_INITMSG:
2218 		{
2219 			struct sctp_initmsg *sinit;
2220 
2221 			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2222 			SCTP_INP_RLOCK(inp);
2223 			sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2224 			sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2225 			sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2226 			sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2227 			SCTP_INP_RUNLOCK(inp);
2228 			*optsize = sizeof(*sinit);
2229 		}
2230 		break;
2231 	case SCTP_PRIMARY_ADDR:
2232 		/* we allow a "get" operation on this */
2233 		{
2234 			struct sctp_setprim *ssp;
2235 
2236 			SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2237 			SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2238 
2239 			if (stcb) {
2240 				/* simply copy out the sockaddr_storage... */
2241 				memcpy(&ssp->ssp_addr, &stcb->asoc.primary_destination->ro._l_addr,
2242 				    ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
2243 				SCTP_TCB_UNLOCK(stcb);
2244 			} else {
2245 				error = EINVAL;
2246 			}
2247 			*optsize = sizeof(*ssp);
2248 		}
2249 		break;
2250 
2251 	case SCTP_HMAC_IDENT:
2252 		{
2253 			struct sctp_hmacalgo *shmac;
2254 			sctp_hmaclist_t *hmaclist;
2255 			uint32_t size;
2256 			int i;
2257 
2258 			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2259 
2260 			SCTP_INP_RLOCK(inp);
2261 			hmaclist = inp->sctp_ep.local_hmacs;
2262 			if (hmaclist == NULL) {
2263 				/* no HMACs to return */
2264 				*optsize = sizeof(*shmac);
2265 				SCTP_INP_RUNLOCK(inp);
2266 				break;
2267 			}
2268 			/* is there room for all of the hmac ids? */
2269 			size = sizeof(*shmac) + (hmaclist->num_algo *
2270 			    sizeof(shmac->shmac_idents[0]));
2271 			if ((size_t)(*optsize) < size) {
2272 				error = EINVAL;
2273 				SCTP_INP_RUNLOCK(inp);
2274 				break;
2275 			}
2276 			/* copy in the list */
2277 			for (i = 0; i < hmaclist->num_algo; i++)
2278 				shmac->shmac_idents[i] = hmaclist->hmac[i];
2279 			SCTP_INP_RUNLOCK(inp);
2280 			*optsize = size;
2281 			break;
2282 		}
2283 	case SCTP_AUTH_ACTIVE_KEY:
2284 		{
2285 			struct sctp_authkeyid *scact;
2286 
2287 			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2288 			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2289 
2290 			if (stcb) {
2291 				/* get the active key on the assoc */
2292 				scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
2293 				SCTP_TCB_UNLOCK(stcb);
2294 			} else {
2295 				/* get the endpoint active key */
2296 				SCTP_INP_RLOCK(inp);
2297 				scact->scact_keynumber = inp->sctp_ep.default_keyid;
2298 				SCTP_INP_RUNLOCK(inp);
2299 			}
2300 			*optsize = sizeof(*scact);
2301 			break;
2302 		}
2303 	case SCTP_LOCAL_AUTH_CHUNKS:
2304 		{
2305 			struct sctp_authchunks *sac;
2306 			sctp_auth_chklist_t *chklist = NULL;
2307 			size_t size = 0;
2308 
2309 			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2310 			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2311 
2312 			if (stcb) {
2313 				/* get off the assoc */
2314 				chklist = stcb->asoc.local_auth_chunks;
2315 				/* is there enough space? */
2316 				size = sctp_auth_get_chklist_size(chklist);
2317 				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2318 					error = EINVAL;
2319 				} else {
2320 					/* copy in the chunks */
2321 					sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2322 				}
2323 				SCTP_TCB_UNLOCK(stcb);
2324 			} else {
2325 				/* get off the endpoint */
2326 				SCTP_INP_RLOCK(inp);
2327 				chklist = inp->sctp_ep.local_auth_chunks;
2328 				/* is there enough space? */
2329 				size = sctp_auth_get_chklist_size(chklist);
2330 				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2331 					error = EINVAL;
2332 				} else {
2333 					/* copy in the chunks */
2334 					sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2335 				}
2336 				SCTP_INP_RUNLOCK(inp);
2337 			}
2338 			*optsize = sizeof(struct sctp_authchunks) + size;
2339 			break;
2340 		}
2341 	case SCTP_PEER_AUTH_CHUNKS:
2342 		{
2343 			struct sctp_authchunks *sac;
2344 			sctp_auth_chklist_t *chklist = NULL;
2345 			size_t size = 0;
2346 
2347 			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2348 			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2349 
2350 			if (stcb) {
2351 				/* get off the assoc */
2352 				chklist = stcb->asoc.peer_auth_chunks;
2353 				/* is there enough space? */
2354 				size = sctp_auth_get_chklist_size(chklist);
2355 				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2356 					error = EINVAL;
2357 				} else {
2358 					/* copy in the chunks */
2359 					sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2360 				}
2361 				SCTP_TCB_UNLOCK(stcb);
2362 			} else {
2363 				error = ENOENT;
2364 			}
2365 			*optsize = sizeof(struct sctp_authchunks) + size;
2366 			break;
2367 		}
2368 
2369 
2370 	default:
2371 		error = ENOPROTOOPT;
2372 		*optsize = 0;
2373 		break;
2374 	}			/* end switch (sopt->sopt_name) */
2375 	return (error);
2376 }
2377 
2378 static int
2379 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2380     void *p)
2381 {
2382 	int error, set_opt;
2383 	uint32_t *mopt;
2384 	struct sctp_tcb *stcb = NULL;
2385 	struct sctp_inpcb *inp;
2386 	uint32_t vrf_id;
2387 
2388 	if (optval == NULL) {
2389 		printf("optval is NULL\n");
2390 		return (EINVAL);
2391 	}
2392 	inp = (struct sctp_inpcb *)so->so_pcb;
2393 	if (inp == 0) {
2394 		printf("inp is NULL?\n");
2395 		return EINVAL;
2396 	}
2397 	vrf_id = inp->def_vrf_id;
2398 
2399 	error = 0;
2400 	switch (optname) {
2401 	case SCTP_NODELAY:
2402 	case SCTP_AUTOCLOSE:
2403 	case SCTP_AUTO_ASCONF:
2404 	case SCTP_EXPLICIT_EOR:
2405 	case SCTP_DISABLE_FRAGMENTS:
2406 	case SCTP_USE_EXT_RCVINFO:
2407 	case SCTP_I_WANT_MAPPED_V4_ADDR:
2408 		/* copy in the option value */
2409 		SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2410 		set_opt = 0;
2411 		if (error)
2412 			break;
2413 		switch (optname) {
2414 		case SCTP_DISABLE_FRAGMENTS:
2415 			set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2416 			break;
2417 		case SCTP_AUTO_ASCONF:
2418 			set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2419 			break;
2420 		case SCTP_EXPLICIT_EOR:
2421 			set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2422 			break;
2423 		case SCTP_USE_EXT_RCVINFO:
2424 			set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2425 			break;
2426 		case SCTP_I_WANT_MAPPED_V4_ADDR:
2427 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2428 				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2429 			} else {
2430 				return (EINVAL);
2431 			}
2432 			break;
2433 		case SCTP_NODELAY:
2434 			set_opt = SCTP_PCB_FLAGS_NODELAY;
2435 			break;
2436 		case SCTP_AUTOCLOSE:
2437 			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2438 			/*
2439 			 * The value is in ticks. Note this does not effect
2440 			 * old associations, only new ones.
2441 			 */
2442 			inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2443 			break;
2444 		}
2445 		SCTP_INP_WLOCK(inp);
2446 		if (*mopt != 0) {
2447 			sctp_feature_on(inp, set_opt);
2448 		} else {
2449 			sctp_feature_off(inp, set_opt);
2450 		}
2451 		SCTP_INP_WUNLOCK(inp);
2452 		break;
2453 	case SCTP_PARTIAL_DELIVERY_POINT:
2454 		{
2455 			uint32_t *value;
2456 
2457 			SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2458 			if (*value > SCTP_SB_LIMIT_RCV(so)) {
2459 				error = EINVAL;
2460 				break;
2461 			}
2462 			inp->partial_delivery_point = *value;
2463 		}
2464 		break;
2465 	case SCTP_FRAGMENT_INTERLEAVE:
2466 		/* not yet until we re-write sctp_recvmsg() */
2467 		{
2468 			uint32_t *level;
2469 
2470 			SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2471 			if (*level == SCTP_FRAG_LEVEL_2) {
2472 				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2473 				sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2474 			} else if (*level == SCTP_FRAG_LEVEL_1) {
2475 				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2476 				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2477 			} else if (*level == SCTP_FRAG_LEVEL_0) {
2478 				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2479 				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2480 
2481 			} else {
2482 				error = EINVAL;
2483 			}
2484 		}
2485 		break;
2486 	case SCTP_CMT_ON_OFF:
2487 		{
2488 			struct sctp_assoc_value *av;
2489 
2490 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2491 			if (sctp_cmt_on_off) {
2492 				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2493 				if (stcb) {
2494 					stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
2495 					SCTP_TCB_UNLOCK(stcb);
2496 				} else {
2497 					error = ENOTCONN;
2498 				}
2499 			} else {
2500 				error = ENOPROTOOPT;
2501 			}
2502 		}
2503 		break;
2504 	case SCTP_CLR_STAT_LOG:
2505 #ifdef SCTP_STAT_LOGGING
2506 		sctp_clr_stat_log();
2507 #else
2508 		error = EOPNOTSUPP;
2509 #endif
2510 		break;
2511 	case SCTP_CONTEXT:
2512 		{
2513 			struct sctp_assoc_value *av;
2514 
2515 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2516 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2517 
2518 			if (stcb) {
2519 				stcb->asoc.context = av->assoc_value;
2520 				SCTP_TCB_UNLOCK(stcb);
2521 			} else {
2522 				SCTP_INP_WLOCK(inp);
2523 				inp->sctp_context = av->assoc_value;
2524 				SCTP_INP_WUNLOCK(inp);
2525 			}
2526 		}
2527 		break;
2528 	case SCTP_VRF_ID:
2529 		{
2530 			uint32_t *vrf_id;
2531 
2532 			SCTP_CHECK_AND_CAST(vrf_id, optval, uint32_t, optsize);
2533 			if (*vrf_id > SCTP_MAX_VRF_ID) {
2534 				error = EINVAL;
2535 				break;
2536 			}
2537 			inp->def_vrf_id = *vrf_id;
2538 			break;
2539 		}
2540 	case SCTP_DEL_VRF_ID:
2541 		{
2542 			error = EOPNOTSUPP;
2543 			break;
2544 		}
2545 	case SCTP_ADD_VRF_ID:
2546 		{
2547 			error = EOPNOTSUPP;
2548 			break;
2549 		}
2550 
2551 	case SCTP_DELAYED_ACK_TIME:
2552 		{
2553 			struct sctp_assoc_value *tm;
2554 
2555 			SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, optsize);
2556 			SCTP_FIND_STCB(inp, stcb, tm->assoc_id);
2557 
2558 			if (stcb) {
2559 				stcb->asoc.delayed_ack = tm->assoc_value;
2560 				SCTP_TCB_UNLOCK(stcb);
2561 			} else {
2562 				SCTP_INP_WLOCK(inp);
2563 				inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value);
2564 				SCTP_INP_WUNLOCK(inp);
2565 			}
2566 			break;
2567 		}
2568 	case SCTP_AUTH_CHUNK:
2569 		{
2570 			struct sctp_authchunk *sauth;
2571 
2572 			SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
2573 
2574 			SCTP_INP_WLOCK(inp);
2575 			if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks))
2576 				error = EINVAL;
2577 			SCTP_INP_WUNLOCK(inp);
2578 			break;
2579 		}
2580 	case SCTP_AUTH_KEY:
2581 		{
2582 			struct sctp_authkey *sca;
2583 			struct sctp_keyhead *shared_keys;
2584 			sctp_sharedkey_t *shared_key;
2585 			sctp_key_t *key = NULL;
2586 			size_t size;
2587 
2588 			SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
2589 			SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id)
2590 			    size = optsize - sizeof(*sca);
2591 
2592 			if (stcb) {
2593 				/* set it on the assoc */
2594 				shared_keys = &stcb->asoc.shared_keys;
2595 				/* clear the cached keys for this key id */
2596 				sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
2597 				/*
2598 				 * create the new shared key and
2599 				 * insert/replace it
2600 				 */
2601 				if (size > 0) {
2602 					key = sctp_set_key(sca->sca_key, (uint32_t) size);
2603 					if (key == NULL) {
2604 						error = ENOMEM;
2605 						SCTP_TCB_UNLOCK(stcb);
2606 						break;
2607 					}
2608 				}
2609 				shared_key = sctp_alloc_sharedkey();
2610 				if (shared_key == NULL) {
2611 					sctp_free_key(key);
2612 					error = ENOMEM;
2613 					SCTP_TCB_UNLOCK(stcb);
2614 					break;
2615 				}
2616 				shared_key->key = key;
2617 				shared_key->keyid = sca->sca_keynumber;
2618 				sctp_insert_sharedkey(shared_keys, shared_key);
2619 				SCTP_TCB_UNLOCK(stcb);
2620 			} else {
2621 				/* set it on the endpoint */
2622 				SCTP_INP_WLOCK(inp);
2623 				shared_keys = &inp->sctp_ep.shared_keys;
2624 				/*
2625 				 * clear the cached keys on all assocs for
2626 				 * this key id
2627 				 */
2628 				sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
2629 				/*
2630 				 * create the new shared key and
2631 				 * insert/replace it
2632 				 */
2633 				if (size > 0) {
2634 					key = sctp_set_key(sca->sca_key, (uint32_t) size);
2635 					if (key == NULL) {
2636 						error = ENOMEM;
2637 						SCTP_INP_WUNLOCK(inp);
2638 						break;
2639 					}
2640 				}
2641 				shared_key = sctp_alloc_sharedkey();
2642 				if (shared_key == NULL) {
2643 					sctp_free_key(key);
2644 					error = ENOMEM;
2645 					SCTP_INP_WUNLOCK(inp);
2646 					break;
2647 				}
2648 				shared_key->key = key;
2649 				shared_key->keyid = sca->sca_keynumber;
2650 				sctp_insert_sharedkey(shared_keys, shared_key);
2651 				SCTP_INP_WUNLOCK(inp);
2652 			}
2653 			break;
2654 		}
2655 	case SCTP_HMAC_IDENT:
2656 		{
2657 			struct sctp_hmacalgo *shmac;
2658 			sctp_hmaclist_t *hmaclist;
2659 			uint32_t hmacid;
2660 			size_t size, i;
2661 
2662 			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
2663 			size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]);
2664 			hmaclist = sctp_alloc_hmaclist(size);
2665 			if (hmaclist == NULL) {
2666 				error = ENOMEM;
2667 				break;
2668 			}
2669 			for (i = 0; i < size; i++) {
2670 				hmacid = shmac->shmac_idents[i];
2671 				if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) {
2672 					 /* invalid HMACs were found */ ;
2673 					error = EINVAL;
2674 					sctp_free_hmaclist(hmaclist);
2675 					goto sctp_set_hmac_done;
2676 				}
2677 			}
2678 			/* set it on the endpoint */
2679 			SCTP_INP_WLOCK(inp);
2680 			if (inp->sctp_ep.local_hmacs)
2681 				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2682 			inp->sctp_ep.local_hmacs = hmaclist;
2683 			SCTP_INP_WUNLOCK(inp);
2684 	sctp_set_hmac_done:
2685 			break;
2686 		}
2687 	case SCTP_AUTH_ACTIVE_KEY:
2688 		{
2689 			struct sctp_authkeyid *scact;
2690 
2691 			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
2692 			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2693 
2694 			/* set the active key on the right place */
2695 			if (stcb) {
2696 				/* set the active key on the assoc */
2697 				if (sctp_auth_setactivekey(stcb, scact->scact_keynumber))
2698 					error = EINVAL;
2699 				SCTP_TCB_UNLOCK(stcb);
2700 			} else {
2701 				/* set the active key on the endpoint */
2702 				SCTP_INP_WLOCK(inp);
2703 				if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber))
2704 					error = EINVAL;
2705 				SCTP_INP_WUNLOCK(inp);
2706 			}
2707 			break;
2708 		}
2709 	case SCTP_AUTH_DELETE_KEY:
2710 		{
2711 			struct sctp_authkeyid *scdel;
2712 
2713 			SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
2714 			SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
2715 
2716 			/* delete the key from the right place */
2717 			if (stcb) {
2718 				if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber))
2719 					error = EINVAL;
2720 				SCTP_TCB_UNLOCK(stcb);
2721 			} else {
2722 				SCTP_INP_WLOCK(inp);
2723 				if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber))
2724 					error = EINVAL;
2725 				SCTP_INP_WUNLOCK(inp);
2726 			}
2727 			break;
2728 		}
2729 
2730 	case SCTP_RESET_STREAMS:
2731 		{
2732 			struct sctp_stream_reset *strrst;
2733 			uint8_t send_in = 0, send_tsn = 0, send_out = 0;
2734 			int i;
2735 
2736 			SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
2737 			SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
2738 
2739 			if (stcb == NULL) {
2740 				error = ENOENT;
2741 				break;
2742 			}
2743 			if (stcb->asoc.peer_supports_strreset == 0) {
2744 				/*
2745 				 * Peer does not support it, we return
2746 				 * protocol not supported since this is true
2747 				 * for this feature and this peer, not the
2748 				 * socket request in general.
2749 				 */
2750 				error = EPROTONOSUPPORT;
2751 				SCTP_TCB_UNLOCK(stcb);
2752 				break;
2753 			}
2754 			if (stcb->asoc.stream_reset_outstanding) {
2755 				error = EALREADY;
2756 				SCTP_TCB_UNLOCK(stcb);
2757 				break;
2758 			}
2759 			if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2760 				send_in = 1;
2761 			} else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2762 				send_out = 1;
2763 			} else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2764 				send_in = 1;
2765 				send_out = 1;
2766 			} else if (strrst->strrst_flags == SCTP_RESET_TSN) {
2767 				send_tsn = 1;
2768 			} else {
2769 				error = EINVAL;
2770 				SCTP_TCB_UNLOCK(stcb);
2771 				break;
2772 			}
2773 			for (i = 0; i < strrst->strrst_num_streams; i++) {
2774 				if ((send_in) &&
2775 
2776 				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
2777 					error = EINVAL;
2778 					goto get_out;
2779 				}
2780 				if ((send_out) &&
2781 				    (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
2782 					error = EINVAL;
2783 					goto get_out;
2784 				}
2785 			}
2786 			if (error) {
2787 		get_out:
2788 				SCTP_TCB_UNLOCK(stcb);
2789 				break;
2790 			}
2791 			error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2792 			    strrst->strrst_list,
2793 			    send_out, (stcb->asoc.str_reset_seq_in - 3),
2794 			    send_in, send_tsn);
2795 
2796 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ);
2797 			SCTP_TCB_UNLOCK(stcb);
2798 		}
2799 		break;
2800 
2801 	case SCTP_CONNECT_X:
2802 		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
2803 			error = EINVAL;
2804 			break;
2805 		}
2806 		error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
2807 		break;
2808 
2809 	case SCTP_CONNECT_X_DELAYED:
2810 		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
2811 			error = EINVAL;
2812 			break;
2813 		}
2814 		error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
2815 		break;
2816 
2817 	case SCTP_CONNECT_X_COMPLETE:
2818 		{
2819 			struct sockaddr *sa;
2820 			struct sctp_nets *net;
2821 
2822 			/* FIXME MT: check correct? */
2823 			SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
2824 
2825 			/* find tcb */
2826 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2827 				SCTP_INP_RLOCK(inp);
2828 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
2829 				if (stcb) {
2830 					SCTP_TCB_LOCK(stcb);
2831 					net = sctp_findnet(stcb, sa);
2832 				}
2833 				SCTP_INP_RUNLOCK(inp);
2834 			} else {
2835 				/*
2836 				 * We increment here since
2837 				 * sctp_findassociation_ep_addr() wil do a
2838 				 * decrement if it finds the stcb as long as
2839 				 * the locked tcb (last argument) is NOT a
2840 				 * TCB.. aka NULL.
2841 				 */
2842 				SCTP_INP_INCR_REF(inp);
2843 				stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2844 				if (stcb == NULL) {
2845 					SCTP_INP_DECR_REF(inp);
2846 				}
2847 			}
2848 
2849 			if (stcb == NULL) {
2850 				error = ENOENT;
2851 				break;
2852 			}
2853 			if (stcb->asoc.delayed_connection == 1) {
2854 				stcb->asoc.delayed_connection = 0;
2855 				SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2856 				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
2857 				    stcb->asoc.primary_destination,
2858 				    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
2859 				sctp_send_initiate(inp, stcb);
2860 			} else {
2861 				/*
2862 				 * already expired or did not use delayed
2863 				 * connectx
2864 				 */
2865 				error = EALREADY;
2866 			}
2867 			SCTP_TCB_UNLOCK(stcb);
2868 		}
2869 		break;
2870 	case SCTP_MAXBURST:
2871 		{
2872 			uint8_t *burst;
2873 
2874 			SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
2875 
2876 			SCTP_INP_WLOCK(inp);
2877 			if (*burst) {
2878 				inp->sctp_ep.max_burst = *burst;
2879 			}
2880 			SCTP_INP_WUNLOCK(inp);
2881 		}
2882 		break;
2883 	case SCTP_MAXSEG:
2884 		{
2885 			struct sctp_assoc_value *av;
2886 			int ovh;
2887 
2888 			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2889 			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2890 
2891 			if (stcb) {
2892 				error = EINVAL;
2893 				SCTP_TCB_UNLOCK(stcb);
2894 			} else {
2895 				SCTP_INP_WLOCK(inp);
2896 				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2897 					ovh = SCTP_MED_OVERHEAD;
2898 				} else {
2899 					ovh = SCTP_MED_V4_OVERHEAD;
2900 				}
2901 				/*
2902 				 * FIXME MT: I think this is not in tune
2903 				 * with the API ID
2904 				 */
2905 				if (av->assoc_value) {
2906 					inp->sctp_frag_point = (av->assoc_value + ovh);
2907 				} else {
2908 					error = EINVAL;
2909 				}
2910 				SCTP_INP_WUNLOCK(inp);
2911 			}
2912 		}
2913 		break;
2914 	case SCTP_EVENTS:
2915 		{
2916 			struct sctp_event_subscribe *events;
2917 
2918 			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
2919 
2920 			SCTP_INP_WLOCK(inp);
2921 			if (events->sctp_data_io_event) {
2922 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
2923 			} else {
2924 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
2925 			}
2926 
2927 			if (events->sctp_association_event) {
2928 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
2929 			} else {
2930 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
2931 			}
2932 
2933 			if (events->sctp_address_event) {
2934 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
2935 			} else {
2936 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
2937 			}
2938 
2939 			if (events->sctp_send_failure_event) {
2940 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
2941 			} else {
2942 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
2943 			}
2944 
2945 			if (events->sctp_peer_error_event) {
2946 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
2947 			} else {
2948 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
2949 			}
2950 
2951 			if (events->sctp_shutdown_event) {
2952 				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
2953 			} else {
2954 				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
2955 			}
2956 
2957 			if (events->sctp_partial_delivery_event) {
2958 				sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
2959 			} else {
2960 				sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
2961 			}
2962 
2963 			if (events->sctp_adaptation_layer_event) {
2964 				sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
2965 			} else {
2966 				sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
2967 			}
2968 
2969 			if (events->sctp_authentication_event) {
2970 				sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
2971 			} else {
2972 				sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
2973 			}
2974 
2975 			if (events->sctp_stream_reset_events) {
2976 				sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
2977 			} else {
2978 				sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
2979 			}
2980 			SCTP_INP_WUNLOCK(inp);
2981 		}
2982 		break;
2983 
2984 	case SCTP_ADAPTATION_LAYER:
2985 		{
2986 			struct sctp_setadaptation *adap_bits;
2987 
2988 			SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
2989 			SCTP_INP_WLOCK(inp);
2990 			inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
2991 			SCTP_INP_WUNLOCK(inp);
2992 		}
2993 		break;
2994 #ifdef SCTP_DEBUG
2995 	case SCTP_SET_INITIAL_DBG_SEQ:
2996 		{
2997 			uint32_t *vvv;
2998 
2999 			SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3000 			SCTP_INP_WLOCK(inp);
3001 			inp->sctp_ep.initial_sequence_debug = *vvv;
3002 			SCTP_INP_WUNLOCK(inp);
3003 		}
3004 		break;
3005 #endif
3006 	case SCTP_DEFAULT_SEND_PARAM:
3007 		{
3008 			struct sctp_sndrcvinfo *s_info;
3009 
3010 			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3011 			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3012 
3013 			if (stcb) {
3014 				if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
3015 					stcb->asoc.def_send = *s_info;
3016 				} else {
3017 					error = EINVAL;
3018 				}
3019 				SCTP_TCB_UNLOCK(stcb);
3020 			} else {
3021 				SCTP_INP_WLOCK(inp);
3022 				inp->def_send = *s_info;
3023 				SCTP_INP_WUNLOCK(inp);
3024 			}
3025 		}
3026 		break;
3027 	case SCTP_PEER_ADDR_PARAMS:
3028 		/* Applys to the specific association */
3029 		{
3030 			struct sctp_paddrparams *paddrp;
3031 			struct sctp_nets *net;
3032 
3033 			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3034 			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3035 			net = NULL;
3036 			if (stcb) {
3037 				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3038 			} else {
3039 				/*
3040 				 * We increment here since
3041 				 * sctp_findassociation_ep_addr() wil do a
3042 				 * decrement if it finds the stcb as long as
3043 				 * the locked tcb (last argument) is NOT a
3044 				 * TCB.. aka NULL.
3045 				 */
3046 				SCTP_INP_INCR_REF(inp);
3047 				stcb = sctp_findassociation_ep_addr(&inp,
3048 				    (struct sockaddr *)&paddrp->spp_address,
3049 				    &net, NULL, NULL);
3050 				if (stcb == NULL) {
3051 					SCTP_INP_DECR_REF(inp);
3052 				}
3053 			}
3054 
3055 
3056 			if (stcb) {
3057 				/************************TCB SPECIFIC SET ******************/
3058 				/*
3059 				 * do we change the timer for HB, we run
3060 				 * only one?
3061 				 */
3062 				if (paddrp->spp_hbinterval)
3063 					stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3064 				else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3065 					stcb->asoc.heart_beat_delay = 0;
3066 
3067 				/* network sets ? */
3068 				if (net) {
3069 					/************************NET SPECIFIC SET ******************/
3070 					if (paddrp->spp_flags & SPP_HB_DEMAND) {
3071 						/* on demand HB */
3072 						sctp_send_hb(stcb, 1, net);
3073 					}
3074 					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3075 						net->dest_state |= SCTP_ADDR_NOHB;
3076 					}
3077 					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3078 						net->dest_state &= ~SCTP_ADDR_NOHB;
3079 					}
3080 					if (paddrp->spp_flags & SPP_PMTUD_DISABLE) {
3081 						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3082 							sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3083 							    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3084 						}
3085 						if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3086 							net->mtu = paddrp->spp_pathmtu;
3087 							if (net->mtu < stcb->asoc.smallest_mtu)
3088 								sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3089 						}
3090 					}
3091 					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3092 						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3093 							sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3094 						}
3095 					}
3096 					if (paddrp->spp_pathmaxrxt)
3097 						net->failure_threshold = paddrp->spp_pathmaxrxt;
3098 #ifdef INET
3099 					if (paddrp->spp_flags & SPP_IPV4_TOS) {
3100 						if (net->ro._l_addr.sin.sin_family == AF_INET) {
3101 							net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3102 						}
3103 					}
3104 #endif
3105 #ifdef INET6
3106 					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3107 						if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3108 							net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3109 						}
3110 					}
3111 #endif
3112 				} else {
3113 					/************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3114 					if (paddrp->spp_pathmaxrxt)
3115 						stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3116 
3117 					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3118 						/* Turn back on the timer */
3119 						stcb->asoc.hb_is_disabled = 0;
3120 						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3121 					}
3122 					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3123 						int cnt_of_unconf = 0;
3124 						struct sctp_nets *lnet;
3125 
3126 						stcb->asoc.hb_is_disabled = 1;
3127 						TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3128 							if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3129 								cnt_of_unconf++;
3130 							}
3131 						}
3132 						/*
3133 						 * stop the timer ONLY if we
3134 						 * have no unconfirmed
3135 						 * addresses
3136 						 */
3137 						if (cnt_of_unconf == 0) {
3138 							sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3139 						}
3140 					}
3141 					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3142 						/* start up the timer. */
3143 						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3144 					}
3145 #ifdef INET
3146 					if (paddrp->spp_flags & SPP_IPV4_TOS)
3147 						stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3148 #endif
3149 #ifdef INET6
3150 					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3151 						stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3152 #endif
3153 
3154 				}
3155 				SCTP_TCB_UNLOCK(stcb);
3156 			} else {
3157 				/************************NO TCB, SET TO default stuff ******************/
3158 				SCTP_INP_WLOCK(inp);
3159 				/*
3160 				 * For the TOS/FLOWLABEL stuff you set it
3161 				 * with the options on the socket
3162 				 */
3163 				if (paddrp->spp_pathmaxrxt) {
3164 					inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3165 				}
3166 				if (paddrp->spp_flags & SPP_HB_ENABLE) {
3167 					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3168 					sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3169 				} else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3170 					sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3171 				}
3172 				SCTP_INP_WUNLOCK(inp);
3173 			}
3174 		}
3175 		break;
3176 	case SCTP_RTOINFO:
3177 		{
3178 			struct sctp_rtoinfo *srto;
3179 
3180 			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3181 			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3182 
3183 			if (stcb) {
3184 				/* Set in ms we hope :-) */
3185 				if (srto->srto_initial)
3186 					stcb->asoc.initial_rto = srto->srto_initial;
3187 				if (srto->srto_max)
3188 					stcb->asoc.maxrto = srto->srto_max;
3189 				if (srto->srto_min)
3190 					stcb->asoc.minrto = srto->srto_min;
3191 				SCTP_TCB_UNLOCK(stcb);
3192 			} else {
3193 				SCTP_INP_WLOCK(inp);
3194 				/*
3195 				 * If we have a null asoc, its default for
3196 				 * the endpoint
3197 				 */
3198 				if (srto->srto_initial)
3199 					inp->sctp_ep.initial_rto = srto->srto_initial;
3200 				if (srto->srto_max)
3201 					inp->sctp_ep.sctp_maxrto = srto->srto_max;
3202 				if (srto->srto_min)
3203 					inp->sctp_ep.sctp_minrto = srto->srto_min;
3204 				SCTP_INP_WUNLOCK(inp);
3205 			}
3206 		}
3207 		break;
3208 	case SCTP_ASSOCINFO:
3209 		{
3210 			struct sctp_assocparams *sasoc;
3211 
3212 			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
3213 			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
3214 
3215 			if (stcb) {
3216 				if (sasoc->sasoc_asocmaxrxt)
3217 					stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
3218 				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3219 				sasoc->sasoc_peer_rwnd = 0;
3220 				sasoc->sasoc_local_rwnd = 0;
3221 				if (stcb->asoc.cookie_life)
3222 					stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
3223 				stcb->asoc.delayed_ack = sasoc->sasoc_sack_delay;
3224 				if (sasoc->sasoc_sack_freq) {
3225 					stcb->asoc.sack_freq = sasoc->sasoc_sack_freq;
3226 				}
3227 				SCTP_TCB_UNLOCK(stcb);
3228 			} else {
3229 				SCTP_INP_WLOCK(inp);
3230 				if (sasoc->sasoc_asocmaxrxt)
3231 					inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3232 				sasoc->sasoc_number_peer_destinations = 0;
3233 				sasoc->sasoc_peer_rwnd = 0;
3234 				sasoc->sasoc_local_rwnd = 0;
3235 				if (sasoc->sasoc_cookie_life)
3236 					inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life;
3237 				inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sasoc->sasoc_sack_delay);
3238 				if (sasoc->sasoc_sack_freq) {
3239 					inp->sctp_ep.sctp_sack_freq = sasoc->sasoc_sack_freq;
3240 				}
3241 				SCTP_INP_WUNLOCK(inp);
3242 			}
3243 		}
3244 		break;
3245 	case SCTP_INITMSG:
3246 		{
3247 			struct sctp_initmsg *sinit;
3248 
3249 			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
3250 			SCTP_INP_WLOCK(inp);
3251 			if (sinit->sinit_num_ostreams)
3252 				inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3253 
3254 			if (sinit->sinit_max_instreams)
3255 				inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3256 
3257 			if (sinit->sinit_max_attempts)
3258 				inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3259 
3260 			if (sinit->sinit_max_init_timeo)
3261 				inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3262 			SCTP_INP_WUNLOCK(inp);
3263 		}
3264 		break;
3265 	case SCTP_PRIMARY_ADDR:
3266 		{
3267 			struct sctp_setprim *spa;
3268 			struct sctp_nets *net, *lnet;
3269 
3270 			SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
3271 			SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
3272 
3273 			net = NULL;
3274 			if (stcb) {
3275 				net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3276 			} else {
3277 				/*
3278 				 * We increment here since
3279 				 * sctp_findassociation_ep_addr() wil do a
3280 				 * decrement if it finds the stcb as long as
3281 				 * the locked tcb (last argument) is NOT a
3282 				 * TCB.. aka NULL.
3283 				 */
3284 				SCTP_INP_INCR_REF(inp);
3285 				stcb = sctp_findassociation_ep_addr(&inp,
3286 				    (struct sockaddr *)&spa->ssp_addr,
3287 				    &net, NULL, NULL);
3288 				if (stcb == NULL) {
3289 					SCTP_INP_DECR_REF(inp);
3290 				}
3291 			}
3292 
3293 			if ((stcb) && (net)) {
3294 				if ((net != stcb->asoc.primary_destination) &&
3295 				    (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3296 					/* Ok we need to set it */
3297 					lnet = stcb->asoc.primary_destination;
3298 					if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
3299 						if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3300 							net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3301 						}
3302 						net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3303 					}
3304 				}
3305 			} else {
3306 				error = EINVAL;
3307 			}
3308 			if (stcb) {
3309 				SCTP_TCB_UNLOCK(stcb);
3310 			}
3311 		}
3312 		break;
3313 	case SCTP_SET_DYNAMIC_PRIMARY:
3314 		{
3315 			union sctp_sockstore *ss;
3316 
3317 			error = priv_check_cred(curthread->td_ucred,
3318 			    PRIV_NETINET_RESERVEDPORT,
3319 			    SUSER_ALLOWJAIL);
3320 			if (error)
3321 				break;
3322 
3323 			SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
3324 			/* SUPER USER CHECK? */
3325 			error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
3326 		}
3327 		break;
3328 	case SCTP_SET_PEER_PRIMARY_ADDR:
3329 		{
3330 			struct sctp_setpeerprim *sspp;
3331 
3332 			SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
3333 			SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
3334 
3335 			if (stcb) {
3336 				if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
3337 					error = EINVAL;
3338 				}
3339 			} else {
3340 				error = EINVAL;
3341 			}
3342 			SCTP_TCB_UNLOCK(stcb);
3343 		}
3344 		break;
3345 	case SCTP_BINDX_ADD_ADDR:
3346 		{
3347 			struct sctp_getaddresses *addrs;
3348 			struct sockaddr *addr_touse;
3349 			struct sockaddr_in sin;
3350 
3351 			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3352 
3353 			/* see if we're bound all already! */
3354 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3355 				error = EINVAL;
3356 				break;
3357 			}
3358 			/* Is the VRF one we have */
3359 			addr_touse = addrs->addr;
3360 #if defined(INET6)
3361 			if (addrs->addr->sa_family == AF_INET6) {
3362 				struct sockaddr_in6 *sin6;
3363 
3364 				sin6 = (struct sockaddr_in6 *)addr_touse;
3365 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3366 					in6_sin6_2_sin(&sin, sin6);
3367 					addr_touse = (struct sockaddr *)&sin;
3368 				}
3369 			}
3370 #endif
3371 			if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3372 				if (p == NULL) {
3373 					/* Can't get proc for Net/Open BSD */
3374 					error = EINVAL;
3375 					break;
3376 				}
3377 				error = sctp_inpcb_bind(so, addr_touse, p);
3378 				break;
3379 			}
3380 			/*
3381 			 * No locks required here since bind and mgmt_ep_sa
3382 			 * all do their own locking. If we do something for
3383 			 * the FIX: below we may need to lock in that case.
3384 			 */
3385 			if (addrs->sget_assoc_id == 0) {
3386 				/* add the address */
3387 				struct sctp_inpcb *lep;
3388 
3389 				((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3390 				lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
3391 				if (lep != NULL) {
3392 					/*
3393 					 * We must decrement the refcount
3394 					 * since we have the ep already and
3395 					 * are binding. No remove going on
3396 					 * here.
3397 					 */
3398 					SCTP_INP_DECR_REF(inp);
3399 				}
3400 				if (lep == inp) {
3401 					/* already bound to it.. ok */
3402 					break;
3403 				} else if (lep == NULL) {
3404 					((struct sockaddr_in *)addr_touse)->sin_port = 0;
3405 					error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3406 					    SCTP_ADD_IP_ADDRESS, vrf_id);
3407 				} else {
3408 					error = EADDRNOTAVAIL;
3409 				}
3410 				if (error)
3411 					break;
3412 
3413 			} else {
3414 				/*
3415 				 * FIX: decide whether we allow assoc based
3416 				 * bindx
3417 				 */
3418 			}
3419 		}
3420 		break;
3421 	case SCTP_BINDX_REM_ADDR:
3422 		{
3423 			struct sctp_getaddresses *addrs;
3424 			struct sockaddr *addr_touse;
3425 			struct sockaddr_in sin;
3426 
3427 			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3428 			/* see if we're bound all already! */
3429 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3430 				error = EINVAL;
3431 				break;
3432 			}
3433 			addr_touse = addrs->addr;
3434 #if defined(INET6)
3435 			if (addrs->addr->sa_family == AF_INET6) {
3436 				struct sockaddr_in6 *sin6;
3437 
3438 				sin6 = (struct sockaddr_in6 *)addr_touse;
3439 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3440 					in6_sin6_2_sin(&sin, sin6);
3441 					addr_touse = (struct sockaddr *)&sin;
3442 				}
3443 			}
3444 #endif
3445 			/*
3446 			 * No lock required mgmt_ep_sa does its own locking.
3447 			 * If the FIX: below is ever changed we may need to
3448 			 * lock before calling association level binding.
3449 			 */
3450 			if (addrs->sget_assoc_id == 0) {
3451 				/* delete the address */
3452 				sctp_addr_mgmt_ep_sa(inp, addr_touse,
3453 				    SCTP_DEL_IP_ADDRESS, vrf_id);
3454 			} else {
3455 				/*
3456 				 * FIX: decide whether we allow assoc based
3457 				 * bindx
3458 				 */
3459 			}
3460 		}
3461 		break;
3462 	default:
3463 		error = ENOPROTOOPT;
3464 		break;
3465 	}			/* end switch (opt) */
3466 	return (error);
3467 }
3468 
3469 
3470 int
3471 sctp_ctloutput(struct socket *so, struct sockopt *sopt)
3472 {
3473 	void *optval = NULL;
3474 	size_t optsize = 0;
3475 	struct sctp_inpcb *inp;
3476 	void *p;
3477 	int error = 0;
3478 
3479 	inp = (struct sctp_inpcb *)so->so_pcb;
3480 	if (inp == 0) {
3481 		/* I made the same as TCP since we are not setup? */
3482 		return (ECONNRESET);
3483 	}
3484 	if (sopt->sopt_level != IPPROTO_SCTP) {
3485 		/* wrong proto level... send back up to IP */
3486 #ifdef INET6
3487 		if (INP_CHECK_SOCKAF(so, AF_INET6))
3488 			error = ip6_ctloutput(so, sopt);
3489 		else
3490 #endif				/* INET6 */
3491 			error = ip_ctloutput(so, sopt);
3492 		return (error);
3493 	}
3494 	optsize = sopt->sopt_valsize;
3495 	if (optsize) {
3496 		SCTP_MALLOC(optval, void *, optsize, "SCTPSockOpt");
3497 		if (optval == NULL) {
3498 			return (ENOBUFS);
3499 		}
3500 		error = sooptcopyin(sopt, optval, optsize, optsize);
3501 		if (error) {
3502 			SCTP_FREE(optval);
3503 			goto out;
3504 		}
3505 	}
3506 	p = (void *)sopt->sopt_td;
3507 	if (sopt->sopt_dir == SOPT_SET) {
3508 		error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
3509 	} else if (sopt->sopt_dir == SOPT_GET) {
3510 		error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
3511 	} else {
3512 		error = EINVAL;
3513 	}
3514 	if ((error == 0) && (optval != NULL)) {
3515 		error = sooptcopyout(sopt, optval, optsize);
3516 		SCTP_FREE(optval);
3517 	} else if (optval != NULL) {
3518 		SCTP_FREE(optval);
3519 	}
3520 out:
3521 	return (error);
3522 }
3523 
3524 
/*
 * pru_connect entry point: create a new association to 'addr' and start
 * the INIT handshake.  Returns EALREADY if an association to that peer
 * already exists (or is being set up), EADDRINUSE for an already
 * connected one-to-one socket, EINVAL for a socket sitting in the TCP
 * accept pool, and ECONNRESET if the PCB is not set up.
 */
static int
sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	int error = 0;
	int create_lock_on = 0;	/* set once the asoc-create lock is held */
	uint32_t vrf_id;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb = NULL;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		/* I made the same as TCP since we are not setup? */
		return (ECONNRESET);
	}
	/*
	 * Serialize association creation so two racing connect() calls
	 * cannot build duplicate TCBs for the same peer.
	 */
	SCTP_ASOC_CREATE_LOCK(inp);
	create_lock_on = 1;

	/* Hold a ref on the inp for the duration; dropped at out_now. */
	SCTP_INP_INCR_REF(inp);
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Should I really unlock ? */
		error = EFAULT;
		goto out_now;
	}
#ifdef INET6
	/* Reject an IPv6 destination on a socket not bound for v6. */
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
	    (addr->sa_family == AF_INET6)) {
		error = EINVAL;
		goto out_now;
	}
#endif				/* INET6 */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind a ephemeral port */
		error = sctp_inpcb_bind(so, NULL, p);
		if (error) {
			goto out_now;
		}
	}
	/* Now do we connect? */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
		error = EINVAL;
		goto out_now;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		error = EADDRINUSE;
		goto out_now;
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		/* One-to-one socket: at most one association can exist. */
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	} else {
		/*
		 * We increment here since sctp_findassociation_ep_addr()
		 * wil do a decrement if it finds the stcb as long as the
		 * locked tcb (last argument) is NOT a TCB.. aka NULL.
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_DECR_REF(inp);
		} else {
			SCTP_TCB_LOCK(stcb);
		}
	}
	if (stcb != NULL) {
		/* Already have or am bring up an association */
		/*
		 * NOTE(review): when stcb came from the lookup branch above
		 * it was TCB-locked, and out_now does not unlock it — verify
		 * against sctp_findassociation_ep_addr()'s locking contract
		 * whether a lock is leaked on this EALREADY path.
		 */
		error = EALREADY;
		goto out_now;
	}
	vrf_id = inp->def_vrf_id;
	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id);
	if (stcb == NULL) {
		/* Gak! no memory */
		goto out_now;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		/* Set the connected flag so we can queue data */
		soisconnecting(so);
	}
	stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
	SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);

	/* initialize authentication parameters for the assoc */
	sctp_initialize_auth_params(inp, stcb);

	/* Fire off the INIT; the handshake completes asynchronously. */
	sctp_send_initiate(inp, stcb);
	SCTP_TCB_UNLOCK(stcb);
out_now:
	if (create_lock_on)
		SCTP_ASOC_CREATE_UNLOCK(inp);

	SCTP_INP_DECR_REF(inp);
	return error;
}
3625 
3626 int
3627 sctp_listen(struct socket *so, int backlog, struct thread *p)
3628 {
3629 	/*
3630 	 * Note this module depends on the protocol processing being called
3631 	 * AFTER any socket level flags and backlog are applied to the
3632 	 * socket. The traditional way that the socket flags are applied is
3633 	 * AFTER protocol processing. We have made a change to the
3634 	 * sys/kern/uipc_socket.c module to reverse this but this MUST be in
3635 	 * place if the socket API for SCTP is to work properly.
3636 	 */
3637 
3638 	int error = 0;
3639 	struct sctp_inpcb *inp;
3640 
3641 	inp = (struct sctp_inpcb *)so->so_pcb;
3642 	if (inp == 0) {
3643 		/* I made the same as TCP since we are not setup? */
3644 		return (ECONNRESET);
3645 	}
3646 	SCTP_INP_RLOCK(inp);
3647 #ifdef SCTP_LOCK_LOGGING
3648 	sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
3649 #endif
3650 	SOCK_LOCK(so);
3651 	error = solisten_proto_check(so);
3652 	if (error) {
3653 		SOCK_UNLOCK(so);
3654 		return (error);
3655 	}
3656 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3657 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3658 		/* We are already connected AND the TCP model */
3659 		SCTP_INP_RUNLOCK(inp);
3660 		SOCK_UNLOCK(so);
3661 		return (EADDRINUSE);
3662 	}
3663 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3664 		/* We must do a bind. */
3665 		SOCK_UNLOCK(so);
3666 		SCTP_INP_RUNLOCK(inp);
3667 		if ((error = sctp_inpcb_bind(so, NULL, p))) {
3668 			/* bind error, probably perm */
3669 			return (error);
3670 		}
3671 		SOCK_LOCK(so);
3672 	} else {
3673 		SCTP_INP_RUNLOCK(inp);
3674 	}
3675 	/* It appears for 7.0 and on, we must always call this. */
3676 	solisten_proto(so, backlog);
3677 
3678 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3679 		/* remove the ACCEPTCONN flag for one-to-many sockets */
3680 		so->so_options &= ~SO_ACCEPTCONN;
3681 	}
3682 	if (backlog == 0) {
3683 		/* turning off listen */
3684 		so->so_options &= ~SO_ACCEPTCONN;
3685 	}
3686 	SOCK_UNLOCK(so);
3687 	return (error);
3688 }
3689 
/* Count of read wakeups delivered late from sctp_accept() (stats only). */
static int sctp_defered_wakeup_cnt = 0;
3691 
/*
 * pru_accept entry point for one-to-one (TCP-model) SCTP sockets:
 * return the peer's primary destination address for the pending
 * association, then deliver any socket wakeups that were deferred
 * while the accept was outstanding.  One-to-many (UDP-model) sockets
 * cannot accept() and get ENOTSUP.
 */
int
sctp_accept(struct socket *so, struct sockaddr **addr)
{
	struct sctp_tcb *stcb;
	struct sctp_inpcb *inp;
	union sctp_sockstore store;

	int error;

	inp = (struct sctp_inpcb *)so->so_pcb;

	if (inp == 0) {
		return (ECONNRESET);
	}
	SCTP_INP_RLOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		/* accept() is meaningless on a one-to-many socket. */
		SCTP_INP_RUNLOCK(inp);
		return (ENOTSUP);
	}
	if (so->so_state & SS_ISDISCONNECTED) {
		SCTP_INP_RUNLOCK(inp);
		return (ECONNABORTED);
	}
	stcb = LIST_FIRST(&inp->sctp_asoc_list);
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	SCTP_TCB_LOCK(stcb);
	SCTP_INP_RUNLOCK(inp);
	/* Snapshot the primary destination under the TCB lock. */
	store = stcb->asoc.primary_destination->ro._l_addr;
	SCTP_TCB_UNLOCK(stcb);
	if (store.sa.sa_family == AF_INET) {
		struct sockaddr_in *sin;

		SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
		sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
		*addr = (struct sockaddr *)sin;
	} else {
		/* Any non-IPv4 primary address is treated as IPv6 here. */
		struct sockaddr_in6 *sin6;

		SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(*sin6);
		sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;

		sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
		/* Restore the embedded scope id for link-local addresses. */
		if ((error = sa6_recoverscope(sin6)) != 0) {
			SCTP_FREE_SONAME(sin6);
			return (error);
		}
		*addr = (struct sockaddr *)sin6;
	}
	/* Wake any delayed sleep action */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
		SCTP_INP_WLOCK(inp);
		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
			/* Drop inp lock before the sockbuf lock/wakeup. */
			SCTP_INP_WUNLOCK(inp);
			SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
			if (sowriteable(inp->sctp_socket)) {
				/* sowwakeup_locked() releases the sockbuf lock */
				sowwakeup_locked(inp->sctp_socket);
			} else {
				SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
			}
			SCTP_INP_WLOCK(inp);
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
			SCTP_INP_WUNLOCK(inp);
			SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
			if (soreadable(inp->sctp_socket)) {
				sctp_defered_wakeup_cnt++;
				/* sorwakeup_locked() releases the sockbuf lock */
				sorwakeup_locked(inp->sctp_socket);
			} else {
				SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
			}
			SCTP_INP_WLOCK(inp);
		}
		SCTP_INP_WUNLOCK(inp);
	}
	return (0);
}
3779 
/*
 * pru_sockaddr: report the local IPv4 address/port this endpoint is
 * using.  For a bound-all socket the address comes from source-address
 * selection against the connected peer (0.0.0.0 when not connected);
 * otherwise the first IPv4 address in the endpoint's bound-address list
 * is returned (ENOENT if it has none).
 */
int
sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
{
	struct sockaddr_in *sin;
	uint32_t vrf_id;
	struct sctp_inpcb *inp;
	struct sctp_ifa *sctp_ifa;

	/*
	 * Do the malloc first in case it blocks.
	 */
	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (!inp) {
		SCTP_FREE_SONAME(sin);
		return ECONNRESET;
	}
	SCTP_INP_RLOCK(inp);
	sin->sin_port = inp->sctp_lport;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
			struct sctp_tcb *stcb;
			struct sockaddr_in *sin_a;
			struct sctp_nets *net;
			int fnd;

			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				goto notConn;
			}
			fnd = 0;
			sin_a = NULL;
			SCTP_TCB_LOCK(stcb);
			/* Find the first IPv4 peer net to select against. */
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
				if (sin_a == NULL)
					/* this will make coverity happy */
					continue;

				if (sin_a->sin_family == AF_INET) {
					fnd = 1;
					break;
				}
			}
			if ((!fnd) || (sin_a == NULL)) {
				/* punt */
				SCTP_TCB_UNLOCK(stcb);
				goto notConn;
			}
			vrf_id = inp->def_vrf_id;
			/* Which local v4 address would we source from? */
			sctp_ifa = sctp_source_address_selection(inp,
			    stcb,
			    (sctp_route_t *) & net->ro,
			    net, 0, vrf_id);
			if (sctp_ifa) {
				sin->sin_addr = sctp_ifa->address.sin.sin_addr;
				sctp_free_ifa(sctp_ifa);
			}
			SCTP_TCB_UNLOCK(stcb);
		} else {
			/* For the bound all case you get back 0 */
	notConn:
			sin->sin_addr.s_addr = 0;
		}

	} else {
		/* Take the first IPv4 address in the list */
		struct sctp_laddr *laddr;
		int fnd = 0;

		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa->address.sa.sa_family == AF_INET) {
				struct sockaddr_in *sin_a;

				sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
				sin->sin_addr = sin_a->sin_addr;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			/* Bound to specific addresses but none are IPv4. */
			SCTP_FREE_SONAME(sin);
			SCTP_INP_RUNLOCK(inp);
			return ENOENT;
		}
	}
	SCTP_INP_RUNLOCK(inp);
	(*addr) = (struct sockaddr *)sin;
	return (0);
}
3872 
3873 int
3874 sctp_peeraddr(struct socket *so, struct sockaddr **addr)
3875 {
3876 	struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
3877 	int fnd;
3878 	struct sockaddr_in *sin_a;
3879 	struct sctp_inpcb *inp;
3880 	struct sctp_tcb *stcb;
3881 	struct sctp_nets *net;
3882 
3883 	/* Do the malloc first in case it blocks. */
3884 	inp = (struct sctp_inpcb *)so->so_pcb;
3885 	if ((inp == NULL) ||
3886 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3887 		/* UDP type and listeners will drop out here */
3888 		return (ENOTCONN);
3889 	}
3890 	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
3891 	sin->sin_family = AF_INET;
3892 	sin->sin_len = sizeof(*sin);
3893 
3894 	/* We must recapture incase we blocked */
3895 	inp = (struct sctp_inpcb *)so->so_pcb;
3896 	if (!inp) {
3897 		SCTP_FREE_SONAME(sin);
3898 		return ECONNRESET;
3899 	}
3900 	SCTP_INP_RLOCK(inp);
3901 	stcb = LIST_FIRST(&inp->sctp_asoc_list);
3902 	if (stcb)
3903 		SCTP_TCB_LOCK(stcb);
3904 	SCTP_INP_RUNLOCK(inp);
3905 	if (stcb == NULL) {
3906 		SCTP_FREE_SONAME(sin);
3907 		return ECONNRESET;
3908 	}
3909 	fnd = 0;
3910 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3911 		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
3912 		if (sin_a->sin_family == AF_INET) {
3913 			fnd = 1;
3914 			sin->sin_port = stcb->rport;
3915 			sin->sin_addr = sin_a->sin_addr;
3916 			break;
3917 		}
3918 	}
3919 	SCTP_TCB_UNLOCK(stcb);
3920 	if (!fnd) {
3921 		/* No IPv4 address */
3922 		SCTP_FREE_SONAME(sin);
3923 		return ENOENT;
3924 	}
3925 	(*addr) = (struct sockaddr *)sin;
3926 	return (0);
3927 }
3928 
/*
 * Protocol user-request switch for SCTP (IPv4) sockets.  Note that
 * sctp_close() serves as both pru_close and pru_detach.
 */
struct pr_usrreqs sctp_usrreqs = {
	.pru_abort = sctp_abort,
	.pru_accept = sctp_accept,
	.pru_attach = sctp_attach,
	.pru_bind = sctp_bind,
	.pru_connect = sctp_connect,
	.pru_control = in_control,
	.pru_close = sctp_close,
	.pru_detach = sctp_close,
	.pru_sopoll = sopoll_generic,
	.pru_disconnect = sctp_disconnect,
	.pru_listen = sctp_listen,
	.pru_peeraddr = sctp_peeraddr,
	.pru_send = sctp_sendm,
	.pru_shutdown = sctp_shutdown,
	.pru_sockaddr = sctp_ingetaddr,
	.pru_sosend = sctp_sosend,
	.pru_soreceive = sctp_soreceive
};
3948