1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_var.h>
#if defined(INET6)
#endif
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/udp.h>


/* Congestion-control and stream-scheduler function tables, defined elsewhere. */
extern struct sctp_cc_functions sctp_cc_functions[];
extern struct sctp_ss_functions sctp_ss_functions[];

/*
 * One-time SCTP stack initialization: set up sysctl-backed tunables,
 * derive the default send/receive socket-buffer space from the mbuf
 * cluster pool, and initialize the PCB machinery.
 */
void
sctp_init(void)
{
	u_long sb_max_adj;

	/* Initialize and modify the sysctled variables */
	sctp_init_sysctls();
	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
		SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
	/*
	 * Allow a user to take no more than 1/2 the number of clusters or
	 * the SB_MAX whichever is smaller for the send window.
	 */
	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
	SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
	/*
	 * Now for the recv window, should we take the same amount? or
	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
	 * now I will just copy.
	 */
	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);

	SCTP_BASE_VAR(first_time) = 0;
	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
	sctp_pcb_init();
#if defined(SCTP_PACKET_LOGGING)
	/* Reset the optional in-kernel packet-capture ring buffer. */
	SCTP_BASE_VAR(packet_log_writers) = 0;
	SCTP_BASE_VAR(packet_log_end) = 0;
	bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
#endif


}

/* Tear down the SCTP PCB state; counterpart of sctp_init(). */
void
sctp_finish(void)
{
	sctp_pcb_finish();
}


/*
 * Lower the association's path MTU to nxtsz and walk the send and sent
 * queues marking any chunk that no longer fits (payload + overhead > nxtsz)
 * as fragmentation-OK.  Chunks already on the wire (sent queue) are
 * additionally marked for immediate retransmission, with flight-size
 * accounting and the retransmit count adjusted, and their RTT measurement
 * cancelled.
 *
 * Assumes the caller holds the TCB lock (it mutates stcb->asoc directly);
 * NOTE(review): lock requirement inferred from callers here — confirm.
 */
void
sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint16_t nxtsz)
{
	struct sctp_tmit_chunk *chk;
	uint16_t overhead;

	/* Adjust that too */
	stcb->asoc.smallest_mtu = nxtsz;
	/* now off to subtract IP_DF flag if needed */
	overhead = IP_HDR_SIZE;
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		/* DATA chunks must be bundled with an AUTH chunk; count it. */
		overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
	}
	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
		if ((chk->send_size + overhead) > nxtsz) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if ((chk->send_size + overhead) > nxtsz) {
			/*
			 * For this guy we also mark for immediate resend
			 * since we sent to big of chunk
			 */
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
			}
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->rec.data.doing_fast_retransmit = 0;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
				    chk->whoTo->flight_size,
				    chk->book_size,
				    (uintptr_t) chk->whoTo,
				    chk->rec.data.TSN_seq);
			}
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
		}
	}
}

/*
 * Handle an ICMP "fragmentation needed" (ICMP_UNREACH_NEEDFRAG) error for
 * an existing association: validate the vtag, extract the next-hop MTU
 * (guessing via sctp_get_prev_mtu() when the router reports 0), clamp the
 * destination's MTU, and re-mark queued chunks via
 * sctp_pathmtu_adjustment().
 *
 * Called with the TCB locked; returns with it unlocked on every path.
 */
static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
		}
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/*
	 * The quoted IP header sits inside the ICMP message; back up from
	 * it to recover the struct icmp.
	 */
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not a unreachable message due to frag. */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/*
	 * NOTE(review): ip_len is used as-is here (no ntohs); presumably it
	 * has already been made host order on this path — confirm.
	 */
	totsz = ip->ip_len;

	nxtsz = ntohs(icmph->icmp_nextmtu);
	if (nxtsz == 0) {
		/*
		 * old type router that does not tell us what the next size
		 * mtu is. Rats we will have to guess (in a educated fashion
		 * of course)
		 */
		nxtsz = sctp_get_prev_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
		if (net->port) {
			/* UDP-encapsulated: the tunnel header eats into the MTU. */
			net->mtu -= sizeof(struct udphdr);
		}
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}


/*
 * Handle non-PMTU ICMP unreachable errors for an association.
 * Reachability-class codes (net/host unreachable, prohibited, etc.) mark
 * the destination down and notify the ULP; protocol/port unreachable is
 * treated as an out-of-the-blue abort and frees the association.
 *
 * Called with the TCB locked.  Returns with it unlocked — in the
 * protocol/port-unreachable leg the lock is consumed by sctp_free_assoc().
 */
void
sctp_notify(struct sctp_inpcb *inp,
    struct ip *ip,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	/* protection */
	int reason;
	struct icmp *icmph;


	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* Recover the enclosing ICMP header from the quoted IP header. */
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {

		/*
		 * Hmm reachablity problems we must examine closely. If its
		 * not reachable, we may have lost a network. Or if there is
		 * NO protocol at the other end named SCTP. well we consider
		 * it a OOTB abort.
		 */
		if (net->dest_state & SCTP_ADDR_REACHABLE) {
			/* Ok that destination is NOT reachable */
			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
			    net->error_count,
			    net->failure_threshold,
			    net);

			net->dest_state &= ~SCTP_ADDR_REACHABLE;
			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
			/*
			 * JRS 5/14/07 - If a destination is unreachable,
			 * the PF bit is turned off. This allows an
			 * unambiguous use of the PF bit for destinations
			 * that are reachable but potentially failed. If the
			 * destination is set to the unreachable state, also
			 * set the destination to the PF state.
			 */
			/*
			 * Add debug message here if destination is not in
			 * PF state.
			 */
			/* Stop any running T3 timers here? */
			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (stcb->asoc.sctp_cmt_pf > 0)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
				    net);
			}
			/* Force the path over its failure threshold. */
			net->error_count = net->failure_threshold + 1;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
			    stcb, SCTP_FAILED_THRESHOLD,
			    (void *)net, SCTP_SO_NOT_LOCKED);
		}
		SCTP_TCB_UNLOCK(stcb);
	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
		/*
		 * Here the peer is either playing tricks on us, including
		 * an address that belongs to someone who does not support
		 * SCTP OR was a userland implementation that shutdown and
		 * now is dead. In either case treat it like a OOTB abort
		 * with no TCB
		 */
		reason = SCTP_PEER_FAULTY;
		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: the socket lock must be taken before
		 * the TCB lock, so drop and re-take the TCB lock while a
		 * refcount keeps the TCB alive.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
#endif
		/* no need to unlock here, since the TCB is gone */
	} else {
		SCTP_TCB_UNLOCK(stcb);
	}
}

/*
 * Protocol control-input entry point: dispatch ICMP errors delivered by
 * the IP layer.  Looks up the association from the quoted IP/SCTP headers
 * (note the to/from reversal — the quoted packet is one WE sent) and hands
 * the error to sctp_notify() or, for PRC_MSGSIZE, to sctp_notify_mbuf().
 * On a lookup that returned only an inp, the reference taken by the
 * lookup is dropped here.
 */
void
sctp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct sctphdr *sh;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;
	if (sa->sa_family != AF_INET ||
	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
		return;
	}
	if (PRC_IS_REDIRECT(cmd)) {
		/* Redirects carry no useful quoted packet; ignore below. */
		ip = 0;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}
	if (ip) {
		struct sctp_inpcb *inp = NULL;
		struct sctp_tcb *stcb = NULL;
		struct sctp_nets *net = NULL;
		struct sockaddr_in to, from;

		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		bzero(&to, sizeof(to));
		bzero(&from, sizeof(from));
		from.sin_family = to.sin_family = AF_INET;
		from.sin_len = to.sin_len = sizeof(to);
		from.sin_port = sh->src_port;
		from.sin_addr = ip->ip_src;
		to.sin_port = sh->dest_port;
		to.sin_addr = ip->ip_dst;

		/*
		 * 'to' holds the dest of the packet that failed to be sent.
		 * 'from' holds our local endpoint address. Thus we reverse
		 * the to and the from in the lookup.
		 */
		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
		    (struct sockaddr *)&to,
		    &inp, &net, 1, vrf_id);
		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
			if (cmd != PRC_MSGSIZE) {
				sctp_notify(inp, ip, sh,
				    (struct sockaddr *)&to, stcb,
				    net);
			} else {
				/* handle possible ICMP size messages */
				sctp_notify_mbuf(inp, stcb, net, ip, sh);
			}
		} else {
			if ((stcb == NULL) && (inp != NULL)) {
				/* reduce ref-count */
				SCTP_INP_WLOCK(inp);
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
			}
			if (stcb) {
				SCTP_TCB_UNLOCK(stcb);
			}
		}
	}
	return;
}

/*
 * sysctl handler: given a pair of sockaddr_in (local, remote), look up the
 * matching association and return the owning socket's credentials to a
 * sufficiently privileged caller.  Returns 0 or an errno (ENOENT when no
 * matching connected association exists).
 */
static int
sctp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct sctp_inpcb *inp;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;
	int error;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);

	if (error)
		return (error);

	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);

	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
	    sintosa(&addrs[1]),
	    &inp, &net, 1, vrf_id);
	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
		if ((inp != NULL) && (stcb == NULL)) {
			/*
			 * reduce ref-count taken by the lookup; keep the
			 * WLOCK and continue into the common exit path.
			 */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			goto cred_can_cont;
		}
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		error = ENOENT;
		goto out;
	}
	SCTP_TCB_UNLOCK(stcb);
	/*
	 * We use the write lock here, only since in the error leg we need
	 * it. If we used RLOCK, then we would have to
	 * wlock/decr/unlock/rlock. Which in theory could create a hole.
	 * Better to use higher wlock.
	 */
	SCTP_INP_WLOCK(inp);
cred_can_cont:
	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
	if (error) {
		SCTP_INP_WUNLOCK(inp);
		goto out;
	}
	cru2x(inp->sctp_socket->so_cred, &xuc);
	SCTP_INP_WUNLOCK(inp);
	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");


/*
 * pru_abort: forcibly tear down the endpoint.  Atomically claims the
 * SOCKET_GONE/CLOSE_IP flags (retrying on cmpset races), frees the inpcb
 * with an abort, clears both socket buffers, and detaches so->so_pcb.
 */
static void
sctp_abort(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		return;
	}
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
		sctp_log_closing(inp, NULL, 16);
#endif
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		/* cmpset lost a race; retry unless someone else closed it. */
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}

/*
 * pru_attach: create the SCTP inpcb for a new IPv4 socket, reserving
 * default socket-buffer space when none is set and marking the inpcb as
 * IPv4-only.  Under IPSEC, a failed policy init tears the fresh inpcb
 * back down via the same cmpset-claim protocol used by close/abort.
 * Returns 0 or an errno.
 */
static int
sctp_attach(struct socket *so, int proto, struct thread *p)
{
	struct sctp_inpcb *inp;
	struct inpcb *ip_inp;
	int error;
	uint32_t vrf_id = SCTP_DEFAULT_VRFID;

#ifdef IPSEC
	uint32_t flags;

#endif

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp != 0) {
		/* already attached */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
		if (error) {
			return error;
		}
	}
	error = sctp_inpcb_alloc(so, vrf_id);
	if (error) {
		return error;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	SCTP_INP_WLOCK(inp);
	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
	ip_inp = &inp->ip_inp.inp;
	ip_inp->inp_vflag |= INP_IPV4;
	ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
#ifdef IPSEC
	error = ipsec_init_policy(so, &ip_inp->inp_sp);
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (error != 0) {
		/* Policy init failed: destroy the inpcb we just created. */
try_again:
		flags = inp->sctp_flags;
		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 15);
#endif
			SCTP_INP_WUNLOCK(inp);
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		} else {
			flags = inp->sctp_flags;
			if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
				goto try_again;
			} else {
				SCTP_INP_WUNLOCK(inp);
			}
		}
		return error;
	}
#endif				/* IPSEC */
	SCTP_INP_WUNLOCK(inp);
	return 0;
}

/*
 * pru_bind: validate that addr (if given) is a correctly sized AF_INET
 * sockaddr for this v4 socket, then delegate to sctp_inpcb_bind().
 * Returns 0 or an errno.
 */
static int
sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	struct sctp_inpcb *inp = NULL;
	int error;

#ifdef INET6
	if (addr && addr->sa_family != AF_INET) {
		/* must be a v4 address! */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
#endif				/* INET6 */
	if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	error = sctp_inpcb_bind(so, addr, NULL, p);
	return error;
}

/*
 * pru_close: detach the socket from its inpcb.  Claims the
 * SOCKET_GONE/CLOSE_IP flags atomically (retrying on races), then frees
 * the inpcb — with an abort when SO_LINGER-0 is set or unread data
 * remains, gracefully otherwise — clears both socket buffers, and nulls
 * so->so_pcb.
 */
void
sctp_close(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return;

	/*
	 * Inform all the lower layer assoc that we are done.
	 */
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
		    (so->so_rcv.sb_cc > 0)) {
			/* Linger-0 or unread data: abortive close. */
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 13);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		} else {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 14);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		}
		/*
		 * The socket is now detached, no matter what the state of
		 * the SCTP association.
		 */
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		/* cmpset lost a race; retry unless another closer won. */
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}


int
sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *p);


/*
 * pru_send: accumulate mbuf chains on the inpcb (inp->pkt / inp->control)
 * and, once the caller signals the record is complete (no PRUS_MORETOCOME),
 * push the whole thing out via sctp_output().  Frees m and control on
 * every error path.  Returns 0 or an errno.
 */
int
sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *p)
{
	struct sctp_inpcb *inp;
	int error;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		sctp_m_freem(m);
		return EINVAL;
	}
	/* Got to have an to address if we are NOT a connected socket */
	if ((addr == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
	    ) {
		goto connected_type;
	} else if (addr == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
		error = EDESTADDRREQ;
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		return (error);
	}
#ifdef INET6
	if (addr->sa_family != AF_INET) {
		/* must be a v4 address! */
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		error = EDESTADDRREQ;
		return EDESTADDRREQ;
	}
#endif				/* INET6 */
connected_type:
	/* now what about control */
	if (control) {
		if (inp->control) {
			/* A previous partial send left control data behind. */
			SCTP_PRINTF("huh? control set?\n");
			sctp_m_freem(inp->control);
			inp->control = NULL;
		}
		inp->control = control;
	}
	/* Place the data */
	if (inp->pkt) {
		SCTP_BUF_NEXT(inp->pkt_last) = m;
		inp->pkt_last = m;
	} else {
		inp->pkt_last = inp->pkt = m;
	}
	if (
	/* FreeBSD uses a flag passed */
	    ((flags & PRUS_MORETOCOME) == 0)
	    ) {
		/*
		 * note with the current version this code will only be used
		 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
		 * re-defining sosend to use the sctp_sosend. One can
		 * optionally switch back to this code (by changing back the
		 * definitions) but this is not advisable. This code is used
		 * by FreeBSD when sending a file with sendfile() though.
		 */
		int ret;

		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
		inp->pkt = NULL;
		inp->control = NULL;
		return (ret);
	} else {
		return (0);
	}
}

/*
 * pru_disconnect (TCP-model sockets only): start an orderly association
 * teardown.  Linger-0 or unread data forces an ABORT and immediate
 * sctp_free_assoc(); otherwise a SHUTDOWN is sent when the queues are
 * empty, or SHUTDOWN_PENDING is set while remaining data drains.  An
 * incomplete locked-on-sending message also forces an abort
 * (abort_anyway).  Returns 0 or an errno; EOPNOTSUPP for one-to-many
 * sockets.
 */
int
sctp_disconnect(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
		return (ENOTCONN);
	}
	SCTP_INP_RLOCK(inp);
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* No connection */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		} else {
			struct sctp_association *asoc;
			struct sctp_tcb *stcb;

			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				SCTP_INP_RUNLOCK(inp);
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				return (EINVAL);
			}
			SCTP_TCB_LOCK(stcb);
			asoc = &stcb->asoc;
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				/* We are about to be freed, out of here */
				SCTP_TCB_UNLOCK(stcb);
				SCTP_INP_RUNLOCK(inp);
				return (0);
			}
			if (((so->so_options & SO_LINGER) &&
			    (so->so_linger == 0)) ||
			    (so->so_rcv.sb_cc > 0)) {
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_COOKIE_WAIT) {
					/* Left with Data unread */
					struct mbuf *err;

					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
					if (err) {
						/*
						 * Fill in the user
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;

						ph = mtod(err, struct sctp_paramhdr *);
						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length = htons(SCTP_BUF_LEN(err));
					}
#if defined(SCTP_PANIC_ON_ABORT)
					panic("disconnect does an abort");
#endif
					sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
				}
				SCTP_INP_RUNLOCK(inp);
				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				/* sctp_free_assoc() consumes the TCB lock. */
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
				/* No unlock tcb assoc is gone */
				return (0);
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->stream_queue_cnt == 0)) {
				/* there is nothing queued to send, so done */
				if (asoc->locked_on_sending) {
					goto abort_anyway;
				}
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/* only send SHUTDOWN 1st time thru */
					sctp_stop_timers_for_shutdown(stcb);
					sctp_send_shutdown(stcb,
					    stcb->asoc.primary_destination);
					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			} else {
				/*
				 * we still got (or just got) data to send,
				 * so set SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that SCTP_EOF
				 * should be sent with no data. currently,
				 * we will allow user data to be sent first
				 * and move to SHUTDOWN-PENDING
				 */
				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				if (asoc->locked_on_sending) {
					/* Locked to send out the data */
					struct sctp_stream_queue_pending *sp;

					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
					if (sp == NULL) {
						SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
						    asoc->locked_on_sending->stream_no);
					} else {
						/* An empty, incomplete message can never finish. */
						if ((sp->length == 0) && (sp->msg_is_complete == 0))
							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
				if (TAILQ_EMPTY(&asoc->send_queue) &&
				    TAILQ_EMPTY(&asoc->sent_queue) &&
				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
					struct mbuf *op_err;

			abort_anyway:
					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (op_err) {
						/*
						 * Fill in the user
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(op_err) =
						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
						ph = mtod(op_err,
						    struct sctp_paramhdr *);
						ph->param_type = htons(
						    SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length = htons(SCTP_BUF_LEN(op_err));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
					}
#if defined(SCTP_PANIC_ON_ABORT)
					panic("disconnect does an abort");
#endif

					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
					sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_INP_RUNLOCK(inp);
					/* sctp_free_assoc() consumes the TCB lock. */
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
					return (0);
				} else {
					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
				}
			}
			soisdisconnecting(so);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		/* not reached */
	} else {
		/* UDP model does not support this */
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
		return EOPNOTSUPP;
	}
}

/*
 * pru_flush: discard pending socket-buffer state for one-to-one sockets.
 * The one-to-many (UDP) model is a no-op.  For SHUT_RD the endpoint is
 * also flagged SOCKET_CANT_READ so the stack stops queueing inbound data.
 * Only the sb bookkeeping fields are reset; actual data teardown is left
 * to the subsequent close (see the comment below).  Returns 0 or EINVAL.
 */
int
sctp_flush(struct socket *so, int how)
{
	/*
	 * We will just clear out the values and let subsequent close clear
	 * out the data, if any. Note if the user did a shutdown(SHUT_RD)
	 * they will not be able to read the data, the socket will block
	 * that from happening.
	 */
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	SCTP_INP_RLOCK(inp);
	/* For the 1 to many model this does nothing */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		SCTP_INP_RUNLOCK(inp);
		return (0);
	}
	SCTP_INP_RUNLOCK(inp);
	if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
		/*
		 * First make sure the sb will be happy, we don't use these
		 * except maybe the count
		 */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_READ_LOCK(inp);
		inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
		SCTP_INP_READ_UNLOCK(inp);
		SCTP_INP_WUNLOCK(inp);
		so->so_rcv.sb_cc = 0;
		so->so_rcv.sb_mbcnt = 0;
		so->so_rcv.sb_mb = NULL;
	}
	if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
		/*
		 * First make sure the sb will be happy, we don't use these
		 * except maybe the count
		 */
		so->so_snd.sb_cc = 0;
		so->so_snd.sb_mbcnt = 0;
		so->so_snd.sb_mb = NULL;

	}
	return (0);
}

/*
 * pru_shutdown: initiate the SCTP SHUTDOWN sequence on a one-to-one
 * socket (EOPNOTSUPP on the one-to-many model, where the CANTRCVMORE bit
 * soshutdown set is undone).  Mirrors sctp_disconnect()'s send logic:
 * SHUTDOWN now if the queues are empty, SHUTDOWN_PENDING otherwise, and
 * an abort when an incomplete locked-on-sending message can never finish
 * (abort_anyway — in that path the TCB lock is released by
 * sctp_abort_an_association(), hence the skip_unlock jump).
 * Returns 0 or an errno.
 */
int
sctp_shutdown(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	SCTP_INP_RLOCK(inp);
	/* For UDP model this is a invalid call */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		/* Restore the flags that the soshutdown took away. */
		SOCKBUF_LOCK(&so->so_rcv);
		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
		SOCKBUF_UNLOCK(&so->so_rcv);
		/* This proc will wakeup for read and do nothing (I hope) */
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	/*
	 * Ok if we reach here its the TCP model and it is either a SHUT_WR
	 * or SHUT_RDWR. This means we put the shutdown flag against it.
	 */
	{
		struct sctp_tcb *stcb;
		struct sctp_association *asoc;

		if ((so->so_state &
		    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
			SCTP_INP_RUNLOCK(inp);
			return (ENOTCONN);
		}
		socantsendmore(so);

		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			/*
			 * Ok we hit the case that the shutdown call was
			 * made after an abort or something. Nothing to do
			 * now.
			 */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		SCTP_TCB_LOCK(stcb);
		asoc = &stcb->asoc;
		if (TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->sent_queue) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->locked_on_sending) {
				goto abort_anyway;
			}
			/* there is nothing queued to send, so I'm done... */
			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
				/* only send SHUTDOWN the first time through */
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
			}
		} else {
			/*
			 * we still got (or just got) data to send, so set
			 * SHUTDOWN_PENDING
			 */
			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
			    asoc->primary_destination);

			if (asoc->locked_on_sending) {
				/* Locked to send out the data */
				struct sctp_stream_queue_pending *sp;

				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
				if (sp == NULL) {
					SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
					    asoc->locked_on_sending->stream_no);
				} else {
					/* An empty, incomplete message can never finish. */
					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				struct mbuf *op_err;

		abort_anyway:
				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (op_err) {
					/* Fill in the user initiated abort */
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(op_err) =
					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
					ph = mtod(op_err,
					    struct sctp_paramhdr *);
					ph->param_type = htons(
					    SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(op_err));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
				}
#if defined(SCTP_PANIC_ON_ABORT)
				panic("shutdown does an abort");
#endif
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_RESPONSE_TO_USER_REQ,
				    op_err, SCTP_SO_LOCKED);
				goto skip_unlock;
			} else {
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
			}
		}
		SCTP_TCB_UNLOCK(stcb);
	}
skip_unlock:
	SCTP_INP_RUNLOCK(inp);
	return 0;
}

/*
 * copies a "user" presentable address and removes embedded scope, etc.
1127 * returns 0 on success, 1 on error 1128 */ 1129 static uint32_t 1130 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 1131 { 1132 #ifdef INET6 1133 struct sockaddr_in6 lsa6; 1134 1135 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 1136 &lsa6); 1137 #endif 1138 memcpy(ss, sa, sa->sa_len); 1139 return (0); 1140 } 1141 1142 1143 1144 /* 1145 * NOTE: assumes addr lock is held 1146 */ 1147 static size_t 1148 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, 1149 struct sctp_tcb *stcb, 1150 size_t limit, 1151 struct sockaddr_storage *sas, 1152 uint32_t vrf_id) 1153 { 1154 struct sctp_ifn *sctp_ifn; 1155 struct sctp_ifa *sctp_ifa; 1156 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1157 size_t actual; 1158 int ipv4_addr_legal, ipv6_addr_legal; 1159 struct sctp_vrf *vrf; 1160 1161 actual = 0; 1162 if (limit <= 0) 1163 return (actual); 1164 1165 if (stcb) { 1166 /* Turn on all the appropriate scope */ 1167 loopback_scope = stcb->asoc.loopback_scope; 1168 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1169 local_scope = stcb->asoc.local_scope; 1170 site_scope = stcb->asoc.site_scope; 1171 } else { 1172 /* Turn on ALL scope, since we look at the EP */ 1173 loopback_scope = ipv4_local_scope = local_scope = 1174 site_scope = 1; 1175 } 1176 ipv4_addr_legal = ipv6_addr_legal = 0; 1177 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1178 ipv6_addr_legal = 1; 1179 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1180 ipv4_addr_legal = 1; 1181 } 1182 } else { 1183 ipv4_addr_legal = 1; 1184 } 1185 vrf = sctp_find_vrf(vrf_id); 1186 if (vrf == NULL) { 1187 return (0); 1188 } 1189 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1190 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1191 if ((loopback_scope == 0) && 1192 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1193 /* Skip loopback if loopback_scope not set */ 1194 continue; 1195 } 1196 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1197 if (stcb) { 1198 /* 1199 * For the 
BOUND-ALL case, the list 1200 * associated with a TCB is Always 1201 * considered a reverse list.. i.e. 1202 * it lists addresses that are NOT 1203 * part of the association. If this 1204 * is one of those we must skip it. 1205 */ 1206 if (sctp_is_addr_restricted(stcb, 1207 sctp_ifa)) { 1208 continue; 1209 } 1210 } 1211 switch (sctp_ifa->address.sa.sa_family) { 1212 case AF_INET: 1213 if (ipv4_addr_legal) { 1214 struct sockaddr_in *sin; 1215 1216 sin = (struct sockaddr_in *)&sctp_ifa->address.sa; 1217 if (sin->sin_addr.s_addr == 0) { 1218 /* 1219 * we skip 1220 * unspecifed 1221 * addresses 1222 */ 1223 continue; 1224 } 1225 if ((ipv4_local_scope == 0) && 1226 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1227 continue; 1228 } 1229 #ifdef INET6 1230 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 1231 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); 1232 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1233 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); 1234 actual += sizeof(struct sockaddr_in6); 1235 } else { 1236 #endif 1237 memcpy(sas, sin, sizeof(*sin)); 1238 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; 1239 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); 1240 actual += sizeof(*sin); 1241 #ifdef INET6 1242 } 1243 #endif 1244 if (actual >= limit) { 1245 return (actual); 1246 } 1247 } else { 1248 continue; 1249 } 1250 break; 1251 #ifdef INET6 1252 case AF_INET6: 1253 if (ipv6_addr_legal) { 1254 struct sockaddr_in6 *sin6; 1255 1256 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 1257 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1258 /* 1259 * we skip 1260 * unspecifed 1261 * addresses 1262 */ 1263 continue; 1264 } 1265 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1266 if (local_scope == 0) 1267 continue; 1268 if (sin6->sin6_scope_id == 0) { 1269 if (sa6_recoverscope(sin6) != 0) 1270 /* 1271 * 1272 * bad 1273 * 1274 * li 1275 * nk 1276 * 1277 * loc 1278 * al 1279 * 
1280 * add 1281 * re 1282 * ss 1283 * */ 1284 continue; 1285 } 1286 } 1287 if ((site_scope == 0) && 1288 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1289 continue; 1290 } 1291 memcpy(sas, sin6, sizeof(*sin6)); 1292 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1293 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); 1294 actual += sizeof(*sin6); 1295 if (actual >= limit) { 1296 return (actual); 1297 } 1298 } else { 1299 continue; 1300 } 1301 break; 1302 #endif 1303 default: 1304 /* TSNH */ 1305 break; 1306 } 1307 } 1308 } 1309 } else { 1310 struct sctp_laddr *laddr; 1311 1312 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1313 if (stcb) { 1314 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 1315 continue; 1316 } 1317 } 1318 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) 1319 continue; 1320 1321 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1322 sas = (struct sockaddr_storage *)((caddr_t)sas + 1323 laddr->ifa->address.sa.sa_len); 1324 actual += laddr->ifa->address.sa.sa_len; 1325 if (actual >= limit) { 1326 return (actual); 1327 } 1328 } 1329 } 1330 return (actual); 1331 } 1332 1333 static size_t 1334 sctp_fill_up_addresses(struct sctp_inpcb *inp, 1335 struct sctp_tcb *stcb, 1336 size_t limit, 1337 struct sockaddr_storage *sas) 1338 { 1339 size_t size = 0; 1340 1341 SCTP_IPI_ADDR_RLOCK(); 1342 /* fill up addresses for the endpoint's default vrf */ 1343 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, 1344 inp->def_vrf_id); 1345 SCTP_IPI_ADDR_RUNLOCK(); 1346 return (size); 1347 } 1348 1349 /* 1350 * NOTE: assumes addr lock is held 1351 */ 1352 static int 1353 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) 1354 { 1355 int cnt = 0; 1356 struct sctp_vrf *vrf = NULL; 1357 1358 /* 1359 * In both sub-set bound an bound_all cases we return the MAXIMUM 1360 * number of addresses that you COULD get. 
In reality the sub-set 1361 * bound may have an exclusion list for a given TCB OR in the 1362 * bound-all case a TCB may NOT include the loopback or other 1363 * addresses as well. 1364 */ 1365 vrf = sctp_find_vrf(vrf_id); 1366 if (vrf == NULL) { 1367 return (0); 1368 } 1369 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1370 struct sctp_ifn *sctp_ifn; 1371 struct sctp_ifa *sctp_ifa; 1372 1373 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1374 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1375 /* Count them if they are the right type */ 1376 if (sctp_ifa->address.sa.sa_family == AF_INET) { 1377 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1378 cnt += sizeof(struct sockaddr_in6); 1379 else 1380 cnt += sizeof(struct sockaddr_in); 1381 1382 } else if (sctp_ifa->address.sa.sa_family == AF_INET6) 1383 cnt += sizeof(struct sockaddr_in6); 1384 } 1385 } 1386 } else { 1387 struct sctp_laddr *laddr; 1388 1389 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1390 if (laddr->ifa->address.sa.sa_family == AF_INET) { 1391 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1392 cnt += sizeof(struct sockaddr_in6); 1393 else 1394 cnt += sizeof(struct sockaddr_in); 1395 1396 } else if (laddr->ifa->address.sa.sa_family == AF_INET6) 1397 cnt += sizeof(struct sockaddr_in6); 1398 } 1399 } 1400 return (cnt); 1401 } 1402 1403 static int 1404 sctp_count_max_addresses(struct sctp_inpcb *inp) 1405 { 1406 int cnt = 0; 1407 1408 SCTP_IPI_ADDR_RLOCK(); 1409 /* count addresses for the endpoint's default VRF */ 1410 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id); 1411 SCTP_IPI_ADDR_RUNLOCK(); 1412 return (cnt); 1413 } 1414 1415 static int 1416 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, 1417 size_t optsize, void *p, int delay) 1418 { 1419 int error = 0; 1420 int creat_lock_on = 0; 1421 struct sctp_tcb *stcb = NULL; 1422 struct sockaddr *sa; 1423 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; 1424 int added 
/*
 * Implementation of the sctp_connectx() family: set up (or schedule) an
 * association to the packed address list in 'optval'.
 *
 * optval layout: an int holding the address count, immediately followed by
 * that many packed sockaddrs; on success the leading int is overwritten
 * with the new association id.  'delay' non-zero arms the INIT timer
 * instead of sending the INIT immediately (SCTP_CONNECT_X_DELAYED).
 *
 * Returns 0 or an errno.  Holds the inp ref and the asoc-create lock
 * across the setup; both are released via the out_now path.
 */
static int
sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
    size_t optsize, void *p, int delay)
{
	int error = 0;
	int creat_lock_on = 0;	/* tracks whether out_now must unlock */
	struct sctp_tcb *stcb = NULL;
	struct sockaddr *sa;
	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
	int added = 0;
	uint32_t vrf_id;
	int bad_addresses = 0;
	sctp_assoc_t *a_id;

	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
		return (EADDRINUSE);
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	}
	if (stcb) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
		return (EALREADY);
	}
	/* Pin the inp and serialize association creation on this endpoint. */
	SCTP_INP_INCR_REF(inp);
	SCTP_ASOC_CREATE_LOCK(inp);
	creat_lock_on = 1;
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
		error = EFAULT;
		goto out_now;
	}
	/* optval = [int totaddr][packed sockaddrs...] */
	totaddrp = (int *)optval;
	totaddr = *totaddrp;
	sa = (struct sockaddr *)(totaddrp + 1);
	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
	if ((stcb != NULL) || bad_addresses) {
		/* Already have or am bring up an association */
		SCTP_ASOC_CREATE_UNLOCK(inp);
		creat_lock_on = 0;
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		if (bad_addresses == 0) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
			error = EALREADY;
		}
		goto out_now;
	}
#ifdef INET6
	/* v6 destinations require a v6-bound socket ... */
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
	    (num_v6 > 0)) {
		error = EINVAL;
		goto out_now;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
	    (num_v4 > 0)) {
		struct in6pcb *inp6;

		inp6 = (struct in6pcb *)inp;
		if (SCTP_IPV6_V6ONLY(inp6)) {
			/*
			 * if IPV6_V6ONLY flag, ignore connections destined
			 * to a v4 addr or v4-mapped addr
			 */
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			error = EINVAL;
			goto out_now;
		}
	}
#endif				/* INET6 */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind a ephemeral port */
		error = sctp_inpcb_bind(so, NULL, NULL, p);
		if (error) {
			goto out_now;
		}
	}
	/* FIX ME: do we want to pass in a vrf on the connect call? */
	vrf_id = inp->def_vrf_id;


	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
	    (struct thread *)p
	    );
	if (stcb == NULL) {
		/* Gak! no memory */
		goto out_now;
	}
	/* sctp_aloc_assoc() returns with the TCB locked. */
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
	/* move to second address */
	if (sa->sa_family == AF_INET)
		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
	else
		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));

	error = 0;
	/* Add the remaining totaddr-1 addresses to the new association. */
	added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
	/* Fill in the return id */
	if (error) {
		(void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
		goto out_now;
	}
	/* Hand the association id back through the leading int of optval. */
	a_id = (sctp_assoc_t *) optval;
	*a_id = sctp_get_associd(stcb);

	/* initialize authentication parameters for the assoc */
	sctp_initialize_auth_params(inp, stcb);

	if (delay) {
		/* doing delayed connection */
		stcb->asoc.delayed_connection = 1;
		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
	}
	SCTP_TCB_UNLOCK(stcb);
	/*
	 * NOTE(review): stcb->sctp_ep is read and its flags modified AFTER
	 * the TCB lock is dropped; presumably safe because the inp ref held
	 * above keeps the endpoint alive — confirm against the locking
	 * rules in sctp_pcb.h.
	 */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		/* Set the connected flag so we can queue data */
		soisconnecting(so);
	}
out_now:
	if (creat_lock_on) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
	}
	SCTP_INP_DECR_REF(inp);
	return error;
}

/*
 * Resolve an association id to a locked stcb for get/setsockopt handlers.
 *
 * TCP-model (or tcp-pool) sockets have at most one association, so the
 * first list entry is taken; otherwise the id is looked up.  On lookup
 * failure sets 'error' to ENOENT and executes 'break' — the macro may
 * therefore only be used directly inside a switch/loop body, and the
 * caller must SCTP_TCB_UNLOCK() any non-NULL result.
 */
#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
		SCTP_INP_RLOCK(inp); \
		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
		if (stcb) { \
			SCTP_TCB_LOCK(stcb); \
		} \
		SCTP_INP_RUNLOCK(inp); \
	} else if (assoc_id != 0) { \
		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
		if (stcb == NULL) { \
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
			error = ENOENT; \
			break; \
		} \
	} else { \
		stcb = NULL; \
	} \
}


/*
 * Validate that the user-supplied option buffer 'srcp' is at least
 * sizeof(type) bytes and cast it into 'destp'.  On failure sets 'error'
 * to EINVAL and executes 'break' — like SCTP_FIND_STCB, only usable
 * directly inside a switch/loop body.
 */
#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
	if (size < sizeof(type)) { \
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
		error = EINVAL; \
		break; \
	} else { \
		destp = (type *)srcp; \
	} \
}
1617 case SCTP_I_WANT_MAPPED_V4_ADDR: 1618 case SCTP_USE_EXT_RCVINFO: 1619 SCTP_INP_RLOCK(inp); 1620 switch (optname) { 1621 case SCTP_DISABLE_FRAGMENTS: 1622 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1623 break; 1624 case SCTP_I_WANT_MAPPED_V4_ADDR: 1625 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1626 break; 1627 case SCTP_AUTO_ASCONF: 1628 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1629 /* only valid for bound all sockets */ 1630 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1631 } else { 1632 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1633 error = EINVAL; 1634 goto flags_out; 1635 } 1636 break; 1637 case SCTP_EXPLICIT_EOR: 1638 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1639 break; 1640 case SCTP_NODELAY: 1641 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1642 break; 1643 case SCTP_USE_EXT_RCVINFO: 1644 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1645 break; 1646 case SCTP_AUTOCLOSE: 1647 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1648 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1649 else 1650 val = 0; 1651 break; 1652 1653 default: 1654 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1655 error = ENOPROTOOPT; 1656 } /* end switch (sopt->sopt_name) */ 1657 if (optname != SCTP_AUTOCLOSE) { 1658 /* make it an "on/off" value */ 1659 val = (val != 0); 1660 } 1661 if (*optsize < sizeof(val)) { 1662 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1663 error = EINVAL; 1664 } 1665 flags_out: 1666 SCTP_INP_RUNLOCK(inp); 1667 if (error == 0) { 1668 /* return the option value */ 1669 *(int *)optval = val; 1670 *optsize = sizeof(val); 1671 } 1672 break; 1673 case SCTP_GET_PACKET_LOG: 1674 { 1675 #ifdef SCTP_PACKET_LOGGING 1676 uint8_t *target; 1677 int ret; 1678 1679 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); 1680 ret = sctp_copy_out_packet_log(target, (int)*optsize); 1681 
*optsize = ret; 1682 #else 1683 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1684 error = EOPNOTSUPP; 1685 #endif 1686 break; 1687 } 1688 case SCTP_REUSE_PORT: 1689 { 1690 uint32_t *value; 1691 1692 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 1693 /* Can't do this for a 1-m socket */ 1694 error = EINVAL; 1695 break; 1696 } 1697 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1698 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 1699 *optsize = sizeof(uint32_t); 1700 } 1701 break; 1702 case SCTP_PARTIAL_DELIVERY_POINT: 1703 { 1704 uint32_t *value; 1705 1706 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1707 *value = inp->partial_delivery_point; 1708 *optsize = sizeof(uint32_t); 1709 } 1710 break; 1711 case SCTP_FRAGMENT_INTERLEAVE: 1712 { 1713 uint32_t *value; 1714 1715 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1716 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { 1717 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { 1718 *value = SCTP_FRAG_LEVEL_2; 1719 } else { 1720 *value = SCTP_FRAG_LEVEL_1; 1721 } 1722 } else { 1723 *value = SCTP_FRAG_LEVEL_0; 1724 } 1725 *optsize = sizeof(uint32_t); 1726 } 1727 break; 1728 case SCTP_CMT_ON_OFF: 1729 { 1730 struct sctp_assoc_value *av; 1731 1732 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1733 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1734 if (stcb) { 1735 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 1736 SCTP_TCB_UNLOCK(stcb); 1737 } else { 1738 SCTP_INP_RLOCK(inp); 1739 av->assoc_value = inp->sctp_cmt_on_off; 1740 SCTP_INP_RUNLOCK(inp); 1741 } 1742 *optsize = sizeof(*av); 1743 } 1744 break; 1745 /* JRS - Get socket option for pluggable congestion control */ 1746 case SCTP_PLUGGABLE_CC: 1747 { 1748 struct sctp_assoc_value *av; 1749 1750 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1751 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1752 if (stcb) { 1753 av->assoc_value = 
stcb->asoc.congestion_control_module; 1754 SCTP_TCB_UNLOCK(stcb); 1755 } else { 1756 av->assoc_value = inp->sctp_ep.sctp_default_cc_module; 1757 } 1758 *optsize = sizeof(*av); 1759 } 1760 break; 1761 /* RS - Get socket option for pluggable stream scheduling */ 1762 case SCTP_PLUGGABLE_SS: 1763 { 1764 struct sctp_assoc_value *av; 1765 1766 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1767 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1768 if (stcb) { 1769 av->assoc_value = stcb->asoc.stream_scheduling_module; 1770 SCTP_TCB_UNLOCK(stcb); 1771 } else { 1772 av->assoc_value = inp->sctp_ep.sctp_default_ss_module; 1773 } 1774 *optsize = sizeof(*av); 1775 } 1776 break; 1777 case SCTP_SS_VALUE: 1778 { 1779 struct sctp_stream_value *av; 1780 1781 SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, *optsize); 1782 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1783 if (stcb) { 1784 if (stcb->asoc.ss_functions.sctp_ss_get_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], 1785 &av->stream_value) < 0) { 1786 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1787 error = EINVAL; 1788 } else { 1789 *optsize = sizeof(*av); 1790 } 1791 SCTP_TCB_UNLOCK(stcb); 1792 } else { 1793 /* 1794 * Can't get stream value without 1795 * association 1796 */ 1797 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1798 error = EINVAL; 1799 } 1800 } 1801 break; 1802 case SCTP_GET_ADDR_LEN: 1803 { 1804 struct sctp_assoc_value *av; 1805 1806 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1807 error = EINVAL; 1808 #ifdef INET 1809 if (av->assoc_value == AF_INET) { 1810 av->assoc_value = sizeof(struct sockaddr_in); 1811 error = 0; 1812 } 1813 #endif 1814 #ifdef INET6 1815 if (av->assoc_value == AF_INET6) { 1816 av->assoc_value = sizeof(struct sockaddr_in6); 1817 error = 0; 1818 } 1819 #endif 1820 if (error) { 1821 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1822 } 1823 *optsize = sizeof(*av); 
1824 } 1825 break; 1826 case SCTP_GET_ASSOC_NUMBER: 1827 { 1828 uint32_t *value, cnt; 1829 1830 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1831 cnt = 0; 1832 SCTP_INP_RLOCK(inp); 1833 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1834 cnt++; 1835 } 1836 SCTP_INP_RUNLOCK(inp); 1837 *value = cnt; 1838 *optsize = sizeof(uint32_t); 1839 } 1840 break; 1841 1842 case SCTP_GET_ASSOC_ID_LIST: 1843 { 1844 struct sctp_assoc_ids *ids; 1845 unsigned int at, limit; 1846 1847 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 1848 at = 0; 1849 limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t); 1850 SCTP_INP_RLOCK(inp); 1851 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1852 if (at < limit) { 1853 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); 1854 } else { 1855 error = EINVAL; 1856 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1857 break; 1858 } 1859 } 1860 SCTP_INP_RUNLOCK(inp); 1861 ids->gaids_number_of_ids = at; 1862 *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t)); 1863 } 1864 break; 1865 case SCTP_CONTEXT: 1866 { 1867 struct sctp_assoc_value *av; 1868 1869 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1870 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1871 1872 if (stcb) { 1873 av->assoc_value = stcb->asoc.context; 1874 SCTP_TCB_UNLOCK(stcb); 1875 } else { 1876 SCTP_INP_RLOCK(inp); 1877 av->assoc_value = inp->sctp_context; 1878 SCTP_INP_RUNLOCK(inp); 1879 } 1880 *optsize = sizeof(*av); 1881 } 1882 break; 1883 case SCTP_VRF_ID: 1884 { 1885 uint32_t *default_vrfid; 1886 1887 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize); 1888 *default_vrfid = inp->def_vrf_id; 1889 break; 1890 } 1891 case SCTP_GET_ASOC_VRF: 1892 { 1893 struct sctp_assoc_value *id; 1894 1895 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); 1896 SCTP_FIND_STCB(inp, stcb, id->assoc_id); 1897 if (stcb == NULL) { 1898 error = EINVAL; 1899 SCTP_LTRACE_ERR_RET(inp, 
NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1900 break; 1901 } 1902 id->assoc_value = stcb->asoc.vrf_id; 1903 break; 1904 } 1905 case SCTP_GET_VRF_IDS: 1906 { 1907 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1908 error = EOPNOTSUPP; 1909 break; 1910 } 1911 case SCTP_GET_NONCE_VALUES: 1912 { 1913 struct sctp_get_nonce_values *gnv; 1914 1915 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 1916 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 1917 1918 if (stcb) { 1919 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 1920 gnv->gn_local_tag = stcb->asoc.my_vtag; 1921 SCTP_TCB_UNLOCK(stcb); 1922 } else { 1923 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1924 error = ENOTCONN; 1925 } 1926 *optsize = sizeof(*gnv); 1927 } 1928 break; 1929 case SCTP_DELAYED_SACK: 1930 { 1931 struct sctp_sack_info *sack; 1932 1933 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); 1934 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 1935 if (stcb) { 1936 sack->sack_delay = stcb->asoc.delayed_ack; 1937 sack->sack_freq = stcb->asoc.sack_freq; 1938 SCTP_TCB_UNLOCK(stcb); 1939 } else { 1940 SCTP_INP_RLOCK(inp); 1941 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1942 sack->sack_freq = inp->sctp_ep.sctp_sack_freq; 1943 SCTP_INP_RUNLOCK(inp); 1944 } 1945 *optsize = sizeof(*sack); 1946 } 1947 break; 1948 1949 case SCTP_GET_SNDBUF_USE: 1950 { 1951 struct sctp_sockstat *ss; 1952 1953 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 1954 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 1955 1956 if (stcb) { 1957 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 1958 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 1959 stcb->asoc.size_on_all_streams); 1960 SCTP_TCB_UNLOCK(stcb); 1961 } else { 1962 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1963 error = ENOTCONN; 1964 } 1965 *optsize = sizeof(struct sctp_sockstat); 1966 } 1967 
break; 1968 case SCTP_MAX_BURST: 1969 { 1970 struct sctp_assoc_value *av; 1971 1972 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1973 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1974 1975 if (stcb) { 1976 av->assoc_value = stcb->asoc.max_burst; 1977 SCTP_TCB_UNLOCK(stcb); 1978 } else { 1979 SCTP_INP_RLOCK(inp); 1980 av->assoc_value = inp->sctp_ep.max_burst; 1981 SCTP_INP_RUNLOCK(inp); 1982 } 1983 *optsize = sizeof(struct sctp_assoc_value); 1984 1985 } 1986 break; 1987 case SCTP_MAXSEG: 1988 { 1989 struct sctp_assoc_value *av; 1990 int ovh; 1991 1992 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1993 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1994 1995 if (stcb) { 1996 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 1997 SCTP_TCB_UNLOCK(stcb); 1998 } else { 1999 SCTP_INP_RLOCK(inp); 2000 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2001 ovh = SCTP_MED_OVERHEAD; 2002 } else { 2003 ovh = SCTP_MED_V4_OVERHEAD; 2004 } 2005 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) 2006 av->assoc_value = 0; 2007 else 2008 av->assoc_value = inp->sctp_frag_point - ovh; 2009 SCTP_INP_RUNLOCK(inp); 2010 } 2011 *optsize = sizeof(struct sctp_assoc_value); 2012 } 2013 break; 2014 case SCTP_GET_STAT_LOG: 2015 error = sctp_fill_stat_log(optval, optsize); 2016 break; 2017 case SCTP_EVENTS: 2018 { 2019 struct sctp_event_subscribe *events; 2020 2021 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 2022 memset(events, 0, sizeof(*events)); 2023 SCTP_INP_RLOCK(inp); 2024 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 2025 events->sctp_data_io_event = 1; 2026 2027 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 2028 events->sctp_association_event = 1; 2029 2030 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2031 events->sctp_address_event = 1; 2032 2033 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2034 events->sctp_send_failure_event = 1; 2035 2036 if 
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 2037 events->sctp_peer_error_event = 1; 2038 2039 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 2040 events->sctp_shutdown_event = 1; 2041 2042 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 2043 events->sctp_partial_delivery_event = 1; 2044 2045 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2046 events->sctp_adaptation_layer_event = 1; 2047 2048 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 2049 events->sctp_authentication_event = 1; 2050 2051 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT)) 2052 events->sctp_sender_dry_event = 1; 2053 2054 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2055 events->sctp_stream_reset_event = 1; 2056 SCTP_INP_RUNLOCK(inp); 2057 *optsize = sizeof(struct sctp_event_subscribe); 2058 } 2059 break; 2060 2061 case SCTP_ADAPTATION_LAYER: 2062 { 2063 uint32_t *value; 2064 2065 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2066 2067 SCTP_INP_RLOCK(inp); 2068 *value = inp->sctp_ep.adaptation_layer_indicator; 2069 SCTP_INP_RUNLOCK(inp); 2070 *optsize = sizeof(uint32_t); 2071 } 2072 break; 2073 case SCTP_SET_INITIAL_DBG_SEQ: 2074 { 2075 uint32_t *value; 2076 2077 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2078 SCTP_INP_RLOCK(inp); 2079 *value = inp->sctp_ep.initial_sequence_debug; 2080 SCTP_INP_RUNLOCK(inp); 2081 *optsize = sizeof(uint32_t); 2082 } 2083 break; 2084 case SCTP_GET_LOCAL_ADDR_SIZE: 2085 { 2086 uint32_t *value; 2087 2088 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2089 SCTP_INP_RLOCK(inp); 2090 *value = sctp_count_max_addresses(inp); 2091 SCTP_INP_RUNLOCK(inp); 2092 *optsize = sizeof(uint32_t); 2093 } 2094 break; 2095 case SCTP_GET_REMOTE_ADDR_SIZE: 2096 { 2097 uint32_t *value; 2098 size_t size; 2099 struct sctp_nets *net; 2100 2101 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2102 /* FIXME MT: change to sctp_assoc_value? 
*/ 2103 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 2104 2105 if (stcb) { 2106 size = 0; 2107 /* Count the sizes */ 2108 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2109 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2110 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2111 size += sizeof(struct sockaddr_in6); 2112 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2113 size += sizeof(struct sockaddr_in); 2114 } else { 2115 /* huh */ 2116 break; 2117 } 2118 } 2119 SCTP_TCB_UNLOCK(stcb); 2120 *value = (uint32_t) size; 2121 } else { 2122 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2123 error = ENOTCONN; 2124 } 2125 *optsize = sizeof(uint32_t); 2126 } 2127 break; 2128 case SCTP_GET_PEER_ADDRESSES: 2129 /* 2130 * Get the address information, an array is passed in to 2131 * fill up we pack it. 2132 */ 2133 { 2134 size_t cpsz, left; 2135 struct sockaddr_storage *sas; 2136 struct sctp_nets *net; 2137 struct sctp_getaddresses *saddr; 2138 2139 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2140 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2141 2142 if (stcb) { 2143 left = (*optsize) - sizeof(struct sctp_getaddresses); 2144 *optsize = sizeof(struct sctp_getaddresses); 2145 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2146 2147 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2148 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2149 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2150 cpsz = sizeof(struct sockaddr_in6); 2151 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2152 cpsz = sizeof(struct sockaddr_in); 2153 } else { 2154 /* huh */ 2155 break; 2156 } 2157 if (left < cpsz) { 2158 /* not enough room. 
*/ 2159 break; 2160 } 2161 #ifdef INET6 2162 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 2163 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 2164 /* Must map the address */ 2165 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 2166 (struct sockaddr_in6 *)sas); 2167 } else { 2168 #endif 2169 memcpy(sas, &net->ro._l_addr, cpsz); 2170 #ifdef INET6 2171 } 2172 #endif 2173 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 2174 2175 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 2176 left -= cpsz; 2177 *optsize += cpsz; 2178 } 2179 SCTP_TCB_UNLOCK(stcb); 2180 } else { 2181 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2182 error = ENOENT; 2183 } 2184 } 2185 break; 2186 case SCTP_GET_LOCAL_ADDRESSES: 2187 { 2188 size_t limit, actual; 2189 struct sockaddr_storage *sas; 2190 struct sctp_getaddresses *saddr; 2191 2192 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2193 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2194 2195 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2196 limit = *optsize - sizeof(sctp_assoc_t); 2197 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2198 if (stcb) { 2199 SCTP_TCB_UNLOCK(stcb); 2200 } 2201 *optsize = sizeof(struct sockaddr_storage) + actual; 2202 } 2203 break; 2204 case SCTP_PEER_ADDR_PARAMS: 2205 { 2206 struct sctp_paddrparams *paddrp; 2207 struct sctp_nets *net; 2208 2209 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2210 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2211 2212 net = NULL; 2213 if (stcb) { 2214 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2215 } else { 2216 /* 2217 * We increment here since 2218 * sctp_findassociation_ep_addr() wil do a 2219 * decrement if it finds the stcb as long as 2220 * the locked tcb (last argument) is NOT a 2221 * TCB.. aka NULL. 
2222 */ 2223 SCTP_INP_INCR_REF(inp); 2224 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2225 if (stcb == NULL) { 2226 SCTP_INP_DECR_REF(inp); 2227 } 2228 } 2229 if (stcb && (net == NULL)) { 2230 struct sockaddr *sa; 2231 2232 sa = (struct sockaddr *)&paddrp->spp_address; 2233 if (sa->sa_family == AF_INET) { 2234 struct sockaddr_in *sin; 2235 2236 sin = (struct sockaddr_in *)sa; 2237 if (sin->sin_addr.s_addr) { 2238 error = EINVAL; 2239 SCTP_TCB_UNLOCK(stcb); 2240 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2241 break; 2242 } 2243 } else if (sa->sa_family == AF_INET6) { 2244 struct sockaddr_in6 *sin6; 2245 2246 sin6 = (struct sockaddr_in6 *)sa; 2247 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2248 error = EINVAL; 2249 SCTP_TCB_UNLOCK(stcb); 2250 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2251 break; 2252 } 2253 } else { 2254 error = EAFNOSUPPORT; 2255 SCTP_TCB_UNLOCK(stcb); 2256 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2257 break; 2258 } 2259 } 2260 if (stcb) { 2261 /* Applys to the specific association */ 2262 paddrp->spp_flags = 0; 2263 if (net) { 2264 int ovh; 2265 2266 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2267 ovh = SCTP_MED_OVERHEAD; 2268 } else { 2269 ovh = SCTP_MED_V4_OVERHEAD; 2270 } 2271 2272 2273 paddrp->spp_pathmaxrxt = net->failure_threshold; 2274 paddrp->spp_pathmtu = net->mtu - ovh; 2275 /* get flags for HB */ 2276 if (net->dest_state & SCTP_ADDR_NOHB) 2277 paddrp->spp_flags |= SPP_HB_DISABLE; 2278 else 2279 paddrp->spp_flags |= SPP_HB_ENABLE; 2280 /* get flags for PMTU */ 2281 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2282 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2283 } else { 2284 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2285 } 2286 #ifdef INET 2287 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2288 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2289 paddrp->spp_flags |= 
SPP_IPV4_TOS; 2290 } 2291 #endif 2292 #ifdef INET6 2293 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2294 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2295 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2296 } 2297 #endif 2298 } else { 2299 /* 2300 * No destination so return default 2301 * value 2302 */ 2303 int cnt = 0; 2304 2305 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2306 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2307 #ifdef INET 2308 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2309 paddrp->spp_flags |= SPP_IPV4_TOS; 2310 #endif 2311 #ifdef INET6 2312 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2313 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2314 #endif 2315 /* default settings should be these */ 2316 if (stcb->asoc.hb_is_disabled == 0) { 2317 paddrp->spp_flags |= SPP_HB_ENABLE; 2318 } else { 2319 paddrp->spp_flags |= SPP_HB_DISABLE; 2320 } 2321 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2322 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2323 cnt++; 2324 } 2325 } 2326 if (cnt) { 2327 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2328 } 2329 } 2330 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2331 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2332 SCTP_TCB_UNLOCK(stcb); 2333 } else { 2334 /* Use endpoint defaults */ 2335 SCTP_INP_RLOCK(inp); 2336 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2337 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2338 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2339 /* get inp's default */ 2340 #ifdef INET 2341 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2342 paddrp->spp_flags |= SPP_IPV4_TOS; 2343 #endif 2344 #ifdef INET6 2345 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2346 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2347 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2348 } 2349 #endif 2350 /* can't return this */ 2351 paddrp->spp_pathmtu = 0; 2352 2353 /* default 
behavior, no stcb */ 2354 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2355 2356 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2357 paddrp->spp_flags |= SPP_HB_ENABLE; 2358 } else { 2359 paddrp->spp_flags |= SPP_HB_DISABLE; 2360 } 2361 SCTP_INP_RUNLOCK(inp); 2362 } 2363 *optsize = sizeof(struct sctp_paddrparams); 2364 } 2365 break; 2366 case SCTP_GET_PEER_ADDR_INFO: 2367 { 2368 struct sctp_paddrinfo *paddri; 2369 struct sctp_nets *net; 2370 2371 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2372 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2373 2374 net = NULL; 2375 if (stcb) { 2376 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2377 } else { 2378 /* 2379 * We increment here since 2380 * sctp_findassociation_ep_addr() wil do a 2381 * decrement if it finds the stcb as long as 2382 * the locked tcb (last argument) is NOT a 2383 * TCB.. aka NULL. 2384 */ 2385 SCTP_INP_INCR_REF(inp); 2386 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2387 if (stcb == NULL) { 2388 SCTP_INP_DECR_REF(inp); 2389 } 2390 } 2391 2392 if ((stcb) && (net)) { 2393 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 2394 /* It's unconfirmed */ 2395 paddri->spinfo_state = SCTP_UNCONFIRMED; 2396 } else if (net->dest_state & SCTP_ADDR_REACHABLE) { 2397 /* It's active */ 2398 paddri->spinfo_state = SCTP_ACTIVE; 2399 } else { 2400 /* It's inactive */ 2401 paddri->spinfo_state = SCTP_INACTIVE; 2402 } 2403 paddri->spinfo_cwnd = net->cwnd; 2404 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2405 paddri->spinfo_rto = net->RTO; 2406 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2407 SCTP_TCB_UNLOCK(stcb); 2408 } else { 2409 if (stcb) { 2410 SCTP_TCB_UNLOCK(stcb); 2411 } 2412 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2413 error = ENOENT; 2414 } 2415 *optsize = sizeof(struct sctp_paddrinfo); 2416 } 2417 break; 2418 case SCTP_PCB_STATUS: 2419 { 
2420 struct sctp_pcbinfo *spcb; 2421 2422 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2423 sctp_fill_pcbinfo(spcb); 2424 *optsize = sizeof(struct sctp_pcbinfo); 2425 } 2426 break; 2427 2428 case SCTP_STATUS: 2429 { 2430 struct sctp_nets *net; 2431 struct sctp_status *sstat; 2432 2433 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2434 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2435 2436 if (stcb == NULL) { 2437 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2438 error = EINVAL; 2439 break; 2440 } 2441 /* 2442 * I think passing the state is fine since 2443 * sctp_constants.h will be available to the user 2444 * land. 2445 */ 2446 sstat->sstat_state = stcb->asoc.state; 2447 sstat->sstat_assoc_id = sctp_get_associd(stcb); 2448 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2449 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2450 /* 2451 * We can't include chunks that have been passed to 2452 * the socket layer. Only things in queue. 2453 */ 2454 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2455 stcb->asoc.cnt_on_all_streams); 2456 2457 2458 sstat->sstat_instrms = stcb->asoc.streamincnt; 2459 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2460 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2461 memcpy(&sstat->sstat_primary.spinfo_address, 2462 &stcb->asoc.primary_destination->ro._l_addr, 2463 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2464 net = stcb->asoc.primary_destination; 2465 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2466 /* 2467 * Again the user can get info from sctp_constants.h 2468 * for what the state of the network is. 
2469 */ 2470 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 2471 /* It's unconfirmed */ 2472 sstat->sstat_primary.spinfo_state = SCTP_UNCONFIRMED; 2473 } else if (net->dest_state & SCTP_ADDR_REACHABLE) { 2474 /* It's active */ 2475 sstat->sstat_primary.spinfo_state = SCTP_ACTIVE; 2476 } else { 2477 /* It's inactive */ 2478 sstat->sstat_primary.spinfo_state = SCTP_INACTIVE; 2479 } 2480 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2481 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2482 sstat->sstat_primary.spinfo_rto = net->RTO; 2483 sstat->sstat_primary.spinfo_mtu = net->mtu; 2484 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2485 SCTP_TCB_UNLOCK(stcb); 2486 *optsize = sizeof(*sstat); 2487 } 2488 break; 2489 case SCTP_RTOINFO: 2490 { 2491 struct sctp_rtoinfo *srto; 2492 2493 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2494 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2495 2496 if (stcb) { 2497 srto->srto_initial = stcb->asoc.initial_rto; 2498 srto->srto_max = stcb->asoc.maxrto; 2499 srto->srto_min = stcb->asoc.minrto; 2500 SCTP_TCB_UNLOCK(stcb); 2501 } else { 2502 SCTP_INP_RLOCK(inp); 2503 srto->srto_initial = inp->sctp_ep.initial_rto; 2504 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2505 srto->srto_min = inp->sctp_ep.sctp_minrto; 2506 SCTP_INP_RUNLOCK(inp); 2507 } 2508 *optsize = sizeof(*srto); 2509 } 2510 break; 2511 case SCTP_TIMEOUTS: 2512 { 2513 struct sctp_timeouts *stimo; 2514 2515 SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize); 2516 SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id); 2517 2518 if (stcb) { 2519 stimo->stimo_init = stcb->asoc.timoinit; 2520 stimo->stimo_data = stcb->asoc.timodata; 2521 stimo->stimo_sack = stcb->asoc.timosack; 2522 stimo->stimo_shutdown = stcb->asoc.timoshutdown; 2523 stimo->stimo_heartbeat = stcb->asoc.timoheartbeat; 2524 stimo->stimo_cookie = stcb->asoc.timocookie; 2525 stimo->stimo_shutdownack = stcb->asoc.timoshutdownack; 2526 SCTP_TCB_UNLOCK(stcb); 2527 } else { 
2528 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2529 error = EINVAL; 2530 } 2531 *optsize = sizeof(*stimo); 2532 } 2533 break; 2534 case SCTP_ASSOCINFO: 2535 { 2536 struct sctp_assocparams *sasoc; 2537 uint32_t oldval; 2538 2539 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2540 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2541 2542 if (stcb) { 2543 oldval = sasoc->sasoc_cookie_life; 2544 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); 2545 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2546 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2547 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2548 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2549 SCTP_TCB_UNLOCK(stcb); 2550 } else { 2551 SCTP_INP_RLOCK(inp); 2552 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2553 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2554 sasoc->sasoc_number_peer_destinations = 0; 2555 sasoc->sasoc_peer_rwnd = 0; 2556 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2557 SCTP_INP_RUNLOCK(inp); 2558 } 2559 *optsize = sizeof(*sasoc); 2560 } 2561 break; 2562 case SCTP_DEFAULT_SEND_PARAM: 2563 { 2564 struct sctp_sndrcvinfo *s_info; 2565 2566 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2567 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2568 2569 if (stcb) { 2570 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2571 SCTP_TCB_UNLOCK(stcb); 2572 } else { 2573 SCTP_INP_RLOCK(inp); 2574 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2575 SCTP_INP_RUNLOCK(inp); 2576 } 2577 *optsize = sizeof(*s_info); 2578 } 2579 break; 2580 case SCTP_INITMSG: 2581 { 2582 struct sctp_initmsg *sinit; 2583 2584 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2585 SCTP_INP_RLOCK(inp); 2586 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2587 sinit->sinit_max_instreams = 
inp->sctp_ep.max_open_streams_intome; 2588 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2589 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2590 SCTP_INP_RUNLOCK(inp); 2591 *optsize = sizeof(*sinit); 2592 } 2593 break; 2594 case SCTP_PRIMARY_ADDR: 2595 /* we allow a "get" operation on this */ 2596 { 2597 struct sctp_setprim *ssp; 2598 2599 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2600 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2601 2602 if (stcb) { 2603 /* simply copy out the sockaddr_storage... */ 2604 int len; 2605 2606 len = *optsize; 2607 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2608 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2609 2610 memcpy(&ssp->ssp_addr, 2611 &stcb->asoc.primary_destination->ro._l_addr, 2612 len); 2613 SCTP_TCB_UNLOCK(stcb); 2614 } else { 2615 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2616 error = EINVAL; 2617 } 2618 *optsize = sizeof(*ssp); 2619 } 2620 break; 2621 2622 case SCTP_HMAC_IDENT: 2623 { 2624 struct sctp_hmacalgo *shmac; 2625 sctp_hmaclist_t *hmaclist; 2626 uint32_t size; 2627 int i; 2628 2629 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2630 2631 SCTP_INP_RLOCK(inp); 2632 hmaclist = inp->sctp_ep.local_hmacs; 2633 if (hmaclist == NULL) { 2634 /* no HMACs to return */ 2635 *optsize = sizeof(*shmac); 2636 SCTP_INP_RUNLOCK(inp); 2637 break; 2638 } 2639 /* is there room for all of the hmac ids? 
*/ 2640 size = sizeof(*shmac) + (hmaclist->num_algo * 2641 sizeof(shmac->shmac_idents[0])); 2642 if ((size_t)(*optsize) < size) { 2643 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2644 error = EINVAL; 2645 SCTP_INP_RUNLOCK(inp); 2646 break; 2647 } 2648 /* copy in the list */ 2649 shmac->shmac_number_of_idents = hmaclist->num_algo; 2650 for (i = 0; i < hmaclist->num_algo; i++) { 2651 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2652 } 2653 SCTP_INP_RUNLOCK(inp); 2654 *optsize = size; 2655 break; 2656 } 2657 case SCTP_AUTH_ACTIVE_KEY: 2658 { 2659 struct sctp_authkeyid *scact; 2660 2661 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2662 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2663 2664 if (stcb) { 2665 /* get the active key on the assoc */ 2666 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; 2667 SCTP_TCB_UNLOCK(stcb); 2668 } else { 2669 /* get the endpoint active key */ 2670 SCTP_INP_RLOCK(inp); 2671 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2672 SCTP_INP_RUNLOCK(inp); 2673 } 2674 *optsize = sizeof(*scact); 2675 break; 2676 } 2677 case SCTP_LOCAL_AUTH_CHUNKS: 2678 { 2679 struct sctp_authchunks *sac; 2680 sctp_auth_chklist_t *chklist = NULL; 2681 size_t size = 0; 2682 2683 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2684 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2685 2686 if (stcb) { 2687 /* get off the assoc */ 2688 chklist = stcb->asoc.local_auth_chunks; 2689 /* is there enough space? 
*/ 2690 size = sctp_auth_get_chklist_size(chklist); 2691 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2692 error = EINVAL; 2693 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2694 } else { 2695 /* copy in the chunks */ 2696 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2697 } 2698 SCTP_TCB_UNLOCK(stcb); 2699 } else { 2700 /* get off the endpoint */ 2701 SCTP_INP_RLOCK(inp); 2702 chklist = inp->sctp_ep.local_auth_chunks; 2703 /* is there enough space? */ 2704 size = sctp_auth_get_chklist_size(chklist); 2705 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2706 error = EINVAL; 2707 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2708 } else { 2709 /* copy in the chunks */ 2710 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2711 } 2712 SCTP_INP_RUNLOCK(inp); 2713 } 2714 *optsize = sizeof(struct sctp_authchunks) + size; 2715 break; 2716 } 2717 case SCTP_PEER_AUTH_CHUNKS: 2718 { 2719 struct sctp_authchunks *sac; 2720 sctp_auth_chklist_t *chklist = NULL; 2721 size_t size = 0; 2722 2723 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2724 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2725 2726 if (stcb) { 2727 /* get off the assoc */ 2728 chklist = stcb->asoc.peer_auth_chunks; 2729 /* is there enough space? 
*/ 2730 size = sctp_auth_get_chklist_size(chklist); 2731 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2732 error = EINVAL; 2733 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2734 } else { 2735 /* copy in the chunks */ 2736 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2737 } 2738 SCTP_TCB_UNLOCK(stcb); 2739 } else { 2740 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2741 error = ENOENT; 2742 } 2743 *optsize = sizeof(struct sctp_authchunks) + size; 2744 break; 2745 } 2746 2747 2748 default: 2749 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2750 error = ENOPROTOOPT; 2751 *optsize = 0; 2752 break; 2753 } /* end switch (sopt->sopt_name) */ 2754 return (error); 2755 } 2756 2757 static int 2758 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2759 void *p) 2760 { 2761 int error, set_opt; 2762 uint32_t *mopt; 2763 struct sctp_tcb *stcb = NULL; 2764 struct sctp_inpcb *inp = NULL; 2765 uint32_t vrf_id; 2766 2767 if (optval == NULL) { 2768 SCTP_PRINTF("optval is NULL\n"); 2769 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2770 return (EINVAL); 2771 } 2772 inp = (struct sctp_inpcb *)so->so_pcb; 2773 if (inp == 0) { 2774 SCTP_PRINTF("inp is NULL?\n"); 2775 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2776 return EINVAL; 2777 } 2778 vrf_id = inp->def_vrf_id; 2779 2780 error = 0; 2781 switch (optname) { 2782 case SCTP_NODELAY: 2783 case SCTP_AUTOCLOSE: 2784 case SCTP_AUTO_ASCONF: 2785 case SCTP_EXPLICIT_EOR: 2786 case SCTP_DISABLE_FRAGMENTS: 2787 case SCTP_USE_EXT_RCVINFO: 2788 case SCTP_I_WANT_MAPPED_V4_ADDR: 2789 /* copy in the option value */ 2790 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2791 set_opt = 0; 2792 if (error) 2793 break; 2794 switch (optname) { 2795 case SCTP_DISABLE_FRAGMENTS: 2796 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2797 break; 2798 case SCTP_AUTO_ASCONF: 2799 /* 2800 * NOTE: 
we don't really support this flag 2801 */ 2802 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2803 /* only valid for bound all sockets */ 2804 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2805 } else { 2806 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2807 return (EINVAL); 2808 } 2809 break; 2810 case SCTP_EXPLICIT_EOR: 2811 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2812 break; 2813 case SCTP_USE_EXT_RCVINFO: 2814 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2815 break; 2816 case SCTP_I_WANT_MAPPED_V4_ADDR: 2817 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2818 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2819 } else { 2820 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2821 return (EINVAL); 2822 } 2823 break; 2824 case SCTP_NODELAY: 2825 set_opt = SCTP_PCB_FLAGS_NODELAY; 2826 break; 2827 case SCTP_AUTOCLOSE: 2828 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2829 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2830 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2831 return (EINVAL); 2832 } 2833 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2834 /* 2835 * The value is in ticks. Note this does not effect 2836 * old associations, only new ones. 
2837 */ 2838 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2839 break; 2840 } 2841 SCTP_INP_WLOCK(inp); 2842 if (*mopt != 0) { 2843 sctp_feature_on(inp, set_opt); 2844 } else { 2845 sctp_feature_off(inp, set_opt); 2846 } 2847 SCTP_INP_WUNLOCK(inp); 2848 break; 2849 case SCTP_REUSE_PORT: 2850 { 2851 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2852 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 2853 /* Can't set it after we are bound */ 2854 error = EINVAL; 2855 break; 2856 } 2857 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2858 /* Can't do this for a 1-m socket */ 2859 error = EINVAL; 2860 break; 2861 } 2862 if (optval) 2863 sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 2864 else 2865 sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE); 2866 } 2867 break; 2868 case SCTP_PARTIAL_DELIVERY_POINT: 2869 { 2870 uint32_t *value; 2871 2872 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2873 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2874 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2875 error = EINVAL; 2876 break; 2877 } 2878 inp->partial_delivery_point = *value; 2879 } 2880 break; 2881 case SCTP_FRAGMENT_INTERLEAVE: 2882 /* not yet until we re-write sctp_recvmsg() */ 2883 { 2884 uint32_t *level; 2885 2886 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); 2887 if (*level == SCTP_FRAG_LEVEL_2) { 2888 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2889 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2890 } else if (*level == SCTP_FRAG_LEVEL_1) { 2891 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2892 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2893 } else if (*level == SCTP_FRAG_LEVEL_0) { 2894 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2895 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2896 2897 } else { 2898 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2899 error = EINVAL; 2900 } 2901 } 2902 break; 2903 case SCTP_CMT_ON_OFF: 2904 if 
(SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 2905 struct sctp_assoc_value *av; 2906 2907 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2908 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2909 if (stcb) { 2910 stcb->asoc.sctp_cmt_on_off = av->assoc_value; 2911 if (stcb->asoc.sctp_cmt_on_off > 2) { 2912 stcb->asoc.sctp_cmt_on_off = 2; 2913 } 2914 SCTP_TCB_UNLOCK(stcb); 2915 } else { 2916 SCTP_INP_WLOCK(inp); 2917 inp->sctp_cmt_on_off = av->assoc_value; 2918 if (inp->sctp_cmt_on_off > 2) { 2919 inp->sctp_cmt_on_off = 2; 2920 } 2921 SCTP_INP_WUNLOCK(inp); 2922 } 2923 } else { 2924 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2925 error = ENOPROTOOPT; 2926 } 2927 break; 2928 /* JRS - Set socket option for pluggable congestion control */ 2929 case SCTP_PLUGGABLE_CC: 2930 { 2931 struct sctp_assoc_value *av; 2932 2933 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2934 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2935 if (stcb) { 2936 switch (av->assoc_value) { 2937 case SCTP_CC_RFC2581: 2938 case SCTP_CC_HSTCP: 2939 case SCTP_CC_HTCP: 2940 stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value]; 2941 stcb->asoc.congestion_control_module = av->assoc_value; 2942 break; 2943 default: 2944 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2945 error = EINVAL; 2946 break; 2947 } 2948 SCTP_TCB_UNLOCK(stcb); 2949 } else { 2950 switch (av->assoc_value) { 2951 case SCTP_CC_RFC2581: 2952 case SCTP_CC_HSTCP: 2953 case SCTP_CC_HTCP: 2954 SCTP_INP_WLOCK(inp); 2955 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 2956 SCTP_INP_WUNLOCK(inp); 2957 break; 2958 default: 2959 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2960 error = EINVAL; 2961 break; 2962 } 2963 } 2964 } 2965 break; 2966 /* RS - Set socket option for pluggable stream scheduling */ 2967 case SCTP_PLUGGABLE_SS: 2968 { 2969 struct sctp_assoc_value *av; 2970 2971 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, 
optsize); 2972 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2973 if (stcb) { 2974 switch (av->assoc_value) { 2975 case SCTP_SS_DEFAULT: 2976 case SCTP_SS_ROUND_ROBIN: 2977 case SCTP_SS_ROUND_ROBIN_PACKET: 2978 case SCTP_SS_PRIORITY: 2979 case SCTP_SS_FAIR_BANDWITH: 2980 case SCTP_SS_FIRST_COME: 2981 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1); 2982 stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value]; 2983 stcb->asoc.stream_scheduling_module = av->assoc_value; 2984 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); 2985 break; 2986 default: 2987 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2988 error = EINVAL; 2989 break; 2990 } 2991 SCTP_TCB_UNLOCK(stcb); 2992 } else { 2993 switch (av->assoc_value) { 2994 case SCTP_SS_DEFAULT: 2995 case SCTP_SS_ROUND_ROBIN: 2996 case SCTP_SS_ROUND_ROBIN_PACKET: 2997 case SCTP_SS_PRIORITY: 2998 case SCTP_SS_FAIR_BANDWITH: 2999 case SCTP_SS_FIRST_COME: 3000 SCTP_INP_WLOCK(inp); 3001 inp->sctp_ep.sctp_default_ss_module = av->assoc_value; 3002 SCTP_INP_WUNLOCK(inp); 3003 break; 3004 default: 3005 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3006 error = EINVAL; 3007 break; 3008 } 3009 } 3010 } 3011 break; 3012 case SCTP_SS_VALUE: 3013 { 3014 struct sctp_stream_value *av; 3015 3016 SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, optsize); 3017 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3018 if (stcb) { 3019 if (stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], 3020 av->stream_value) < 0) { 3021 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3022 error = EINVAL; 3023 } 3024 SCTP_TCB_UNLOCK(stcb); 3025 } else { 3026 /* 3027 * Can't set stream value without 3028 * association 3029 */ 3030 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3031 error = EINVAL; 3032 } 3033 } 3034 break; 3035 case SCTP_CLR_STAT_LOG: 3036 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 
SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3037 error = EOPNOTSUPP; 3038 break; 3039 case SCTP_CONTEXT: 3040 { 3041 struct sctp_assoc_value *av; 3042 3043 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3044 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3045 3046 if (stcb) { 3047 stcb->asoc.context = av->assoc_value; 3048 SCTP_TCB_UNLOCK(stcb); 3049 } else { 3050 SCTP_INP_WLOCK(inp); 3051 inp->sctp_context = av->assoc_value; 3052 SCTP_INP_WUNLOCK(inp); 3053 } 3054 } 3055 break; 3056 case SCTP_VRF_ID: 3057 { 3058 uint32_t *default_vrfid; 3059 3060 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 3061 if (*default_vrfid > SCTP_MAX_VRF_ID) { 3062 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3063 error = EINVAL; 3064 break; 3065 } 3066 inp->def_vrf_id = *default_vrfid; 3067 break; 3068 } 3069 case SCTP_DEL_VRF_ID: 3070 { 3071 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3072 error = EOPNOTSUPP; 3073 break; 3074 } 3075 case SCTP_ADD_VRF_ID: 3076 { 3077 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3078 error = EOPNOTSUPP; 3079 break; 3080 } 3081 case SCTP_DELAYED_SACK: 3082 { 3083 struct sctp_sack_info *sack; 3084 3085 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 3086 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 3087 if (sack->sack_delay) { 3088 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 3089 sack->sack_delay = SCTP_MAX_SACK_DELAY; 3090 } 3091 if (stcb) { 3092 if (sack->sack_delay) { 3093 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3094 sack->sack_delay = TICKS_TO_MSEC(1); 3095 } 3096 stcb->asoc.delayed_ack = sack->sack_delay; 3097 } 3098 if (sack->sack_freq) { 3099 stcb->asoc.sack_freq = sack->sack_freq; 3100 } 3101 SCTP_TCB_UNLOCK(stcb); 3102 } else { 3103 SCTP_INP_WLOCK(inp); 3104 if (sack->sack_delay) { 3105 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3106 sack->sack_delay = TICKS_TO_MSEC(1); 3107 } 3108 
inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 3109 } 3110 if (sack->sack_freq) { 3111 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 3112 } 3113 SCTP_INP_WUNLOCK(inp); 3114 } 3115 break; 3116 } 3117 case SCTP_AUTH_CHUNK: 3118 { 3119 struct sctp_authchunk *sauth; 3120 3121 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 3122 3123 SCTP_INP_WLOCK(inp); 3124 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 3125 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3126 error = EINVAL; 3127 } 3128 SCTP_INP_WUNLOCK(inp); 3129 break; 3130 } 3131 case SCTP_AUTH_KEY: 3132 { 3133 struct sctp_authkey *sca; 3134 struct sctp_keyhead *shared_keys; 3135 sctp_sharedkey_t *shared_key; 3136 sctp_key_t *key = NULL; 3137 size_t size; 3138 3139 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 3140 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 3141 size = optsize - sizeof(*sca); 3142 3143 if (stcb) { 3144 /* set it on the assoc */ 3145 shared_keys = &stcb->asoc.shared_keys; 3146 /* clear the cached keys for this key id */ 3147 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 3148 /* 3149 * create the new shared key and 3150 * insert/replace it 3151 */ 3152 if (size > 0) { 3153 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3154 if (key == NULL) { 3155 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3156 error = ENOMEM; 3157 SCTP_TCB_UNLOCK(stcb); 3158 break; 3159 } 3160 } 3161 shared_key = sctp_alloc_sharedkey(); 3162 if (shared_key == NULL) { 3163 sctp_free_key(key); 3164 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3165 error = ENOMEM; 3166 SCTP_TCB_UNLOCK(stcb); 3167 break; 3168 } 3169 shared_key->key = key; 3170 shared_key->keyid = sca->sca_keynumber; 3171 error = sctp_insert_sharedkey(shared_keys, shared_key); 3172 SCTP_TCB_UNLOCK(stcb); 3173 } else { 3174 /* set it on the endpoint */ 3175 SCTP_INP_WLOCK(inp); 3176 
shared_keys = &inp->sctp_ep.shared_keys; 3177 /* 3178 * clear the cached keys on all assocs for 3179 * this key id 3180 */ 3181 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 3182 /* 3183 * create the new shared key and 3184 * insert/replace it 3185 */ 3186 if (size > 0) { 3187 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3188 if (key == NULL) { 3189 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3190 error = ENOMEM; 3191 SCTP_INP_WUNLOCK(inp); 3192 break; 3193 } 3194 } 3195 shared_key = sctp_alloc_sharedkey(); 3196 if (shared_key == NULL) { 3197 sctp_free_key(key); 3198 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3199 error = ENOMEM; 3200 SCTP_INP_WUNLOCK(inp); 3201 break; 3202 } 3203 shared_key->key = key; 3204 shared_key->keyid = sca->sca_keynumber; 3205 error = sctp_insert_sharedkey(shared_keys, shared_key); 3206 SCTP_INP_WUNLOCK(inp); 3207 } 3208 break; 3209 } 3210 case SCTP_HMAC_IDENT: 3211 { 3212 struct sctp_hmacalgo *shmac; 3213 sctp_hmaclist_t *hmaclist; 3214 uint16_t hmacid; 3215 uint32_t i; 3216 3217 size_t found; 3218 3219 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3220 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) { 3221 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3222 error = EINVAL; 3223 break; 3224 } 3225 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents); 3226 if (hmaclist == NULL) { 3227 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3228 error = ENOMEM; 3229 break; 3230 } 3231 for (i = 0; i < shmac->shmac_number_of_idents; i++) { 3232 hmacid = shmac->shmac_idents[i]; 3233 if (sctp_auth_add_hmacid(hmaclist, hmacid)) { 3234 /* invalid HMACs were found */ ; 3235 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3236 error = EINVAL; 3237 sctp_free_hmaclist(hmaclist); 3238 goto sctp_set_hmac_done; 3239 } 3240 } 3241 found = 0; 3242 for (i = 0; i < 
hmaclist->num_algo; i++) { 3243 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 3244 /* already in list */ 3245 found = 1; 3246 } 3247 } 3248 if (!found) { 3249 sctp_free_hmaclist(hmaclist); 3250 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3251 error = EINVAL; 3252 break; 3253 } 3254 /* set it on the endpoint */ 3255 SCTP_INP_WLOCK(inp); 3256 if (inp->sctp_ep.local_hmacs) 3257 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3258 inp->sctp_ep.local_hmacs = hmaclist; 3259 SCTP_INP_WUNLOCK(inp); 3260 sctp_set_hmac_done: 3261 break; 3262 } 3263 case SCTP_AUTH_ACTIVE_KEY: 3264 { 3265 struct sctp_authkeyid *scact; 3266 3267 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, 3268 optsize); 3269 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3270 3271 /* set the active key on the right place */ 3272 if (stcb) { 3273 /* set the active key on the assoc */ 3274 if (sctp_auth_setactivekey(stcb, 3275 scact->scact_keynumber)) { 3276 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3277 SCTP_FROM_SCTP_USRREQ, 3278 EINVAL); 3279 error = EINVAL; 3280 } 3281 SCTP_TCB_UNLOCK(stcb); 3282 } else { 3283 /* set the active key on the endpoint */ 3284 SCTP_INP_WLOCK(inp); 3285 if (sctp_auth_setactivekey_ep(inp, 3286 scact->scact_keynumber)) { 3287 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3288 SCTP_FROM_SCTP_USRREQ, 3289 EINVAL); 3290 error = EINVAL; 3291 } 3292 SCTP_INP_WUNLOCK(inp); 3293 } 3294 break; 3295 } 3296 case SCTP_AUTH_DELETE_KEY: 3297 { 3298 struct sctp_authkeyid *scdel; 3299 3300 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, 3301 optsize); 3302 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3303 3304 /* delete the key from the right place */ 3305 if (stcb) { 3306 if (sctp_delete_sharedkey(stcb, 3307 scdel->scact_keynumber)) { 3308 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3309 SCTP_FROM_SCTP_USRREQ, 3310 EINVAL); 3311 error = EINVAL; 3312 } 3313 SCTP_TCB_UNLOCK(stcb); 3314 } else { 3315 SCTP_INP_WLOCK(inp); 3316 if (sctp_delete_sharedkey_ep(inp, 
3317 scdel->scact_keynumber)) { 3318 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3319 SCTP_FROM_SCTP_USRREQ, 3320 EINVAL); 3321 error = EINVAL; 3322 } 3323 SCTP_INP_WUNLOCK(inp); 3324 } 3325 break; 3326 } 3327 case SCTP_AUTH_DEACTIVATE_KEY: 3328 { 3329 struct sctp_authkeyid *keyid; 3330 3331 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, 3332 optsize); 3333 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); 3334 3335 /* deactivate the key from the right place */ 3336 if (stcb) { 3337 if (sctp_deact_sharedkey(stcb, 3338 keyid->scact_keynumber)) { 3339 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3340 SCTP_FROM_SCTP_USRREQ, 3341 EINVAL); 3342 error = EINVAL; 3343 } 3344 SCTP_TCB_UNLOCK(stcb); 3345 } else { 3346 SCTP_INP_WLOCK(inp); 3347 if (sctp_deact_sharedkey_ep(inp, 3348 keyid->scact_keynumber)) { 3349 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3350 SCTP_FROM_SCTP_USRREQ, 3351 EINVAL); 3352 error = EINVAL; 3353 } 3354 SCTP_INP_WUNLOCK(inp); 3355 } 3356 break; 3357 } 3358 3359 case SCTP_RESET_STREAMS: 3360 { 3361 struct sctp_stream_reset *strrst; 3362 uint8_t send_in = 0, send_tsn = 0, send_out = 0, 3363 addstream = 0; 3364 uint16_t addstrmcnt = 0; 3365 int i; 3366 3367 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3368 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3369 3370 if (stcb == NULL) { 3371 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3372 error = ENOENT; 3373 break; 3374 } 3375 if (stcb->asoc.peer_supports_strreset == 0) { 3376 /* 3377 * Peer does not support it, we return 3378 * protocol not supported since this is true 3379 * for this feature and this peer, not the 3380 * socket request in general. 
3381 */ 3382 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3383 error = EPROTONOSUPPORT; 3384 SCTP_TCB_UNLOCK(stcb); 3385 break; 3386 } 3387 if (stcb->asoc.stream_reset_outstanding) { 3388 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3389 error = EALREADY; 3390 SCTP_TCB_UNLOCK(stcb); 3391 break; 3392 } 3393 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3394 send_in = 1; 3395 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3396 send_out = 1; 3397 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3398 send_in = 1; 3399 send_out = 1; 3400 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3401 send_tsn = 1; 3402 } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) { 3403 if (send_tsn || 3404 send_in || 3405 send_out) { 3406 /* We can't do that and add streams */ 3407 error = EINVAL; 3408 goto skip_stuff; 3409 } 3410 if (stcb->asoc.stream_reset_outstanding) { 3411 error = EBUSY; 3412 goto skip_stuff; 3413 } 3414 addstream = 1; 3415 /* We allocate here */ 3416 addstrmcnt = strrst->strrst_num_streams; 3417 if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) { 3418 /* You can't have more than 64k */ 3419 error = EINVAL; 3420 goto skip_stuff; 3421 } 3422 if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) { 3423 /* Need to allocate more */ 3424 struct sctp_stream_out *oldstream; 3425 struct sctp_stream_queue_pending *sp, 3426 *nsp; 3427 3428 oldstream = stcb->asoc.strmout; 3429 /* get some more */ 3430 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, 3431 ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)), 3432 SCTP_M_STRMO); 3433 if (stcb->asoc.strmout == NULL) { 3434 stcb->asoc.strmout = oldstream; 3435 error = ENOMEM; 3436 goto skip_stuff; 3437 } 3438 /* 3439 * Ok now we proceed with copying 3440 * the old out stuff and 3441 * initializing the new stuff. 
3442 */ 3443 SCTP_TCB_SEND_LOCK(stcb); 3444 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1); 3445 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3446 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3447 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent; 3448 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; 3449 stcb->asoc.strmout[i].stream_no = i; 3450 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]); 3451 /* 3452 * now anything on those 3453 * queues? 3454 */ 3455 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) { 3456 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); 3457 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); 3458 } 3459 /* 3460 * Now move assoc pointers 3461 * too 3462 */ 3463 if (stcb->asoc.last_out_stream == &oldstream[i]) { 3464 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i]; 3465 } 3466 if (stcb->asoc.locked_on_sending == &oldstream[i]) { 3467 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i]; 3468 } 3469 } 3470 /* now the new streams */ 3471 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); 3472 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) { 3473 stcb->asoc.strmout[i].next_sequence_sent = 0x0; 3474 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3475 stcb->asoc.strmout[i].stream_no = i; 3476 stcb->asoc.strmout[i].last_msg_incomplete = 0; 3477 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL); 3478 } 3479 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt; 3480 SCTP_FREE(oldstream, SCTP_M_STRMO); 3481 } 3482 SCTP_TCB_SEND_UNLOCK(stcb); 3483 goto skip_stuff; 3484 } else { 3485 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3486 error = EINVAL; 3487 SCTP_TCB_UNLOCK(stcb); 3488 break; 3489 } 3490 for (i = 0; i < strrst->strrst_num_streams; i++) { 3491 if ((send_in) && 3492 3493 (strrst->strrst_list[i] > 
stcb->asoc.streamincnt)) { 3494 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3495 error = EINVAL; 3496 goto get_out; 3497 } 3498 if ((send_out) && 3499 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3500 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3501 error = EINVAL; 3502 goto get_out; 3503 } 3504 } 3505 skip_stuff: 3506 if (error) { 3507 get_out: 3508 SCTP_TCB_UNLOCK(stcb); 3509 break; 3510 } 3511 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3512 strrst->strrst_list, 3513 send_out, (stcb->asoc.str_reset_seq_in - 3), 3514 send_in, send_tsn, addstream, addstrmcnt); 3515 3516 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3517 SCTP_TCB_UNLOCK(stcb); 3518 } 3519 break; 3520 3521 case SCTP_CONNECT_X: 3522 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3523 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3524 error = EINVAL; 3525 break; 3526 } 3527 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3528 break; 3529 3530 case SCTP_CONNECT_X_DELAYED: 3531 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3532 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3533 error = EINVAL; 3534 break; 3535 } 3536 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3537 break; 3538 3539 case SCTP_CONNECT_X_COMPLETE: 3540 { 3541 struct sockaddr *sa; 3542 struct sctp_nets *net; 3543 3544 /* FIXME MT: check correct? 
*/ 3545 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3546 3547 /* find tcb */ 3548 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3549 SCTP_INP_RLOCK(inp); 3550 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3551 if (stcb) { 3552 SCTP_TCB_LOCK(stcb); 3553 net = sctp_findnet(stcb, sa); 3554 } 3555 SCTP_INP_RUNLOCK(inp); 3556 } else { 3557 /* 3558 * We increment here since 3559 * sctp_findassociation_ep_addr() wil do a 3560 * decrement if it finds the stcb as long as 3561 * the locked tcb (last argument) is NOT a 3562 * TCB.. aka NULL. 3563 */ 3564 SCTP_INP_INCR_REF(inp); 3565 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3566 if (stcb == NULL) { 3567 SCTP_INP_DECR_REF(inp); 3568 } 3569 } 3570 3571 if (stcb == NULL) { 3572 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3573 error = ENOENT; 3574 break; 3575 } 3576 if (stcb->asoc.delayed_connection == 1) { 3577 stcb->asoc.delayed_connection = 0; 3578 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3579 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3580 stcb->asoc.primary_destination, 3581 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3582 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3583 } else { 3584 /* 3585 * already expired or did not use delayed 3586 * connectx 3587 */ 3588 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3589 error = EALREADY; 3590 } 3591 SCTP_TCB_UNLOCK(stcb); 3592 } 3593 break; 3594 case SCTP_MAX_BURST: 3595 { 3596 struct sctp_assoc_value *av; 3597 3598 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3599 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3600 3601 if (stcb) { 3602 stcb->asoc.max_burst = av->assoc_value; 3603 SCTP_TCB_UNLOCK(stcb); 3604 } else { 3605 SCTP_INP_WLOCK(inp); 3606 inp->sctp_ep.max_burst = av->assoc_value; 3607 SCTP_INP_WUNLOCK(inp); 3608 } 3609 } 3610 break; 3611 case SCTP_MAXSEG: 3612 { 3613 struct sctp_assoc_value *av; 3614 int ovh; 3615 3616 SCTP_CHECK_AND_CAST(av, optval, struct 
sctp_assoc_value, optsize); 3617 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3618 3619 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3620 ovh = SCTP_MED_OVERHEAD; 3621 } else { 3622 ovh = SCTP_MED_V4_OVERHEAD; 3623 } 3624 if (stcb) { 3625 if (av->assoc_value) { 3626 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3627 } else { 3628 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3629 } 3630 SCTP_TCB_UNLOCK(stcb); 3631 } else { 3632 SCTP_INP_WLOCK(inp); 3633 /* 3634 * FIXME MT: I think this is not in tune 3635 * with the API ID 3636 */ 3637 if (av->assoc_value) { 3638 inp->sctp_frag_point = (av->assoc_value + ovh); 3639 } else { 3640 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3641 } 3642 SCTP_INP_WUNLOCK(inp); 3643 } 3644 } 3645 break; 3646 case SCTP_EVENTS: 3647 { 3648 struct sctp_event_subscribe *events; 3649 3650 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3651 3652 SCTP_INP_WLOCK(inp); 3653 if (events->sctp_data_io_event) { 3654 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3655 } else { 3656 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3657 } 3658 3659 if (events->sctp_association_event) { 3660 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3661 } else { 3662 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3663 } 3664 3665 if (events->sctp_address_event) { 3666 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3667 } else { 3668 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3669 } 3670 3671 if (events->sctp_send_failure_event) { 3672 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3673 } else { 3674 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3675 } 3676 3677 if (events->sctp_peer_error_event) { 3678 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3679 } else { 3680 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3681 } 3682 3683 if (events->sctp_shutdown_event) { 3684 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3685 } else { 3686 
sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3687 } 3688 3689 if (events->sctp_partial_delivery_event) { 3690 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3691 } else { 3692 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3693 } 3694 3695 if (events->sctp_adaptation_layer_event) { 3696 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3697 } else { 3698 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3699 } 3700 3701 if (events->sctp_authentication_event) { 3702 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3703 } else { 3704 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3705 } 3706 3707 if (events->sctp_sender_dry_event) { 3708 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); 3709 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3710 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3711 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3712 if (stcb) { 3713 SCTP_TCB_LOCK(stcb); 3714 } 3715 if (stcb && 3716 TAILQ_EMPTY(&stcb->asoc.send_queue) && 3717 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 3718 (stcb->asoc.stream_queue_cnt == 0)) { 3719 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); 3720 } 3721 if (stcb) { 3722 SCTP_TCB_UNLOCK(stcb); 3723 } 3724 } 3725 } else { 3726 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); 3727 } 3728 3729 if (events->sctp_stream_reset_event) { 3730 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3731 } else { 3732 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3733 } 3734 SCTP_INP_WUNLOCK(inp); 3735 } 3736 break; 3737 3738 case SCTP_ADAPTATION_LAYER: 3739 { 3740 struct sctp_setadaptation *adap_bits; 3741 3742 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3743 SCTP_INP_WLOCK(inp); 3744 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3745 SCTP_INP_WUNLOCK(inp); 3746 } 3747 break; 3748 #ifdef SCTP_DEBUG 3749 case SCTP_SET_INITIAL_DBG_SEQ: 3750 { 3751 uint32_t *vvv; 3752 3753 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3754 
SCTP_INP_WLOCK(inp); 3755 inp->sctp_ep.initial_sequence_debug = *vvv; 3756 SCTP_INP_WUNLOCK(inp); 3757 } 3758 break; 3759 #endif 3760 case SCTP_DEFAULT_SEND_PARAM: 3761 { 3762 struct sctp_sndrcvinfo *s_info; 3763 3764 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3765 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3766 3767 if (stcb) { 3768 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3769 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3770 } else { 3771 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3772 error = EINVAL; 3773 } 3774 SCTP_TCB_UNLOCK(stcb); 3775 } else { 3776 SCTP_INP_WLOCK(inp); 3777 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3778 SCTP_INP_WUNLOCK(inp); 3779 } 3780 } 3781 break; 3782 case SCTP_PEER_ADDR_PARAMS: 3783 /* Applys to the specific association */ 3784 { 3785 struct sctp_paddrparams *paddrp; 3786 struct sctp_nets *net; 3787 3788 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3789 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3790 net = NULL; 3791 if (stcb) { 3792 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3793 } else { 3794 /* 3795 * We increment here since 3796 * sctp_findassociation_ep_addr() wil do a 3797 * decrement if it finds the stcb as long as 3798 * the locked tcb (last argument) is NOT a 3799 * TCB.. aka NULL. 
3800 */ 3801 SCTP_INP_INCR_REF(inp); 3802 stcb = sctp_findassociation_ep_addr(&inp, 3803 (struct sockaddr *)&paddrp->spp_address, 3804 &net, NULL, NULL); 3805 if (stcb == NULL) { 3806 SCTP_INP_DECR_REF(inp); 3807 } 3808 } 3809 if (stcb && (net == NULL)) { 3810 struct sockaddr *sa; 3811 3812 sa = (struct sockaddr *)&paddrp->spp_address; 3813 if (sa->sa_family == AF_INET) { 3814 struct sockaddr_in *sin; 3815 3816 sin = (struct sockaddr_in *)sa; 3817 if (sin->sin_addr.s_addr) { 3818 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3819 SCTP_TCB_UNLOCK(stcb); 3820 error = EINVAL; 3821 break; 3822 } 3823 } else if (sa->sa_family == AF_INET6) { 3824 struct sockaddr_in6 *sin6; 3825 3826 sin6 = (struct sockaddr_in6 *)sa; 3827 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3828 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3829 SCTP_TCB_UNLOCK(stcb); 3830 error = EINVAL; 3831 break; 3832 } 3833 } else { 3834 error = EAFNOSUPPORT; 3835 SCTP_TCB_UNLOCK(stcb); 3836 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3837 break; 3838 } 3839 } 3840 /* sanity checks */ 3841 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3842 if (stcb) 3843 SCTP_TCB_UNLOCK(stcb); 3844 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3845 return (EINVAL); 3846 } 3847 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3848 if (stcb) 3849 SCTP_TCB_UNLOCK(stcb); 3850 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3851 return (EINVAL); 3852 } 3853 if (stcb) { 3854 /************************TCB SPECIFIC SET ******************/ 3855 /* 3856 * do we change the timer for HB, we run 3857 * only one? 
3858 */ 3859 int ovh = 0; 3860 3861 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3862 ovh = SCTP_MED_OVERHEAD; 3863 } else { 3864 ovh = SCTP_MED_V4_OVERHEAD; 3865 } 3866 3867 if (paddrp->spp_hbinterval) 3868 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3869 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3870 stcb->asoc.heart_beat_delay = 0; 3871 3872 /* network sets ? */ 3873 if (net) { 3874 /************************NET SPECIFIC SET ******************/ 3875 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3876 /* on demand HB */ 3877 if (sctp_send_hb(stcb, 1, net) < 0) { 3878 /* asoc destroyed */ 3879 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3880 error = EINVAL; 3881 break; 3882 } 3883 } 3884 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3885 net->dest_state |= SCTP_ADDR_NOHB; 3886 } 3887 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3888 net->dest_state &= ~SCTP_ADDR_NOHB; 3889 } 3890 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3891 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3892 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3893 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3894 } 3895 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3896 net->mtu = paddrp->spp_pathmtu + ovh; 3897 if (net->mtu < stcb->asoc.smallest_mtu) { 3898 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3899 } 3900 } 3901 } 3902 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3903 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3904 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3905 } 3906 } 3907 if (paddrp->spp_pathmaxrxt) 3908 net->failure_threshold = paddrp->spp_pathmaxrxt; 3909 #ifdef INET 3910 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3911 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3912 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3913 } 3914 } 3915 #endif 3916 #ifdef INET6 3917 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3918 if 
(net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3919 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3920 } 3921 } 3922 #endif 3923 } else { 3924 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3925 if (paddrp->spp_pathmaxrxt) 3926 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3927 3928 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3929 /* Turn back on the timer */ 3930 stcb->asoc.hb_is_disabled = 0; 3931 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3932 } 3933 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3934 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3935 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3936 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3937 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3938 } 3939 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3940 net->mtu = paddrp->spp_pathmtu + ovh; 3941 if (net->mtu < stcb->asoc.smallest_mtu) { 3942 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3943 } 3944 } 3945 } 3946 } 3947 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3948 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3949 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3950 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3951 } 3952 } 3953 } 3954 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3955 int cnt_of_unconf = 0; 3956 struct sctp_nets *lnet; 3957 3958 stcb->asoc.hb_is_disabled = 1; 3959 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3960 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3961 cnt_of_unconf++; 3962 } 3963 } 3964 /* 3965 * stop the timer ONLY if we 3966 * have no unconfirmed 3967 * addresses 3968 */ 3969 if (cnt_of_unconf == 0) { 3970 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3971 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 3972 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3973 } 3974 } 3975 } 3976 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3977 /* start up the timer. 
*/ 3978 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3979 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3980 } 3981 } 3982 #ifdef INET 3983 if (paddrp->spp_flags & SPP_IPV4_TOS) 3984 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3985 #endif 3986 #ifdef INET6 3987 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3988 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3989 #endif 3990 3991 } 3992 SCTP_TCB_UNLOCK(stcb); 3993 } else { 3994 /************************NO TCB, SET TO default stuff ******************/ 3995 SCTP_INP_WLOCK(inp); 3996 /* 3997 * For the TOS/FLOWLABEL stuff you set it 3998 * with the options on the socket 3999 */ 4000 if (paddrp->spp_pathmaxrxt) { 4001 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 4002 } 4003 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 4004 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 4005 else if (paddrp->spp_hbinterval) { 4006 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 4007 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 4008 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 4009 } 4010 if (paddrp->spp_flags & SPP_HB_ENABLE) { 4011 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 4012 4013 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 4014 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 4015 } 4016 SCTP_INP_WUNLOCK(inp); 4017 } 4018 } 4019 break; 4020 case SCTP_RTOINFO: 4021 { 4022 struct sctp_rtoinfo *srto; 4023 uint32_t new_init, new_min, new_max; 4024 4025 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 4026 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 4027 4028 if (stcb) { 4029 if (srto->srto_initial) 4030 new_init = srto->srto_initial; 4031 else 4032 new_init = stcb->asoc.initial_rto; 4033 if (srto->srto_max) 4034 new_max = srto->srto_max; 4035 else 4036 new_max = stcb->asoc.maxrto; 4037 if (srto->srto_min) 4038 new_min = srto->srto_min; 4039 else 4040 new_min = 
stcb->asoc.minrto; 4041 if ((new_min <= new_init) && (new_init <= new_max)) { 4042 stcb->asoc.initial_rto = new_init; 4043 stcb->asoc.maxrto = new_max; 4044 stcb->asoc.minrto = new_min; 4045 } else { 4046 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4047 error = EINVAL; 4048 } 4049 SCTP_TCB_UNLOCK(stcb); 4050 } else { 4051 SCTP_INP_WLOCK(inp); 4052 if (srto->srto_initial) 4053 new_init = srto->srto_initial; 4054 else 4055 new_init = inp->sctp_ep.initial_rto; 4056 if (srto->srto_max) 4057 new_max = srto->srto_max; 4058 else 4059 new_max = inp->sctp_ep.sctp_maxrto; 4060 if (srto->srto_min) 4061 new_min = srto->srto_min; 4062 else 4063 new_min = inp->sctp_ep.sctp_minrto; 4064 if ((new_min <= new_init) && (new_init <= new_max)) { 4065 inp->sctp_ep.initial_rto = new_init; 4066 inp->sctp_ep.sctp_maxrto = new_max; 4067 inp->sctp_ep.sctp_minrto = new_min; 4068 } else { 4069 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4070 error = EINVAL; 4071 } 4072 SCTP_INP_WUNLOCK(inp); 4073 } 4074 } 4075 break; 4076 case SCTP_ASSOCINFO: 4077 { 4078 struct sctp_assocparams *sasoc; 4079 4080 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 4081 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 4082 if (sasoc->sasoc_cookie_life) { 4083 /* boundary check the cookie life */ 4084 if (sasoc->sasoc_cookie_life < 1000) 4085 sasoc->sasoc_cookie_life = 1000; 4086 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 4087 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; 4088 } 4089 } 4090 if (stcb) { 4091 if (sasoc->sasoc_asocmaxrxt) 4092 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 4093 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 4094 sasoc->sasoc_peer_rwnd = 0; 4095 sasoc->sasoc_local_rwnd = 0; 4096 if (sasoc->sasoc_cookie_life) { 4097 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4098 } 4099 SCTP_TCB_UNLOCK(stcb); 4100 } else { 4101 SCTP_INP_WLOCK(inp); 4102 if 
(sasoc->sasoc_asocmaxrxt) 4103 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 4104 sasoc->sasoc_number_peer_destinations = 0; 4105 sasoc->sasoc_peer_rwnd = 0; 4106 sasoc->sasoc_local_rwnd = 0; 4107 if (sasoc->sasoc_cookie_life) { 4108 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4109 } 4110 SCTP_INP_WUNLOCK(inp); 4111 } 4112 } 4113 break; 4114 case SCTP_INITMSG: 4115 { 4116 struct sctp_initmsg *sinit; 4117 4118 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 4119 SCTP_INP_WLOCK(inp); 4120 if (sinit->sinit_num_ostreams) 4121 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 4122 4123 if (sinit->sinit_max_instreams) 4124 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 4125 4126 if (sinit->sinit_max_attempts) 4127 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 4128 4129 if (sinit->sinit_max_init_timeo) 4130 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 4131 SCTP_INP_WUNLOCK(inp); 4132 } 4133 break; 4134 case SCTP_PRIMARY_ADDR: 4135 { 4136 struct sctp_setprim *spa; 4137 struct sctp_nets *net, *lnet; 4138 4139 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 4140 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 4141 4142 net = NULL; 4143 if (stcb) { 4144 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 4145 } else { 4146 /* 4147 * We increment here since 4148 * sctp_findassociation_ep_addr() wil do a 4149 * decrement if it finds the stcb as long as 4150 * the locked tcb (last argument) is NOT a 4151 * TCB.. aka NULL. 
4152 */ 4153 SCTP_INP_INCR_REF(inp); 4154 stcb = sctp_findassociation_ep_addr(&inp, 4155 (struct sockaddr *)&spa->ssp_addr, 4156 &net, NULL, NULL); 4157 if (stcb == NULL) { 4158 SCTP_INP_DECR_REF(inp); 4159 } 4160 } 4161 4162 if ((stcb) && (net)) { 4163 if ((net != stcb->asoc.primary_destination) && 4164 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 4165 /* Ok we need to set it */ 4166 lnet = stcb->asoc.primary_destination; 4167 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 4168 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 4169 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 4170 } 4171 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 4172 } 4173 } 4174 } else { 4175 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4176 error = EINVAL; 4177 } 4178 if (stcb) { 4179 SCTP_TCB_UNLOCK(stcb); 4180 } 4181 } 4182 break; 4183 case SCTP_SET_DYNAMIC_PRIMARY: 4184 { 4185 union sctp_sockstore *ss; 4186 4187 error = priv_check(curthread, 4188 PRIV_NETINET_RESERVEDPORT); 4189 if (error) 4190 break; 4191 4192 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 4193 /* SUPER USER CHECK? 
*/ 4194 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 4195 } 4196 break; 4197 case SCTP_SET_PEER_PRIMARY_ADDR: 4198 { 4199 struct sctp_setpeerprim *sspp; 4200 4201 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 4202 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 4203 if (stcb != NULL) { 4204 struct sctp_ifa *ifa; 4205 4206 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 4207 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 4208 if (ifa == NULL) { 4209 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4210 error = EINVAL; 4211 goto out_of_it; 4212 } 4213 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4214 /* 4215 * Must validate the ifa found is in 4216 * our ep 4217 */ 4218 struct sctp_laddr *laddr; 4219 int found = 0; 4220 4221 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4222 if (laddr->ifa == NULL) { 4223 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 4224 __FUNCTION__); 4225 continue; 4226 } 4227 if (laddr->ifa == ifa) { 4228 found = 1; 4229 break; 4230 } 4231 } 4232 if (!found) { 4233 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4234 error = EINVAL; 4235 goto out_of_it; 4236 } 4237 } 4238 if (sctp_set_primary_ip_address_sa(stcb, 4239 (struct sockaddr *)&sspp->sspp_addr) != 0) { 4240 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4241 error = EINVAL; 4242 } 4243 out_of_it: 4244 SCTP_TCB_UNLOCK(stcb); 4245 } else { 4246 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4247 error = EINVAL; 4248 } 4249 4250 } 4251 break; 4252 case SCTP_BINDX_ADD_ADDR: 4253 { 4254 struct sctp_getaddresses *addrs; 4255 size_t sz; 4256 struct thread *td; 4257 4258 td = (struct thread *)p; 4259 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 4260 optsize); 4261 if (addrs->addr->sa_family == AF_INET) { 4262 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4263 if (optsize < sz) { 4264 
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
					break;
				}
				/* In a jail, the address must be local to the prison. */
				if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
#ifdef INET6
			} else if (addrs->addr->sa_family == AF_INET6) {
				/* Caller must supply at least an IPv6-sized sockaddr. */
				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
				if (optsize < sz) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
					break;
				}
				if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
				    (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
#endif
			} else {
				error = EAFNOSUPPORT;
				break;
			}
			/* Add the address to the endpoint (sctp_bindx() ADD). */
			sctp_bindx_add_address(so, inp, addrs->addr,
			    addrs->sget_assoc_id, vrf_id,
			    &error, p);
		}
		break;
	case SCTP_BINDX_REM_ADDR:
		{
			struct sctp_getaddresses *addrs;
			size_t sz;
			struct thread *td;

			td = (struct thread *)p;

			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
			if (addrs->addr->sa_family == AF_INET) {
				/* Caller must supply at least an IPv4-sized sockaddr. */
				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
				if (optsize < sz) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
					break;
				}
				/* In a jail, the address must be local to the prison. */
				if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
#ifdef INET6
			} else if (addrs->addr->sa_family == AF_INET6) {
				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
				if (optsize < sz) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
					break;
				}
				if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
				    (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
#endif
			} else {
				error = EAFNOSUPPORT;
				break;
			}
			/* Remove the address from the endpoint (sctp_bindx() REM). */
			sctp_bindx_delete_address(so, inp, addrs->addr,
			    addrs->sget_assoc_id, vrf_id,
			    &error);
		}
		break;
	default:
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
		error = ENOPROTOOPT;
		break;
	}			/* end switch (opt) */
	return (error);
}

/*
 * Socket-option entry point for SCTP sockets.  Options at a level other
 * than IPPROTO_SCTP are passed through to IP/IPv6.  For SCTP-level options
 * the user buffer is copied into a temporary kernel buffer and handed to
 * sctp_setopt()/sctp_getopt(); on a successful get the (possibly shortened)
 * result is copied back out to the user.
 *
 * Returns 0 on success or an errno value.  Note: an endpoint with no PCB
 * logs EINVAL but returns ECONNRESET (mirrors TCP's behavior).
 */
int
sctp_ctloutput(struct socket *so, struct sockopt *sopt)
{
	void *optval = NULL;
	size_t optsize = 0;
	struct sctp_inpcb *inp;
	void *p;
	int error = 0;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		/* I made the same as TCP since we are not setup? */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	if (sopt->sopt_level != IPPROTO_SCTP) {
		/* wrong proto level... send back up to IP */
#ifdef INET6
		if (INP_CHECK_SOCKAF(so, AF_INET6))
			error = ip6_ctloutput(so, sopt);
		else
#endif				/* INET6 */
			error = ip_ctloutput(so, sopt);
		return (error);
	}
	optsize = sopt->sopt_valsize;
	if (optsize) {
		/* Stage the user's option buffer in kernel memory. */
		SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
		if (optval == NULL) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
			return (ENOBUFS);
		}
		error = sooptcopyin(sopt, optval, optsize, optsize);
		if (error) {
			SCTP_FREE(optval, SCTP_M_SOCKOPT);
			goto out;
		}
	}
	p = (void *)sopt->sopt_td;
	if (sopt->sopt_dir == SOPT_SET) {
		error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
	} else if (sopt->sopt_dir == SOPT_GET) {
		/* sctp_getopt() may shrink optsize to the actual length. */
		error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
	} else {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		error = EINVAL;
	}
	if ((error == 0) && (optval != NULL)) {
		/* Get succeeded: copy the result back to the user. */
		error = sooptcopyout(sopt, optval, optsize);
		SCTP_FREE(optval, SCTP_M_SOCKOPT);
	} else if (optval != NULL) {
		SCTP_FREE(optval, SCTP_M_SOCKOPT);
	}
out:
	return (error);
}


/*
 * connect(2) handler for one-to-one (TCP-model) SCTP sockets: validates the
 * destination address (family, length, jail reachability), binds an
 * ephemeral port if the endpoint is unbound, allocates the association and
 * sends the INIT chunk.  Holds the association-create lock for the
 * duration to serialize against concurrent association setup.
 */
static int
sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	int error = 0;
	int create_lock_on = 0;	/* set once ASOC_CREATE_LOCK is held */
	uint32_t vrf_id;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb = NULL;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		/* I made the same as TCP since we are not setup?
		 */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	if (addr == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
#ifdef INET6
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6p;

		if (addr->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			return (EINVAL);
		}
		sin6p = (struct sockaddr_in6 *)addr;
		/* In a jail, the remote address must be visible to the prison. */
		if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
			return (error);
		}
	} else
#endif
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *sinp;

		if (addr->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			return (EINVAL);
		}
		sinp = (struct sockaddr_in *)addr;
		/* In a jail, the remote address must be visible to the prison. */
		if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
			return (error);
		}
	} else {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
		return (EAFNOSUPPORT);
	}
	/*
	 * Pin the endpoint and take the create lock so no other thread can
	 * set up an association on this endpoint concurrently.
	 */
	SCTP_INP_INCR_REF(inp);
	SCTP_ASOC_CREATE_LOCK(inp);
	create_lock_on = 1;


	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Should I really unlock ? */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
		error = EFAULT;
		goto out_now;
	}
#ifdef INET6
	/* A v4-only endpoint cannot connect to an IPv6 destination. */
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
	    (addr->sa_family == AF_INET6)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		error = EINVAL;
		goto out_now;
	}
#endif				/* INET6 */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind a ephemeral port */
		error = sctp_inpcb_bind(so, NULL, NULL, p);
		if (error) {
			goto out_now;
		}
	}
	/* Now do we connect? */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
		error = EADDRINUSE;
		goto out_now;
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	} else {
		/*
		 * We increment here since sctp_findassociation_ep_addr()
		 * will do a decrement if it finds the stcb as long as the
		 * locked tcb (last argument) is NOT a TCB.. aka NULL.
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_DECR_REF(inp);
		} else {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	if (stcb != NULL) {
		/* Already have or am bring up an association */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
		error = EALREADY;
		goto out_now;
	}
	vrf_id = inp->def_vrf_id;
	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p);
	if (stcb == NULL) {
		/* Gak! no memory */
		goto out_now;
	}
	/* sctp_aloc_assoc() returns the new TCB locked. */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		/* Set the connected flag so we can queue data */
		SOCKBUF_LOCK(&so->so_rcv);
		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
		SOCKBUF_UNLOCK(&so->so_rcv);
		SOCKBUF_LOCK(&so->so_snd);
		so->so_snd.sb_state &= ~SBS_CANTSENDMORE;
		SOCKBUF_UNLOCK(&so->so_snd);
		SOCK_LOCK(so);
		so->so_state &= ~SS_ISDISCONNECTING;
		SOCK_UNLOCK(so);
		soisconnecting(so);
	}
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);

	/* initialize authentication parameters for the assoc */
	sctp_initialize_auth_params(inp, stcb);

	sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
	SCTP_TCB_UNLOCK(stcb);
out_now:
	if (create_lock_on) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
	}
	SCTP_INP_DECR_REF(inp);
	return error;
}

/*
 * listen(2) handler for SCTP sockets.
 */
int
sctp_listen(struct socket *so, int backlog, struct thread *p)
{
	/*
	 * Note this module depends on the protocol processing being called
	 * AFTER any socket level flags and backlog are applied to the
	 * socket. The traditional way that the socket flags are applied is
	 * AFTER protocol processing.
We have made a change to the 4565 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 4566 * place if the socket API for SCTP is to work properly. 4567 */ 4568 4569 int error = 0; 4570 struct sctp_inpcb *inp; 4571 4572 inp = (struct sctp_inpcb *)so->so_pcb; 4573 if (inp == 0) { 4574 /* I made the same as TCP since we are not setup? */ 4575 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4576 return (ECONNRESET); 4577 } 4578 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) { 4579 /* See if we have a listener */ 4580 struct sctp_inpcb *tinp; 4581 union sctp_sockstore store, *sp; 4582 4583 sp = &store; 4584 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4585 /* not bound all */ 4586 struct sctp_laddr *laddr; 4587 4588 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4589 memcpy(&store, &laddr->ifa->address, sizeof(store)); 4590 sp->sin.sin_port = inp->sctp_lport; 4591 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4592 if (tinp && (tinp != inp) && 4593 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4594 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4595 (tinp->sctp_socket->so_qlimit)) { 4596 /* 4597 * we have a listener already and 4598 * its not this inp. 
4599 */ 4600 SCTP_INP_DECR_REF(tinp); 4601 return (EADDRINUSE); 4602 } else if (tinp) { 4603 SCTP_INP_DECR_REF(tinp); 4604 } 4605 } 4606 } else { 4607 /* Setup a local addr bound all */ 4608 memset(&store, 0, sizeof(store)); 4609 store.sin.sin_port = inp->sctp_lport; 4610 #ifdef INET6 4611 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4612 store.sa.sa_family = AF_INET6; 4613 store.sa.sa_len = sizeof(struct sockaddr_in6); 4614 } 4615 #endif 4616 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 4617 store.sa.sa_family = AF_INET; 4618 store.sa.sa_len = sizeof(struct sockaddr_in); 4619 } 4620 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4621 if (tinp && (tinp != inp) && 4622 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4623 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4624 (tinp->sctp_socket->so_qlimit)) { 4625 /* 4626 * we have a listener already and its not 4627 * this inp. 4628 */ 4629 SCTP_INP_DECR_REF(tinp); 4630 return (EADDRINUSE); 4631 } else if (tinp) { 4632 SCTP_INP_DECR_REF(inp); 4633 } 4634 } 4635 } 4636 SCTP_INP_RLOCK(inp); 4637 #ifdef SCTP_LOCK_LOGGING 4638 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { 4639 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4640 } 4641 #endif 4642 SOCK_LOCK(so); 4643 error = solisten_proto_check(so); 4644 if (error) { 4645 SOCK_UNLOCK(so); 4646 SCTP_INP_RUNLOCK(inp); 4647 return (error); 4648 } 4649 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 4650 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4651 /* 4652 * The unlucky case - We are in the tcp pool with this guy. 4653 * - Someone else is in the main inp slot. - We must move 4654 * this guy (the listener) to the main slot - We must then 4655 * move the guy that was listener to the TCP Pool. 
	 */
		if (sctp_swap_inpcb_for_listen(inp)) {
			goto in_use;
		}
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
in_use:
		SCTP_INP_RUNLOCK(inp);
		SOCK_UNLOCK(so);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
		return (EADDRINUSE);
	}
	SCTP_INP_RUNLOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* We must do a bind. */
		SOCK_UNLOCK(so);
		if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
			/* bind error, probably perm */
			return (error);
		}
		SOCK_LOCK(so);
	}
	/* It appears for 7.0 and on, we must always call this. */
	solisten_proto(so, backlog);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		/* remove the ACCEPTCONN flag for one-to-many sockets */
		so->so_options &= ~SO_ACCEPTCONN;
	}
	if (backlog == 0) {
		/* turning off listen */
		so->so_options &= ~SO_ACCEPTCONN;
	}
	SOCK_UNLOCK(so);
	return (error);
}

/* Number of deferred read wakeups delivered from sctp_accept(). */
static int sctp_defered_wakeup_cnt = 0;

/*
 * accept(2) handler: hands back the peer's primary destination address for
 * the first association on the endpoint.  Only valid for one-to-one style
 * sockets (EOPNOTSUPP on one-to-many), and also delivers any socket
 * wakeups that were deferred while SCTP_PCB_FLAGS_DONT_WAKE was set.
 */
int
sctp_accept(struct socket *so, struct sockaddr **addr)
{
	struct sctp_tcb *stcb;
	struct sctp_inpcb *inp;
	union sctp_sockstore store;

#ifdef INET6
	/* only used on the INET6 path (sa6_recoverscope) */
	int error;

#endif
	inp = (struct sctp_inpcb *)so->so_pcb;

	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	SCTP_INP_RLOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	if (so->so_state & SS_ISDISCONNECTED) {
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
		return (ECONNABORTED);
	}
	stcb =
	    LIST_FIRST(&inp->sctp_asoc_list);
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	SCTP_TCB_LOCK(stcb);
	SCTP_INP_RUNLOCK(inp);
	/* Snapshot the primary destination while the TCB is locked. */
	store = stcb->asoc.primary_destination->ro._l_addr;
	stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
	SCTP_TCB_UNLOCK(stcb);
	switch (store.sa.sa_family) {
	case AF_INET:
		{
			struct sockaddr_in *sin;

			SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
			if (sin == NULL)
				return (ENOMEM);
			sin->sin_family = AF_INET;
			sin->sin_len = sizeof(*sin);
			sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
			sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
			*addr = (struct sockaddr *)sin;
			break;
		}
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
			if (sin6 == NULL)
				return (ENOMEM);
			sin6->sin6_family = AF_INET6;
			sin6->sin6_len = sizeof(*sin6);
			sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;

			sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
			/* Re-embed the scope id for link-local addresses. */
			if ((error = sa6_recoverscope(sin6)) != 0) {
				SCTP_FREE_SONAME(sin6);
				return (error);
			}
			*addr = (struct sockaddr *)sin6;
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	/* Wake any delayed sleep action */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
		SCTP_INP_WLOCK(inp);
		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
			/* Deliver the write wakeup that was suppressed. */
			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
			SCTP_INP_WUNLOCK(inp);
			SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
			if (sowriteable(inp->sctp_socket)) {
				/* sowwakeup_locked() drops the sockbuf lock. */
				sowwakeup_locked(inp->sctp_socket);
			} else {
				SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
			}

			SCTP_INP_WLOCK(inp);
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
			/* Deliver the read wakeup that was suppressed. */
			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
			SCTP_INP_WUNLOCK(inp);
			SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
			if (soreadable(inp->sctp_socket)) {
				sctp_defered_wakeup_cnt++;
				/* sorwakeup_locked() drops the sockbuf lock. */
				sorwakeup_locked(inp->sctp_socket);
			} else {
				SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
			}
			SCTP_INP_WLOCK(inp);
		}
		SCTP_INP_WUNLOCK(inp);
	}
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* The association is going away; free it now. */
		SCTP_TCB_LOCK(stcb);
		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
	}
	return (0);
}

/*
 * getsockname(2) handler: report a local IPv4 address and port for the
 * endpoint.  For a bound-all connected endpoint the source-address
 * selection machinery picks the address; for a bound-all unconnected
 * endpoint INADDR_ANY is returned; otherwise the first IPv4 address on the
 * endpoint's bound-address list is used (ENOENT if there is none).
 */
int
sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
{
	struct sockaddr_in *sin;
	uint32_t vrf_id;
	struct sctp_inpcb *inp;
	struct sctp_ifa *sctp_ifa;

	/*
	 * Do the malloc first in case it blocks.
	 */
	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
	if (sin == NULL)
		return (ENOMEM);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (!inp) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return ECONNRESET;
	}
	SCTP_INP_RLOCK(inp);
	sin->sin_port = inp->sctp_lport;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
			struct sctp_tcb *stcb;
			struct sockaddr_in *sin_a;
			struct sctp_nets *net;
			int fnd;

			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				goto notConn;
			}
			fnd = 0;
			sin_a = NULL;
			SCTP_TCB_LOCK(stcb);
			/* Find an IPv4 peer net to select a source for. */
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
				if (sin_a == NULL)
					/* this will make coverity happy */
					continue;

				if (sin_a->sin_family == AF_INET) {
					fnd = 1;
					break;
				}
			}
			if ((!fnd) || (sin_a == NULL)) {
				/* punt */
				SCTP_TCB_UNLOCK(stcb);
				goto notConn;
			}
			vrf_id = inp->def_vrf_id;
			/* Ask source-address selection which local address we'd use. */
			sctp_ifa = sctp_source_address_selection(inp,
			    stcb,
			    (sctp_route_t *) & net->ro,
			    net, 0, vrf_id);
			if (sctp_ifa) {
				sin->sin_addr = sctp_ifa->address.sin.sin_addr;
				sctp_free_ifa(sctp_ifa);
			}
			SCTP_TCB_UNLOCK(stcb);
		} else {
			/* For the bound all case you get back 0 */
	notConn:
			sin->sin_addr.s_addr = 0;
		}

	} else {
		/* Take the first IPv4 address in the list */
		struct sctp_laddr *laddr;
		int fnd = 0;

		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa->address.sa.sa_family == AF_INET) {
				struct sockaddr_in *sin_a;

				sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
				sin->sin_addr = sin_a->sin_addr;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			/* Endpoint is bound, but to no IPv4 address. */
			SCTP_FREE_SONAME(sin);
			SCTP_INP_RUNLOCK(inp);
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
			return ENOENT;
		}
	}
	SCTP_INP_RUNLOCK(inp);
	(*addr) = (struct sockaddr *)sin;
	return (0);
}

/*
 * getpeername(2) handler: report the first IPv4 address (and remote port)
 * of the peer for the first association on the endpoint.  ENOTCONN for
 * unconnected or one-to-many endpoints; ENOENT if the peer has no IPv4
 * address.
 */
int
sctp_peeraddr(struct socket *so, struct sockaddr **addr)
{
	/*
	 * NOTE(review): this initializer is dead - sin is unconditionally
	 * overwritten by SCTP_MALLOC_SONAME() below before any use.
	 */
	struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
	int fnd;
	struct sockaddr_in *sin_a;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;

	/* Do the malloc first in case it blocks. */
	/*
	 * NOTE(review): the comment above is stale - the sockaddr is
	 * actually allocated after the connected check below.
	 */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if ((inp == NULL) ||
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
		/* UDP type and listeners will drop out here */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
		return (ENOTCONN);
	}
	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
	if (sin == NULL)
		return (ENOMEM);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);

	/* We must recapture incase we blocked */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (!inp) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return ECONNRESET;
	}
	SCTP_INP_RLOCK(inp);
	stcb = LIST_FIRST(&inp->sctp_asoc_list);
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
	}
	SCTP_INP_RUNLOCK(inp);
	if (stcb == NULL) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return ECONNRESET;
	}
	fnd = 0;
	/* Use the first IPv4 destination of the peer. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
		if (sin_a->sin_family == AF_INET) {
			fnd = 1;
			sin->sin_port = stcb->rport;
			sin->sin_addr = sin_a->sin_addr;
			break;
		}
	}
	SCTP_TCB_UNLOCK(stcb);
	if (!fnd) {
		/* No IPv4 address */
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		return ENOENT;
	}
	(*addr) = (struct sockaddr *)sin;
	return (0);
}

/*
 * Protocol user-request switch exported to the socket layer: maps the
 * generic socket operations onto the SCTP handlers defined in this file.
 */
struct pr_usrreqs sctp_usrreqs = {
	.pru_abort = sctp_abort,
	.pru_accept = sctp_accept,
	.pru_attach = sctp_attach,
	.pru_bind = sctp_bind,
	.pru_connect = sctp_connect,
	.pru_control = in_control,
	.pru_close = sctp_close,
	.pru_detach = sctp_close,	/* detach shares the close path */
	.pru_sopoll = sopoll_generic,
	.pru_flush = sctp_flush,
	.pru_disconnect = sctp_disconnect,
	.pru_listen = sctp_listen,
	.pru_peeraddr = sctp_peeraddr,
	.pru_send = sctp_sendm,
	.pru_shutdown = sctp_shutdown,
	.pru_sockaddr = sctp_ingetaddr,
	.pru_sosend = sctp_sosend,
	.pru_soreceive = sctp_soreceive
};