/*-
 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_var.h>
#if defined(INET6)
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>



void
sctp_init(void)
{
	/* Init the SCTP pcb in sctp_pcb.c */
	u_long sb_max_adj;

	sctp_pcb_init();

	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
		sctp_max_chunks_on_queue = (nmbclusters / 8);
	/*
	 * Allow a user to take no more than 1/2 the number of clusters or
	 * the SB_MAX whichever is smaller for the send window.
	 */
	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
	sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
	    ((nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
	/*
	 * Now for the recv window, should we take the same amount? or
	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
	 * now I will just copy.
	 */
	sctp_recvspace = sctp_sendspace;


}



/*
 * cleanup of the sctppcbinfo structure.
 * Assumes that the sctppcbinfo lock is held.
 */
void
sctp_pcbinfo_cleanup(void)
{
	/* free the hash tables */
	if (sctppcbinfo.sctp_asochash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark);
	if (sctppcbinfo.sctp_ephash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark);
	if (sctppcbinfo.sctp_tcpephash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark);
	if (sctppcbinfo.sctp_restarthash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark);
}


static void
sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint16_t nxtsz)
{
	struct sctp_tmit_chunk *chk;

	/* Adjust that too */
	stcb->asoc.smallest_mtu = nxtsz;
	/* now off to subtract IP_DF flag if needed */

	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			/*
			 * For this guy we also mark for immediate resend
			 * since we sent too big a chunk
			 */
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->rec.data.doing_fast_retransmit = 0;

			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			if (stcb->asoc.total_flight >= chk->book_size)
				stcb->asoc.total_flight -= chk->book_size;
			else
				stcb->asoc.total_flight = 0;
			if (stcb->asoc.total_flight_count > 0)
				stcb->asoc.total_flight_count--;
			if (net->flight_size >= chk->book_size)
				net->flight_size -= chk->book_size;
			else
				net->flight_size = 0;
		}
	}
}

static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if (stcb != NULL)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not an unreachable message due to frag. */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	totsz = ip->ip_len;

	nxtsz = ntohs(icmph->icmp_seq);
	if (nxtsz == 0) {
		/*
		 * old type router that does not tell us what the next MTU
		 * size is. Rats, we will have to guess (in an educated
		 * fashion, of course).
		 */
		nxtsz = find_next_best_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}


void
sctp_notify(struct sctp_inpcb *inp,
    int error,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		return;
	}
	/* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */

	if ((error == EHOSTUNREACH) ||	/* Host is not reachable */
	    (error == EHOSTDOWN) ||	/* Host is down */
	    (error == ECONNREFUSED) ||	/* Host refused the connection, (not
					 * an abort?) */
	    (error == ENOPROTOOPT)	/* SCTP is not present on host */
	    ) {
		/*
		 * Hmm, reachability problems we must examine closely. If it
		 * is not reachable, we may have lost a network. Or if there
		 * is NO protocol at the other end named SCTP, we consider
		 * it an OOTB abort.
		 */
		if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				/* Ok that destination is NOT reachable */
				printf("ICMP (thresh %d/%d) takes interface %p down\n",
				    net->error_count,
				    net->failure_threshold,
				    net);

				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				net->error_count = net->failure_threshold + 1;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb, SCTP_FAILED_THRESHOLD,
				    (void *)net);
			}
			if (stcb)
				SCTP_TCB_UNLOCK(stcb);
		} else {
			/*
			 * Here the peer is either playing tricks on us,
			 * including an address that belongs to someone who
			 * does not support SCTP, OR it was a userland
			 * implementation that shut down and is now dead. In
			 * either case treat it like an OOTB abort with no
			 * TCB.
			 */
			sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
			sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
			/* no need to unlock here, since the TCB is gone */
		}
	} else {
		/* Send all others to the app */
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);


		if (inp->sctp_socket) {
#ifdef SCTP_LOCK_LOGGING
			sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCK);
#endif
			SOCK_LOCK(inp->sctp_socket);
			inp->sctp_socket->so_error = error;
			sctp_sowwakeup(inp, inp->sctp_socket);
			SOCK_UNLOCK(inp->sctp_socket);
		}
	}
}

void
sctp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct sctphdr *sh;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right?
*/ 305 vrf_id = SCTP_DEFAULT_VRFID; 306 if (sa->sa_family != AF_INET || 307 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) { 308 return; 309 } 310 if (PRC_IS_REDIRECT(cmd)) { 311 ip = 0; 312 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { 313 return; 314 } 315 if (ip) { 316 struct sctp_inpcb *inp = NULL; 317 struct sctp_tcb *stcb = NULL; 318 struct sctp_nets *net = NULL; 319 struct sockaddr_in to, from; 320 321 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 322 bzero(&to, sizeof(to)); 323 bzero(&from, sizeof(from)); 324 from.sin_family = to.sin_family = AF_INET; 325 from.sin_len = to.sin_len = sizeof(to); 326 from.sin_port = sh->src_port; 327 from.sin_addr = ip->ip_src; 328 to.sin_port = sh->dest_port; 329 to.sin_addr = ip->ip_dst; 330 331 /* 332 * 'to' holds the dest of the packet that failed to be sent. 333 * 'from' holds our local endpoint address. Thus we reverse 334 * the to and the from in the lookup. 335 */ 336 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from, 337 (struct sockaddr *)&to, 338 &inp, &net, 1, vrf_id); 339 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) { 340 if (cmd != PRC_MSGSIZE) { 341 int cm; 342 343 if (cmd == PRC_HOSTDEAD) { 344 cm = EHOSTUNREACH; 345 } else { 346 cm = inetctlerrmap[cmd]; 347 } 348 sctp_notify(inp, cm, sh, 349 (struct sockaddr *)&to, stcb, 350 net); 351 } else { 352 /* handle possible ICMP size messages */ 353 sctp_notify_mbuf(inp, stcb, net, ip, sh); 354 } 355 } else { 356 if ((stcb == NULL) && (inp != NULL)) { 357 /* reduce ref-count */ 358 SCTP_INP_WLOCK(inp); 359 SCTP_INP_DECR_REF(inp); 360 SCTP_INP_WUNLOCK(inp); 361 } 362 } 363 } 364 return; 365 } 366 367 static int 368 sctp_getcred(SYSCTL_HANDLER_ARGS) 369 { 370 struct xucred xuc; 371 struct sockaddr_in addrs[2]; 372 struct sctp_inpcb *inp; 373 struct sctp_nets *net; 374 struct sctp_tcb *stcb; 375 int error; 376 uint32_t vrf_id; 377 378 379 /* FIX, for non-bsd is this right? */ 380 vrf_id = SCTP_DEFAULT_VRFID; 381 382 /* 383 * XXXRW: Other instances of getcred use SUSER_ALLOWJAIL, as socket 384 * visibility is scoped using cr_canseesocket(), which it is not 385 * here. 386 */ 387 error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED, 388 SUSER_ALLOWJAIL); 389 if (error) 390 return (error); 391 392 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 393 if (error) 394 return (error); 395 396 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]), 397 sintosa(&addrs[1]), 398 &inp, &net, 1, vrf_id); 399 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { 400 if ((inp != NULL) && (stcb == NULL)) { 401 /* reduce ref-count */ 402 SCTP_INP_WLOCK(inp); 403 SCTP_INP_DECR_REF(inp); 404 goto cred_can_cont; 405 } 406 error = ENOENT; 407 goto out; 408 } 409 SCTP_TCB_UNLOCK(stcb); 410 /* 411 * We use the write lock here, only since in the error leg we need 412 * it. If we used RLOCK, then we would have to 413 * wlock/decr/unlock/rlock. Which in theory could create a hole. 414 * Better to use higher wlock. 
415 */ 416 SCTP_INP_WLOCK(inp); 417 cred_can_cont: 418 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); 419 if (error) { 420 SCTP_INP_WUNLOCK(inp); 421 goto out; 422 } 423 cru2x(inp->sctp_socket->so_cred, &xuc); 424 SCTP_INP_WUNLOCK(inp); 425 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 426 out: 427 return (error); 428 } 429 430 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 431 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); 432 433 434 static void 435 sctp_abort(struct socket *so) 436 { 437 struct sctp_inpcb *inp; 438 uint32_t flags; 439 440 inp = (struct sctp_inpcb *)so->so_pcb; 441 if (inp == 0) 442 return; 443 444 sctp_must_try_again: 445 flags = inp->sctp_flags; 446 #ifdef SCTP_LOG_CLOSING 447 sctp_log_closing(inp, NULL, 17); 448 #endif 449 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 450 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 451 #ifdef SCTP_LOG_CLOSING 452 sctp_log_closing(inp, NULL, 16); 453 #endif 454 sctp_inpcb_free(inp, 1, 0); 455 SOCK_LOCK(so); 456 SCTP_SB_CLEAR(so->so_snd); 457 /* 458 * same for the rcv ones, they are only here for the 459 * accounting/select. 460 */ 461 SCTP_SB_CLEAR(so->so_rcv); 462 463 /* Now null out the reference, we are completely detached. */ 464 so->so_pcb = NULL; 465 SOCK_UNLOCK(so); 466 } else { 467 flags = inp->sctp_flags; 468 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 469 goto sctp_must_try_again; 470 } 471 } 472 return; 473 } 474 475 static int 476 sctp_attach(struct socket *so, int proto, struct thread *p) 477 { 478 struct sctp_inpcb *inp; 479 struct inpcb *ip_inp; 480 int error; 481 482 #ifdef IPSEC 483 uint32_t flags; 484 485 #endif 486 inp = (struct sctp_inpcb *)so->so_pcb; 487 if (inp != 0) { 488 return EINVAL; 489 } 490 error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace); 491 if (error) { 492 return error; 493 } 494 error = sctp_inpcb_alloc(so); 495 if (error) { 496 return error; 497 } 498 inp = (struct sctp_inpcb *)so->so_pcb; 499 SCTP_INP_WLOCK(inp); 500 501 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ 502 ip_inp = &inp->ip_inp.inp; 503 ip_inp->inp_vflag |= INP_IPV4; 504 ip_inp->inp_ip_ttl = ip_defttl; 505 506 #ifdef IPSEC 507 error = ipsec_init_pcbpolicy(so, &ip_inp->inp_sp); 508 #ifdef SCTP_LOG_CLOSING 509 sctp_log_closing(inp, NULL, 17); 510 #endif 511 if (error != 0) { 512 flags = inp->sctp_flags; 513 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 514 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 515 #ifdef SCTP_LOG_CLOSING 516 sctp_log_closing(inp, NULL, 15); 517 #endif 518 sctp_inpcb_free(inp, 1, 0); 519 } 520 return error; 521 } 522 #endif /* IPSEC */ 523 SCTP_INP_WUNLOCK(inp); 524 return 0; 525 } 526 527 static int 528 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 529 { 530 struct sctp_inpcb *inp; 531 int error; 532 533 #ifdef INET6 534 if (addr && addr->sa_family != AF_INET) 535 /* must be a v4 address! */ 536 return EINVAL; 537 #endif /* INET6 */ 538 539 inp = (struct sctp_inpcb *)so->so_pcb; 540 if (inp == 0) 541 return EINVAL; 542 543 error = sctp_inpcb_bind(so, addr, p); 544 return error; 545 } 546 547 static void 548 sctp_close(struct socket *so) 549 { 550 struct sctp_inpcb *inp; 551 uint32_t flags; 552 553 inp = (struct sctp_inpcb *)so->so_pcb; 554 if (inp == 0) 555 return; 556 557 /* 558 * Inform all the lower layer assoc that we are done. 
559 */ 560 sctp_must_try_again: 561 flags = inp->sctp_flags; 562 #ifdef SCTP_LOG_CLOSING 563 sctp_log_closing(inp, NULL, 17); 564 #endif 565 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 566 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 567 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || 568 (so->so_rcv.sb_cc > 0)) { 569 #ifdef SCTP_LOG_CLOSING 570 sctp_log_closing(inp, NULL, 13); 571 #endif 572 sctp_inpcb_free(inp, 1, 1); 573 } else { 574 #ifdef SCTP_LOG_CLOSING 575 sctp_log_closing(inp, NULL, 14); 576 #endif 577 sctp_inpcb_free(inp, 0, 1); 578 } 579 /* 580 * The socket is now detached, no matter what the state of 581 * the SCTP association. 582 */ 583 SOCK_LOCK(so); 584 SCTP_SB_CLEAR(so->so_snd); 585 /* 586 * same for the rcv ones, they are only here for the 587 * accounting/select. 588 */ 589 SCTP_SB_CLEAR(so->so_rcv); 590 591 /* Now null out the reference, we are completely detached. */ 592 so->so_pcb = NULL; 593 SOCK_UNLOCK(so); 594 } else { 595 flags = inp->sctp_flags; 596 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 597 goto sctp_must_try_again; 598 } 599 } 600 return; 601 } 602 603 604 int 605 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 606 struct mbuf *control, struct thread *p); 607 608 609 int 610 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 611 struct mbuf *control, struct thread *p) 612 { 613 struct sctp_inpcb *inp; 614 int error; 615 616 inp = (struct sctp_inpcb *)so->so_pcb; 617 if (inp == 0) { 618 if (control) { 619 sctp_m_freem(control); 620 control = NULL; 621 } 622 sctp_m_freem(m); 623 return EINVAL; 624 } 625 /* Got to have an to address if we are NOT a connected socket */ 626 if ((addr == NULL) && 627 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 628 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) 629 ) { 630 goto connected_type; 631 } else if (addr == NULL) { 632 error = EDESTADDRREQ; 633 sctp_m_freem(m); 634 if (control) { 635 sctp_m_freem(control); 636 control = NULL; 637 } 638 return (error); 639 } 640 #ifdef INET6 641 if (addr->sa_family != AF_INET) { 642 /* must be a v4 address! */ 643 sctp_m_freem(m); 644 if (control) { 645 sctp_m_freem(control); 646 control = NULL; 647 } 648 error = EDESTADDRREQ; 649 return EINVAL; 650 } 651 #endif /* INET6 */ 652 connected_type: 653 /* now what about control */ 654 if (control) { 655 if (inp->control) { 656 printf("huh? control set?\n"); 657 sctp_m_freem(inp->control); 658 inp->control = NULL; 659 } 660 inp->control = control; 661 } 662 /* Place the data */ 663 if (inp->pkt) { 664 SCTP_BUF_NEXT(inp->pkt_last) = m; 665 inp->pkt_last = m; 666 } else { 667 inp->pkt_last = inp->pkt = m; 668 } 669 if ( 670 /* FreeBSD uses a flag passed */ 671 ((flags & PRUS_MORETOCOME) == 0) 672 ) { 673 /* 674 * note with the current version this code will only be used 675 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for 676 * re-defining sosend to use the sctp_sosend. One can 677 * optionally switch back to this code (by changing back the 678 * definitions) but this is not advisable. This code is used 679 * by FreeBSD when sending a file with sendfile() though. 
680 */ 681 int ret; 682 683 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); 684 inp->pkt = NULL; 685 inp->control = NULL; 686 return (ret); 687 } else { 688 return (0); 689 } 690 } 691 692 static int 693 sctp_disconnect(struct socket *so) 694 { 695 struct sctp_inpcb *inp; 696 697 inp = (struct sctp_inpcb *)so->so_pcb; 698 if (inp == NULL) { 699 return (ENOTCONN); 700 } 701 SCTP_INP_RLOCK(inp); 702 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 703 if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) { 704 /* No connection */ 705 SCTP_INP_RUNLOCK(inp); 706 return (0); 707 } else { 708 struct sctp_association *asoc; 709 struct sctp_tcb *stcb; 710 711 stcb = LIST_FIRST(&inp->sctp_asoc_list); 712 if (stcb == NULL) { 713 SCTP_INP_RUNLOCK(inp); 714 return (EINVAL); 715 } 716 SCTP_TCB_LOCK(stcb); 717 asoc = &stcb->asoc; 718 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 719 /* We are about to be freed, out of here */ 720 SCTP_TCB_UNLOCK(stcb); 721 SCTP_INP_RUNLOCK(inp); 722 return (0); 723 } 724 if (((so->so_options & SO_LINGER) && 725 (so->so_linger == 0)) || 726 (so->so_rcv.sb_cc > 0)) { 727 if (SCTP_GET_STATE(asoc) != 728 SCTP_STATE_COOKIE_WAIT) { 729 /* Left with Data unread */ 730 struct mbuf *err; 731 732 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 733 if (err) { 734 /* 735 * Fill in the user 736 * initiated abort 737 */ 738 struct sctp_paramhdr *ph; 739 740 ph = mtod(err, struct sctp_paramhdr *); 741 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr); 742 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 743 ph->param_length = htons(SCTP_BUF_LEN(err)); 744 } 745 sctp_send_abort_tcb(stcb, err); 746 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 747 } 748 SCTP_INP_RUNLOCK(inp); 749 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 750 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 751 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 752 } 753 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); 754 /* No unlock tcb assoc is gone */ 755 return (0); 756 } 757 if (TAILQ_EMPTY(&asoc->send_queue) && 758 TAILQ_EMPTY(&asoc->sent_queue) && 759 (asoc->stream_queue_cnt == 0)) { 760 /* there is nothing queued to send, so done */ 761 if (asoc->locked_on_sending) { 762 goto abort_anyway; 763 } 764 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 765 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 766 /* only send SHUTDOWN 1st time thru */ 767 sctp_stop_timers_for_shutdown(stcb); 768 sctp_send_shutdown(stcb, 769 stcb->asoc.primary_destination); 770 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3); 771 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 772 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 773 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 774 } 775 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 776 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 777 stcb->sctp_ep, stcb, 778 asoc->primary_destination); 779 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 780 stcb->sctp_ep, stcb, 781 asoc->primary_destination); 782 } 783 } else { 784 /* 785 * we still got (or just got) data to send, 786 * so set SHUTDOWN_PENDING 787 */ 788 /* 789 * XXX sockets draft says that SCTP_EOF 790 * should be sent with no data. 
				 * currently, we will allow user data to be sent first
				 * and move to SHUTDOWN-PENDING
				 */
				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				if (asoc->locked_on_sending) {
					/* Locked to send out the data */
					struct sctp_stream_queue_pending *sp;

					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
					if (sp == NULL) {
						printf("Error, sp is NULL, locked on sending is non-null strm:%d\n",
						    asoc->locked_on_sending->stream_no);
					} else {
						if ((sp->length == 0) && (sp->msg_is_complete == 0))
							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
				if (TAILQ_EMPTY(&asoc->send_queue) &&
				    TAILQ_EMPTY(&asoc->sent_queue) &&
				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
					struct mbuf *op_err;

	abort_anyway:
					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (op_err) {
						/*
						 * Fill in the user
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(op_err) =
						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
						ph = mtod(op_err,
						    struct sctp_paramhdr *);
						ph->param_type = htons(
						    SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length = htons(SCTP_BUF_LEN(op_err));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
					sctp_send_abort_tcb(stcb, op_err);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_INP_RUNLOCK(inp);
					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
					return (0);
				}
			}
			SCTP_TCB_UNLOCK(stcb);
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		/* not reached */
		printf("Not reached?\n");
	} else {
		/* UDP model does not support this */
		SCTP_INP_RUNLOCK(inp);
		return EOPNOTSUPP;
	}
}

int
sctp_shutdown(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		return EINVAL;
	}
	SCTP_INP_RLOCK(inp);
	/* For the UDP model this is an invalid call */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		/* Restore the flags that the soshutdown took away. */
		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
		/* This proc will wakeup for read and do nothing (I hope) */
		SCTP_INP_RUNLOCK(inp);
		return (EOPNOTSUPP);
	}
	/*
	 * Ok, if we reach here it is the TCP model and it is either a
	 * SHUT_WR or SHUT_RDWR. This means we put the shutdown flag
	 * against it.
	 */
	{
		struct sctp_tcb *stcb;
		struct sctp_association *asoc;

		socantsendmore(so);

		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			/*
			 * Ok we hit the case that the shutdown call was
			 * made after an abort or something. Nothing to do
			 * now.
			 */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		SCTP_TCB_LOCK(stcb);
		asoc = &stcb->asoc;
		if (TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->sent_queue) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->locked_on_sending) {
				goto abort_anyway;
			}
			/* there is nothing queued to send, so I'm done...
*/ 908 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 909 /* only send SHUTDOWN the first time through */ 910 sctp_stop_timers_for_shutdown(stcb); 911 sctp_send_shutdown(stcb, 912 stcb->asoc.primary_destination); 913 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3); 914 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 915 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 916 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 917 } 918 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 919 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 920 stcb->sctp_ep, stcb, 921 asoc->primary_destination); 922 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 923 stcb->sctp_ep, stcb, 924 asoc->primary_destination); 925 } 926 } else { 927 /* 928 * we still got (or just got) data to send, so set 929 * SHUTDOWN_PENDING 930 */ 931 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 932 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 933 asoc->primary_destination); 934 935 if (asoc->locked_on_sending) { 936 /* Locked to send out the data */ 937 struct sctp_stream_queue_pending *sp; 938 939 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 940 if (sp == NULL) { 941 printf("Error, sp is NULL, locked on sending is non-null strm:%d\n", 942 asoc->locked_on_sending->stream_no); 943 } else { 944 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 945 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 946 } 947 } 948 } 949 if (TAILQ_EMPTY(&asoc->send_queue) && 950 TAILQ_EMPTY(&asoc->sent_queue) && 951 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 952 struct mbuf *op_err; 953 954 abort_anyway: 955 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 956 0, M_DONTWAIT, 1, MT_DATA); 957 if (op_err) { 958 /* Fill in the user initiated abort */ 959 struct sctp_paramhdr *ph; 960 uint32_t *ippp; 961 962 SCTP_BUF_LEN(op_err) = 963 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 964 ph = mtod(op_err, 965 struct sctp_paramhdr *); 966 ph->param_type = htons( 967 SCTP_CAUSE_USER_INITIATED_ABT); 968 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 969 ippp = (uint32_t *) (ph + 1); 970 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 971 } 972 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; 973 sctp_abort_an_association(stcb->sctp_ep, stcb, 974 SCTP_RESPONSE_TO_USER_REQ, 975 op_err); 976 goto skip_unlock; 977 } 978 } 979 SCTP_TCB_UNLOCK(stcb); 980 } 981 skip_unlock: 982 SCTP_INP_RUNLOCK(inp); 983 return 0; 984 } 985 986 /* 987 * copies a "user" presentable address and removes embedded scope, etc. 
988 * returns 0 on success, 1 on error 989 */ 990 static uint32_t 991 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 992 { 993 struct sockaddr_in6 lsa6; 994 995 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 996 &lsa6); 997 memcpy(ss, sa, sa->sa_len); 998 return (0); 999 } 1000 1001 1002 1003 static size_t 1004 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, 1005 struct sctp_tcb *stcb, 1006 size_t limit, 1007 struct sockaddr_storage *sas, 1008 uint32_t vrf_id) 1009 { 1010 struct sctp_ifn *sctp_ifn; 1011 struct sctp_ifa *sctp_ifa; 1012 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1013 size_t actual; 1014 int ipv4_addr_legal, ipv6_addr_legal; 1015 struct sctp_vrf *vrf; 1016 1017 actual = 0; 1018 if (limit <= 0) 1019 return (actual); 1020 1021 if (stcb) { 1022 /* Turn on all the appropriate scope */ 1023 loopback_scope = stcb->asoc.loopback_scope; 1024 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1025 local_scope = stcb->asoc.local_scope; 1026 site_scope = stcb->asoc.site_scope; 1027 } else { 1028 /* Turn on ALL scope, since we look at the EP */ 1029 loopback_scope = ipv4_local_scope = local_scope = 1030 site_scope = 1; 1031 } 1032 ipv4_addr_legal = ipv6_addr_legal = 0; 1033 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1034 ipv6_addr_legal = 1; 1035 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1036 ipv4_addr_legal = 1; 1037 } 1038 } else { 1039 ipv4_addr_legal = 1; 1040 } 1041 vrf = sctp_find_vrf(vrf_id); 1042 if (vrf == NULL) { 1043 return (0); 1044 } 1045 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1046 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1047 if ((loopback_scope == 0) && 1048 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1049 /* Skip loopback if loopback_scope not set */ 1050 continue; 1051 } 1052 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1053 if (stcb) { 1054 /* 1055 * For the BOUND-ALL case, the list 1056 * associated with a TCB is Always 1057 * considered a reverse list.. i.e. 1058 * it lists addresses that are NOT 1059 * part of the association. If this 1060 * is one of those we must skip it. 
					 */
					if (sctp_is_addr_restricted(stcb,
					    sctp_ifa)) {
						continue;
					}
				}
				if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
				    (ipv4_addr_legal)) {
					struct sockaddr_in *sin;

					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
					if (sin->sin_addr.s_addr == 0) {
						/*
						 * we skip unspecified
						 * addresses
						 */
						continue;
					}
					if ((ipv4_local_scope == 0) &&
					    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
						continue;
					}
					if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
						in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
						actual += sizeof(struct sockaddr_in6);
					} else {
						memcpy(sas, sin, sizeof(*sin));
						((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
						actual += sizeof(*sin);
					}
					if (actual >= limit) {
						return (actual);
					}
				} else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
				    (ipv6_addr_legal)) {
					struct sockaddr_in6 *sin6;

					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
						/*
						 * we skip unspecified
						 * addresses
						 */
						continue;
					}
					if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
						if (local_scope == 0)
							continue;
						if (sin6->sin6_scope_id == 0) {
							if (sa6_recoverscope(sin6) != 0)
								/*
								 * bad link
								 * local
								 * address
								 */
								continue;
						}
					}
					if ((site_scope == 0) &&
					    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
						continue;
					}
					memcpy(sas, sin6, sizeof(*sin6));
					((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
					sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
					actual += sizeof(*sin6);
					if (actual >= limit) {
						return (actual);
					}
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		/* The list is a NEGATIVE list */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (stcb) {
				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
					continue;
				}
			}
			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
				continue;

			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
			sas = (struct sockaddr_storage *)((caddr_t)sas +
			    laddr->ifa->address.sa.sa_len);
			actual += laddr->ifa->address.sa.sa_len;
			if (actual >= limit) {
				return (actual);
			}
		}
	}
	return (actual);
}

static size_t
sctp_fill_up_addresses(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    size_t limit,
    struct sockaddr_storage *sas)
{
	size_t size = 0;

	/* fill up addresses for the endpoint's default vrf */
	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
	    inp->def_vrf_id);
	return (size);
}

static int
sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
{
	int cnt = 0;
	struct sctp_vrf *vrf = NULL;

	/*
	 * In both the sub-set bound and bound-all cases we return the
	 * MAXIMUM number of addresses that you COULD get. In reality the
	 * sub-set bound may have an exclusion list for a given TCB, OR in
	 * the bound-all case a TCB may NOT include the loopback or other
	 * addresses as well.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		return (0);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifn *sctp_ifn;
		struct sctp_ifa *sctp_ifa;

		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* Count them if they are the right type */
				if (sctp_ifa->address.sa.sa_family == AF_INET) {
					if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
						cnt += sizeof(struct sockaddr_in6);
					else
						cnt += sizeof(struct sockaddr_in);

				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
					cnt += sizeof(struct sockaddr_in6);
			}
		}
	} else {
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa->address.sa.sa_family == AF_INET) {
				if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
					cnt += sizeof(struct sockaddr_in6);
				else
					cnt += sizeof(struct sockaddr_in);

			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
				cnt += sizeof(struct sockaddr_in6);
		}
	}
	return (cnt);
}

static int
sctp_count_max_addresses(struct sctp_inpcb *inp)
{
	int cnt = 0;

	/* count addresses for the endpoint's default VRF */
	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
	return (cnt);
}


static int
sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
    size_t optsize, void *p, int delay)
{
	int error = 0;
	int creat_lock_on = 0;
	struct sctp_tcb *stcb = NULL;
	struct sockaddr *sa;
	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr, i;
	size_t incr, at;
	uint32_t vrf_id;
	sctp_assoc_t *a_id;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		printf("Connectx called\n");
	}
#endif	/* SCTP_DEBUG */

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		return (EADDRINUSE);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
		return (EINVAL);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	}
	if (stcb) {
		return (EALREADY);
	}
	SCTP_INP_INCR_REF(inp);
	SCTP_ASOC_CREATE_LOCK(inp);
	creat_lock_on = 1;
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		error = EFAULT;
		goto out_now;
	}
	totaddrp = (int *)optval;
	totaddr = *totaddrp;
	sa = (struct sockaddr *)(totaddrp + 1);
	at = incr = 0;
	/* account and validate addresses */
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			num_v4++;
			incr = sizeof(struct sockaddr_in);
		} else if (sa->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				error = EINVAL;
				goto out_now;
			}
			num_v6++;
			incr = sizeof(struct sockaddr_in6);
		} else {
			totaddr = i;
			break;
		}
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bringing up an association */
			SCTP_ASOC_CREATE_UNLOCK(inp);
creat_lock_on = 0; 1309 SCTP_TCB_UNLOCK(stcb); 1310 error = EALREADY; 1311 goto out_now; 1312 } 1313 if ((at + incr) > optsize) { 1314 totaddr = i; 1315 break; 1316 } 1317 sa = (struct sockaddr *)((caddr_t)sa + incr); 1318 } 1319 sa = (struct sockaddr *)(totaddrp + 1); 1320 #ifdef INET6 1321 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1322 (num_v6 > 0)) { 1323 error = EINVAL; 1324 goto out_now; 1325 } 1326 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1327 (num_v4 > 0)) { 1328 struct in6pcb *inp6; 1329 1330 inp6 = (struct in6pcb *)inp; 1331 if (SCTP_IPV6_V6ONLY(inp6)) { 1332 /* 1333 * if IPV6_V6ONLY flag, ignore connections destined 1334 * to a v4 addr or v4-mapped addr 1335 */ 1336 error = EINVAL; 1337 goto out_now; 1338 } 1339 } 1340 #endif /* INET6 */ 1341 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 1342 SCTP_PCB_FLAGS_UNBOUND) { 1343 /* Bind a ephemeral port */ 1344 error = sctp_inpcb_bind(so, NULL, p); 1345 if (error) { 1346 goto out_now; 1347 } 1348 } 1349 /* FIX ME: do we want to pass in a vrf on the connect call? */ 1350 vrf_id = inp->def_vrf_id; 1351 1352 /* We are GOOD to go */ 1353 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id); 1354 if (stcb == NULL) { 1355 /* Gak! no memory */ 1356 goto out_now; 1357 } 1358 /* move to second address */ 1359 if (sa->sa_family == AF_INET) 1360 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); 1361 else 1362 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); 1363 1364 for (i = 1; i < totaddr; i++) { 1365 if (sa->sa_family == AF_INET) { 1366 incr = sizeof(struct sockaddr_in); 1367 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 1368 /* assoc gone no un-lock */ 1369 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 1370 error = ENOBUFS; 1371 goto out_now; 1372 } 1373 } else if (sa->sa_family == AF_INET6) { 1374 incr = sizeof(struct sockaddr_in6); 1375 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 1376 /* assoc gone no un-lock */ 1377 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8); 1378 error = ENOBUFS; 1379 goto out_now; 1380 } 1381 } 1382 sa = (struct sockaddr *)((caddr_t)sa + incr); 1383 } 1384 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 1385 /* Fill in the return id */ 1386 a_id = (sctp_assoc_t *) optval; 1387 *a_id = sctp_get_associd(stcb); 1388 1389 /* initialize authentication parameters for the assoc */ 1390 sctp_initialize_auth_params(inp, stcb); 1391 1392 if (delay) { 1393 /* doing delayed connection */ 1394 stcb->asoc.delayed_connection = 1; 1395 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); 1396 } else { 1397 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1398 sctp_send_initiate(inp, stcb); 1399 } 1400 SCTP_TCB_UNLOCK(stcb); 1401 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1402 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1403 /* Set the connected flag so we can queue data */ 1404 soisconnecting(so); 1405 } 1406 out_now: 1407 if (creat_lock_on) 1408 SCTP_ASOC_CREATE_UNLOCK(inp); 1409 SCTP_INP_DECR_REF(inp); 1410 return error; 1411 } 1412 1413 #define SCTP_FIND_STCB(inp, stcb, assoc_id) \ 1414 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { \ 1415 SCTP_INP_RLOCK(inp); \ 1416 stcb = LIST_FIRST(&inp->sctp_asoc_list); \ 1417 if (stcb) \ 1418 SCTP_TCB_LOCK(stcb); \ 1419 SCTP_INP_RUNLOCK(inp); \ 1420 } else if (assoc_id != 0) { \ 1421 stcb = sctp_findassociation_ep_asocid(inp, 
assoc_id, 1); \ 1422 if (stcb == NULL) { \ 1423 error = ENOENT; \ 1424 break; \ 1425 } \ 1426 } else { \ 1427 stcb = NULL; \ 1428 } 1429 1430 #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) \ 1431 if (size < sizeof(type)) { \ 1432 error = EINVAL; \ 1433 break; \ 1434 } else { \ 1435 destp = (type *)srcp; \ 1436 } 1437 1438 static int 1439 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, 1440 void *p) 1441 { 1442 struct sctp_inpcb *inp; 1443 int error, val = 0; 1444 struct sctp_tcb *stcb = NULL; 1445 1446 if (optval == NULL) { 1447 return (EINVAL); 1448 } 1449 inp = (struct sctp_inpcb *)so->so_pcb; 1450 if (inp == 0) 1451 return EINVAL; 1452 error = 0; 1453 1454 switch (optname) { 1455 case SCTP_NODELAY: 1456 case SCTP_AUTOCLOSE: 1457 case SCTP_EXPLICIT_EOR: 1458 case SCTP_AUTO_ASCONF: 1459 case SCTP_DISABLE_FRAGMENTS: 1460 case SCTP_I_WANT_MAPPED_V4_ADDR: 1461 case SCTP_USE_EXT_RCVINFO: 1462 SCTP_INP_RLOCK(inp); 1463 switch (optname) { 1464 case SCTP_DISABLE_FRAGMENTS: 1465 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1466 break; 1467 case SCTP_I_WANT_MAPPED_V4_ADDR: 1468 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1469 break; 1470 case SCTP_AUTO_ASCONF: 1471 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1472 break; 1473 case SCTP_EXPLICIT_EOR: 1474 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1475 break; 1476 case SCTP_NODELAY: 1477 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1478 break; 1479 case SCTP_USE_EXT_RCVINFO: 1480 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1481 break; 1482 case SCTP_AUTOCLOSE: 1483 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1484 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1485 else 1486 val = 0; 1487 break; 1488 1489 default: 1490 error = ENOPROTOOPT; 1491 } /* end switch (sopt->sopt_name) */ 1492 if (optname != SCTP_AUTOCLOSE) { 1493 /* make it an "on/off" value */ 1494 val = (val != 0); 1495 } 1496 if (*optsize < sizeof(val)) { 1497 error = EINVAL; 1498 } 1499 SCTP_INP_RUNLOCK(inp); 1500 if (error == 0) { 1501 /* return the option value */ 1502 *(int *)optval = val; 1503 *optsize = sizeof(val); 1504 } 1505 break; 1506 1507 case SCTP_PARTIAL_DELIVERY_POINT: 1508 { 1509 uint32_t *value; 1510 1511 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1512 *value = inp->partial_delivery_point; 1513 *optsize = sizeof(uint32_t); 1514 } 1515 break; 1516 case SCTP_FRAGMENT_INTERLEAVE: 1517 { 1518 uint32_t *value; 1519 1520 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1521 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 1522 *optsize = sizeof(uint32_t); 1523 } 1524 break; 1525 case SCTP_CMT_ON_OFF: 1526 { 1527 struct sctp_assoc_value *av; 1528 1529 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1530 if (sctp_cmt_on_off) { 1531 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1532 if (stcb) { 1533 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 1534 SCTP_TCB_UNLOCK(stcb); 1535 1536 } else { 1537 error = ENOTCONN; 1538 } 1539 } else { 1540 error = ENOPROTOOPT; 1541 } 1542 *optsize = sizeof(*av); 1543 } 1544 break; 1545 case SCTP_GET_ADDR_LEN: 1546 { 1547 struct sctp_assoc_value *av; 1548 1549 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1550 error = EINVAL; 1551 #ifdef INET 1552 if (av->assoc_value == AF_INET) { 1553 av->assoc_value = sizeof(struct sockaddr_in); 1554 error = 0; 1555 } 1556 #endif 1557 #ifdef INET6 1558 if (av->assoc_value == AF_INET6) { 1559 
av->assoc_value = sizeof(struct sockaddr_in6); 1560 error = 0; 1561 } 1562 #endif 1563 *optsize = sizeof(*av); 1564 } 1565 break; 1566 case SCTP_GET_ASOC_ID_LIST: 1567 { 1568 struct sctp_assoc_ids *ids; 1569 int cnt, at; 1570 uint16_t orig; 1571 1572 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 1573 cnt = 0; 1574 SCTP_INP_RLOCK(inp); 1575 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1576 if (stcb == NULL) { 1577 none_out_now: 1578 ids->asls_numb_present = 0; 1579 ids->asls_more_to_get = 0; 1580 SCTP_INP_RUNLOCK(inp); 1581 break; 1582 } 1583 orig = ids->asls_assoc_start; 1584 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1585 while (orig) { 1586 stcb = LIST_NEXT(stcb, sctp_tcblist); 1587 orig--; 1588 cnt--; 1589 if (stcb == NULL) 1590 goto none_out_now; 1591 } 1592 if (stcb == NULL) 1593 goto none_out_now; 1594 1595 at = 0; 1596 ids->asls_numb_present = 0; 1597 ids->asls_more_to_get = 1; 1598 while (at < MAX_ASOC_IDS_RET) { 1599 ids->asls_assoc_id[at] = sctp_get_associd(stcb); 1600 at++; 1601 ids->asls_numb_present++; 1602 stcb = LIST_NEXT(stcb, sctp_tcblist); 1603 if (stcb == NULL) { 1604 ids->asls_more_to_get = 0; 1605 break; 1606 } 1607 } 1608 SCTP_INP_RUNLOCK(inp); 1609 } 1610 break; 1611 case SCTP_CONTEXT: 1612 { 1613 struct sctp_assoc_value *av; 1614 1615 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1616 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1617 1618 if (stcb) { 1619 av->assoc_value = stcb->asoc.context; 1620 SCTP_TCB_UNLOCK(stcb); 1621 } else { 1622 SCTP_INP_RLOCK(inp); 1623 av->assoc_value = inp->sctp_context; 1624 SCTP_INP_RUNLOCK(inp); 1625 } 1626 *optsize = sizeof(*av); 1627 } 1628 break; 1629 case SCTP_VRF_ID: 1630 { 1631 uint32_t *vrf_id; 1632 1633 SCTP_CHECK_AND_CAST(vrf_id, optval, uint32_t, *optsize); 1634 *vrf_id = inp->def_vrf_id; 1635 break; 1636 } 1637 case SCTP_GET_ASOC_VRF: 1638 { 1639 struct sctp_assoc_value *id; 1640 1641 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); 1642 SCTP_FIND_STCB(inp, stcb, id->assoc_id); 1643 if (stcb == NULL) { 1644 error = EINVAL; 1645 break; 1646 } 1647 id->assoc_value = stcb->asoc.vrf_id; 1648 break; 1649 } 1650 case SCTP_GET_VRF_IDS: 1651 { 1652 error = EOPNOTSUPP; 1653 break; 1654 } 1655 case SCTP_GET_NONCE_VALUES: 1656 { 1657 struct sctp_get_nonce_values *gnv; 1658 1659 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 1660 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 1661 1662 if (stcb) { 1663 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 1664 gnv->gn_local_tag = stcb->asoc.my_vtag; 1665 SCTP_TCB_UNLOCK(stcb); 1666 } else { 1667 error = ENOTCONN; 1668 } 1669 *optsize = sizeof(*gnv); 1670 } 1671 break; 1672 case SCTP_DELAYED_ACK_TIME: 1673 { 1674 struct sctp_assoc_value *tm; 1675 1676 SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, *optsize); 1677 SCTP_FIND_STCB(inp, stcb, tm->assoc_id); 1678 1679 if (stcb) { 1680 tm->assoc_value = stcb->asoc.delayed_ack; 1681 SCTP_TCB_UNLOCK(stcb); 1682 } else { 1683 SCTP_INP_RLOCK(inp); 1684 tm->assoc_value = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1685 SCTP_INP_RUNLOCK(inp); 1686 } 1687 *optsize = sizeof(*tm); 1688 } 1689 break; 1690 1691 case SCTP_GET_SNDBUF_USE: 1692 { 1693 struct sctp_sockstat *ss; 1694 1695 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 1696 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 1697 1698 if (stcb) { 1699 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 1700 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 1701 
stcb->asoc.size_on_all_streams); 1702 SCTP_TCB_UNLOCK(stcb); 1703 } else { 1704 error = ENOTCONN; 1705 } 1706 *optsize = sizeof(struct sctp_sockstat); 1707 } 1708 break; 1709 case SCTP_MAXBURST: 1710 { 1711 uint8_t *value; 1712 1713 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); 1714 1715 SCTP_INP_RLOCK(inp); 1716 *value = inp->sctp_ep.max_burst; 1717 SCTP_INP_RUNLOCK(inp); 1718 *optsize = sizeof(uint8_t); 1719 } 1720 break; 1721 case SCTP_MAXSEG: 1722 { 1723 struct sctp_assoc_value *av; 1724 int ovh; 1725 1726 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1727 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1728 1729 if (stcb) { 1730 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 1731 SCTP_TCB_UNLOCK(stcb); 1732 } else { 1733 SCTP_INP_RLOCK(inp); 1734 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1735 ovh = SCTP_MED_OVERHEAD; 1736 } else { 1737 ovh = SCTP_MED_V4_OVERHEAD; 1738 } 1739 av->assoc_value = inp->sctp_frag_point - ovh; 1740 SCTP_INP_RUNLOCK(inp); 1741 } 1742 *optsize = sizeof(struct sctp_assoc_value); 1743 } 1744 break; 1745 case SCTP_GET_STAT_LOG: 1746 #ifdef SCTP_STAT_LOGGING 1747 error = sctp_fill_stat_log(optval, optsize); 1748 #else 1749 error = EOPNOTSUPP; 1750 #endif 1751 break; 1752 case SCTP_EVENTS: 1753 { 1754 struct sctp_event_subscribe *events; 1755 1756 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 1757 memset(events, 0, sizeof(*events)); 1758 SCTP_INP_RLOCK(inp); 1759 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 1760 events->sctp_data_io_event = 1; 1761 1762 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 1763 events->sctp_association_event = 1; 1764 1765 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 1766 events->sctp_address_event = 1; 1767 1768 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 1769 events->sctp_send_failure_event = 1; 1770 1771 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 1772 events->sctp_peer_error_event = 1; 1773 1774 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 1775 events->sctp_shutdown_event = 1; 1776 1777 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 1778 events->sctp_partial_delivery_event = 1; 1779 1780 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 1781 events->sctp_adaptation_layer_event = 1; 1782 1783 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 1784 events->sctp_authentication_event = 1; 1785 1786 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 1787 events->sctp_stream_reset_events = 1; 1788 SCTP_INP_RUNLOCK(inp); 1789 *optsize = sizeof(struct sctp_event_subscribe); 1790 } 1791 break; 1792 1793 case SCTP_ADAPTATION_LAYER: 1794 { 1795 uint32_t *value; 1796 1797 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1798 1799 SCTP_INP_RLOCK(inp); 1800 *value = inp->sctp_ep.adaptation_layer_indicator; 1801 SCTP_INP_RUNLOCK(inp); 1802 *optsize = sizeof(uint32_t); 1803 } 1804 break; 1805 case SCTP_SET_INITIAL_DBG_SEQ: 1806 { 1807 uint32_t *value; 1808 1809 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1810 SCTP_INP_RLOCK(inp); 1811 *value = inp->sctp_ep.initial_sequence_debug; 1812 SCTP_INP_RUNLOCK(inp); 1813 *optsize = sizeof(uint32_t); 1814 } 1815 break; 1816 case SCTP_GET_LOCAL_ADDR_SIZE: 1817 { 1818 uint32_t *value; 1819 1820 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1821 SCTP_INP_RLOCK(inp); 1822 *value = sctp_count_max_addresses(inp); 1823 SCTP_INP_RUNLOCK(inp); 1824 *optsize = sizeof(uint32_t); 1825 } 1826 break; 1827 
case SCTP_GET_REMOTE_ADDR_SIZE: 1828 { 1829 uint32_t *value; 1830 size_t size; 1831 struct sctp_nets *net; 1832 1833 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1834 /* FIXME MT: change to sctp_assoc_value? */ 1835 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 1836 1837 if (stcb) { 1838 size = 0; 1839 /* Count the sizes */ 1840 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1841 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || 1842 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 1843 size += sizeof(struct sockaddr_in6); 1844 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 1845 size += sizeof(struct sockaddr_in); 1846 } else { 1847 /* huh */ 1848 break; 1849 } 1850 } 1851 SCTP_TCB_UNLOCK(stcb); 1852 *value = (uint32_t) size; 1853 } else { 1854 error = ENOTCONN; 1855 } 1856 *optsize = sizeof(uint32_t); 1857 } 1858 break; 1859 case SCTP_GET_PEER_ADDRESSES: 1860 /* 1861 * Get the address information, an array is passed in to 1862 * fill up we pack it. 1863 */ 1864 { 1865 size_t cpsz, left; 1866 struct sockaddr_storage *sas; 1867 struct sctp_nets *net; 1868 struct sctp_getaddresses *saddr; 1869 1870 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 1871 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 1872 1873 if (stcb) { 1874 left = (*optsize) - sizeof(struct sctp_getaddresses); 1875 *optsize = sizeof(struct sctp_getaddresses); 1876 sas = (struct sockaddr_storage *)&saddr->addr[0]; 1877 1878 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1879 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || 1880 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 1881 cpsz = sizeof(struct sockaddr_in6); 1882 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 1883 cpsz = sizeof(struct sockaddr_in); 1884 } else { 1885 /* huh */ 1886 break; 1887 } 1888 if (left < cpsz) { 1889 /* not enough room. 
*/ 1890 break; 1891 } 1892 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 1893 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 1894 /* Must map the address */ 1895 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 1896 (struct sockaddr_in6 *)sas); 1897 } else { 1898 memcpy(sas, &net->ro._l_addr, cpsz); 1899 } 1900 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 1901 1902 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 1903 left -= cpsz; 1904 *optsize += cpsz; 1905 } 1906 SCTP_TCB_UNLOCK(stcb); 1907 } else { 1908 error = ENOENT; 1909 } 1910 } 1911 break; 1912 case SCTP_GET_LOCAL_ADDRESSES: 1913 { 1914 size_t limit, actual; 1915 struct sockaddr_storage *sas; 1916 struct sctp_getaddresses *saddr; 1917 1918 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 1919 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 1920 1921 sas = (struct sockaddr_storage *)&saddr->addr[0]; 1922 limit = *optsize - sizeof(sctp_assoc_t); 1923 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 1924 if (stcb) 1925 SCTP_TCB_UNLOCK(stcb); 1926 *optsize = sizeof(struct sockaddr_storage) + actual; 1927 } 1928 break; 1929 case SCTP_PEER_ADDR_PARAMS: 1930 { 1931 struct sctp_paddrparams *paddrp; 1932 struct sctp_nets *net; 1933 1934 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 1935 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 1936 1937 net = NULL; 1938 if (stcb) { 1939 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 1940 } else { 1941 /* 1942 * We increment here since 1943 * sctp_findassociation_ep_addr() wil do a 1944 * decrement if it finds the stcb as long as 1945 * the locked tcb (last argument) is NOT a 1946 * TCB.. aka NULL. 1947 */ 1948 SCTP_INP_INCR_REF(inp); 1949 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 1950 if (stcb == NULL) { 1951 SCTP_INP_DECR_REF(inp); 1952 } 1953 } 1954 1955 if (stcb) { 1956 /* Applys to the specific association */ 1957 paddrp->spp_flags = 0; 1958 if (net) { 1959 paddrp->spp_pathmaxrxt = net->failure_threshold; 1960 paddrp->spp_pathmtu = net->mtu; 1961 /* get flags for HB */ 1962 if (net->dest_state & SCTP_ADDR_NOHB) 1963 paddrp->spp_flags |= SPP_HB_DISABLE; 1964 else 1965 paddrp->spp_flags |= SPP_HB_ENABLE; 1966 /* get flags for PMTU */ 1967 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 1968 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 1969 } else { 1970 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 1971 } 1972 #ifdef INET 1973 if (net->ro._l_addr.sin.sin_family == AF_INET) { 1974 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 1975 paddrp->spp_flags |= SPP_IPV4_TOS; 1976 } 1977 #endif 1978 #ifdef INET6 1979 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 1980 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 1981 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 1982 } 1983 #endif 1984 } else { 1985 /* 1986 * No destination so return default 1987 * value 1988 */ 1989 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 1990 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 1991 #ifdef INET 1992 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 1993 paddrp->spp_flags |= SPP_IPV4_TOS; 1994 #endif 1995 #ifdef INET6 1996 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 1997 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 1998 #endif 1999 /* default settings should be these */ 2000 if (sctp_is_hb_timer_running(stcb)) { 2001 paddrp->spp_flags |= SPP_HB_ENABLE; 2002 } 2003 } 2004 
paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2005 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2006 SCTP_TCB_UNLOCK(stcb); 2007 } else { 2008 /* Use endpoint defaults */ 2009 SCTP_INP_RLOCK(inp); 2010 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2011 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2012 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2013 /* get inp's default */ 2014 #ifdef INET 2015 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2016 paddrp->spp_flags |= SPP_IPV4_TOS; 2017 #endif 2018 #ifdef INET6 2019 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2020 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2021 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2022 } 2023 #endif 2024 /* can't return this */ 2025 paddrp->spp_pathmaxrxt = 0; 2026 paddrp->spp_pathmtu = 0; 2027 /* default behavior, no stcb */ 2028 paddrp->spp_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE; 2029 2030 SCTP_INP_RUNLOCK(inp); 2031 } 2032 *optsize = sizeof(struct sctp_paddrparams); 2033 } 2034 break; 2035 case SCTP_GET_PEER_ADDR_INFO: 2036 { 2037 struct sctp_paddrinfo *paddri; 2038 struct sctp_nets *net; 2039 2040 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2041 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2042 2043 net = NULL; 2044 if (stcb) { 2045 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2046 } else { 2047 /* 2048 * We increment here since 2049 * sctp_findassociation_ep_addr() wil do a 2050 * decrement if it finds the stcb as long as 2051 * the locked tcb (last argument) is NOT a 2052 * TCB.. aka NULL. 2053 */ 2054 SCTP_INP_INCR_REF(inp); 2055 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2056 if (stcb == NULL) { 2057 SCTP_INP_DECR_REF(inp); 2058 } 2059 } 2060 2061 if ((stcb) && (net)) { 2062 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2063 paddri->spinfo_cwnd = net->cwnd; 2064 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2065 paddri->spinfo_rto = net->RTO; 2066 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2067 SCTP_TCB_UNLOCK(stcb); 2068 } else { 2069 if (stcb) { 2070 SCTP_TCB_UNLOCK(stcb); 2071 } 2072 error = ENOENT; 2073 } 2074 *optsize = sizeof(struct sctp_paddrinfo); 2075 } 2076 break; 2077 case SCTP_PCB_STATUS: 2078 { 2079 struct sctp_pcbinfo *spcb; 2080 2081 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2082 sctp_fill_pcbinfo(spcb); 2083 *optsize = sizeof(struct sctp_pcbinfo); 2084 } 2085 break; 2086 2087 case SCTP_STATUS: 2088 { 2089 struct sctp_nets *net; 2090 struct sctp_status *sstat; 2091 2092 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2093 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2094 2095 if (stcb == NULL) { 2096 error = EINVAL; 2097 break; 2098 } 2099 /* 2100 * I think passing the state is fine since 2101 * sctp_constants.h will be available to the user 2102 * land. 2103 */ 2104 sstat->sstat_state = stcb->asoc.state; 2105 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2106 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2107 /* 2108 * We can't include chunks that have been passed to 2109 * the socket layer. Only things in queue. 
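* Only the reassembly queue and the per-stream queues are counted below.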
2110 */ 2111 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2112 stcb->asoc.cnt_on_all_streams); 2113 2114 2115 sstat->sstat_instrms = stcb->asoc.streamincnt; 2116 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2117 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2118 memcpy(&sstat->sstat_primary.spinfo_address, 2119 &stcb->asoc.primary_destination->ro._l_addr, 2120 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2121 net = stcb->asoc.primary_destination; 2122 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2123 /* 2124 * Again the user can get info from sctp_constants.h 2125 * for what the state of the network is. 2126 */ 2127 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2128 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2129 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2130 sstat->sstat_primary.spinfo_rto = net->RTO; 2131 sstat->sstat_primary.spinfo_mtu = net->mtu; 2132 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2133 SCTP_TCB_UNLOCK(stcb); 2134 *optsize = sizeof(*sstat); 2135 } 2136 break; 2137 case SCTP_RTOINFO: 2138 { 2139 struct sctp_rtoinfo *srto; 2140 2141 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2142 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2143 2144 if (stcb) { 2145 srto->srto_initial = stcb->asoc.initial_rto; 2146 srto->srto_max = stcb->asoc.maxrto; 2147 srto->srto_min = stcb->asoc.minrto; 2148 SCTP_TCB_UNLOCK(stcb); 2149 } else { 2150 SCTP_INP_RLOCK(inp); 2151 srto->srto_initial = inp->sctp_ep.initial_rto; 2152 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2153 srto->srto_min = inp->sctp_ep.sctp_minrto; 2154 SCTP_INP_RUNLOCK(inp); 2155 } 2156 *optsize = sizeof(*srto); 2157 } 2158 break; 2159 case SCTP_ASSOCINFO: 2160 { 2161 struct sctp_assocparams *sasoc; 2162 2163 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2164 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2165 2166 if (stcb) { 2167 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2168 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2169 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2170 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2171 sasoc->sasoc_cookie_life = stcb->asoc.cookie_life; 2172 sasoc->sasoc_sack_delay = stcb->asoc.delayed_ack; 2173 sasoc->sasoc_sack_freq = stcb->asoc.sack_freq; 2174 SCTP_TCB_UNLOCK(stcb); 2175 } else { 2176 SCTP_INP_RLOCK(inp); 2177 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2178 sasoc->sasoc_number_peer_destinations = 0; 2179 sasoc->sasoc_peer_rwnd = 0; 2180 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2181 sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life; 2182 sasoc->sasoc_sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 2183 sasoc->sasoc_sack_freq = inp->sctp_ep.sctp_sack_freq; 2184 SCTP_INP_RUNLOCK(inp); 2185 } 2186 *optsize = sizeof(*sasoc); 2187 } 2188 break; 2189 case SCTP_DEFAULT_SEND_PARAM: 2190 { 2191 struct sctp_sndrcvinfo *s_info; 2192 2193 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2194 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2195 2196 if (stcb) { 2197 *s_info = stcb->asoc.def_send; 2198 SCTP_TCB_UNLOCK(stcb); 2199 } else { 2200 SCTP_INP_RLOCK(inp); 2201 *s_info = inp->def_send; 2202 SCTP_INP_RUNLOCK(inp); 2203 } 2204 *optsize = sizeof(*s_info); 2205 } 2206 break; 2207 case SCTP_INITMSG: 2208 { 2209 struct sctp_initmsg *sinit; 2210 2211 
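/* SCTP_INITMSG is endpoint level only: report the defaults used for new INIT chunks. */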
SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2212 SCTP_INP_RLOCK(inp); 2213 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2214 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2215 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2216 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2217 SCTP_INP_RUNLOCK(inp); 2218 *optsize = sizeof(*sinit); 2219 } 2220 break; 2221 case SCTP_PRIMARY_ADDR: 2222 /* we allow a "get" operation on this */ 2223 { 2224 struct sctp_setprim *ssp; 2225 2226 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2227 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2228 2229 if (stcb) { 2230 /* simply copy out the sockaddr_storage... */ 2231 memcpy(&ssp->ssp_addr, &stcb->asoc.primary_destination->ro._l_addr, 2232 ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len); 2233 SCTP_TCB_UNLOCK(stcb); 2234 } else { 2235 error = EINVAL; 2236 } 2237 *optsize = sizeof(*ssp); 2238 } 2239 break; 2240 2241 case SCTP_HMAC_IDENT: 2242 { 2243 struct sctp_hmacalgo *shmac; 2244 sctp_hmaclist_t *hmaclist; 2245 uint32_t size; 2246 int i; 2247 2248 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2249 2250 SCTP_INP_RLOCK(inp); 2251 hmaclist = inp->sctp_ep.local_hmacs; 2252 if (hmaclist == NULL) { 2253 /* no HMACs to return */ 2254 *optsize = sizeof(*shmac); 2255 SCTP_INP_RUNLOCK(inp); 2256 break; 2257 } 2258 /* is there room for all of the hmac ids? */ 2259 size = sizeof(*shmac) + (hmaclist->num_algo * 2260 sizeof(shmac->shmac_idents[0])); 2261 if ((size_t)(*optsize) < size) { 2262 error = EINVAL; 2263 SCTP_INP_RUNLOCK(inp); 2264 break; 2265 } 2266 /* copy in the list */ 2267 for (i = 0; i < hmaclist->num_algo; i++) 2268 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2269 SCTP_INP_RUNLOCK(inp); 2270 *optsize = size; 2271 break; 2272 } 2273 case SCTP_AUTH_ACTIVE_KEY: 2274 { 2275 struct sctp_authkeyid *scact; 2276 2277 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2278 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2279 2280 if (stcb) { 2281 /* get the active key on the assoc */ 2282 scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid; 2283 SCTP_TCB_UNLOCK(stcb); 2284 } else { 2285 /* get the endpoint active key */ 2286 SCTP_INP_RLOCK(inp); 2287 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2288 SCTP_INP_RUNLOCK(inp); 2289 } 2290 *optsize = sizeof(*scact); 2291 break; 2292 } 2293 case SCTP_LOCAL_AUTH_CHUNKS: 2294 { 2295 struct sctp_authchunks *sac; 2296 sctp_auth_chklist_t *chklist = NULL; 2297 size_t size = 0; 2298 2299 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2300 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2301 2302 if (stcb) { 2303 /* get off the assoc */ 2304 chklist = stcb->asoc.local_auth_chunks; 2305 /* is there enough space? */ 2306 size = sctp_auth_get_chklist_size(chklist); 2307 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2308 error = EINVAL; 2309 } else { 2310 /* copy in the chunks */ 2311 sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2312 } 2313 SCTP_TCB_UNLOCK(stcb); 2314 } else { 2315 /* get off the endpoint */ 2316 SCTP_INP_RLOCK(inp); 2317 chklist = inp->sctp_ep.local_auth_chunks; 2318 /* is there enough space? 
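(the caller's buffer must hold struct sctp_authchunks plus the serialized chunk list)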
*/ 2319 size = sctp_auth_get_chklist_size(chklist); 2320 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2321 error = EINVAL; 2322 } else { 2323 /* copy in the chunks */ 2324 sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2325 } 2326 SCTP_INP_RUNLOCK(inp); 2327 } 2328 *optsize = sizeof(struct sctp_authchunks) + size; 2329 break; 2330 } 2331 case SCTP_PEER_AUTH_CHUNKS: 2332 { 2333 struct sctp_authchunks *sac; 2334 sctp_auth_chklist_t *chklist = NULL; 2335 size_t size = 0; 2336 2337 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2338 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2339 2340 if (stcb) { 2341 /* get off the assoc */ 2342 chklist = stcb->asoc.peer_auth_chunks; 2343 /* is there enough space? */ 2344 size = sctp_auth_get_chklist_size(chklist); 2345 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2346 error = EINVAL; 2347 } else { 2348 /* copy in the chunks */ 2349 sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2350 } 2351 SCTP_TCB_UNLOCK(stcb); 2352 } else { 2353 error = ENOENT; 2354 } 2355 *optsize = sizeof(struct sctp_authchunks) + size; 2356 break; 2357 } 2358 2359 2360 default: 2361 error = ENOPROTOOPT; 2362 *optsize = 0; 2363 break; 2364 } /* end switch (sopt->sopt_name) */ 2365 return (error); 2366 } 2367 2368 static int 2369 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2370 void *p) 2371 { 2372 int error, set_opt; 2373 uint32_t *mopt; 2374 struct sctp_tcb *stcb = NULL; 2375 struct sctp_inpcb *inp; 2376 uint32_t vrf_id; 2377 2378 if (optval == NULL) { 2379 printf("optval is NULL\n"); 2380 return (EINVAL); 2381 } 2382 inp = (struct sctp_inpcb *)so->so_pcb; 2383 if (inp == 0) { 2384 printf("inp is NULL?\n"); 2385 return EINVAL; 2386 } 2387 vrf_id = inp->def_vrf_id; 2388 2389 error = 0; 2390 switch (optname) { 2391 case SCTP_NODELAY: 2392 case SCTP_AUTOCLOSE: 2393 case SCTP_AUTO_ASCONF: 2394 case SCTP_EXPLICIT_EOR: 2395 case SCTP_DISABLE_FRAGMENTS: 2396 case SCTP_USE_EXT_RCVINFO: 2397 case SCTP_I_WANT_MAPPED_V4_ADDR: 2398 /* copy in the option value */ 2399 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2400 set_opt = 0; 2401 if (error) 2402 break; 2403 switch (optname) { 2404 case SCTP_DISABLE_FRAGMENTS: 2405 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2406 break; 2407 case SCTP_AUTO_ASCONF: 2408 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2409 break; 2410 case SCTP_EXPLICIT_EOR: 2411 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2412 break; 2413 case SCTP_USE_EXT_RCVINFO: 2414 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2415 break; 2416 case SCTP_I_WANT_MAPPED_V4_ADDR: 2417 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2418 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2419 } else { 2420 return (EINVAL); 2421 } 2422 break; 2423 case SCTP_NODELAY: 2424 set_opt = SCTP_PCB_FLAGS_NODELAY; 2425 break; 2426 case SCTP_AUTOCLOSE: 2427 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2428 /* 2429 * The value is in ticks. Note this does not effect 2430 * old associations, only new ones. 
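* A value of zero turns auto-close off for this endpoint.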
2431 */ 2432 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2433 break; 2434 } 2435 SCTP_INP_WLOCK(inp); 2436 if (*mopt != 0) { 2437 sctp_feature_on(inp, set_opt); 2438 } else { 2439 sctp_feature_off(inp, set_opt); 2440 } 2441 SCTP_INP_WUNLOCK(inp); 2442 break; 2443 case SCTP_PARTIAL_DELIVERY_POINT: 2444 { 2445 uint32_t *value; 2446 2447 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2448 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2449 error = EINVAL; 2450 break; 2451 } 2452 inp->partial_delivery_point = *value; 2453 } 2454 break; 2455 case SCTP_FRAGMENT_INTERLEAVE: 2456 /* not yet until we re-write sctp_recvmsg() */ 2457 { 2458 uint32_t *on_off; 2459 2460 SCTP_CHECK_AND_CAST(on_off, optval, uint32_t, optsize); 2461 if (*on_off) { 2462 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2463 } else { 2464 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2465 } 2466 } 2467 break; 2468 case SCTP_CMT_ON_OFF: 2469 { 2470 struct sctp_assoc_value *av; 2471 2472 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2473 if (sctp_cmt_on_off) { 2474 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2475 if (stcb) { 2476 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; 2477 SCTP_TCB_UNLOCK(stcb); 2478 } else { 2479 error = ENOTCONN; 2480 } 2481 } else { 2482 error = ENOPROTOOPT; 2483 } 2484 } 2485 break; 2486 case SCTP_CLR_STAT_LOG: 2487 #ifdef SCTP_STAT_LOGGING 2488 sctp_clr_stat_log(); 2489 #else 2490 error = EOPNOTSUPP; 2491 #endif 2492 break; 2493 case SCTP_CONTEXT: 2494 { 2495 struct sctp_assoc_value *av; 2496 2497 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2498 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2499 2500 if (stcb) { 2501 stcb->asoc.context = av->assoc_value; 2502 SCTP_TCB_UNLOCK(stcb); 2503 } else { 2504 SCTP_INP_WLOCK(inp); 2505 inp->sctp_context = av->assoc_value; 2506 SCTP_INP_WUNLOCK(inp); 2507 } 2508 } 2509 break; 2510 case SCTP_VRF_ID: 2511 { 2512 uint32_t *vrf_id; 2513 2514 SCTP_CHECK_AND_CAST(vrf_id, optval, uint32_t, optsize); 2515 if (*vrf_id > SCTP_MAX_VRF_ID) { 2516 error = EINVAL; 2517 break; 2518 } 2519 inp->def_vrf_id = *vrf_id; 2520 break; 2521 } 2522 case SCTP_DEL_VRF_ID: 2523 { 2524 error = EOPNOTSUPP; 2525 break; 2526 } 2527 case SCTP_ADD_VRF_ID: 2528 { 2529 error = EOPNOTSUPP; 2530 break; 2531 } 2532 2533 case SCTP_DELAYED_ACK_TIME: 2534 { 2535 struct sctp_assoc_value *tm; 2536 2537 SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, optsize); 2538 SCTP_FIND_STCB(inp, stcb, tm->assoc_id); 2539 2540 if (stcb) { 2541 stcb->asoc.delayed_ack = tm->assoc_value; 2542 SCTP_TCB_UNLOCK(stcb); 2543 } else { 2544 SCTP_INP_WLOCK(inp); 2545 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value); 2546 SCTP_INP_WUNLOCK(inp); 2547 } 2548 break; 2549 } 2550 case SCTP_AUTH_CHUNK: 2551 { 2552 struct sctp_authchunk *sauth; 2553 2554 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 2555 2556 SCTP_INP_WLOCK(inp); 2557 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) 2558 error = EINVAL; 2559 SCTP_INP_WUNLOCK(inp); 2560 break; 2561 } 2562 case SCTP_AUTH_KEY: 2563 { 2564 struct sctp_authkey *sca; 2565 struct sctp_keyhead *shared_keys; 2566 sctp_sharedkey_t *shared_key; 2567 sctp_key_t *key = NULL; 2568 size_t size; 2569 2570 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 2571 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id) 2572 size = optsize - sizeof(*sca); 2573 2574 if (stcb) { 2575 /* set it on the assoc */ 2576 shared_keys = 
&stcb->asoc.shared_keys; 2577 /* clear the cached keys for this key id */ 2578 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 2579 /* 2580 * create the new shared key and 2581 * insert/replace it 2582 */ 2583 if (size > 0) { 2584 key = sctp_set_key(sca->sca_key, (uint32_t) size); 2585 if (key == NULL) { 2586 error = ENOMEM; 2587 SCTP_TCB_UNLOCK(stcb); 2588 break; 2589 } 2590 } 2591 shared_key = sctp_alloc_sharedkey(); 2592 if (shared_key == NULL) { 2593 sctp_free_key(key); 2594 error = ENOMEM; 2595 SCTP_TCB_UNLOCK(stcb); 2596 break; 2597 } 2598 shared_key->key = key; 2599 shared_key->keyid = sca->sca_keynumber; 2600 sctp_insert_sharedkey(shared_keys, shared_key); 2601 SCTP_TCB_UNLOCK(stcb); 2602 } else { 2603 /* set it on the endpoint */ 2604 SCTP_INP_WLOCK(inp); 2605 shared_keys = &inp->sctp_ep.shared_keys; 2606 /* 2607 * clear the cached keys on all assocs for 2608 * this key id 2609 */ 2610 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 2611 /* 2612 * create the new shared key and 2613 * insert/replace it 2614 */ 2615 if (size > 0) { 2616 key = sctp_set_key(sca->sca_key, (uint32_t) size); 2617 if (key == NULL) { 2618 error = ENOMEM; 2619 SCTP_INP_WUNLOCK(inp); 2620 break; 2621 } 2622 } 2623 shared_key = sctp_alloc_sharedkey(); 2624 if (shared_key == NULL) { 2625 sctp_free_key(key); 2626 error = ENOMEM; 2627 SCTP_INP_WUNLOCK(inp); 2628 break; 2629 } 2630 shared_key->key = key; 2631 shared_key->keyid = sca->sca_keynumber; 2632 sctp_insert_sharedkey(shared_keys, shared_key); 2633 SCTP_INP_WUNLOCK(inp); 2634 } 2635 break; 2636 } 2637 case SCTP_HMAC_IDENT: 2638 { 2639 struct sctp_hmacalgo *shmac; 2640 sctp_hmaclist_t *hmaclist; 2641 uint32_t hmacid; 2642 size_t size, i; 2643 2644 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 2645 size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]); 2646 hmaclist = sctp_alloc_hmaclist(size); 2647 if (hmaclist == NULL) { 2648 error = ENOMEM; 2649 break; 2650 } 2651 for (i = 0; i < size; i++) { 2652 hmacid = shmac->shmac_idents[i]; 2653 if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) { 2654 /* invalid HMACs were found */ ; 2655 error = EINVAL; 2656 sctp_free_hmaclist(hmaclist); 2657 goto sctp_set_hmac_done; 2658 } 2659 } 2660 /* set it on the endpoint */ 2661 SCTP_INP_WLOCK(inp); 2662 if (inp->sctp_ep.local_hmacs) 2663 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2664 inp->sctp_ep.local_hmacs = hmaclist; 2665 SCTP_INP_WUNLOCK(inp); 2666 sctp_set_hmac_done: 2667 break; 2668 } 2669 case SCTP_AUTH_ACTIVE_KEY: 2670 { 2671 struct sctp_authkeyid *scact; 2672 2673 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize); 2674 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2675 2676 /* set the active key on the right place */ 2677 if (stcb) { 2678 /* set the active key on the assoc */ 2679 if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) 2680 error = EINVAL; 2681 SCTP_TCB_UNLOCK(stcb); 2682 } else { 2683 /* set the active key on the endpoint */ 2684 SCTP_INP_WLOCK(inp); 2685 if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) 2686 error = EINVAL; 2687 SCTP_INP_WUNLOCK(inp); 2688 } 2689 break; 2690 } 2691 case SCTP_AUTH_DELETE_KEY: 2692 { 2693 struct sctp_authkeyid *scdel; 2694 2695 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize); 2696 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 2697 2698 /* delete the key from the right place */ 2699 if (stcb) { 2700 if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) 2701 error = EINVAL; 2702 SCTP_TCB_UNLOCK(stcb); 2703 } else 
{ 2704 SCTP_INP_WLOCK(inp); 2705 if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) 2706 error = EINVAL; 2707 SCTP_INP_WUNLOCK(inp); 2708 } 2709 break; 2710 } 2711 2712 case SCTP_RESET_STREAMS: 2713 { 2714 struct sctp_stream_reset *strrst; 2715 uint8_t send_in = 0, send_tsn = 0, send_out = 0; 2716 int i; 2717 2718 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 2719 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 2720 2721 if (stcb == NULL) { 2722 error = ENOENT; 2723 break; 2724 } 2725 if (stcb->asoc.peer_supports_strreset == 0) { 2726 /* 2727 * Peer does not support it, we return 2728 * protocol not supported since this is true 2729 * for this feature and this peer, not the 2730 * socket request in general. 2731 */ 2732 error = EPROTONOSUPPORT; 2733 SCTP_TCB_UNLOCK(stcb); 2734 break; 2735 } 2736 if (stcb->asoc.stream_reset_outstanding) { 2737 error = EALREADY; 2738 SCTP_TCB_UNLOCK(stcb); 2739 break; 2740 } 2741 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 2742 send_in = 1; 2743 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 2744 send_out = 1; 2745 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 2746 send_in = 1; 2747 send_out = 1; 2748 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 2749 send_tsn = 1; 2750 } else { 2751 error = EINVAL; 2752 SCTP_TCB_UNLOCK(stcb); 2753 break; 2754 } 2755 for (i = 0; i < strrst->strrst_num_streams; i++) { 2756 if ((send_in) && 2757 2758 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 2759 error = EINVAL; 2760 goto get_out; 2761 } 2762 if ((send_out) && 2763 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 2764 error = EINVAL; 2765 goto get_out; 2766 } 2767 } 2768 if (error) { 2769 get_out: 2770 SCTP_TCB_UNLOCK(stcb); 2771 break; 2772 } 2773 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 2774 strrst->strrst_list, 2775 send_out, (stcb->asoc.str_reset_seq_in - 3), 2776 send_in, send_tsn); 2777 2778 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ); 2779 SCTP_TCB_UNLOCK(stcb); 2780 } 2781 break; 2782 2783 case SCTP_CONNECT_X: 2784 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 2785 error = EINVAL; 2786 break; 2787 } 2788 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 2789 break; 2790 2791 case SCTP_CONNECT_X_DELAYED: 2792 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 2793 error = EINVAL; 2794 break; 2795 } 2796 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 2797 break; 2798 2799 case SCTP_CONNECT_X_COMPLETE: 2800 { 2801 struct sockaddr *sa; 2802 struct sctp_nets *net; 2803 2804 /* FIXME MT: check correct? */ 2805 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 2806 2807 /* find tcb */ 2808 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 2809 SCTP_INP_RLOCK(inp); 2810 stcb = LIST_FIRST(&inp->sctp_asoc_list); 2811 if (stcb) { 2812 SCTP_TCB_LOCK(stcb); 2813 net = sctp_findnet(stcb, sa); 2814 } 2815 SCTP_INP_RUNLOCK(inp); 2816 } else { 2817 /* 2818 * We increment here since 2819 * sctp_findassociation_ep_addr() wil do a 2820 * decrement if it finds the stcb as long as 2821 * the locked tcb (last argument) is NOT a 2822 * TCB.. aka NULL. 
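* The reference is dropped again below if no association is found.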
2823 */ 2824 SCTP_INP_INCR_REF(inp); 2825 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 2826 if (stcb == NULL) { 2827 SCTP_INP_DECR_REF(inp); 2828 } 2829 } 2830 2831 if (stcb == NULL) { 2832 error = ENOENT; 2833 break; 2834 } 2835 if (stcb->asoc.delayed_connection == 1) { 2836 stcb->asoc.delayed_connection = 0; 2837 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2838 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 2839 stcb->asoc.primary_destination, 2840 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 2841 sctp_send_initiate(inp, stcb); 2842 } else { 2843 /* 2844 * already expired or did not use delayed 2845 * connectx 2846 */ 2847 error = EALREADY; 2848 } 2849 SCTP_TCB_UNLOCK(stcb); 2850 } 2851 break; 2852 case SCTP_MAXBURST: 2853 { 2854 uint8_t *burst; 2855 2856 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 2857 2858 SCTP_INP_WLOCK(inp); 2859 if (*burst) { 2860 inp->sctp_ep.max_burst = *burst; 2861 } 2862 SCTP_INP_WUNLOCK(inp); 2863 } 2864 break; 2865 case SCTP_MAXSEG: 2866 { 2867 struct sctp_assoc_value *av; 2868 int ovh; 2869 2870 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2871 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2872 2873 if (stcb) { 2874 error = EINVAL; 2875 SCTP_TCB_UNLOCK(stcb); 2876 } else { 2877 SCTP_INP_WLOCK(inp); 2878 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2879 ovh = SCTP_MED_OVERHEAD; 2880 } else { 2881 ovh = SCTP_MED_V4_OVERHEAD; 2882 } 2883 /* 2884 * FIXME MT: I think this is not in tune 2885 * with the API ID 2886 */ 2887 if (av->assoc_value) { 2888 inp->sctp_frag_point = (av->assoc_value + ovh); 2889 } else { 2890 error = EINVAL; 2891 } 2892 SCTP_INP_WUNLOCK(inp); 2893 } 2894 } 2895 break; 2896 case SCTP_EVENTS: 2897 { 2898 struct sctp_event_subscribe *events; 2899 2900 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 2901 2902 SCTP_INP_WLOCK(inp); 2903 if (events->sctp_data_io_event) { 2904 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 2905 } else { 2906 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 2907 } 2908 2909 if (events->sctp_association_event) { 2910 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 2911 } else { 2912 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 2913 } 2914 2915 if (events->sctp_address_event) { 2916 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 2917 } else { 2918 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 2919 } 2920 2921 if (events->sctp_send_failure_event) { 2922 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 2923 } else { 2924 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 2925 } 2926 2927 if (events->sctp_peer_error_event) { 2928 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 2929 } else { 2930 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 2931 } 2932 2933 if (events->sctp_shutdown_event) { 2934 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 2935 } else { 2936 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 2937 } 2938 2939 if (events->sctp_partial_delivery_event) { 2940 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 2941 } else { 2942 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 2943 } 2944 2945 if (events->sctp_adaptation_layer_event) { 2946 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 2947 } else { 2948 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 2949 } 2950 2951 if (events->sctp_authentication_event) { 2952 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 2953 } else { 2954 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 2955 } 2956 2957 if 
(events->sctp_stream_reset_events) { 2958 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 2959 } else { 2960 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 2961 } 2962 SCTP_INP_WUNLOCK(inp); 2963 } 2964 break; 2965 2966 case SCTP_ADAPTATION_LAYER: 2967 { 2968 struct sctp_setadaptation *adap_bits; 2969 2970 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 2971 SCTP_INP_WLOCK(inp); 2972 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 2973 SCTP_INP_WUNLOCK(inp); 2974 } 2975 break; 2976 #ifdef SCTP_DEBUG 2977 case SCTP_SET_INITIAL_DBG_SEQ: 2978 { 2979 uint32_t *vvv; 2980 2981 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 2982 SCTP_INP_WLOCK(inp); 2983 inp->sctp_ep.initial_sequence_debug = *vvv; 2984 SCTP_INP_WUNLOCK(inp); 2985 } 2986 break; 2987 #endif 2988 case SCTP_DEFAULT_SEND_PARAM: 2989 { 2990 struct sctp_sndrcvinfo *s_info; 2991 2992 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 2993 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2994 2995 if (stcb) { 2996 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 2997 stcb->asoc.def_send = *s_info; 2998 } else { 2999 error = EINVAL; 3000 } 3001 SCTP_TCB_UNLOCK(stcb); 3002 } else { 3003 SCTP_INP_WLOCK(inp); 3004 inp->def_send = *s_info; 3005 SCTP_INP_WUNLOCK(inp); 3006 } 3007 } 3008 break; 3009 case SCTP_PEER_ADDR_PARAMS: 3010 /* Applys to the specific association */ 3011 { 3012 struct sctp_paddrparams *paddrp; 3013 struct sctp_nets *net; 3014 3015 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3016 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3017 net = NULL; 3018 if (stcb) { 3019 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3020 } else { 3021 /* 3022 * We increment here since 3023 * sctp_findassociation_ep_addr() wil do a 3024 * decrement if it finds the stcb as long as 3025 * the locked tcb (last argument) is NOT a 3026 * TCB.. aka NULL. 3027 */ 3028 SCTP_INP_INCR_REF(inp); 3029 stcb = sctp_findassociation_ep_addr(&inp, 3030 (struct sockaddr *)&paddrp->spp_address, 3031 &net, NULL, NULL); 3032 if (stcb == NULL) { 3033 SCTP_INP_DECR_REF(inp); 3034 } 3035 } 3036 3037 3038 if (stcb) { 3039 /************************TCB SPECIFIC SET ******************/ 3040 /* 3041 * do we change the timer for HB, we run 3042 * only one? 3043 */ 3044 if (paddrp->spp_hbinterval) 3045 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3046 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3047 stcb->asoc.heart_beat_delay = 0; 3048 3049 /* network sets ? 
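Apply per-destination parameters only when a specific address was supplied.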
*/ 3050 if (net) { 3051 /************************NET SPECIFIC SET ******************/ 3052 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3053 /* on demand HB */ 3054 sctp_send_hb(stcb, 1, net); 3055 } 3056 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3057 net->dest_state |= SCTP_ADDR_NOHB; 3058 } 3059 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3060 net->dest_state &= ~SCTP_ADDR_NOHB; 3061 } 3062 if (paddrp->spp_flags & SPP_PMTUD_DISABLE) { 3063 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3064 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3065 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3066 } 3067 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3068 net->mtu = paddrp->spp_pathmtu; 3069 if (net->mtu < stcb->asoc.smallest_mtu) 3070 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3071 } 3072 } 3073 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3074 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3075 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3076 } 3077 } 3078 if (paddrp->spp_pathmaxrxt) 3079 net->failure_threshold = paddrp->spp_pathmaxrxt; 3080 #ifdef INET 3081 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3082 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3083 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3084 } 3085 } 3086 #endif 3087 #ifdef INET6 3088 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3089 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3090 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3091 } 3092 } 3093 #endif 3094 } else { 3095 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3096 if (paddrp->spp_pathmaxrxt) 3097 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3098 3099 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3100 /* Turn back on the timer */ 3101 stcb->asoc.hb_is_disabled = 0; 3102 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3103 } 3104 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3105 int cnt_of_unconf = 0; 3106 struct sctp_nets *lnet; 3107 3108 stcb->asoc.hb_is_disabled = 1; 3109 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3110 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3111 cnt_of_unconf++; 3112 } 3113 } 3114 /* 3115 * stop the timer ONLY if we 3116 * have no unconfirmed 3117 * addresses 3118 */ 3119 if (cnt_of_unconf == 0) { 3120 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3121 } 3122 } 3123 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3124 /* start up the timer. 
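Heartbeats resume for the association as a whole.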
*/ 3125 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3126 } 3127 #ifdef INET 3128 if (paddrp->spp_flags & SPP_IPV4_TOS) 3129 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3130 #endif 3131 #ifdef INET6 3132 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3133 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3134 #endif 3135 3136 } 3137 SCTP_TCB_UNLOCK(stcb); 3138 } else { 3139 /************************NO TCB, SET TO default stuff ******************/ 3140 SCTP_INP_WLOCK(inp); 3141 /* 3142 * For the TOS/FLOWLABEL stuff you set it 3143 * with the options on the socket 3144 */ 3145 if (paddrp->spp_pathmaxrxt) { 3146 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3147 } 3148 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3149 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3150 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3151 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3152 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3153 } 3154 SCTP_INP_WUNLOCK(inp); 3155 } 3156 } 3157 break; 3158 case SCTP_RTOINFO: 3159 { 3160 struct sctp_rtoinfo *srto; 3161 3162 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3163 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3164 3165 if (stcb) { 3166 /* Set in ms we hope :-) */ 3167 if (srto->srto_initial) 3168 stcb->asoc.initial_rto = srto->srto_initial; 3169 if (srto->srto_max) 3170 stcb->asoc.maxrto = srto->srto_max; 3171 if (srto->srto_min) 3172 stcb->asoc.minrto = srto->srto_min; 3173 SCTP_TCB_UNLOCK(stcb); 3174 } else { 3175 SCTP_INP_WLOCK(inp); 3176 /* 3177 * If we have a null asoc, its default for 3178 * the endpoint 3179 */ 3180 if (srto->srto_initial) 3181 inp->sctp_ep.initial_rto = srto->srto_initial; 3182 if (srto->srto_max) 3183 inp->sctp_ep.sctp_maxrto = srto->srto_max; 3184 if (srto->srto_min) 3185 inp->sctp_ep.sctp_minrto = srto->srto_min; 3186 SCTP_INP_WUNLOCK(inp); 3187 } 3188 } 3189 break; 3190 case SCTP_ASSOCINFO: 3191 { 3192 struct sctp_assocparams *sasoc; 3193 3194 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 3195 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 3196 3197 if (stcb) { 3198 if (sasoc->sasoc_asocmaxrxt) 3199 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 3200 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 3201 sasoc->sasoc_peer_rwnd = 0; 3202 sasoc->sasoc_local_rwnd = 0; 3203 if (stcb->asoc.cookie_life) 3204 stcb->asoc.cookie_life = sasoc->sasoc_cookie_life; 3205 stcb->asoc.delayed_ack = sasoc->sasoc_sack_delay; 3206 if (sasoc->sasoc_sack_freq) { 3207 stcb->asoc.sack_freq = sasoc->sasoc_sack_freq; 3208 } 3209 SCTP_TCB_UNLOCK(stcb); 3210 } else { 3211 SCTP_INP_WLOCK(inp); 3212 if (sasoc->sasoc_asocmaxrxt) 3213 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 3214 sasoc->sasoc_number_peer_destinations = 0; 3215 sasoc->sasoc_peer_rwnd = 0; 3216 sasoc->sasoc_local_rwnd = 0; 3217 if (sasoc->sasoc_cookie_life) 3218 inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life; 3219 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sasoc->sasoc_sack_delay); 3220 if (sasoc->sasoc_sack_freq) { 3221 inp->sctp_ep.sctp_sack_freq = sasoc->sasoc_sack_freq; 3222 } 3223 SCTP_INP_WUNLOCK(inp); 3224 } 3225 } 3226 break; 3227 case SCTP_INITMSG: 3228 { 3229 struct sctp_initmsg *sinit; 3230 3231 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 3232 SCTP_INP_WLOCK(inp); 3233 if (sinit->sinit_num_ostreams) 3234 inp->sctp_ep.pre_open_stream_count = 
sinit->sinit_num_ostreams; 3235 3236 if (sinit->sinit_max_instreams) 3237 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 3238 3239 if (sinit->sinit_max_attempts) 3240 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 3241 3242 if (sinit->sinit_max_init_timeo) 3243 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 3244 SCTP_INP_WUNLOCK(inp); 3245 } 3246 break; 3247 case SCTP_PRIMARY_ADDR: 3248 { 3249 struct sctp_setprim *spa; 3250 struct sctp_nets *net, *lnet; 3251 3252 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 3253 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 3254 3255 net = NULL; 3256 if (stcb) { 3257 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 3258 } else { 3259 /* 3260 * We increment here since 3261 * sctp_findassociation_ep_addr() wil do a 3262 * decrement if it finds the stcb as long as 3263 * the locked tcb (last argument) is NOT a 3264 * TCB.. aka NULL. 3265 */ 3266 SCTP_INP_INCR_REF(inp); 3267 stcb = sctp_findassociation_ep_addr(&inp, 3268 (struct sockaddr *)&spa->ssp_addr, 3269 &net, NULL, NULL); 3270 if (stcb == NULL) { 3271 SCTP_INP_DECR_REF(inp); 3272 } 3273 } 3274 3275 if ((stcb) && (net)) { 3276 if ((net != stcb->asoc.primary_destination) && 3277 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 3278 /* Ok we need to set it */ 3279 lnet = stcb->asoc.primary_destination; 3280 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 3281 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 3282 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 3283 } 3284 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 3285 } 3286 } 3287 } else { 3288 error = EINVAL; 3289 } 3290 if (stcb) { 3291 SCTP_TCB_UNLOCK(stcb); 3292 } 3293 } 3294 break; 3295 case SCTP_SET_DYNAMIC_PRIMARY: 3296 { 3297 union sctp_sockstore *ss; 3298 3299 error = priv_check_cred(curthread->td_ucred, 3300 PRIV_NETINET_RESERVEDPORT, 3301 SUSER_ALLOWJAIL); 3302 if (error) 3303 break; 3304 3305 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 3306 /* SUPER USER CHECK? */ 3307 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 3308 } 3309 break; 3310 case SCTP_SET_PEER_PRIMARY_ADDR: 3311 { 3312 struct sctp_setpeerprim *sspp; 3313 3314 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 3315 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 3316 3317 if (stcb) { 3318 if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) { 3319 error = EINVAL; 3320 } 3321 } else { 3322 error = EINVAL; 3323 } 3324 SCTP_TCB_UNLOCK(stcb); 3325 } 3326 break; 3327 case SCTP_BINDX_ADD_ADDR: 3328 { 3329 struct sctp_getaddresses *addrs; 3330 struct sockaddr *addr_touse; 3331 struct sockaddr_in sin; 3332 3333 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 3334 3335 /* see if we're bound all already! 
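bindx makes no sense on a wildcard-bound endpoint.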
*/ 3336 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3337 error = EINVAL; 3338 break; 3339 } 3340 /* Is the VRF one we have */ 3341 addr_touse = addrs->addr; 3342 #if defined(INET6) 3343 if (addrs->addr->sa_family == AF_INET6) { 3344 struct sockaddr_in6 *sin6; 3345 3346 sin6 = (struct sockaddr_in6 *)addr_touse; 3347 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 3348 in6_sin6_2_sin(&sin, sin6); 3349 addr_touse = (struct sockaddr *)&sin; 3350 } 3351 } 3352 #endif 3353 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 3354 if (p == NULL) { 3355 /* Can't get proc for Net/Open BSD */ 3356 error = EINVAL; 3357 break; 3358 } 3359 error = sctp_inpcb_bind(so, addr_touse, p); 3360 break; 3361 } 3362 /* 3363 * No locks required here since bind and mgmt_ep_sa 3364 * all do their own locking. If we do something for 3365 * the FIX: below we may need to lock in that case. 3366 */ 3367 if (addrs->sget_assoc_id == 0) { 3368 /* add the address */ 3369 struct sctp_inpcb *lep; 3370 3371 ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport; 3372 lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id); 3373 if (lep != NULL) { 3374 /* 3375 * We must decrement the refcount 3376 * since we have the ep already and 3377 * are binding. No remove going on 3378 * here. 3379 */ 3380 SCTP_INP_DECR_REF(inp); 3381 } 3382 if (lep == inp) { 3383 /* already bound to it.. ok */ 3384 break; 3385 } else if (lep == NULL) { 3386 ((struct sockaddr_in *)addr_touse)->sin_port = 0; 3387 error = sctp_addr_mgmt_ep_sa(inp, addr_touse, 3388 SCTP_ADD_IP_ADDRESS, vrf_id); 3389 } else { 3390 error = EADDRNOTAVAIL; 3391 } 3392 if (error) 3393 break; 3394 3395 } else { 3396 /* 3397 * FIX: decide whether we allow assoc based 3398 * bindx 3399 */ 3400 } 3401 } 3402 break; 3403 case SCTP_BINDX_REM_ADDR: 3404 { 3405 struct sctp_getaddresses *addrs; 3406 struct sockaddr *addr_touse; 3407 struct sockaddr_in sin; 3408 3409 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 3410 /* see if we're bound all already! */ 3411 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3412 error = EINVAL; 3413 break; 3414 } 3415 addr_touse = addrs->addr; 3416 #if defined(INET6) 3417 if (addrs->addr->sa_family == AF_INET6) { 3418 struct sockaddr_in6 *sin6; 3419 3420 sin6 = (struct sockaddr_in6 *)addr_touse; 3421 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 3422 in6_sin6_2_sin(&sin, sin6); 3423 addr_touse = (struct sockaddr *)&sin; 3424 } 3425 } 3426 #endif 3427 /* 3428 * No lock required mgmt_ep_sa does its own locking. 3429 * If the FIX: below is ever changed we may need to 3430 * lock before calling association level binding. 3431 */ 3432 if (addrs->sget_assoc_id == 0) { 3433 /* delete the address */ 3434 sctp_addr_mgmt_ep_sa(inp, addr_touse, 3435 SCTP_DEL_IP_ADDRESS, vrf_id); 3436 } else { 3437 /* 3438 * FIX: decide whether we allow assoc based 3439 * bindx 3440 */ 3441 } 3442 } 3443 break; 3444 default: 3445 error = ENOPROTOOPT; 3446 break; 3447 } /* end switch (opt) */ 3448 return (error); 3449 } 3450 3451 3452 int 3453 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 3454 { 3455 void *optval = NULL; 3456 size_t optsize = 0; 3457 struct sctp_inpcb *inp; 3458 void *p; 3459 int error = 0; 3460 3461 inp = (struct sctp_inpcb *)so->so_pcb; 3462 if (inp == 0) { 3463 /* I made the same as TCP since we are not setup? */ 3464 return (ECONNRESET); 3465 } 3466 if (sopt->sopt_level != IPPROTO_SCTP) { 3467 /* wrong proto level... 
send back up to IP */ 3468 #ifdef INET6 3469 if (INP_CHECK_SOCKAF(so, AF_INET6)) 3470 error = ip6_ctloutput(so, sopt); 3471 else 3472 #endif /* INET6 */ 3473 error = ip_ctloutput(so, sopt); 3474 return (error); 3475 } 3476 optsize = sopt->sopt_valsize; 3477 if (optsize) { 3478 SCTP_MALLOC(optval, void *, optsize, "SCTPSockOpt"); 3479 if (optval == NULL) { 3480 return (ENOBUFS); 3481 } 3482 error = sooptcopyin(sopt, optval, optsize, optsize); 3483 if (error) { 3484 SCTP_FREE(optval); 3485 goto out; 3486 } 3487 } 3488 p = (void *)sopt->sopt_td; 3489 if (sopt->sopt_dir == SOPT_SET) { 3490 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 3491 } else if (sopt->sopt_dir == SOPT_GET) { 3492 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 3493 } else { 3494 error = EINVAL; 3495 } 3496 if ((error == 0) && (optval != NULL)) { 3497 error = sooptcopyout(sopt, optval, optsize); 3498 SCTP_FREE(optval); 3499 } else if (optval != NULL) { 3500 SCTP_FREE(optval); 3501 } 3502 out: 3503 return (error); 3504 } 3505 3506 3507 static int 3508 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 3509 { 3510 int error = 0; 3511 int create_lock_on = 0; 3512 uint32_t vrf_id; 3513 struct sctp_inpcb *inp; 3514 struct sctp_tcb *stcb = NULL; 3515 3516 inp = (struct sctp_inpcb *)so->so_pcb; 3517 if (inp == 0) { 3518 /* I made the same as TCP since we are not setup? */ 3519 return (ECONNRESET); 3520 } 3521 SCTP_ASOC_CREATE_LOCK(inp); 3522 create_lock_on = 1; 3523 3524 SCTP_INP_INCR_REF(inp); 3525 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3526 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3527 /* Should I really unlock ? */ 3528 error = EFAULT; 3529 goto out_now; 3530 } 3531 #ifdef INET6 3532 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 3533 (addr->sa_family == AF_INET6)) { 3534 error = EINVAL; 3535 goto out_now; 3536 } 3537 #endif /* INET6 */ 3538 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 3539 SCTP_PCB_FLAGS_UNBOUND) { 3540 /* Bind a ephemeral port */ 3541 error = sctp_inpcb_bind(so, NULL, p); 3542 if (error) { 3543 goto out_now; 3544 } 3545 } 3546 /* Now do we connect? */ 3547 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 3548 error = EINVAL; 3549 goto out_now; 3550 } 3551 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3552 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 3553 /* We are already connected AND the TCP model */ 3554 error = EADDRINUSE; 3555 goto out_now; 3556 } 3557 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3558 SCTP_INP_RLOCK(inp); 3559 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3560 SCTP_INP_RUNLOCK(inp); 3561 } else { 3562 /* 3563 * We increment here since sctp_findassociation_ep_addr() 3564 * wil do a decrement if it finds the stcb as long as the 3565 * locked tcb (last argument) is NOT a TCB.. aka NULL. 3566 */ 3567 SCTP_INP_INCR_REF(inp); 3568 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); 3569 if (stcb == NULL) { 3570 SCTP_INP_DECR_REF(inp); 3571 } else { 3572 SCTP_TCB_LOCK(stcb); 3573 } 3574 } 3575 if (stcb != NULL) { 3576 /* Already have or am bring up an association */ 3577 error = EALREADY; 3578 goto out_now; 3579 } 3580 vrf_id = inp->def_vrf_id; 3581 /* We are GOOD to go */ 3582 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id); 3583 if (stcb == NULL) { 3584 /* Gak! 
no memory */ 3585 goto out_now; 3586 } 3587 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 3588 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 3589 /* Set the connected flag so we can queue data */ 3590 soisconnecting(so); 3591 } 3592 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 3593 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3594 3595 /* initialize authentication parameters for the assoc */ 3596 sctp_initialize_auth_params(inp, stcb); 3597 3598 sctp_send_initiate(inp, stcb); 3599 SCTP_TCB_UNLOCK(stcb); 3600 out_now: 3601 if (create_lock_on) 3602 SCTP_ASOC_CREATE_UNLOCK(inp); 3603 3604 SCTP_INP_DECR_REF(inp); 3605 return error; 3606 } 3607 3608 int 3609 sctp_listen(struct socket *so, int backlog, struct thread *p) 3610 { 3611 /* 3612 * Note this module depends on the protocol processing being called 3613 * AFTER any socket level flags and backlog are applied to the 3614 * socket. The traditional way that the socket flags are applied is 3615 * AFTER protocol processing. We have made a change to the 3616 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 3617 * place if the socket API for SCTP is to work properly. 3618 */ 3619 3620 int error = 0; 3621 struct sctp_inpcb *inp; 3622 3623 inp = (struct sctp_inpcb *)so->so_pcb; 3624 if (inp == 0) { 3625 /* I made the same as TCP since we are not setup? */ 3626 return (ECONNRESET); 3627 } 3628 SCTP_INP_RLOCK(inp); 3629 #ifdef SCTP_LOCK_LOGGING 3630 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 3631 #endif 3632 SOCK_LOCK(so); 3633 error = solisten_proto_check(so); 3634 if (error) { 3635 SOCK_UNLOCK(so); 3636 return (error); 3637 } 3638 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3639 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 3640 /* We are already connected AND the TCP model */ 3641 SCTP_INP_RUNLOCK(inp); 3642 SOCK_UNLOCK(so); 3643 return (EADDRINUSE); 3644 } 3645 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 3646 /* We must do a bind. */ 3647 SOCK_UNLOCK(so); 3648 SCTP_INP_RUNLOCK(inp); 3649 if ((error = sctp_inpcb_bind(so, NULL, p))) { 3650 /* bind error, probably perm */ 3651 return (error); 3652 } 3653 SOCK_LOCK(so); 3654 } else { 3655 SCTP_INP_RUNLOCK(inp); 3656 } 3657 /* It appears for 7.0 and on, we must always call this. 
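The ACCEPTCONN flag is cleared again below for the cases where listening does not apply.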
*/ 3658 solisten_proto(so, backlog); 3659 3660 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 3661 /* remove the ACCEPTCONN flag for one-to-many sockets */ 3662 so->so_options &= ~SO_ACCEPTCONN; 3663 } 3664 if (backlog == 0) { 3665 /* turning off listen */ 3666 so->so_options &= ~SO_ACCEPTCONN; 3667 } 3668 SOCK_UNLOCK(so); 3669 return (error); 3670 } 3671 3672 static int sctp_defered_wakeup_cnt = 0; 3673 3674 int 3675 sctp_accept(struct socket *so, struct sockaddr **addr) 3676 { 3677 struct sctp_tcb *stcb; 3678 struct sctp_inpcb *inp; 3679 union sctp_sockstore store; 3680 3681 int error; 3682 3683 inp = (struct sctp_inpcb *)so->so_pcb; 3684 3685 if (inp == 0) { 3686 return (ECONNRESET); 3687 } 3688 SCTP_INP_RLOCK(inp); 3689 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 3690 SCTP_INP_RUNLOCK(inp); 3691 return (ENOTSUP); 3692 } 3693 if (so->so_state & SS_ISDISCONNECTED) { 3694 SCTP_INP_RUNLOCK(inp); 3695 return (ECONNABORTED); 3696 } 3697 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3698 if (stcb == NULL) { 3699 SCTP_INP_RUNLOCK(inp); 3700 return (ECONNRESET); 3701 } 3702 SCTP_TCB_LOCK(stcb); 3703 SCTP_INP_RUNLOCK(inp); 3704 store = stcb->asoc.primary_destination->ro._l_addr; 3705 SCTP_TCB_UNLOCK(stcb); 3706 if (store.sa.sa_family == AF_INET) { 3707 struct sockaddr_in *sin; 3708 3709 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 3710 sin->sin_family = AF_INET; 3711 sin->sin_len = sizeof(*sin); 3712 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 3713 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 3714 *addr = (struct sockaddr *)sin; 3715 } else { 3716 struct sockaddr_in6 *sin6; 3717 3718 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 3719 sin6->sin6_family = AF_INET6; 3720 sin6->sin6_len = sizeof(*sin6); 3721 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 3722 3723 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 3724 if ((error = sa6_recoverscope(sin6)) != 0) { 3725 SCTP_FREE_SONAME(sin6); 3726 return (error); 3727 } 3728 *addr = (struct sockaddr *)sin6; 3729 } 3730 /* Wake any delayed sleep action */ 3731 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 3732 SCTP_INP_WLOCK(inp); 3733 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 3734 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 3735 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 3736 SCTP_INP_WUNLOCK(inp); 3737 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 3738 if (sowriteable(inp->sctp_socket)) { 3739 sowwakeup_locked(inp->sctp_socket); 3740 } else { 3741 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 3742 } 3743 SCTP_INP_WLOCK(inp); 3744 } 3745 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 3746 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 3747 SCTP_INP_WUNLOCK(inp); 3748 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 3749 if (soreadable(inp->sctp_socket)) { 3750 sctp_defered_wakeup_cnt++; 3751 sorwakeup_locked(inp->sctp_socket); 3752 } else { 3753 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 3754 } 3755 SCTP_INP_WLOCK(inp); 3756 } 3757 SCTP_INP_WUNLOCK(inp); 3758 } 3759 return (0); 3760 } 3761 3762 int 3763 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 3764 { 3765 struct sockaddr_in *sin; 3766 uint32_t vrf_id; 3767 struct sctp_inpcb *inp; 3768 struct sctp_ifa *sctp_ifa; 3769 3770 /* 3771 * Do the malloc first in case it blocks. 
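* That way no locks are held while the allocation may sleep.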
3772 */ 3773 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 3774 sin->sin_family = AF_INET; 3775 sin->sin_len = sizeof(*sin); 3776 inp = (struct sctp_inpcb *)so->so_pcb; 3777 if (!inp) { 3778 SCTP_FREE_SONAME(sin); 3779 return ECONNRESET; 3780 } 3781 SCTP_INP_RLOCK(inp); 3782 sin->sin_port = inp->sctp_lport; 3783 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3784 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3785 struct sctp_tcb *stcb; 3786 struct sockaddr_in *sin_a; 3787 struct sctp_nets *net; 3788 int fnd; 3789 3790 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3791 if (stcb == NULL) { 3792 goto notConn; 3793 } 3794 fnd = 0; 3795 sin_a = NULL; 3796 SCTP_TCB_LOCK(stcb); 3797 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3798 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 3799 if (sin_a == NULL) 3800 /* this will make coverity happy */ 3801 continue; 3802 3803 if (sin_a->sin_family == AF_INET) { 3804 fnd = 1; 3805 break; 3806 } 3807 } 3808 if ((!fnd) || (sin_a == NULL)) { 3809 /* punt */ 3810 SCTP_TCB_UNLOCK(stcb); 3811 goto notConn; 3812 } 3813 vrf_id = inp->def_vrf_id; 3814 sctp_ifa = sctp_source_address_selection(inp, 3815 stcb, 3816 (sctp_route_t *) & net->ro, 3817 net, 0, vrf_id); 3818 if (sctp_ifa) { 3819 sin->sin_addr = sctp_ifa->address.sin.sin_addr; 3820 sctp_free_ifa(sctp_ifa); 3821 } 3822 SCTP_TCB_UNLOCK(stcb); 3823 } else { 3824 /* For the bound all case you get back 0 */ 3825 notConn: 3826 sin->sin_addr.s_addr = 0; 3827 } 3828 3829 } else { 3830 /* Take the first IPv4 address in the list */ 3831 struct sctp_laddr *laddr; 3832 int fnd = 0; 3833 3834 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 3835 if (laddr->ifa->address.sa.sa_family == AF_INET) { 3836 struct sockaddr_in *sin_a; 3837 3838 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa; 3839 sin->sin_addr = sin_a->sin_addr; 3840 fnd = 1; 3841 break; 3842 } 3843 } 3844 if (!fnd) { 3845 SCTP_FREE_SONAME(sin); 3846 SCTP_INP_RUNLOCK(inp); 3847 return ENOENT; 3848 } 3849 } 3850 SCTP_INP_RUNLOCK(inp); 3851 (*addr) = (struct sockaddr *)sin; 3852 return (0); 3853 } 3854 3855 int 3856 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 3857 { 3858 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 3859 int fnd; 3860 struct sockaddr_in *sin_a; 3861 struct sctp_inpcb *inp; 3862 struct sctp_tcb *stcb; 3863 struct sctp_nets *net; 3864 3865 /* Do the malloc first in case it blocks. 
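The inp pointer is re-read after the allocation in case we slept.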
 */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if ((inp == NULL) ||
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
		/* UDP type and listeners will drop out here */
		return (ENOTCONN);
	}
	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);

	/* We must recapture in case we blocked */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (!inp) {
		SCTP_FREE_SONAME(sin);
		return ECONNRESET;
	}
	SCTP_INP_RLOCK(inp);
	stcb = LIST_FIRST(&inp->sctp_asoc_list);
	if (stcb)
		SCTP_TCB_LOCK(stcb);
	SCTP_INP_RUNLOCK(inp);
	if (stcb == NULL) {
		SCTP_FREE_SONAME(sin);
		return ECONNRESET;
	}
	fnd = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
		if (sin_a->sin_family == AF_INET) {
			fnd = 1;
			sin->sin_port = stcb->rport;
			sin->sin_addr = sin_a->sin_addr;
			break;
		}
	}
	SCTP_TCB_UNLOCK(stcb);
	if (!fnd) {
		/* No IPv4 address */
		SCTP_FREE_SONAME(sin);
		return ENOENT;
	}
	(*addr) = (struct sockaddr *)sin;
	return (0);
}

struct pr_usrreqs sctp_usrreqs = {
	.pru_abort = sctp_abort,
	.pru_accept = sctp_accept,
	.pru_attach = sctp_attach,
	.pru_bind = sctp_bind,
	.pru_connect = sctp_connect,
	.pru_control = in_control,
	.pru_close = sctp_close,
	.pru_detach = sctp_close,
	.pru_sopoll = sopoll_generic,
	.pru_disconnect = sctp_disconnect,
	.pru_listen = sctp_listen,
	.pru_peeraddr = sctp_peeraddr,
	.pru_send = sctp_sendm,
	.pru_shutdown = sctp_shutdown,
	.pru_sockaddr = sctp_ingetaddr,
	.pru_sosend = sctp_sosend,
	.pru_soreceive = sctp_soreceive
};
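
/*
 * Illustrative userland sketch of how the get/set option handlers above are
 * reached through sctp_ctloutput().  This is not part of the kernel build;
 * it is a minimal example, assuming a one-to-one style SCTP socket that is
 * already connected and the usual userland headers (<netinet/sctp.h> and
 * <netinet/sctp_uio.h>).  The RTO values are arbitrary examples in
 * milliseconds and error handling is trimmed.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <netinet/sctp_uio.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void
 *	query_status_and_tune_rto(int fd)
 *	{
 *		struct sctp_status status;
 *		struct sctp_rtoinfo rto;
 *		socklen_t len;
 *
 *		memset(&status, 0, sizeof(status));
 *		len = sizeof(status);
 *		if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *			printf("state %d rwnd %u streams in/out %u/%u\n",
 *			    status.sstat_state, status.sstat_rwnd,
 *			    (unsigned)status.sstat_instrms,
 *			    (unsigned)status.sstat_outstrms);
 *
 *		memset(&rto, 0, sizeof(rto));
 *		rto.srto_assoc_id = status.sstat_assoc_id;
 *		rto.srto_initial = 1000;
 *		rto.srto_min = 500;
 *		rto.srto_max = 30000;
 *		(void)setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 *	}
 */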