/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_var.h>
#if defined(INET6)
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_cc_functions.h>




void
sctp_init(void)
{
	/* Init the SCTP pcb in sctp_pcb.c */
	u_long sb_max_adj;

	sctp_pcb_init();


	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
		sctp_max_chunks_on_queue = (nmbclusters / 8);
	/*
	 * Allow a user to take no more than 1/2 the number of clusters or
	 * the SB_MAX, whichever is smaller, for the send window.
	 */
	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
	sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
	/*
	 * Now for the recv window, should we take the same amount? or
	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
	 * now I will just copy.
	 */
	sctp_recvspace = sctp_sendspace;

}



/*
 * cleanup of the sctppcbinfo structure.
 * Assumes that the sctppcbinfo lock is held.
 */
void
sctp_pcbinfo_cleanup(void)
{
	/* free the hash tables */
	if (sctppcbinfo.sctp_asochash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark);
	if (sctppcbinfo.sctp_ephash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark);
	if (sctppcbinfo.sctp_tcpephash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark);
	if (sctppcbinfo.sctp_restarthash != NULL)
		SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark);
}


static void
sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint16_t nxtsz)
{
	struct sctp_tmit_chunk *chk;

	/* Adjust that too */
	stcb->asoc.smallest_mtu = nxtsz;
	/* now off to subtract IP_DF flag if needed */
#ifdef SCTP_PRINT_FOR_B_AND_M
	SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
	    inp, stcb, net, nxtsz);
#endif
	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			/*
			 * For this guy we also mark it for immediate resend
			 * since we sent too big a chunk.
			 */
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->rec.data.doing_fast_retransmit = 0;
			if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
				    chk->whoTo->flight_size,
				    chk->book_size,
				    (uintptr_t) chk->whoTo,
				    chk->rec.data.TSN_seq);
			}
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			sctp_flight_size_decrease(chk);
			sctp_total_flight_decrease(stcb, chk);
		}
	}
}

static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
		}
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not an unreachable message due to fragmentation */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	totsz = ip->ip_len;

	nxtsz = ntohs(icmph->icmp_nextmtu);
	if (nxtsz == 0) {
		/*
		 * old type router that does not tell us what the next MTU
		 * size is.
		 * Rats, we will have to guess (in an educated fashion, of
		 * course).
		 */
		nxtsz = find_next_best_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
#ifdef SCTP_PRINT_FOR_B_AND_M
		SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
		    nxtsz);
#endif
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}


void
sctp_notify(struct sctp_inpcb *inp,
    struct ip *ip,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	/* protection */
	int reason;
	struct icmp *icmph;


	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {

		/*
		 * Hmm, reachability problems that we must examine closely.
		 * If the destination is not reachable, we may have lost a
		 * network. Or, if there is NO protocol named SCTP at the
		 * other end, we consider it an OOTB abort.
		 */
		if (net->dest_state & SCTP_ADDR_REACHABLE) {
			/* Ok, that destination is NOT reachable */
			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
			    net->error_count,
			    net->failure_threshold,
			    net);

			net->dest_state &= ~SCTP_ADDR_REACHABLE;
			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
			/*
			 * JRS 5/14/07 - If a destination is unreachable,
			 * the PF bit is turned off. This allows an
			 * unambiguous use of the PF bit for destinations
			 * that are reachable but potentially failed. If the
			 * destination is set to the unreachable state, also
			 * set the destination to the PF state.
			 */
			/*
			 * Add debug message here if destination is not in
			 * PF state.
			 */
			/* Stop any running T3 timers here? */
			if (sctp_cmt_on_off && sctp_cmt_pf) {
				net->dest_state &= ~SCTP_ADDR_PF;
				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
				    net);
			}
			net->error_count = net->failure_threshold + 1;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
			    stcb, SCTP_FAILED_THRESHOLD,
			    (void *)net, SCTP_SO_NOT_LOCKED);
		}
		SCTP_TCB_UNLOCK(stcb);
	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
		/*
		 * Here the peer is either playing tricks on us, including
		 * an address that belongs to someone who does not support
		 * SCTP, OR it was a userland implementation that shut down
		 * and is now dead. In either case treat it like an OOTB
		 * abort with no TCB.
		 */
		reason = SCTP_PEER_FAULTY;
		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
#endif
		/* no need to unlock here, since the TCB is gone */
	} else {
		SCTP_TCB_UNLOCK(stcb);
	}
}

void
sctp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct sctphdr *sh;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;
	if (sa->sa_family != AF_INET ||
	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
		return;
	}
	if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}
	if (ip) {
		struct sctp_inpcb *inp = NULL;
		struct sctp_tcb *stcb = NULL;
		struct sctp_nets *net = NULL;
		struct sockaddr_in to, from;

		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		bzero(&to, sizeof(to));
		bzero(&from, sizeof(from));
		from.sin_family = to.sin_family = AF_INET;
		from.sin_len = to.sin_len = sizeof(to);
		from.sin_port = sh->src_port;
		from.sin_addr = ip->ip_src;
		to.sin_port = sh->dest_port;
		to.sin_addr = ip->ip_dst;

		/*
		 * 'to' holds the dest of the packet that failed to be sent.
		 * 'from' holds our local endpoint address. Thus we reverse
		 * the to and the from in the lookup.
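		 * (Reminder: the ICMP error quotes the IP header of a packet
		 * that we sent, so ip_src above is one of our own addresses
		 * and ip_dst is the peer; that is why the roles are swapped
		 * relative to an ordinary inbound packet.)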
		 */
		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
		    (struct sockaddr *)&to,
		    &inp, &net, 1, vrf_id);
		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
			if (cmd != PRC_MSGSIZE) {
				sctp_notify(inp, ip, sh,
				    (struct sockaddr *)&to, stcb,
				    net);
			} else {
				/* handle possible ICMP size messages */
				sctp_notify_mbuf(inp, stcb, net, ip, sh);
			}
		} else {
			if ((stcb == NULL) && (inp != NULL)) {
				/* reduce ref-count */
				SCTP_INP_WLOCK(inp);
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
			}
		}
	}
	return;
}

static int
sctp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct sctp_inpcb *inp;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;
	int error;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);

	if (error)
		return (error);

	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);

	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
	    sintosa(&addrs[1]),
	    &inp, &net, 1, vrf_id);
	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
		if ((inp != NULL) && (stcb == NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			goto cred_can_cont;
		}
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		error = ENOENT;
		goto out;
	}
	SCTP_TCB_UNLOCK(stcb);
	/*
	 * We use the write lock here, only since in the error leg we need
	 * it. If we used RLOCK, then we would have to
	 * wlock/decr/unlock/rlock. Which in theory could create a hole.
	 * Better to use higher wlock.
	 */
	SCTP_INP_WLOCK(inp);
cred_can_cont:
	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
	if (error) {
		SCTP_INP_WUNLOCK(inp);
		goto out;
	}
	cru2x(inp->sctp_socket->so_cred, &xuc);
	SCTP_INP_WUNLOCK(inp);
	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");


static void
sctp_abort(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		return;
	}
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
		sctp_log_closing(inp, NULL, 16);
#endif
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached.
*/ 496 so->so_pcb = NULL; 497 SOCK_UNLOCK(so); 498 } else { 499 flags = inp->sctp_flags; 500 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 501 goto sctp_must_try_again; 502 } 503 } 504 return; 505 } 506 507 static int 508 sctp_attach(struct socket *so, int proto, struct thread *p) 509 { 510 struct sctp_inpcb *inp; 511 struct inpcb *ip_inp; 512 int error; 513 uint32_t vrf_id = SCTP_DEFAULT_VRFID; 514 515 #ifdef IPSEC 516 uint32_t flags; 517 518 #endif 519 inp = (struct sctp_inpcb *)so->so_pcb; 520 if (inp != 0) { 521 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 522 return EINVAL; 523 } 524 error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace); 525 if (error) { 526 return error; 527 } 528 error = sctp_inpcb_alloc(so, vrf_id); 529 if (error) { 530 return error; 531 } 532 inp = (struct sctp_inpcb *)so->so_pcb; 533 SCTP_INP_WLOCK(inp); 534 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ 535 ip_inp = &inp->ip_inp.inp; 536 ip_inp->inp_vflag |= INP_IPV4; 537 ip_inp->inp_ip_ttl = ip_defttl; 538 #ifdef IPSEC 539 error = ipsec_init_policy(so, &ip_inp->inp_sp); 540 #ifdef SCTP_LOG_CLOSING 541 sctp_log_closing(inp, NULL, 17); 542 #endif 543 if (error != 0) { 544 flags = inp->sctp_flags; 545 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 546 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 547 #ifdef SCTP_LOG_CLOSING 548 sctp_log_closing(inp, NULL, 15); 549 #endif 550 SCTP_INP_WUNLOCK(inp); 551 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 552 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 553 } else { 554 SCTP_INP_WUNLOCK(inp); 555 } 556 return error; 557 } 558 #endif /* IPSEC */ 559 SCTP_INP_WUNLOCK(inp); 560 return 0; 561 } 562 563 static int 564 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 565 { 566 struct sctp_inpcb *inp = NULL; 567 int error; 568 569 #ifdef INET6 570 if (addr && addr->sa_family != AF_INET) { 571 /* must be a v4 address! */ 572 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 573 return EINVAL; 574 } 575 #endif /* INET6 */ 576 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) { 577 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 578 return EINVAL; 579 } 580 inp = (struct sctp_inpcb *)so->so_pcb; 581 if (inp == 0) { 582 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 583 return EINVAL; 584 } 585 error = sctp_inpcb_bind(so, addr, NULL, p); 586 return error; 587 } 588 589 void 590 sctp_close(struct socket *so) 591 { 592 struct sctp_inpcb *inp; 593 uint32_t flags; 594 595 inp = (struct sctp_inpcb *)so->so_pcb; 596 if (inp == 0) 597 return; 598 599 /* 600 * Inform all the lower layer assoc that we are done. 
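	 * (Unlike sctp_abort() above, which always tears things down with
	 * SCTP_FREE_SHOULD_USE_ABORT, this path may close gracefully; the
	 * choice below depends on SO_LINGER and on whether unread data is
	 * still sitting in the receive buffer.)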
601 */ 602 sctp_must_try_again: 603 flags = inp->sctp_flags; 604 #ifdef SCTP_LOG_CLOSING 605 sctp_log_closing(inp, NULL, 17); 606 #endif 607 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 608 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 609 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || 610 (so->so_rcv.sb_cc > 0)) { 611 #ifdef SCTP_LOG_CLOSING 612 sctp_log_closing(inp, NULL, 13); 613 #endif 614 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 615 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 616 } else { 617 #ifdef SCTP_LOG_CLOSING 618 sctp_log_closing(inp, NULL, 14); 619 #endif 620 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, 621 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 622 } 623 /* 624 * The socket is now detached, no matter what the state of 625 * the SCTP association. 626 */ 627 SOCK_LOCK(so); 628 SCTP_SB_CLEAR(so->so_snd); 629 /* 630 * same for the rcv ones, they are only here for the 631 * accounting/select. 632 */ 633 SCTP_SB_CLEAR(so->so_rcv); 634 635 /* Now null out the reference, we are completely detached. */ 636 so->so_pcb = NULL; 637 SOCK_UNLOCK(so); 638 } else { 639 flags = inp->sctp_flags; 640 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 641 goto sctp_must_try_again; 642 } 643 } 644 return; 645 } 646 647 648 int 649 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 650 struct mbuf *control, struct thread *p); 651 652 653 int 654 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 655 struct mbuf *control, struct thread *p) 656 { 657 struct sctp_inpcb *inp; 658 int error; 659 660 inp = (struct sctp_inpcb *)so->so_pcb; 661 if (inp == 0) { 662 if (control) { 663 sctp_m_freem(control); 664 control = NULL; 665 } 666 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 667 sctp_m_freem(m); 668 return EINVAL; 669 } 670 /* Got to have an to address if we are NOT a connected socket */ 671 if ((addr == NULL) && 672 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 673 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) 674 ) { 675 goto connected_type; 676 } else if (addr == NULL) { 677 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 678 error = EDESTADDRREQ; 679 sctp_m_freem(m); 680 if (control) { 681 sctp_m_freem(control); 682 control = NULL; 683 } 684 return (error); 685 } 686 #ifdef INET6 687 if (addr->sa_family != AF_INET) { 688 /* must be a v4 address! */ 689 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 690 sctp_m_freem(m); 691 if (control) { 692 sctp_m_freem(control); 693 control = NULL; 694 } 695 error = EDESTADDRREQ; 696 return EDESTADDRREQ; 697 } 698 #endif /* INET6 */ 699 connected_type: 700 /* now what about control */ 701 if (control) { 702 if (inp->control) { 703 SCTP_PRINTF("huh? control set?\n"); 704 sctp_m_freem(inp->control); 705 inp->control = NULL; 706 } 707 inp->control = control; 708 } 709 /* Place the data */ 710 if (inp->pkt) { 711 SCTP_BUF_NEXT(inp->pkt_last) = m; 712 inp->pkt_last = m; 713 } else { 714 inp->pkt_last = inp->pkt = m; 715 } 716 if ( 717 /* FreeBSD uses a flag passed */ 718 ((flags & PRUS_MORETOCOME) == 0) 719 ) { 720 /* 721 * note with the current version this code will only be used 722 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for 723 * re-defining sosend to use the sctp_sosend. One can 724 * optionally switch back to this code (by changing back the 725 * definitions) but this is not advisable. 
This code is used 726 * by FreeBSD when sending a file with sendfile() though. 727 */ 728 int ret; 729 730 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); 731 inp->pkt = NULL; 732 inp->control = NULL; 733 return (ret); 734 } else { 735 return (0); 736 } 737 } 738 739 int 740 sctp_disconnect(struct socket *so) 741 { 742 struct sctp_inpcb *inp; 743 744 inp = (struct sctp_inpcb *)so->so_pcb; 745 if (inp == NULL) { 746 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 747 return (ENOTCONN); 748 } 749 SCTP_INP_RLOCK(inp); 750 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 751 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 752 if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) { 753 /* No connection */ 754 SCTP_INP_RUNLOCK(inp); 755 return (0); 756 } else { 757 struct sctp_association *asoc; 758 struct sctp_tcb *stcb; 759 760 stcb = LIST_FIRST(&inp->sctp_asoc_list); 761 if (stcb == NULL) { 762 SCTP_INP_RUNLOCK(inp); 763 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 764 return (EINVAL); 765 } 766 SCTP_TCB_LOCK(stcb); 767 asoc = &stcb->asoc; 768 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 769 /* We are about to be freed, out of here */ 770 SCTP_TCB_UNLOCK(stcb); 771 SCTP_INP_RUNLOCK(inp); 772 return (0); 773 } 774 if (((so->so_options & SO_LINGER) && 775 (so->so_linger == 0)) || 776 (so->so_rcv.sb_cc > 0)) { 777 if (SCTP_GET_STATE(asoc) != 778 SCTP_STATE_COOKIE_WAIT) { 779 /* Left with Data unread */ 780 struct mbuf *err; 781 782 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 783 if (err) { 784 /* 785 * Fill in the user 786 * initiated abort 787 */ 788 struct sctp_paramhdr *ph; 789 790 ph = mtod(err, struct sctp_paramhdr *); 791 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr); 792 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 793 ph->param_length = htons(SCTP_BUF_LEN(err)); 794 } 795 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED); 796 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 797 } 798 SCTP_INP_RUNLOCK(inp); 799 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 800 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 801 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 802 } 803 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); 804 /* No unlock tcb assoc is gone */ 805 return (0); 806 } 807 if (TAILQ_EMPTY(&asoc->send_queue) && 808 TAILQ_EMPTY(&asoc->sent_queue) && 809 (asoc->stream_queue_cnt == 0)) { 810 /* there is nothing queued to send, so done */ 811 if (asoc->locked_on_sending) { 812 goto abort_anyway; 813 } 814 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 815 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 816 /* only send SHUTDOWN 1st time thru */ 817 sctp_stop_timers_for_shutdown(stcb); 818 sctp_send_shutdown(stcb, 819 stcb->asoc.primary_destination); 820 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 821 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 822 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 823 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 824 } 825 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 826 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 827 stcb->sctp_ep, stcb, 828 asoc->primary_destination); 829 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 830 stcb->sctp_ep, stcb, 831 asoc->primary_destination); 832 } 833 } else { 834 /* 835 * we still got (or just got) data to send, 836 * so set SHUTDOWN_PENDING 837 */ 838 /* 839 * XXX sockets draft says that 
SCTP_EOF 840 * should be sent with no data. currently, 841 * we will allow user data to be sent first 842 * and move to SHUTDOWN-PENDING 843 */ 844 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 845 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 846 asoc->primary_destination); 847 if (asoc->locked_on_sending) { 848 /* Locked to send out the data */ 849 struct sctp_stream_queue_pending *sp; 850 851 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 852 if (sp == NULL) { 853 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 854 asoc->locked_on_sending->stream_no); 855 } else { 856 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 857 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 858 } 859 } 860 if (TAILQ_EMPTY(&asoc->send_queue) && 861 TAILQ_EMPTY(&asoc->sent_queue) && 862 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 863 struct mbuf *op_err; 864 865 abort_anyway: 866 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 867 0, M_DONTWAIT, 1, MT_DATA); 868 if (op_err) { 869 /* 870 * Fill in the user 871 * initiated abort 872 */ 873 struct sctp_paramhdr *ph; 874 uint32_t *ippp; 875 876 SCTP_BUF_LEN(op_err) = 877 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); 878 ph = mtod(op_err, 879 struct sctp_paramhdr *); 880 ph->param_type = htons( 881 SCTP_CAUSE_USER_INITIATED_ABT); 882 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 883 ippp = (uint32_t *) (ph + 1); 884 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4); 885 } 886 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; 887 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 888 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 889 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 890 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 891 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 892 } 893 SCTP_INP_RUNLOCK(inp); 894 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); 895 return (0); 896 } else { 897 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 898 } 899 } 900 SCTP_TCB_UNLOCK(stcb); 901 SCTP_INP_RUNLOCK(inp); 902 return (0); 903 } 904 /* not reached */ 905 } else { 906 /* UDP model does not support this */ 907 SCTP_INP_RUNLOCK(inp); 908 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 909 return EOPNOTSUPP; 910 } 911 } 912 913 int 914 sctp_shutdown(struct socket *so) 915 { 916 struct sctp_inpcb *inp; 917 918 inp = (struct sctp_inpcb *)so->so_pcb; 919 if (inp == 0) { 920 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 921 return EINVAL; 922 } 923 SCTP_INP_RLOCK(inp); 924 /* For UDP model this is a invalid call */ 925 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 926 /* Restore the flags that the soshutdown took away. */ 927 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 928 /* This proc will wakeup for read and do nothing (I hope) */ 929 SCTP_INP_RUNLOCK(inp); 930 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 931 return (EOPNOTSUPP); 932 } 933 /* 934 * Ok if we reach here its the TCP model and it is either a SHUT_WR 935 * or SHUT_RDWR. This means we put the shutdown flag against it. 936 */ 937 { 938 struct sctp_tcb *stcb; 939 struct sctp_association *asoc; 940 941 socantsendmore(so); 942 943 stcb = LIST_FIRST(&inp->sctp_asoc_list); 944 if (stcb == NULL) { 945 /* 946 * Ok we hit the case that the shutdown call was 947 * made after an abort or something. Nothing to do 948 * now. 
949 */ 950 SCTP_INP_RUNLOCK(inp); 951 return (0); 952 } 953 SCTP_TCB_LOCK(stcb); 954 asoc = &stcb->asoc; 955 if (TAILQ_EMPTY(&asoc->send_queue) && 956 TAILQ_EMPTY(&asoc->sent_queue) && 957 (asoc->stream_queue_cnt == 0)) { 958 if (asoc->locked_on_sending) { 959 goto abort_anyway; 960 } 961 /* there is nothing queued to send, so I'm done... */ 962 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 963 /* only send SHUTDOWN the first time through */ 964 sctp_stop_timers_for_shutdown(stcb); 965 sctp_send_shutdown(stcb, 966 stcb->asoc.primary_destination); 967 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 968 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 969 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 970 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 971 } 972 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 973 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 974 stcb->sctp_ep, stcb, 975 asoc->primary_destination); 976 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 977 stcb->sctp_ep, stcb, 978 asoc->primary_destination); 979 } 980 } else { 981 /* 982 * we still got (or just got) data to send, so set 983 * SHUTDOWN_PENDING 984 */ 985 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 986 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 987 asoc->primary_destination); 988 989 if (asoc->locked_on_sending) { 990 /* Locked to send out the data */ 991 struct sctp_stream_queue_pending *sp; 992 993 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 994 if (sp == NULL) { 995 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 996 asoc->locked_on_sending->stream_no); 997 } else { 998 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 999 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1000 } 1001 } 1002 } 1003 if (TAILQ_EMPTY(&asoc->send_queue) && 1004 TAILQ_EMPTY(&asoc->sent_queue) && 1005 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1006 struct mbuf *op_err; 1007 1008 abort_anyway: 1009 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1010 0, M_DONTWAIT, 1, MT_DATA); 1011 if (op_err) { 1012 /* Fill in the user initiated abort */ 1013 struct sctp_paramhdr *ph; 1014 uint32_t *ippp; 1015 1016 SCTP_BUF_LEN(op_err) = 1017 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 1018 ph = mtod(op_err, 1019 struct sctp_paramhdr *); 1020 ph->param_type = htons( 1021 SCTP_CAUSE_USER_INITIATED_ABT); 1022 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1023 ippp = (uint32_t *) (ph + 1); 1024 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1025 } 1026 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; 1027 sctp_abort_an_association(stcb->sctp_ep, stcb, 1028 SCTP_RESPONSE_TO_USER_REQ, 1029 op_err, SCTP_SO_LOCKED); 1030 goto skip_unlock; 1031 } else { 1032 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 1033 } 1034 } 1035 SCTP_TCB_UNLOCK(stcb); 1036 } 1037 skip_unlock: 1038 SCTP_INP_RUNLOCK(inp); 1039 return 0; 1040 } 1041 1042 /* 1043 * copies a "user" presentable address and removes embedded scope, etc. 
1044 * returns 0 on success, 1 on error 1045 */ 1046 static uint32_t 1047 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 1048 { 1049 struct sockaddr_in6 lsa6; 1050 1051 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 1052 &lsa6); 1053 memcpy(ss, sa, sa->sa_len); 1054 return (0); 1055 } 1056 1057 1058 1059 /* 1060 * NOTE: assumes addr lock is held 1061 */ 1062 static size_t 1063 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, 1064 struct sctp_tcb *stcb, 1065 size_t limit, 1066 struct sockaddr_storage *sas, 1067 uint32_t vrf_id) 1068 { 1069 struct sctp_ifn *sctp_ifn; 1070 struct sctp_ifa *sctp_ifa; 1071 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1072 size_t actual; 1073 int ipv4_addr_legal, ipv6_addr_legal; 1074 struct sctp_vrf *vrf; 1075 1076 actual = 0; 1077 if (limit <= 0) 1078 return (actual); 1079 1080 if (stcb) { 1081 /* Turn on all the appropriate scope */ 1082 loopback_scope = stcb->asoc.loopback_scope; 1083 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1084 local_scope = stcb->asoc.local_scope; 1085 site_scope = stcb->asoc.site_scope; 1086 } else { 1087 /* Turn on ALL scope, since we look at the EP */ 1088 loopback_scope = ipv4_local_scope = local_scope = 1089 site_scope = 1; 1090 } 1091 ipv4_addr_legal = ipv6_addr_legal = 0; 1092 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1093 ipv6_addr_legal = 1; 1094 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1095 ipv4_addr_legal = 1; 1096 } 1097 } else { 1098 ipv4_addr_legal = 1; 1099 } 1100 vrf = sctp_find_vrf(vrf_id); 1101 if (vrf == NULL) { 1102 return (0); 1103 } 1104 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1105 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1106 if ((loopback_scope == 0) && 1107 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1108 /* Skip loopback if loopback_scope not set */ 1109 continue; 1110 } 1111 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1112 if (stcb) { 1113 /* 1114 * For the BOUND-ALL case, the list 1115 * associated with a TCB is Always 1116 * considered a reverse list.. i.e. 1117 * it lists addresses that are NOT 1118 * part of the association. If this 1119 * is one of those we must skip it. 
1120 */ 1121 if (sctp_is_addr_restricted(stcb, 1122 sctp_ifa)) { 1123 continue; 1124 } 1125 } 1126 if ((sctp_ifa->address.sa.sa_family == AF_INET) && 1127 (ipv4_addr_legal)) { 1128 struct sockaddr_in *sin; 1129 1130 sin = (struct sockaddr_in *)&sctp_ifa->address.sa; 1131 if (sin->sin_addr.s_addr == 0) { 1132 /* 1133 * we skip unspecifed 1134 * addresses 1135 */ 1136 continue; 1137 } 1138 if ((ipv4_local_scope == 0) && 1139 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1140 continue; 1141 } 1142 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) { 1143 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); 1144 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1145 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); 1146 actual += sizeof(struct sockaddr_in6); 1147 } else { 1148 memcpy(sas, sin, sizeof(*sin)); 1149 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; 1150 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); 1151 actual += sizeof(*sin); 1152 } 1153 if (actual >= limit) { 1154 return (actual); 1155 } 1156 } else if ((sctp_ifa->address.sa.sa_family == AF_INET6) && 1157 (ipv6_addr_legal)) { 1158 struct sockaddr_in6 *sin6; 1159 1160 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 1161 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1162 /* 1163 * we skip unspecifed 1164 * addresses 1165 */ 1166 continue; 1167 } 1168 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1169 if (local_scope == 0) 1170 continue; 1171 if (sin6->sin6_scope_id == 0) { 1172 if (sa6_recoverscope(sin6) != 0) 1173 /* 1174 * bad link 1175 * local 1176 * address 1177 */ 1178 continue; 1179 } 1180 } 1181 if ((site_scope == 0) && 1182 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1183 continue; 1184 } 1185 memcpy(sas, sin6, sizeof(*sin6)); 1186 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1187 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); 1188 actual += sizeof(*sin6); 1189 if (actual >= limit) { 1190 return (actual); 1191 } 1192 } 1193 } 1194 } 1195 } else { 1196 struct sctp_laddr *laddr; 1197 1198 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1199 if (stcb) { 1200 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 1201 continue; 1202 } 1203 } 1204 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) 1205 continue; 1206 1207 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1208 sas = (struct sockaddr_storage *)((caddr_t)sas + 1209 laddr->ifa->address.sa.sa_len); 1210 actual += laddr->ifa->address.sa.sa_len; 1211 if (actual >= limit) { 1212 return (actual); 1213 } 1214 } 1215 } 1216 return (actual); 1217 } 1218 1219 static size_t 1220 sctp_fill_up_addresses(struct sctp_inpcb *inp, 1221 struct sctp_tcb *stcb, 1222 size_t limit, 1223 struct sockaddr_storage *sas) 1224 { 1225 size_t size = 0; 1226 1227 SCTP_IPI_ADDR_LOCK(); 1228 /* fill up addresses for the endpoint's default vrf */ 1229 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, 1230 inp->def_vrf_id); 1231 SCTP_IPI_ADDR_UNLOCK(); 1232 return (size); 1233 } 1234 1235 /* 1236 * NOTE: assumes addr lock is held 1237 */ 1238 static int 1239 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) 1240 { 1241 int cnt = 0; 1242 struct sctp_vrf *vrf = NULL; 1243 1244 /* 1245 * In both sub-set bound an bound_all cases we return the MAXIMUM 1246 * number of addresses that you COULD get. 
In reality the sub-set 1247 * bound may have an exclusion list for a given TCB OR in the 1248 * bound-all case a TCB may NOT include the loopback or other 1249 * addresses as well. 1250 */ 1251 vrf = sctp_find_vrf(vrf_id); 1252 if (vrf == NULL) { 1253 return (0); 1254 } 1255 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1256 struct sctp_ifn *sctp_ifn; 1257 struct sctp_ifa *sctp_ifa; 1258 1259 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1260 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1261 /* Count them if they are the right type */ 1262 if (sctp_ifa->address.sa.sa_family == AF_INET) { 1263 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) 1264 cnt += sizeof(struct sockaddr_in6); 1265 else 1266 cnt += sizeof(struct sockaddr_in); 1267 1268 } else if (sctp_ifa->address.sa.sa_family == AF_INET6) 1269 cnt += sizeof(struct sockaddr_in6); 1270 } 1271 } 1272 } else { 1273 struct sctp_laddr *laddr; 1274 1275 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1276 if (laddr->ifa->address.sa.sa_family == AF_INET) { 1277 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) 1278 cnt += sizeof(struct sockaddr_in6); 1279 else 1280 cnt += sizeof(struct sockaddr_in); 1281 1282 } else if (laddr->ifa->address.sa.sa_family == AF_INET6) 1283 cnt += sizeof(struct sockaddr_in6); 1284 } 1285 } 1286 return (cnt); 1287 } 1288 1289 static int 1290 sctp_count_max_addresses(struct sctp_inpcb *inp) 1291 { 1292 int cnt = 0; 1293 1294 SCTP_IPI_ADDR_LOCK(); 1295 /* count addresses for the endpoint's default VRF */ 1296 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id); 1297 SCTP_IPI_ADDR_UNLOCK(); 1298 return (cnt); 1299 } 1300 1301 static int 1302 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, 1303 size_t optsize, void *p, int delay) 1304 { 1305 int error = 0; 1306 int creat_lock_on = 0; 1307 struct sctp_tcb *stcb = NULL; 1308 struct sockaddr *sa; 1309 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; 1310 int added = 0; 1311 uint32_t vrf_id; 1312 int bad_addresses = 0; 1313 sctp_assoc_t *a_id; 1314 1315 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n"); 1316 1317 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1318 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 1319 /* We are already connected AND the TCP model */ 1320 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 1321 return (EADDRINUSE); 1322 } 1323 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 1324 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1325 return (EINVAL); 1326 } 1327 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 1328 SCTP_INP_RLOCK(inp); 1329 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1330 SCTP_INP_RUNLOCK(inp); 1331 } 1332 if (stcb) { 1333 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1334 return (EALREADY); 1335 } 1336 SCTP_INP_INCR_REF(inp); 1337 SCTP_ASOC_CREATE_LOCK(inp); 1338 creat_lock_on = 1; 1339 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1340 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1341 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 1342 error = EFAULT; 1343 goto out_now; 1344 } 1345 totaddrp = (int *)optval; 1346 totaddr = *totaddrp; 1347 sa = (struct sockaddr *)(totaddrp + 1); 1348 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses); 1349 if ((stcb != NULL) || bad_addresses) { 1350 /* Already have or am bring up an association */ 1351 SCTP_ASOC_CREATE_UNLOCK(inp); 1352 creat_lock_on = 0; 
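		/*
		 * Note: when bad_addresses is non-zero the helper above has
		 * presumably already set *error for the caller, so EALREADY
		 * below is only reported for the "association already
		 * exists" case.
		 */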
1353 if (stcb) 1354 SCTP_TCB_UNLOCK(stcb); 1355 if (bad_addresses == 0) { 1356 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1357 error = EALREADY; 1358 } 1359 goto out_now; 1360 } 1361 #ifdef INET6 1362 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1363 (num_v6 > 0)) { 1364 error = EINVAL; 1365 goto out_now; 1366 } 1367 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1368 (num_v4 > 0)) { 1369 struct in6pcb *inp6; 1370 1371 inp6 = (struct in6pcb *)inp; 1372 if (SCTP_IPV6_V6ONLY(inp6)) { 1373 /* 1374 * if IPV6_V6ONLY flag, ignore connections destined 1375 * to a v4 addr or v4-mapped addr 1376 */ 1377 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1378 error = EINVAL; 1379 goto out_now; 1380 } 1381 } 1382 #endif /* INET6 */ 1383 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 1384 SCTP_PCB_FLAGS_UNBOUND) { 1385 /* Bind a ephemeral port */ 1386 error = sctp_inpcb_bind(so, NULL, NULL, p); 1387 if (error) { 1388 goto out_now; 1389 } 1390 } 1391 /* FIX ME: do we want to pass in a vrf on the connect call? */ 1392 vrf_id = inp->def_vrf_id; 1393 1394 /* We are GOOD to go */ 1395 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id, 1396 (struct thread *)p 1397 ); 1398 if (stcb == NULL) { 1399 /* Gak! no memory */ 1400 goto out_now; 1401 } 1402 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 1403 /* move to second address */ 1404 if (sa->sa_family == AF_INET) 1405 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); 1406 else 1407 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); 1408 1409 error = 0; 1410 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error); 1411 /* Fill in the return id */ 1412 if (error) { 1413 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12); 1414 goto out_now; 1415 } 1416 a_id = (sctp_assoc_t *) optval; 1417 *a_id = sctp_get_associd(stcb); 1418 1419 /* initialize authentication parameters for the assoc */ 1420 sctp_initialize_auth_params(inp, stcb); 1421 1422 if (delay) { 1423 /* doing delayed connection */ 1424 stcb->asoc.delayed_connection = 1; 1425 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); 1426 } else { 1427 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1428 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 1429 } 1430 SCTP_TCB_UNLOCK(stcb); 1431 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1432 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1433 /* Set the connected flag so we can queue data */ 1434 soisconnecting(so); 1435 } 1436 out_now: 1437 if (creat_lock_on) { 1438 SCTP_ASOC_CREATE_UNLOCK(inp); 1439 } 1440 SCTP_INP_DECR_REF(inp); 1441 return error; 1442 } 1443 1444 #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \ 1445 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\ 1446 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \ 1447 SCTP_INP_RLOCK(inp); \ 1448 stcb = LIST_FIRST(&inp->sctp_asoc_list); \ 1449 if (stcb) { \ 1450 SCTP_TCB_LOCK(stcb); \ 1451 } \ 1452 SCTP_INP_RUNLOCK(inp); \ 1453 } else if (assoc_id != 0) { \ 1454 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ 1455 if (stcb == NULL) { \ 1456 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \ 1457 error = ENOENT; \ 1458 break; \ 1459 } \ 1460 } else { \ 1461 stcb = NULL; \ 1462 } \ 1463 } 1464 1465 1466 #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ 1467 if (size < sizeof(type)) { \ 1468 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 
\ 1469 error = EINVAL; \ 1470 break; \ 1471 } else { \ 1472 destp = (type *)srcp; \ 1473 } \ 1474 } 1475 1476 static int 1477 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, 1478 void *p) 1479 { 1480 struct sctp_inpcb *inp = NULL; 1481 int error, val = 0; 1482 struct sctp_tcb *stcb = NULL; 1483 1484 if (optval == NULL) { 1485 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1486 return (EINVAL); 1487 } 1488 inp = (struct sctp_inpcb *)so->so_pcb; 1489 if (inp == 0) { 1490 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1491 return EINVAL; 1492 } 1493 error = 0; 1494 1495 switch (optname) { 1496 case SCTP_NODELAY: 1497 case SCTP_AUTOCLOSE: 1498 case SCTP_EXPLICIT_EOR: 1499 case SCTP_AUTO_ASCONF: 1500 case SCTP_DISABLE_FRAGMENTS: 1501 case SCTP_I_WANT_MAPPED_V4_ADDR: 1502 case SCTP_USE_EXT_RCVINFO: 1503 SCTP_INP_RLOCK(inp); 1504 switch (optname) { 1505 case SCTP_DISABLE_FRAGMENTS: 1506 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1507 break; 1508 case SCTP_I_WANT_MAPPED_V4_ADDR: 1509 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1510 break; 1511 case SCTP_AUTO_ASCONF: 1512 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1513 /* only valid for bound all sockets */ 1514 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1515 } else { 1516 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1517 error = EINVAL; 1518 goto flags_out; 1519 } 1520 break; 1521 case SCTP_EXPLICIT_EOR: 1522 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1523 break; 1524 case SCTP_NODELAY: 1525 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1526 break; 1527 case SCTP_USE_EXT_RCVINFO: 1528 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1529 break; 1530 case SCTP_AUTOCLOSE: 1531 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1532 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1533 else 1534 val = 0; 1535 break; 1536 1537 default: 1538 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1539 error = ENOPROTOOPT; 1540 } /* end switch (sopt->sopt_name) */ 1541 if (optname != SCTP_AUTOCLOSE) { 1542 /* make it an "on/off" value */ 1543 val = (val != 0); 1544 } 1545 if (*optsize < sizeof(val)) { 1546 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1547 error = EINVAL; 1548 } 1549 flags_out: 1550 SCTP_INP_RUNLOCK(inp); 1551 if (error == 0) { 1552 /* return the option value */ 1553 *(int *)optval = val; 1554 *optsize = sizeof(val); 1555 } 1556 break; 1557 case SCTP_GET_PACKET_LOG: 1558 { 1559 #ifdef SCTP_PACKET_LOGGING 1560 uint8_t *target; 1561 int ret; 1562 1563 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); 1564 ret = sctp_copy_out_packet_log(target, (int)*optsize); 1565 *optsize = ret; 1566 #else 1567 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1568 error = EOPNOTSUPP; 1569 #endif 1570 break; 1571 } 1572 case SCTP_PARTIAL_DELIVERY_POINT: 1573 { 1574 uint32_t *value; 1575 1576 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1577 *value = inp->partial_delivery_point; 1578 *optsize = sizeof(uint32_t); 1579 } 1580 break; 1581 case SCTP_FRAGMENT_INTERLEAVE: 1582 { 1583 uint32_t *value; 1584 1585 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1586 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { 1587 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { 1588 *value = SCTP_FRAG_LEVEL_2; 1589 } else { 1590 *value = SCTP_FRAG_LEVEL_1; 1591 } 1592 } 
else { 1593 *value = SCTP_FRAG_LEVEL_0; 1594 } 1595 *optsize = sizeof(uint32_t); 1596 } 1597 break; 1598 case SCTP_CMT_ON_OFF: 1599 { 1600 struct sctp_assoc_value *av; 1601 1602 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1603 if (sctp_cmt_on_off) { 1604 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1605 if (stcb) { 1606 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 1607 SCTP_TCB_UNLOCK(stcb); 1608 1609 } else { 1610 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1611 error = ENOTCONN; 1612 } 1613 } else { 1614 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1615 error = ENOPROTOOPT; 1616 } 1617 *optsize = sizeof(*av); 1618 } 1619 break; 1620 /* JRS - Get socket option for pluggable congestion control */ 1621 case SCTP_PLUGGABLE_CC: 1622 { 1623 struct sctp_assoc_value *av; 1624 1625 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1626 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1627 if (stcb) { 1628 av->assoc_value = stcb->asoc.congestion_control_module; 1629 SCTP_TCB_UNLOCK(stcb); 1630 } else { 1631 av->assoc_value = inp->sctp_ep.sctp_default_cc_module; 1632 } 1633 *optsize = sizeof(*av); 1634 } 1635 break; 1636 case SCTP_GET_ADDR_LEN: 1637 { 1638 struct sctp_assoc_value *av; 1639 1640 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1641 error = EINVAL; 1642 #ifdef INET 1643 if (av->assoc_value == AF_INET) { 1644 av->assoc_value = sizeof(struct sockaddr_in); 1645 error = 0; 1646 } 1647 #endif 1648 #ifdef INET6 1649 if (av->assoc_value == AF_INET6) { 1650 av->assoc_value = sizeof(struct sockaddr_in6); 1651 error = 0; 1652 } 1653 #endif 1654 if (error) { 1655 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1656 } 1657 *optsize = sizeof(*av); 1658 } 1659 break; 1660 case SCTP_GET_ASSOC_NUMBER: 1661 { 1662 uint32_t *value, cnt; 1663 1664 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1665 cnt = 0; 1666 SCTP_INP_RLOCK(inp); 1667 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1668 cnt++; 1669 } 1670 SCTP_INP_RUNLOCK(inp); 1671 *value = cnt; 1672 *optsize = sizeof(uint32_t); 1673 } 1674 break; 1675 1676 case SCTP_GET_ASSOC_ID_LIST: 1677 { 1678 struct sctp_assoc_ids *ids; 1679 unsigned int at, limit; 1680 1681 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 1682 at = 0; 1683 limit = *optsize / sizeof(sctp_assoc_t); 1684 SCTP_INP_RLOCK(inp); 1685 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1686 if (at < limit) { 1687 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); 1688 } else { 1689 error = EINVAL; 1690 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1691 break; 1692 } 1693 } 1694 SCTP_INP_RUNLOCK(inp); 1695 *optsize = at * sizeof(sctp_assoc_t); 1696 } 1697 break; 1698 case SCTP_CONTEXT: 1699 { 1700 struct sctp_assoc_value *av; 1701 1702 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1703 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1704 1705 if (stcb) { 1706 av->assoc_value = stcb->asoc.context; 1707 SCTP_TCB_UNLOCK(stcb); 1708 } else { 1709 SCTP_INP_RLOCK(inp); 1710 av->assoc_value = inp->sctp_context; 1711 SCTP_INP_RUNLOCK(inp); 1712 } 1713 *optsize = sizeof(*av); 1714 } 1715 break; 1716 case SCTP_VRF_ID: 1717 { 1718 uint32_t *default_vrfid; 1719 1720 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize); 1721 *default_vrfid = inp->def_vrf_id; 1722 break; 1723 } 1724 case SCTP_GET_ASOC_VRF: 1725 { 1726 struct sctp_assoc_value *id; 1727 1728 SCTP_CHECK_AND_CAST(id, optval, 
struct sctp_assoc_value, *optsize); 1729 SCTP_FIND_STCB(inp, stcb, id->assoc_id); 1730 if (stcb == NULL) { 1731 error = EINVAL; 1732 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1733 break; 1734 } 1735 id->assoc_value = stcb->asoc.vrf_id; 1736 break; 1737 } 1738 case SCTP_GET_VRF_IDS: 1739 { 1740 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1741 error = EOPNOTSUPP; 1742 break; 1743 } 1744 case SCTP_GET_NONCE_VALUES: 1745 { 1746 struct sctp_get_nonce_values *gnv; 1747 1748 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 1749 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 1750 1751 if (stcb) { 1752 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 1753 gnv->gn_local_tag = stcb->asoc.my_vtag; 1754 SCTP_TCB_UNLOCK(stcb); 1755 } else { 1756 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1757 error = ENOTCONN; 1758 } 1759 *optsize = sizeof(*gnv); 1760 } 1761 break; 1762 case SCTP_DELAYED_SACK: 1763 { 1764 struct sctp_sack_info *sack; 1765 1766 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); 1767 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 1768 if (stcb) { 1769 sack->sack_delay = stcb->asoc.delayed_ack; 1770 sack->sack_freq = stcb->asoc.sack_freq; 1771 SCTP_TCB_UNLOCK(stcb); 1772 } else { 1773 SCTP_INP_RLOCK(inp); 1774 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1775 sack->sack_freq = inp->sctp_ep.sctp_sack_freq; 1776 SCTP_INP_RUNLOCK(inp); 1777 } 1778 *optsize = sizeof(*sack); 1779 } 1780 break; 1781 1782 case SCTP_GET_SNDBUF_USE: 1783 { 1784 struct sctp_sockstat *ss; 1785 1786 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 1787 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 1788 1789 if (stcb) { 1790 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 1791 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 1792 stcb->asoc.size_on_all_streams); 1793 SCTP_TCB_UNLOCK(stcb); 1794 } else { 1795 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1796 error = ENOTCONN; 1797 } 1798 *optsize = sizeof(struct sctp_sockstat); 1799 } 1800 break; 1801 case SCTP_MAX_BURST: 1802 { 1803 uint8_t *value; 1804 1805 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); 1806 1807 SCTP_INP_RLOCK(inp); 1808 *value = inp->sctp_ep.max_burst; 1809 SCTP_INP_RUNLOCK(inp); 1810 *optsize = sizeof(uint8_t); 1811 } 1812 break; 1813 case SCTP_MAXSEG: 1814 { 1815 struct sctp_assoc_value *av; 1816 int ovh; 1817 1818 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1819 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1820 1821 if (stcb) { 1822 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 1823 SCTP_TCB_UNLOCK(stcb); 1824 } else { 1825 SCTP_INP_RLOCK(inp); 1826 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1827 ovh = SCTP_MED_OVERHEAD; 1828 } else { 1829 ovh = SCTP_MED_V4_OVERHEAD; 1830 } 1831 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) 1832 av->assoc_value = 0; 1833 else 1834 av->assoc_value = inp->sctp_frag_point - ovh; 1835 SCTP_INP_RUNLOCK(inp); 1836 } 1837 *optsize = sizeof(struct sctp_assoc_value); 1838 } 1839 break; 1840 case SCTP_GET_STAT_LOG: 1841 error = sctp_fill_stat_log(optval, optsize); 1842 break; 1843 case SCTP_EVENTS: 1844 { 1845 struct sctp_event_subscribe *events; 1846 1847 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 1848 memset(events, 0, sizeof(*events)); 1849 SCTP_INP_RLOCK(inp); 1850 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 
1851 events->sctp_data_io_event = 1; 1852 1853 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 1854 events->sctp_association_event = 1; 1855 1856 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 1857 events->sctp_address_event = 1; 1858 1859 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 1860 events->sctp_send_failure_event = 1; 1861 1862 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 1863 events->sctp_peer_error_event = 1; 1864 1865 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 1866 events->sctp_shutdown_event = 1; 1867 1868 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 1869 events->sctp_partial_delivery_event = 1; 1870 1871 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 1872 events->sctp_adaptation_layer_event = 1; 1873 1874 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 1875 events->sctp_authentication_event = 1; 1876 1877 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 1878 events->sctp_stream_reset_events = 1; 1879 SCTP_INP_RUNLOCK(inp); 1880 *optsize = sizeof(struct sctp_event_subscribe); 1881 } 1882 break; 1883 1884 case SCTP_ADAPTATION_LAYER: 1885 { 1886 uint32_t *value; 1887 1888 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1889 1890 SCTP_INP_RLOCK(inp); 1891 *value = inp->sctp_ep.adaptation_layer_indicator; 1892 SCTP_INP_RUNLOCK(inp); 1893 *optsize = sizeof(uint32_t); 1894 } 1895 break; 1896 case SCTP_SET_INITIAL_DBG_SEQ: 1897 { 1898 uint32_t *value; 1899 1900 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1901 SCTP_INP_RLOCK(inp); 1902 *value = inp->sctp_ep.initial_sequence_debug; 1903 SCTP_INP_RUNLOCK(inp); 1904 *optsize = sizeof(uint32_t); 1905 } 1906 break; 1907 case SCTP_GET_LOCAL_ADDR_SIZE: 1908 { 1909 uint32_t *value; 1910 1911 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1912 SCTP_INP_RLOCK(inp); 1913 *value = sctp_count_max_addresses(inp); 1914 SCTP_INP_RUNLOCK(inp); 1915 *optsize = sizeof(uint32_t); 1916 } 1917 break; 1918 case SCTP_GET_REMOTE_ADDR_SIZE: 1919 { 1920 uint32_t *value; 1921 size_t size; 1922 struct sctp_nets *net; 1923 1924 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1925 /* FIXME MT: change to sctp_assoc_value? */ 1926 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 1927 1928 if (stcb) { 1929 size = 0; 1930 /* Count the sizes */ 1931 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1932 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || 1933 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 1934 size += sizeof(struct sockaddr_in6); 1935 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 1936 size += sizeof(struct sockaddr_in); 1937 } else { 1938 /* huh */ 1939 break; 1940 } 1941 } 1942 SCTP_TCB_UNLOCK(stcb); 1943 *value = (uint32_t) size; 1944 } else { 1945 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1946 error = ENOTCONN; 1947 } 1948 *optsize = sizeof(uint32_t); 1949 } 1950 break; 1951 case SCTP_GET_PEER_ADDRESSES: 1952 /* 1953 * Get the address information, an array is passed in to 1954 * fill up we pack it. 
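		 * (Rough userland usage sketch -- the field names come from
		 * this handler, everything else is assumed:
		 *
		 *	struct sctp_getaddresses *sga = malloc(len);
		 *	sga->sget_assoc_id = assoc_id;
		 *	getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRESSES,
		 *	    sga, &len);
		 *
		 * then walk sga->addr, stepping by each sockaddr's sa_len.)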
1955 */ 1956 { 1957 size_t cpsz, left; 1958 struct sockaddr_storage *sas; 1959 struct sctp_nets *net; 1960 struct sctp_getaddresses *saddr; 1961 1962 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 1963 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 1964 1965 if (stcb) { 1966 left = (*optsize) - sizeof(struct sctp_getaddresses); 1967 *optsize = sizeof(struct sctp_getaddresses); 1968 sas = (struct sockaddr_storage *)&saddr->addr[0]; 1969 1970 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1971 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || 1972 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 1973 cpsz = sizeof(struct sockaddr_in6); 1974 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 1975 cpsz = sizeof(struct sockaddr_in); 1976 } else { 1977 /* huh */ 1978 break; 1979 } 1980 if (left < cpsz) { 1981 /* not enough room. */ 1982 break; 1983 } 1984 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 1985 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 1986 /* Must map the address */ 1987 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 1988 (struct sockaddr_in6 *)sas); 1989 } else { 1990 memcpy(sas, &net->ro._l_addr, cpsz); 1991 } 1992 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 1993 1994 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 1995 left -= cpsz; 1996 *optsize += cpsz; 1997 } 1998 SCTP_TCB_UNLOCK(stcb); 1999 } else { 2000 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2001 error = ENOENT; 2002 } 2003 } 2004 break; 2005 case SCTP_GET_LOCAL_ADDRESSES: 2006 { 2007 size_t limit, actual; 2008 struct sockaddr_storage *sas; 2009 struct sctp_getaddresses *saddr; 2010 2011 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2012 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2013 2014 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2015 limit = *optsize - sizeof(sctp_assoc_t); 2016 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2017 if (stcb) { 2018 SCTP_TCB_UNLOCK(stcb); 2019 } 2020 *optsize = sizeof(struct sockaddr_storage) + actual; 2021 } 2022 break; 2023 case SCTP_PEER_ADDR_PARAMS: 2024 { 2025 struct sctp_paddrparams *paddrp; 2026 struct sctp_nets *net; 2027 2028 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2029 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2030 2031 net = NULL; 2032 if (stcb) { 2033 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2034 } else { 2035 /* 2036 * We increment here since 2037 * sctp_findassociation_ep_addr() wil do a 2038 * decrement if it finds the stcb as long as 2039 * the locked tcb (last argument) is NOT a 2040 * TCB.. aka NULL. 
2041 */ 2042 SCTP_INP_INCR_REF(inp); 2043 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2044 if (stcb == NULL) { 2045 SCTP_INP_DECR_REF(inp); 2046 } 2047 } 2048 if (stcb && (net == NULL)) { 2049 struct sockaddr *sa; 2050 2051 sa = (struct sockaddr *)&paddrp->spp_address; 2052 if (sa->sa_family == AF_INET) { 2053 struct sockaddr_in *sin; 2054 2055 sin = (struct sockaddr_in *)sa; 2056 if (sin->sin_addr.s_addr) { 2057 error = EINVAL; 2058 SCTP_TCB_UNLOCK(stcb); 2059 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2060 break; 2061 } 2062 } else if (sa->sa_family == AF_INET6) { 2063 struct sockaddr_in6 *sin6; 2064 2065 sin6 = (struct sockaddr_in6 *)sa; 2066 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2067 error = EINVAL; 2068 SCTP_TCB_UNLOCK(stcb); 2069 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2070 break; 2071 } 2072 } else { 2073 error = EAFNOSUPPORT; 2074 SCTP_TCB_UNLOCK(stcb); 2075 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2076 break; 2077 } 2078 } 2079 if (stcb) { 2080 /* Applys to the specific association */ 2081 paddrp->spp_flags = 0; 2082 if (net) { 2083 int ovh; 2084 2085 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2086 ovh = SCTP_MED_OVERHEAD; 2087 } else { 2088 ovh = SCTP_MED_V4_OVERHEAD; 2089 } 2090 2091 2092 paddrp->spp_pathmaxrxt = net->failure_threshold; 2093 paddrp->spp_pathmtu = net->mtu - ovh; 2094 /* get flags for HB */ 2095 if (net->dest_state & SCTP_ADDR_NOHB) 2096 paddrp->spp_flags |= SPP_HB_DISABLE; 2097 else 2098 paddrp->spp_flags |= SPP_HB_ENABLE; 2099 /* get flags for PMTU */ 2100 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2101 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2102 } else { 2103 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2104 } 2105 #ifdef INET 2106 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2107 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2108 paddrp->spp_flags |= SPP_IPV4_TOS; 2109 } 2110 #endif 2111 #ifdef INET6 2112 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2113 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2114 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2115 } 2116 #endif 2117 } else { 2118 /* 2119 * No destination so return default 2120 * value 2121 */ 2122 int cnt = 0; 2123 2124 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2125 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2126 #ifdef INET 2127 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2128 paddrp->spp_flags |= SPP_IPV4_TOS; 2129 #endif 2130 #ifdef INET6 2131 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2132 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2133 #endif 2134 /* default settings should be these */ 2135 if (stcb->asoc.hb_is_disabled == 0) { 2136 paddrp->spp_flags |= SPP_HB_ENABLE; 2137 } else { 2138 paddrp->spp_flags |= SPP_HB_DISABLE; 2139 } 2140 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2141 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2142 cnt++; 2143 } 2144 } 2145 if (cnt) { 2146 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2147 } 2148 } 2149 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2150 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2151 SCTP_TCB_UNLOCK(stcb); 2152 } else { 2153 /* Use endpoint defaults */ 2154 SCTP_INP_RLOCK(inp); 2155 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2156 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2157 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2158 /* get 
inp's default */ 2159 #ifdef INET 2160 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2161 paddrp->spp_flags |= SPP_IPV4_TOS; 2162 #endif 2163 #ifdef INET6 2164 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2165 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2166 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2167 } 2168 #endif 2169 /* can't return this */ 2170 paddrp->spp_pathmtu = 0; 2171 2172 /* default behavior, no stcb */ 2173 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2174 2175 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2176 paddrp->spp_flags |= SPP_HB_ENABLE; 2177 } else { 2178 paddrp->spp_flags |= SPP_HB_DISABLE; 2179 } 2180 SCTP_INP_RUNLOCK(inp); 2181 } 2182 *optsize = sizeof(struct sctp_paddrparams); 2183 } 2184 break; 2185 case SCTP_GET_PEER_ADDR_INFO: 2186 { 2187 struct sctp_paddrinfo *paddri; 2188 struct sctp_nets *net; 2189 2190 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2191 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2192 2193 net = NULL; 2194 if (stcb) { 2195 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2196 } else { 2197 /* 2198 * We increment here since 2199 * sctp_findassociation_ep_addr() wil do a 2200 * decrement if it finds the stcb as long as 2201 * the locked tcb (last argument) is NOT a 2202 * TCB.. aka NULL. 2203 */ 2204 SCTP_INP_INCR_REF(inp); 2205 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2206 if (stcb == NULL) { 2207 SCTP_INP_DECR_REF(inp); 2208 } 2209 } 2210 2211 if ((stcb) && (net)) { 2212 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2213 paddri->spinfo_cwnd = net->cwnd; 2214 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2215 paddri->spinfo_rto = net->RTO; 2216 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2217 SCTP_TCB_UNLOCK(stcb); 2218 } else { 2219 if (stcb) { 2220 SCTP_TCB_UNLOCK(stcb); 2221 } 2222 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2223 error = ENOENT; 2224 } 2225 *optsize = sizeof(struct sctp_paddrinfo); 2226 } 2227 break; 2228 case SCTP_PCB_STATUS: 2229 { 2230 struct sctp_pcbinfo *spcb; 2231 2232 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2233 sctp_fill_pcbinfo(spcb); 2234 *optsize = sizeof(struct sctp_pcbinfo); 2235 } 2236 break; 2237 2238 case SCTP_STATUS: 2239 { 2240 struct sctp_nets *net; 2241 struct sctp_status *sstat; 2242 2243 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2244 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2245 2246 if (stcb == NULL) { 2247 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2248 error = EINVAL; 2249 break; 2250 } 2251 /* 2252 * I think passing the state is fine since 2253 * sctp_constants.h will be available to the user 2254 * land. 2255 */ 2256 sstat->sstat_state = stcb->asoc.state; 2257 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2258 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2259 /* 2260 * We can't include chunks that have been passed to 2261 * the socket layer. Only things in queue. 
2262 */ 2263 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2264 stcb->asoc.cnt_on_all_streams); 2265 2266 2267 sstat->sstat_instrms = stcb->asoc.streamincnt; 2268 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2269 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2270 memcpy(&sstat->sstat_primary.spinfo_address, 2271 &stcb->asoc.primary_destination->ro._l_addr, 2272 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2273 net = stcb->asoc.primary_destination; 2274 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2275 /* 2276 * Again the user can get info from sctp_constants.h 2277 * for what the state of the network is. 2278 */ 2279 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2280 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2281 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2282 sstat->sstat_primary.spinfo_rto = net->RTO; 2283 sstat->sstat_primary.spinfo_mtu = net->mtu; 2284 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2285 SCTP_TCB_UNLOCK(stcb); 2286 *optsize = sizeof(*sstat); 2287 } 2288 break; 2289 case SCTP_RTOINFO: 2290 { 2291 struct sctp_rtoinfo *srto; 2292 2293 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2294 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2295 2296 if (stcb) { 2297 srto->srto_initial = stcb->asoc.initial_rto; 2298 srto->srto_max = stcb->asoc.maxrto; 2299 srto->srto_min = stcb->asoc.minrto; 2300 SCTP_TCB_UNLOCK(stcb); 2301 } else { 2302 SCTP_INP_RLOCK(inp); 2303 srto->srto_initial = inp->sctp_ep.initial_rto; 2304 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2305 srto->srto_min = inp->sctp_ep.sctp_minrto; 2306 SCTP_INP_RUNLOCK(inp); 2307 } 2308 *optsize = sizeof(*srto); 2309 } 2310 break; 2311 case SCTP_ASSOCINFO: 2312 { 2313 struct sctp_assocparams *sasoc; 2314 uint32_t oldval; 2315 2316 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2317 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2318 2319 if (stcb) { 2320 oldval = sasoc->sasoc_cookie_life; 2321 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); 2322 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2323 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2324 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2325 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2326 SCTP_TCB_UNLOCK(stcb); 2327 } else { 2328 SCTP_INP_RLOCK(inp); 2329 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2330 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2331 sasoc->sasoc_number_peer_destinations = 0; 2332 sasoc->sasoc_peer_rwnd = 0; 2333 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2334 SCTP_INP_RUNLOCK(inp); 2335 } 2336 *optsize = sizeof(*sasoc); 2337 } 2338 break; 2339 case SCTP_DEFAULT_SEND_PARAM: 2340 { 2341 struct sctp_sndrcvinfo *s_info; 2342 2343 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2344 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2345 2346 if (stcb) { 2347 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2348 SCTP_TCB_UNLOCK(stcb); 2349 } else { 2350 SCTP_INP_RLOCK(inp); 2351 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2352 SCTP_INP_RUNLOCK(inp); 2353 } 2354 *optsize = sizeof(*s_info); 2355 } 2356 break; 2357 case SCTP_INITMSG: 2358 { 2359 struct sctp_initmsg *sinit; 2360 2361 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2362 SCTP_INP_RLOCK(inp); 2363 
sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2364 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2365 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2366 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2367 SCTP_INP_RUNLOCK(inp); 2368 *optsize = sizeof(*sinit); 2369 } 2370 break; 2371 case SCTP_PRIMARY_ADDR: 2372 /* we allow a "get" operation on this */ 2373 { 2374 struct sctp_setprim *ssp; 2375 2376 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2377 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2378 2379 if (stcb) { 2380 /* simply copy out the sockaddr_storage... */ 2381 int len; 2382 2383 len = *optsize; 2384 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2385 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2386 2387 memcpy(&ssp->ssp_addr, 2388 &stcb->asoc.primary_destination->ro._l_addr, 2389 len); 2390 SCTP_TCB_UNLOCK(stcb); 2391 } else { 2392 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2393 error = EINVAL; 2394 } 2395 *optsize = sizeof(*ssp); 2396 } 2397 break; 2398 2399 case SCTP_HMAC_IDENT: 2400 { 2401 struct sctp_hmacalgo *shmac; 2402 sctp_hmaclist_t *hmaclist; 2403 uint32_t size; 2404 int i; 2405 2406 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2407 2408 SCTP_INP_RLOCK(inp); 2409 hmaclist = inp->sctp_ep.local_hmacs; 2410 if (hmaclist == NULL) { 2411 /* no HMACs to return */ 2412 *optsize = sizeof(*shmac); 2413 SCTP_INP_RUNLOCK(inp); 2414 break; 2415 } 2416 /* is there room for all of the hmac ids? */ 2417 size = sizeof(*shmac) + (hmaclist->num_algo * 2418 sizeof(shmac->shmac_idents[0])); 2419 if ((size_t)(*optsize) < size) { 2420 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2421 error = EINVAL; 2422 SCTP_INP_RUNLOCK(inp); 2423 break; 2424 } 2425 /* copy in the list */ 2426 for (i = 0; i < hmaclist->num_algo; i++) 2427 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2428 SCTP_INP_RUNLOCK(inp); 2429 *optsize = size; 2430 break; 2431 } 2432 case SCTP_AUTH_ACTIVE_KEY: 2433 { 2434 struct sctp_authkeyid *scact; 2435 2436 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2437 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2438 2439 if (stcb) { 2440 /* get the active key on the assoc */ 2441 scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid; 2442 SCTP_TCB_UNLOCK(stcb); 2443 } else { 2444 /* get the endpoint active key */ 2445 SCTP_INP_RLOCK(inp); 2446 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2447 SCTP_INP_RUNLOCK(inp); 2448 } 2449 *optsize = sizeof(*scact); 2450 break; 2451 } 2452 case SCTP_LOCAL_AUTH_CHUNKS: 2453 { 2454 struct sctp_authchunks *sac; 2455 sctp_auth_chklist_t *chklist = NULL; 2456 size_t size = 0; 2457 2458 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2459 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2460 2461 if (stcb) { 2462 /* get off the assoc */ 2463 chklist = stcb->asoc.local_auth_chunks; 2464 /* is there enough space? */ 2465 size = sctp_auth_get_chklist_size(chklist); 2466 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2467 error = EINVAL; 2468 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2469 } else { 2470 /* copy in the chunks */ 2471 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2472 } 2473 SCTP_TCB_UNLOCK(stcb); 2474 } else { 2475 /* get off the endpoint */ 2476 SCTP_INP_RLOCK(inp); 2477 chklist = inp->sctp_ep.local_auth_chunks; 2478 /* is there enough space? 
*/ 2479 size = sctp_auth_get_chklist_size(chklist); 2480 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2481 error = EINVAL; 2482 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2483 } else { 2484 /* copy in the chunks */ 2485 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2486 } 2487 SCTP_INP_RUNLOCK(inp); 2488 } 2489 *optsize = sizeof(struct sctp_authchunks) + size; 2490 break; 2491 } 2492 case SCTP_PEER_AUTH_CHUNKS: 2493 { 2494 struct sctp_authchunks *sac; 2495 sctp_auth_chklist_t *chklist = NULL; 2496 size_t size = 0; 2497 2498 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2499 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2500 2501 if (stcb) { 2502 /* get off the assoc */ 2503 chklist = stcb->asoc.peer_auth_chunks; 2504 /* is there enough space? */ 2505 size = sctp_auth_get_chklist_size(chklist); 2506 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2507 error = EINVAL; 2508 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2509 } else { 2510 /* copy in the chunks */ 2511 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2512 } 2513 SCTP_TCB_UNLOCK(stcb); 2514 } else { 2515 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2516 error = ENOENT; 2517 } 2518 *optsize = sizeof(struct sctp_authchunks) + size; 2519 break; 2520 } 2521 2522 2523 default: 2524 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2525 error = ENOPROTOOPT; 2526 *optsize = 0; 2527 break; 2528 } /* end switch (sopt->sopt_name) */ 2529 return (error); 2530 } 2531 2532 static int 2533 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2534 void *p) 2535 { 2536 int error, set_opt; 2537 uint32_t *mopt; 2538 struct sctp_tcb *stcb = NULL; 2539 struct sctp_inpcb *inp = NULL; 2540 uint32_t vrf_id; 2541 2542 if (optval == NULL) { 2543 SCTP_PRINTF("optval is NULL\n"); 2544 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2545 return (EINVAL); 2546 } 2547 inp = (struct sctp_inpcb *)so->so_pcb; 2548 if (inp == 0) { 2549 SCTP_PRINTF("inp is NULL?\n"); 2550 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2551 return EINVAL; 2552 } 2553 vrf_id = inp->def_vrf_id; 2554 2555 error = 0; 2556 switch (optname) { 2557 case SCTP_NODELAY: 2558 case SCTP_AUTOCLOSE: 2559 case SCTP_AUTO_ASCONF: 2560 case SCTP_EXPLICIT_EOR: 2561 case SCTP_DISABLE_FRAGMENTS: 2562 case SCTP_USE_EXT_RCVINFO: 2563 case SCTP_I_WANT_MAPPED_V4_ADDR: 2564 /* copy in the option value */ 2565 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2566 set_opt = 0; 2567 if (error) 2568 break; 2569 switch (optname) { 2570 case SCTP_DISABLE_FRAGMENTS: 2571 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2572 break; 2573 case SCTP_AUTO_ASCONF: 2574 /* 2575 * NOTE: we don't really support this flag 2576 */ 2577 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2578 /* only valid for bound all sockets */ 2579 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2580 } else { 2581 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2582 return (EINVAL); 2583 } 2584 break; 2585 case SCTP_EXPLICIT_EOR: 2586 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2587 break; 2588 case SCTP_USE_EXT_RCVINFO: 2589 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2590 break; 2591 case SCTP_I_WANT_MAPPED_V4_ADDR: 2592 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2593 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2594 } else { 2595 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2596 return 
(EINVAL); 2597 } 2598 break; 2599 case SCTP_NODELAY: 2600 set_opt = SCTP_PCB_FLAGS_NODELAY; 2601 break; 2602 case SCTP_AUTOCLOSE: 2603 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2604 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2605 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2606 return (EINVAL); 2607 } 2608 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2609 /* 2610 * The value is in ticks. Note this does not effect 2611 * old associations, only new ones. 2612 */ 2613 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2614 break; 2615 } 2616 SCTP_INP_WLOCK(inp); 2617 if (*mopt != 0) { 2618 sctp_feature_on(inp, set_opt); 2619 } else { 2620 sctp_feature_off(inp, set_opt); 2621 } 2622 SCTP_INP_WUNLOCK(inp); 2623 break; 2624 case SCTP_PARTIAL_DELIVERY_POINT: 2625 { 2626 uint32_t *value; 2627 2628 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2629 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2630 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2631 error = EINVAL; 2632 break; 2633 } 2634 inp->partial_delivery_point = *value; 2635 } 2636 break; 2637 case SCTP_FRAGMENT_INTERLEAVE: 2638 /* not yet until we re-write sctp_recvmsg() */ 2639 { 2640 uint32_t *level; 2641 2642 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); 2643 if (*level == SCTP_FRAG_LEVEL_2) { 2644 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2645 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2646 } else if (*level == SCTP_FRAG_LEVEL_1) { 2647 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2648 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2649 } else if (*level == SCTP_FRAG_LEVEL_0) { 2650 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2651 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2652 2653 } else { 2654 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2655 error = EINVAL; 2656 } 2657 } 2658 break; 2659 case SCTP_CMT_ON_OFF: 2660 { 2661 struct sctp_assoc_value *av; 2662 2663 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2664 if (sctp_cmt_on_off) { 2665 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2666 if (stcb) { 2667 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; 2668 SCTP_TCB_UNLOCK(stcb); 2669 } else { 2670 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2671 error = ENOTCONN; 2672 } 2673 } else { 2674 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2675 error = ENOPROTOOPT; 2676 } 2677 } 2678 break; 2679 /* JRS - Set socket option for pluggable congestion control */ 2680 case SCTP_PLUGGABLE_CC: 2681 { 2682 struct sctp_assoc_value *av; 2683 2684 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2685 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2686 if (stcb) { 2687 switch (av->assoc_value) { 2688 /* 2689 * JRS - Standard TCP congestion 2690 * control 2691 */ 2692 case SCTP_CC_RFC2581: 2693 { 2694 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 2695 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2696 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 2697 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 2698 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2699 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2700 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2701 
stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2702 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2703 SCTP_TCB_UNLOCK(stcb); 2704 break; 2705 } 2706 /* 2707 * JRS - High Speed TCP congestion 2708 * control (Floyd) 2709 */ 2710 case SCTP_CC_HSTCP: 2711 { 2712 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP; 2713 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2714 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack; 2715 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr; 2716 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2717 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2718 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2719 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2720 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2721 SCTP_TCB_UNLOCK(stcb); 2722 break; 2723 } 2724 /* JRS - HTCP congestion control */ 2725 case SCTP_CC_HTCP: 2726 { 2727 stcb->asoc.congestion_control_module = SCTP_CC_HTCP; 2728 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param; 2729 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack; 2730 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr; 2731 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout; 2732 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo; 2733 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2734 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2735 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer; 2736 SCTP_TCB_UNLOCK(stcb); 2737 break; 2738 } 2739 /* 2740 * JRS - All other values are 2741 * invalid 2742 */ 2743 default: 2744 { 2745 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2746 error = EINVAL; 2747 SCTP_TCB_UNLOCK(stcb); 2748 break; 2749 } 2750 } 2751 } else { 2752 switch (av->assoc_value) { 2753 case SCTP_CC_RFC2581: 2754 case SCTP_CC_HSTCP: 2755 case SCTP_CC_HTCP: 2756 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 2757 break; 2758 default: 2759 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2760 error = EINVAL; 2761 break; 2762 }; 2763 } 2764 } 2765 break; 2766 case SCTP_CLR_STAT_LOG: 2767 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2768 error = EOPNOTSUPP; 2769 break; 2770 case SCTP_CONTEXT: 2771 { 2772 struct sctp_assoc_value *av; 2773 2774 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2775 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2776 2777 if (stcb) { 2778 stcb->asoc.context = av->assoc_value; 2779 SCTP_TCB_UNLOCK(stcb); 2780 } else { 2781 SCTP_INP_WLOCK(inp); 2782 inp->sctp_context = av->assoc_value; 2783 SCTP_INP_WUNLOCK(inp); 2784 } 2785 } 2786 break; 2787 case SCTP_VRF_ID: 2788 { 2789 uint32_t *default_vrfid; 2790 2791 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 2792 if (*default_vrfid > SCTP_MAX_VRF_ID) { 2793 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, 
EINVAL); 2794 error = EINVAL; 2795 break; 2796 } 2797 inp->def_vrf_id = *default_vrfid; 2798 break; 2799 } 2800 case SCTP_DEL_VRF_ID: 2801 { 2802 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2803 error = EOPNOTSUPP; 2804 break; 2805 } 2806 case SCTP_ADD_VRF_ID: 2807 { 2808 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2809 error = EOPNOTSUPP; 2810 break; 2811 } 2812 case SCTP_DELAYED_SACK: 2813 { 2814 struct sctp_sack_info *sack; 2815 2816 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 2817 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 2818 if (sack->sack_delay) { 2819 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 2820 sack->sack_delay = SCTP_MAX_SACK_DELAY; 2821 } 2822 if (stcb) { 2823 if (sack->sack_delay) { 2824 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 2825 sack->sack_delay = TICKS_TO_MSEC(1); 2826 } 2827 stcb->asoc.delayed_ack = sack->sack_delay; 2828 } 2829 if (sack->sack_freq) { 2830 stcb->asoc.sack_freq = sack->sack_freq; 2831 } 2832 SCTP_TCB_UNLOCK(stcb); 2833 } else { 2834 SCTP_INP_WLOCK(inp); 2835 if (sack->sack_delay) { 2836 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 2837 sack->sack_delay = TICKS_TO_MSEC(1); 2838 } 2839 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 2840 } 2841 if (sack->sack_freq) { 2842 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 2843 } 2844 SCTP_INP_WUNLOCK(inp); 2845 } 2846 break; 2847 } 2848 case SCTP_AUTH_CHUNK: 2849 { 2850 struct sctp_authchunk *sauth; 2851 2852 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 2853 2854 SCTP_INP_WLOCK(inp); 2855 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 2856 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2857 error = EINVAL; 2858 } 2859 SCTP_INP_WUNLOCK(inp); 2860 break; 2861 } 2862 case SCTP_AUTH_KEY: 2863 { 2864 struct sctp_authkey *sca; 2865 struct sctp_keyhead *shared_keys; 2866 sctp_sharedkey_t *shared_key; 2867 sctp_key_t *key = NULL; 2868 size_t size; 2869 2870 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 2871 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 2872 size = optsize - sizeof(*sca); 2873 2874 if (stcb) { 2875 /* set it on the assoc */ 2876 shared_keys = &stcb->asoc.shared_keys; 2877 /* clear the cached keys for this key id */ 2878 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 2879 /* 2880 * create the new shared key and 2881 * insert/replace it 2882 */ 2883 if (size > 0) { 2884 key = sctp_set_key(sca->sca_key, (uint32_t) size); 2885 if (key == NULL) { 2886 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2887 error = ENOMEM; 2888 SCTP_TCB_UNLOCK(stcb); 2889 break; 2890 } 2891 } 2892 shared_key = sctp_alloc_sharedkey(); 2893 if (shared_key == NULL) { 2894 sctp_free_key(key); 2895 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2896 error = ENOMEM; 2897 SCTP_TCB_UNLOCK(stcb); 2898 break; 2899 } 2900 shared_key->key = key; 2901 shared_key->keyid = sca->sca_keynumber; 2902 sctp_insert_sharedkey(shared_keys, shared_key); 2903 SCTP_TCB_UNLOCK(stcb); 2904 } else { 2905 /* set it on the endpoint */ 2906 SCTP_INP_WLOCK(inp); 2907 shared_keys = &inp->sctp_ep.shared_keys; 2908 /* 2909 * clear the cached keys on all assocs for 2910 * this key id 2911 */ 2912 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 2913 /* 2914 * create the new shared key and 2915 * insert/replace it 2916 */ 2917 if (size > 0) { 2918 key = sctp_set_key(sca->sca_key, (uint32_t) size); 2919 
if (key == NULL) { 2920 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2921 error = ENOMEM; 2922 SCTP_INP_WUNLOCK(inp); 2923 break; 2924 } 2925 } 2926 shared_key = sctp_alloc_sharedkey(); 2927 if (shared_key == NULL) { 2928 sctp_free_key(key); 2929 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2930 error = ENOMEM; 2931 SCTP_INP_WUNLOCK(inp); 2932 break; 2933 } 2934 shared_key->key = key; 2935 shared_key->keyid = sca->sca_keynumber; 2936 sctp_insert_sharedkey(shared_keys, shared_key); 2937 SCTP_INP_WUNLOCK(inp); 2938 } 2939 break; 2940 } 2941 case SCTP_HMAC_IDENT: 2942 { 2943 struct sctp_hmacalgo *shmac; 2944 sctp_hmaclist_t *hmaclist; 2945 uint32_t hmacid; 2946 size_t size, i, found; 2947 2948 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 2949 size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]); 2950 hmaclist = sctp_alloc_hmaclist(size); 2951 if (hmaclist == NULL) { 2952 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2953 error = ENOMEM; 2954 break; 2955 } 2956 for (i = 0; i < size; i++) { 2957 hmacid = shmac->shmac_idents[i]; 2958 if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) { 2959 /* invalid HMACs were found */ ; 2960 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2961 error = EINVAL; 2962 sctp_free_hmaclist(hmaclist); 2963 goto sctp_set_hmac_done; 2964 } 2965 } 2966 found = 0; 2967 for (i = 0; i < hmaclist->num_algo; i++) { 2968 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 2969 /* already in list */ 2970 found = 1; 2971 } 2972 } 2973 if (!found) { 2974 sctp_free_hmaclist(hmaclist); 2975 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2976 error = EINVAL; 2977 break; 2978 } 2979 /* set it on the endpoint */ 2980 SCTP_INP_WLOCK(inp); 2981 if (inp->sctp_ep.local_hmacs) 2982 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2983 inp->sctp_ep.local_hmacs = hmaclist; 2984 SCTP_INP_WUNLOCK(inp); 2985 sctp_set_hmac_done: 2986 break; 2987 } 2988 case SCTP_AUTH_ACTIVE_KEY: 2989 { 2990 struct sctp_authkeyid *scact; 2991 2992 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize); 2993 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2994 2995 /* set the active key on the right place */ 2996 if (stcb) { 2997 /* set the active key on the assoc */ 2998 if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) { 2999 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3000 error = EINVAL; 3001 } 3002 SCTP_TCB_UNLOCK(stcb); 3003 } else { 3004 /* set the active key on the endpoint */ 3005 SCTP_INP_WLOCK(inp); 3006 if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) { 3007 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3008 error = EINVAL; 3009 } 3010 SCTP_INP_WUNLOCK(inp); 3011 } 3012 break; 3013 } 3014 case SCTP_AUTH_DELETE_KEY: 3015 { 3016 struct sctp_authkeyid *scdel; 3017 3018 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize); 3019 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3020 3021 /* delete the key from the right place */ 3022 if (stcb) { 3023 if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) { 3024 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3025 error = EINVAL; 3026 } 3027 SCTP_TCB_UNLOCK(stcb); 3028 } else { 3029 SCTP_INP_WLOCK(inp); 3030 if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) { 3031 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3032 error = EINVAL; 3033 } 3034 
SCTP_INP_WUNLOCK(inp); 3035 } 3036 break; 3037 } 3038 3039 case SCTP_RESET_STREAMS: 3040 { 3041 struct sctp_stream_reset *strrst; 3042 uint8_t send_in = 0, send_tsn = 0, send_out = 0; 3043 int i; 3044 3045 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3046 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3047 3048 if (stcb == NULL) { 3049 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3050 error = ENOENT; 3051 break; 3052 } 3053 if (stcb->asoc.peer_supports_strreset == 0) { 3054 /* 3055 * Peer does not support it, we return 3056 * protocol not supported since this is true 3057 * for this feature and this peer, not the 3058 * socket request in general. 3059 */ 3060 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3061 error = EPROTONOSUPPORT; 3062 SCTP_TCB_UNLOCK(stcb); 3063 break; 3064 } 3065 if (stcb->asoc.stream_reset_outstanding) { 3066 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3067 error = EALREADY; 3068 SCTP_TCB_UNLOCK(stcb); 3069 break; 3070 } 3071 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3072 send_in = 1; 3073 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3074 send_out = 1; 3075 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3076 send_in = 1; 3077 send_out = 1; 3078 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3079 send_tsn = 1; 3080 } else { 3081 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3082 error = EINVAL; 3083 SCTP_TCB_UNLOCK(stcb); 3084 break; 3085 } 3086 for (i = 0; i < strrst->strrst_num_streams; i++) { 3087 if ((send_in) && 3088 3089 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3090 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3091 error = EINVAL; 3092 goto get_out; 3093 } 3094 if ((send_out) && 3095 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3096 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3097 error = EINVAL; 3098 goto get_out; 3099 } 3100 } 3101 if (error) { 3102 get_out: 3103 SCTP_TCB_UNLOCK(stcb); 3104 break; 3105 } 3106 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3107 strrst->strrst_list, 3108 send_out, (stcb->asoc.str_reset_seq_in - 3), 3109 send_in, send_tsn); 3110 3111 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3112 SCTP_TCB_UNLOCK(stcb); 3113 } 3114 break; 3115 3116 case SCTP_CONNECT_X: 3117 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3118 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3119 error = EINVAL; 3120 break; 3121 } 3122 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3123 break; 3124 3125 case SCTP_CONNECT_X_DELAYED: 3126 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3127 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3128 error = EINVAL; 3129 break; 3130 } 3131 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3132 break; 3133 3134 case SCTP_CONNECT_X_COMPLETE: 3135 { 3136 struct sockaddr *sa; 3137 struct sctp_nets *net; 3138 3139 /* FIXME MT: check correct? 
*/ 3140 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3141 3142 /* find tcb */ 3143 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3144 SCTP_INP_RLOCK(inp); 3145 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3146 if (stcb) { 3147 SCTP_TCB_LOCK(stcb); 3148 net = sctp_findnet(stcb, sa); 3149 } 3150 SCTP_INP_RUNLOCK(inp); 3151 } else { 3152 /* 3153 * We increment here since 3154 * sctp_findassociation_ep_addr() wil do a 3155 * decrement if it finds the stcb as long as 3156 * the locked tcb (last argument) is NOT a 3157 * TCB.. aka NULL. 3158 */ 3159 SCTP_INP_INCR_REF(inp); 3160 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3161 if (stcb == NULL) { 3162 SCTP_INP_DECR_REF(inp); 3163 } 3164 } 3165 3166 if (stcb == NULL) { 3167 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3168 error = ENOENT; 3169 break; 3170 } 3171 if (stcb->asoc.delayed_connection == 1) { 3172 stcb->asoc.delayed_connection = 0; 3173 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3174 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3175 stcb->asoc.primary_destination, 3176 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3177 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3178 } else { 3179 /* 3180 * already expired or did not use delayed 3181 * connectx 3182 */ 3183 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3184 error = EALREADY; 3185 } 3186 SCTP_TCB_UNLOCK(stcb); 3187 } 3188 break; 3189 case SCTP_MAX_BURST: 3190 { 3191 uint8_t *burst; 3192 3193 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 3194 3195 SCTP_INP_WLOCK(inp); 3196 if (*burst) { 3197 inp->sctp_ep.max_burst = *burst; 3198 } 3199 SCTP_INP_WUNLOCK(inp); 3200 } 3201 break; 3202 case SCTP_MAXSEG: 3203 { 3204 struct sctp_assoc_value *av; 3205 int ovh; 3206 3207 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3208 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3209 3210 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3211 ovh = SCTP_MED_OVERHEAD; 3212 } else { 3213 ovh = SCTP_MED_V4_OVERHEAD; 3214 } 3215 if (stcb) { 3216 if (av->assoc_value) { 3217 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3218 } else { 3219 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3220 } 3221 SCTP_TCB_UNLOCK(stcb); 3222 } else { 3223 SCTP_INP_WLOCK(inp); 3224 /* 3225 * FIXME MT: I think this is not in tune 3226 * with the API ID 3227 */ 3228 if (av->assoc_value) { 3229 inp->sctp_frag_point = (av->assoc_value + ovh); 3230 } else { 3231 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3232 } 3233 SCTP_INP_WUNLOCK(inp); 3234 } 3235 } 3236 break; 3237 case SCTP_EVENTS: 3238 { 3239 struct sctp_event_subscribe *events; 3240 3241 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3242 3243 SCTP_INP_WLOCK(inp); 3244 if (events->sctp_data_io_event) { 3245 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3246 } else { 3247 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3248 } 3249 3250 if (events->sctp_association_event) { 3251 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3252 } else { 3253 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3254 } 3255 3256 if (events->sctp_address_event) { 3257 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3258 } else { 3259 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3260 } 3261 3262 if (events->sctp_send_failure_event) { 3263 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3264 } else { 3265 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3266 } 3267 3268 if 
(events->sctp_peer_error_event) { 3269 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3270 } else { 3271 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3272 } 3273 3274 if (events->sctp_shutdown_event) { 3275 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3276 } else { 3277 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3278 } 3279 3280 if (events->sctp_partial_delivery_event) { 3281 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3282 } else { 3283 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3284 } 3285 3286 if (events->sctp_adaptation_layer_event) { 3287 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3288 } else { 3289 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3290 } 3291 3292 if (events->sctp_authentication_event) { 3293 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3294 } else { 3295 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3296 } 3297 3298 if (events->sctp_stream_reset_events) { 3299 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3300 } else { 3301 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3302 } 3303 SCTP_INP_WUNLOCK(inp); 3304 } 3305 break; 3306 3307 case SCTP_ADAPTATION_LAYER: 3308 { 3309 struct sctp_setadaptation *adap_bits; 3310 3311 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3312 SCTP_INP_WLOCK(inp); 3313 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3314 SCTP_INP_WUNLOCK(inp); 3315 } 3316 break; 3317 #ifdef SCTP_DEBUG 3318 case SCTP_SET_INITIAL_DBG_SEQ: 3319 { 3320 uint32_t *vvv; 3321 3322 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3323 SCTP_INP_WLOCK(inp); 3324 inp->sctp_ep.initial_sequence_debug = *vvv; 3325 SCTP_INP_WUNLOCK(inp); 3326 } 3327 break; 3328 #endif 3329 case SCTP_DEFAULT_SEND_PARAM: 3330 { 3331 struct sctp_sndrcvinfo *s_info; 3332 3333 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3334 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3335 3336 if (stcb) { 3337 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3338 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3339 } else { 3340 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3341 error = EINVAL; 3342 } 3343 SCTP_TCB_UNLOCK(stcb); 3344 } else { 3345 SCTP_INP_WLOCK(inp); 3346 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3347 SCTP_INP_WUNLOCK(inp); 3348 } 3349 } 3350 break; 3351 case SCTP_PEER_ADDR_PARAMS: 3352 /* Applys to the specific association */ 3353 { 3354 struct sctp_paddrparams *paddrp; 3355 struct sctp_nets *net; 3356 3357 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3358 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3359 net = NULL; 3360 if (stcb) { 3361 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3362 } else { 3363 /* 3364 * We increment here since 3365 * sctp_findassociation_ep_addr() wil do a 3366 * decrement if it finds the stcb as long as 3367 * the locked tcb (last argument) is NOT a 3368 * TCB.. aka NULL. 
3369 */ 3370 SCTP_INP_INCR_REF(inp); 3371 stcb = sctp_findassociation_ep_addr(&inp, 3372 (struct sockaddr *)&paddrp->spp_address, 3373 &net, NULL, NULL); 3374 if (stcb == NULL) { 3375 SCTP_INP_DECR_REF(inp); 3376 } 3377 } 3378 if (stcb && (net == NULL)) { 3379 struct sockaddr *sa; 3380 3381 sa = (struct sockaddr *)&paddrp->spp_address; 3382 if (sa->sa_family == AF_INET) { 3383 struct sockaddr_in *sin; 3384 3385 sin = (struct sockaddr_in *)sa; 3386 if (sin->sin_addr.s_addr) { 3387 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3388 SCTP_TCB_UNLOCK(stcb); 3389 error = EINVAL; 3390 break; 3391 } 3392 } else if (sa->sa_family == AF_INET6) { 3393 struct sockaddr_in6 *sin6; 3394 3395 sin6 = (struct sockaddr_in6 *)sa; 3396 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3397 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3398 SCTP_TCB_UNLOCK(stcb); 3399 error = EINVAL; 3400 break; 3401 } 3402 } else { 3403 error = EAFNOSUPPORT; 3404 SCTP_TCB_UNLOCK(stcb); 3405 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3406 break; 3407 } 3408 } 3409 /* sanity checks */ 3410 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3411 if (stcb) 3412 SCTP_TCB_UNLOCK(stcb); 3413 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3414 return (EINVAL); 3415 } 3416 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3417 if (stcb) 3418 SCTP_TCB_UNLOCK(stcb); 3419 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3420 return (EINVAL); 3421 } 3422 if (stcb) { 3423 /************************TCB SPECIFIC SET ******************/ 3424 /* 3425 * do we change the timer for HB, we run 3426 * only one? 3427 */ 3428 int ovh = 0; 3429 3430 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3431 ovh = SCTP_MED_OVERHEAD; 3432 } else { 3433 ovh = SCTP_MED_V4_OVERHEAD; 3434 } 3435 3436 if (paddrp->spp_hbinterval) 3437 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3438 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3439 stcb->asoc.heart_beat_delay = 0; 3440 3441 /* network sets ? 
*/ 3442 if (net) { 3443 /************************NET SPECIFIC SET ******************/ 3444 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3445 /* on demand HB */ 3446 if (sctp_send_hb(stcb, 1, net) < 0) { 3447 /* asoc destroyed */ 3448 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3449 error = EINVAL; 3450 break; 3451 } 3452 } 3453 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3454 net->dest_state |= SCTP_ADDR_NOHB; 3455 } 3456 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3457 net->dest_state &= ~SCTP_ADDR_NOHB; 3458 } 3459 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3460 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3461 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3462 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3463 } 3464 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3465 net->mtu = paddrp->spp_pathmtu + ovh; 3466 if (net->mtu < stcb->asoc.smallest_mtu) { 3467 #ifdef SCTP_PRINT_FOR_B_AND_M 3468 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3469 net->mtu); 3470 #endif 3471 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3472 } 3473 } 3474 } 3475 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3476 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3477 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3478 } 3479 } 3480 if (paddrp->spp_pathmaxrxt) 3481 net->failure_threshold = paddrp->spp_pathmaxrxt; 3482 #ifdef INET 3483 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3484 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3485 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3486 } 3487 } 3488 #endif 3489 #ifdef INET6 3490 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3491 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3492 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3493 } 3494 } 3495 #endif 3496 } else { 3497 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3498 if (paddrp->spp_pathmaxrxt) 3499 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3500 3501 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3502 /* Turn back on the timer */ 3503 stcb->asoc.hb_is_disabled = 0; 3504 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3505 } 3506 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3507 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3508 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3509 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3510 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3511 } 3512 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3513 net->mtu = paddrp->spp_pathmtu + ovh; 3514 if (net->mtu < stcb->asoc.smallest_mtu) { 3515 #ifdef SCTP_PRINT_FOR_B_AND_M 3516 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3517 net->mtu); 3518 #endif 3519 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3520 } 3521 } 3522 } 3523 } 3524 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3525 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3526 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3527 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3528 } 3529 } 3530 } 3531 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3532 int cnt_of_unconf = 0; 3533 struct sctp_nets *lnet; 3534 3535 stcb->asoc.hb_is_disabled = 1; 3536 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3537 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3538 cnt_of_unconf++; 3539 } 3540 } 3541 /* 3542 * stop the timer ONLY if we 3543 * have no unconfirmed 3544 * 
addresses 3545 */ 3546 if (cnt_of_unconf == 0) { 3547 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3548 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 3549 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3550 } 3551 } 3552 } 3553 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3554 /* start up the timer. */ 3555 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3556 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3557 } 3558 } 3559 #ifdef INET 3560 if (paddrp->spp_flags & SPP_IPV4_TOS) 3561 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3562 #endif 3563 #ifdef INET6 3564 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3565 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3566 #endif 3567 3568 } 3569 SCTP_TCB_UNLOCK(stcb); 3570 } else { 3571 /************************NO TCB, SET TO default stuff ******************/ 3572 SCTP_INP_WLOCK(inp); 3573 /* 3574 * For the TOS/FLOWLABEL stuff you set it 3575 * with the options on the socket 3576 */ 3577 if (paddrp->spp_pathmaxrxt) { 3578 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3579 } 3580 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3581 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 3582 else if (paddrp->spp_hbinterval) { 3583 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 3584 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 3585 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3586 } 3587 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3588 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3589 3590 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3591 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3592 } 3593 SCTP_INP_WUNLOCK(inp); 3594 } 3595 } 3596 break; 3597 case SCTP_RTOINFO: 3598 { 3599 struct sctp_rtoinfo *srto; 3600 uint32_t new_init, new_min, new_max; 3601 3602 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3603 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3604 3605 if (stcb) { 3606 if (srto->srto_initial) 3607 new_init = srto->srto_initial; 3608 else 3609 new_init = stcb->asoc.initial_rto; 3610 if (srto->srto_max) 3611 new_max = srto->srto_max; 3612 else 3613 new_max = stcb->asoc.maxrto; 3614 if (srto->srto_min) 3615 new_min = srto->srto_min; 3616 else 3617 new_min = stcb->asoc.minrto; 3618 if ((new_min <= new_init) && (new_init <= new_max)) { 3619 stcb->asoc.initial_rto = new_init; 3620 stcb->asoc.maxrto = new_max; 3621 stcb->asoc.minrto = new_min; 3622 } else { 3623 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDOM); 3624 error = EDOM; 3625 } 3626 SCTP_TCB_UNLOCK(stcb); 3627 } else { 3628 SCTP_INP_WLOCK(inp); 3629 if (srto->srto_initial) 3630 new_init = srto->srto_initial; 3631 else 3632 new_init = inp->sctp_ep.initial_rto; 3633 if (srto->srto_max) 3634 new_max = srto->srto_max; 3635 else 3636 new_max = inp->sctp_ep.sctp_maxrto; 3637 if (srto->srto_min) 3638 new_min = srto->srto_min; 3639 else 3640 new_min = inp->sctp_ep.sctp_minrto; 3641 if ((new_min <= new_init) && (new_init <= new_max)) { 3642 inp->sctp_ep.initial_rto = new_init; 3643 inp->sctp_ep.sctp_maxrto = new_max; 3644 inp->sctp_ep.sctp_minrto = new_min; 3645 } else { 3646 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDOM); 3647 error = EDOM; 3648 } 3649 SCTP_INP_WUNLOCK(inp); 3650 } 3651 } 3652 break; 3653 case SCTP_ASSOCINFO: 3654 { 3655 struct sctp_assocparams *sasoc; 3656 3657 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 3658 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 3659 if 
(sasoc->sasoc_cookie_life) { 3660 /* boundary check the cookie life */ 3661 if (sasoc->sasoc_cookie_life < 1000) 3662 sasoc->sasoc_cookie_life = 1000; 3663 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 3664 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; 3665 } 3666 } 3667 if (stcb) { 3668 if (sasoc->sasoc_asocmaxrxt) 3669 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 3670 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 3671 sasoc->sasoc_peer_rwnd = 0; 3672 sasoc->sasoc_local_rwnd = 0; 3673 if (sasoc->sasoc_cookie_life) { 3674 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 3675 } 3676 SCTP_TCB_UNLOCK(stcb); 3677 } else { 3678 SCTP_INP_WLOCK(inp); 3679 if (sasoc->sasoc_asocmaxrxt) 3680 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 3681 sasoc->sasoc_number_peer_destinations = 0; 3682 sasoc->sasoc_peer_rwnd = 0; 3683 sasoc->sasoc_local_rwnd = 0; 3684 if (sasoc->sasoc_cookie_life) { 3685 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 3686 } 3687 SCTP_INP_WUNLOCK(inp); 3688 } 3689 } 3690 break; 3691 case SCTP_INITMSG: 3692 { 3693 struct sctp_initmsg *sinit; 3694 3695 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 3696 SCTP_INP_WLOCK(inp); 3697 if (sinit->sinit_num_ostreams) 3698 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 3699 3700 if (sinit->sinit_max_instreams) 3701 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 3702 3703 if (sinit->sinit_max_attempts) 3704 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 3705 3706 if (sinit->sinit_max_init_timeo) 3707 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 3708 SCTP_INP_WUNLOCK(inp); 3709 } 3710 break; 3711 case SCTP_PRIMARY_ADDR: 3712 { 3713 struct sctp_setprim *spa; 3714 struct sctp_nets *net, *lnet; 3715 3716 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 3717 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 3718 3719 net = NULL; 3720 if (stcb) { 3721 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 3722 } else { 3723 /* 3724 * We increment here since 3725 * sctp_findassociation_ep_addr() wil do a 3726 * decrement if it finds the stcb as long as 3727 * the locked tcb (last argument) is NOT a 3728 * TCB.. aka NULL. 3729 */ 3730 SCTP_INP_INCR_REF(inp); 3731 stcb = sctp_findassociation_ep_addr(&inp, 3732 (struct sockaddr *)&spa->ssp_addr, 3733 &net, NULL, NULL); 3734 if (stcb == NULL) { 3735 SCTP_INP_DECR_REF(inp); 3736 } 3737 } 3738 3739 if ((stcb) && (net)) { 3740 if ((net != stcb->asoc.primary_destination) && 3741 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 3742 /* Ok we need to set it */ 3743 lnet = stcb->asoc.primary_destination; 3744 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 3745 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 3746 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 3747 } 3748 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 3749 } 3750 } 3751 } else { 3752 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3753 error = EINVAL; 3754 } 3755 if (stcb) { 3756 SCTP_TCB_UNLOCK(stcb); 3757 } 3758 } 3759 break; 3760 case SCTP_SET_DYNAMIC_PRIMARY: 3761 { 3762 union sctp_sockstore *ss; 3763 3764 error = priv_check(curthread, 3765 PRIV_NETINET_RESERVEDPORT); 3766 if (error) 3767 break; 3768 3769 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 3770 /* SUPER USER CHECK? 
*/ 3771 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 3772 } 3773 break; 3774 case SCTP_SET_PEER_PRIMARY_ADDR: 3775 { 3776 struct sctp_setpeerprim *sspp; 3777 3778 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 3779 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 3780 if (stcb != NULL) { 3781 struct sctp_ifa *ifa; 3782 3783 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 3784 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 3785 if (ifa == NULL) { 3786 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3787 error = EINVAL; 3788 goto out_of_it; 3789 } 3790 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 3791 /* 3792 * Must validate the ifa found is in 3793 * our ep 3794 */ 3795 struct sctp_laddr *laddr; 3796 int found = 0; 3797 3798 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 3799 if (laddr->ifa == NULL) { 3800 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 3801 __FUNCTION__); 3802 continue; 3803 } 3804 if (laddr->ifa == ifa) { 3805 found = 1; 3806 break; 3807 } 3808 } 3809 if (!found) { 3810 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3811 error = EINVAL; 3812 goto out_of_it; 3813 } 3814 } 3815 if (sctp_set_primary_ip_address_sa(stcb, 3816 (struct sockaddr *)&sspp->sspp_addr) != 0) { 3817 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3818 error = EINVAL; 3819 } 3820 out_of_it: 3821 SCTP_TCB_UNLOCK(stcb); 3822 } else { 3823 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3824 error = EINVAL; 3825 } 3826 3827 } 3828 break; 3829 case SCTP_BINDX_ADD_ADDR: 3830 { 3831 struct sctp_getaddresses *addrs; 3832 size_t sz; 3833 struct thread *td; 3834 int prison = 0; 3835 3836 td = (struct thread *)p; 3837 if (jailed(td->td_ucred)) { 3838 prison = 1; 3839 } 3840 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 3841 optsize); 3842 if (addrs->addr->sa_family == AF_INET) { 3843 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 3844 if (optsize < sz) { 3845 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3846 error = EINVAL; 3847 break; 3848 } 3849 if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) { 3850 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL); 3851 error = EADDRNOTAVAIL; 3852 } 3853 } else if (addrs->addr->sa_family == AF_INET6) { 3854 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 3855 if (optsize < sz) { 3856 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3857 error = EINVAL; 3858 break; 3859 } 3860 /* JAIL XXXX Add else here for V6 */ 3861 } 3862 sctp_bindx_add_address(so, inp, addrs->addr, 3863 addrs->sget_assoc_id, vrf_id, 3864 &error, p); 3865 } 3866 break; 3867 case SCTP_BINDX_REM_ADDR: 3868 { 3869 struct sctp_getaddresses *addrs; 3870 size_t sz; 3871 struct thread *td; 3872 int prison = 0; 3873 3874 td = (struct thread *)p; 3875 if (jailed(td->td_ucred)) { 3876 prison = 1; 3877 } 3878 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 3879 if (addrs->addr->sa_family == AF_INET) { 3880 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 3881 if (optsize < sz) { 3882 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3883 error = EINVAL; 3884 break; 3885 } 3886 if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in 


int
sctp_ctloutput(struct socket *so, struct sockopt *sopt)
{
    void *optval = NULL;
    size_t optsize = 0;
    struct sctp_inpcb *inp;
    void *p;
    int error = 0;

    inp = (struct sctp_inpcb *)so->so_pcb;
    if (inp == 0) {
        /* I made this the same as TCP since we are not set up. */
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return (ECONNRESET);
    }
    if (sopt->sopt_level != IPPROTO_SCTP) {
        /* wrong proto level... send back up to IP */
#ifdef INET6
        if (INP_CHECK_SOCKAF(so, AF_INET6))
            error = ip6_ctloutput(so, sopt);
        else
#endif				/* INET6 */
            error = ip_ctloutput(so, sopt);
        return (error);
    }
    optsize = sopt->sopt_valsize;
    if (optsize) {
        SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
        if (optval == NULL) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
            return (ENOBUFS);
        }
        error = sooptcopyin(sopt, optval, optsize, optsize);
        if (error) {
            SCTP_FREE(optval, SCTP_M_SOCKOPT);
            goto out;
        }
    }
    p = (void *)sopt->sopt_td;
    if (sopt->sopt_dir == SOPT_SET) {
        error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
    } else if (sopt->sopt_dir == SOPT_GET) {
        error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
    } else {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        error = EINVAL;
    }
    if ((error == 0) && (optval != NULL)) {
        error = sooptcopyout(sopt, optval, optsize);
        SCTP_FREE(optval, SCTP_M_SOCKOPT);
    } else if (optval != NULL) {
        SCTP_FREE(optval, SCTP_M_SOCKOPT);
    }
out:
    return (error);
}
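
/*
 * Illustrative only (not compiled here): both setsockopt(2) and getsockopt(2)
 * funnel through sctp_ctloutput() above; for a GET the option buffer is
 * copied back out via sooptcopyout().  A userland sketch of reading back the
 * SCTP_INITMSG settings written in the earlier example (sd is a placeholder
 * descriptor):
 *
 *    struct sctp_initmsg init;
 *    socklen_t len = sizeof(init);
 *
 *    if (getsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &init, &len) == 0)
 *        printf("ostreams %u\n", init.sinit_num_ostreams);
 */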


static int
sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
{
    int error = 0;
    int create_lock_on = 0;
    uint32_t vrf_id;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb = NULL;

    inp = (struct sctp_inpcb *)so->so_pcb;
    if (inp == 0) {
        /* I made this the same as TCP since we are not set up. */
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return (ECONNRESET);
    }
    if (addr == NULL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return EINVAL;
    }
    if ((addr->sa_family == AF_INET6) && (addr->sa_len != sizeof(struct sockaddr_in6))) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return (EINVAL);
    }
    if ((addr->sa_family == AF_INET) && (addr->sa_len != sizeof(struct sockaddr_in))) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return (EINVAL);
    }
    SCTP_ASOC_CREATE_LOCK(inp);
    create_lock_on = 1;

    SCTP_INP_INCR_REF(inp);
    if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
        /* Should I really unlock ? */
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
        error = EFAULT;
        goto out_now;
    }
#ifdef INET6
    if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
        (addr->sa_family == AF_INET6)) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        error = EINVAL;
        goto out_now;
    }
#endif				/* INET6 */
    if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
        SCTP_PCB_FLAGS_UNBOUND) {
        /* Bind an ephemeral port */
        error = sctp_inpcb_bind(so, NULL, NULL, p);
        if (error) {
            goto out_now;
        }
    }
    /* Now do we connect? */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        error = EINVAL;
        goto out_now;
    }
    if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
        (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
        /* We are already connected AND the TCP model */
        SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
        error = EADDRINUSE;
        goto out_now;
    }
    if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
        SCTP_INP_RLOCK(inp);
        stcb = LIST_FIRST(&inp->sctp_asoc_list);
        SCTP_INP_RUNLOCK(inp);
    } else {
        /*
         * We increment here since sctp_findassociation_ep_addr()
         * will do a decrement if it finds the stcb, as long as the
         * locked tcb (last argument) is NOT a TCB, i.e., NULL.
         */
        SCTP_INP_INCR_REF(inp);
        stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
        if (stcb == NULL) {
            SCTP_INP_DECR_REF(inp);
        } else {
            SCTP_TCB_LOCK(stcb);
        }
    }
    if (stcb != NULL) {
        /* Already have, or are bringing up, an association */
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
        error = EALREADY;
        goto out_now;
    }
    vrf_id = inp->def_vrf_id;
    /* We are GOOD to go */
    stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
    if (stcb == NULL) {
        /* Gak! no memory */
        goto out_now;
    }
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
        stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
        /* Set the connected flag so we can queue data */
        soisconnecting(so);
    }
    SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
    (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);

    /* initialize authentication parameters for the assoc */
    sctp_initialize_auth_params(inp, stcb);

    sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
    SCTP_TCB_UNLOCK(stcb);
out_now:
    if (create_lock_on) {
        SCTP_ASOC_CREATE_UNLOCK(inp);
    }
    SCTP_INP_DECR_REF(inp);
    return error;
}
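
/*
 * Illustrative only (not compiled here): sctp_connect() above is reached from
 * an ordinary connect(2) on a one-to-one style socket.  It allocates the
 * association, puts it in COOKIE_WAIT and sends the INIT; for a blocking
 * socket the generic connect(2) code then typically waits for the association
 * to come up.  Sketch, with the address and port chosen only for the example
 * (userland headers such as netinet/sctp.h omitted):
 *
 *    int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *    struct sockaddr_in peer;
 *
 *    memset(&peer, 0, sizeof(peer));
 *    peer.sin_family = AF_INET;
 *    peer.sin_len = sizeof(peer);
 *    peer.sin_port = htons(5001);
 *    peer.sin_addr.s_addr = inet_addr("192.0.2.1");
 *    if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *        err(1, "connect");
 */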

int
sctp_listen(struct socket *so, int backlog, struct thread *p)
{
    /*
     * Note this module depends on the protocol processing being called
     * AFTER any socket level flags and backlog are applied to the
     * socket. The traditional way that the socket flags are applied is
     * AFTER protocol processing. We have made a change to the
     * sys/kern/uipc_socket.c module to reverse this but this MUST be in
     * place if the socket API for SCTP is to work properly.
     */

    int error = 0;
    struct sctp_inpcb *inp;

    inp = (struct sctp_inpcb *)so->so_pcb;
    if (inp == 0) {
        /* I made this the same as TCP since we are not set up. */
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return (ECONNRESET);
    }
    SCTP_INP_RLOCK(inp);
#ifdef SCTP_LOCK_LOGGING
    if (sctp_logging_level & SCTP_LOCK_LOGGING_ENABLE) {
        sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
    }
#endif
    SOCK_LOCK(so);
    error = solisten_proto_check(so);
    if (error) {
        SOCK_UNLOCK(so);
        SCTP_INP_RUNLOCK(inp);
        return (error);
    }
    if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
        (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
        /* We are already connected AND the TCP model */
        SCTP_INP_RUNLOCK(inp);
        SOCK_UNLOCK(so);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
        return (EADDRINUSE);
    }
    if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
        /* We must do a bind. */
        SOCK_UNLOCK(so);
        SCTP_INP_RUNLOCK(inp);
        if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
            /* bind error, probably perm */
            return (error);
        }
        SOCK_LOCK(so);
    } else {
        SCTP_INP_RUNLOCK(inp);
    }
    /* It appears for 7.0 and on, we must always call this. */
    solisten_proto(so, backlog);
    if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
        /* remove the ACCEPTCONN flag for one-to-many sockets */
        so->so_options &= ~SO_ACCEPTCONN;
    }
    if (backlog == 0) {
        /* turning off listen */
        so->so_options &= ~SO_ACCEPTCONN;
    }
    SOCK_UNLOCK(so);
    return (error);
}
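
/*
 * Illustrative only (not compiled here): sctp_listen() above is reached via
 * listen(2).  On a one-to-many (SOCK_SEQPACKET) socket the SO_ACCEPTCONN flag
 * is cleared again, so accept(2) is not used there; new associations are
 * handled on the listening descriptor itself.  One-to-one sketch, with the
 * port chosen only for the example:
 *
 *    int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *    struct sockaddr_in addr;
 *
 *    memset(&addr, 0, sizeof(addr));
 *    addr.sin_family = AF_INET;
 *    addr.sin_len = sizeof(addr);
 *    addr.sin_port = htons(5001);
 *    if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
 *        listen(sd, 5) < 0)
 *        err(1, "bind/listen");
 */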

static int sctp_defered_wakeup_cnt = 0;

int
sctp_accept(struct socket *so, struct sockaddr **addr)
{
    struct sctp_tcb *stcb;
    struct sctp_inpcb *inp;
    union sctp_sockstore store;
    int error;

    inp = (struct sctp_inpcb *)so->so_pcb;

    if (inp == 0) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return (ECONNRESET);
    }
    SCTP_INP_RLOCK(inp);
    if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
        SCTP_INP_RUNLOCK(inp);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
        return (EOPNOTSUPP);
    }
    if (so->so_state & SS_ISDISCONNECTED) {
        SCTP_INP_RUNLOCK(inp);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
        return (ECONNABORTED);
    }
    stcb = LIST_FIRST(&inp->sctp_asoc_list);
    if (stcb == NULL) {
        SCTP_INP_RUNLOCK(inp);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return (ECONNRESET);
    }
    SCTP_TCB_LOCK(stcb);
    SCTP_INP_RUNLOCK(inp);
    store = stcb->asoc.primary_destination->ro._l_addr;
    SCTP_TCB_UNLOCK(stcb);
    if (store.sa.sa_family == AF_INET) {
        struct sockaddr_in *sin;

        SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
        sin->sin_family = AF_INET;
        sin->sin_len = sizeof(*sin);
        sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
        sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
        *addr = (struct sockaddr *)sin;
    } else {
        struct sockaddr_in6 *sin6;

        SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
        sin6->sin6_family = AF_INET6;
        sin6->sin6_len = sizeof(*sin6);
        sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
        sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
        if ((error = sa6_recoverscope(sin6)) != 0) {
            SCTP_FREE_SONAME(sin6);
            return (error);
        }
        *addr = (struct sockaddr *)sin6;
    }
    /* Wake any delayed sleep action */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
        SCTP_INP_WLOCK(inp);
        inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
        if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
            inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
            SCTP_INP_WUNLOCK(inp);
            SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
            if (sowriteable(inp->sctp_socket)) {
                sowwakeup_locked(inp->sctp_socket);
            } else {
                SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
            }
            SCTP_INP_WLOCK(inp);
        }
        if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
            inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
            SCTP_INP_WUNLOCK(inp);
            SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
            if (soreadable(inp->sctp_socket)) {
                sctp_defered_wakeup_cnt++;
                sorwakeup_locked(inp->sctp_socket);
            } else {
                SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
            }
            SCTP_INP_WLOCK(inp);
        }
        SCTP_INP_WUNLOCK(inp);
    }
    return (0);
}
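
/*
 * Illustrative only (not compiled here): sctp_accept() above fills *addr with
 * the peer's current primary destination, so the address reported by
 * accept(2) is that primary path rather than necessarily the address the
 * INIT arrived on.  Sketch (sd is a placeholder listening descriptor):
 *
 *    struct sockaddr_in from;
 *    socklen_t fromlen = sizeof(from);
 *    int cd = accept(sd, (struct sockaddr *)&from, &fromlen);
 *
 *    if (cd >= 0)
 *        printf("association from %s\n", inet_ntoa(from.sin_addr));
 */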

int
sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
{
    struct sockaddr_in *sin;
    uint32_t vrf_id;
    struct sctp_inpcb *inp;
    struct sctp_ifa *sctp_ifa;

    /*
     * Do the malloc first in case it blocks.
     */
    SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(*sin);
    inp = (struct sctp_inpcb *)so->so_pcb;
    if (!inp) {
        SCTP_FREE_SONAME(sin);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return ECONNRESET;
    }
    SCTP_INP_RLOCK(inp);
    sin->sin_port = inp->sctp_lport;
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
            struct sctp_tcb *stcb;
            struct sockaddr_in *sin_a;
            struct sctp_nets *net;
            int fnd;

            stcb = LIST_FIRST(&inp->sctp_asoc_list);
            if (stcb == NULL) {
                goto notConn;
            }
            fnd = 0;
            sin_a = NULL;
            SCTP_TCB_LOCK(stcb);
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                sin_a = (struct sockaddr_in *)&net->ro._l_addr;
                if (sin_a == NULL)
                    /* this will make coverity happy */
                    continue;

                if (sin_a->sin_family == AF_INET) {
                    fnd = 1;
                    break;
                }
            }
            if ((!fnd) || (sin_a == NULL)) {
                /* punt */
                SCTP_TCB_UNLOCK(stcb);
                goto notConn;
            }
            vrf_id = inp->def_vrf_id;
            sctp_ifa = sctp_source_address_selection(inp,
                stcb,
                (sctp_route_t *)&net->ro,
                net, 0, vrf_id);
            if (sctp_ifa) {
                sin->sin_addr = sctp_ifa->address.sin.sin_addr;
                sctp_free_ifa(sctp_ifa);
            }
            SCTP_TCB_UNLOCK(stcb);
        } else {
            /* For the bound-all case you get back 0 */
    notConn:
            sin->sin_addr.s_addr = 0;
        }

    } else {
        /* Take the first IPv4 address in the list */
        struct sctp_laddr *laddr;
        int fnd = 0;

        LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
            if (laddr->ifa->address.sa.sa_family == AF_INET) {
                struct sockaddr_in *sin_a;

                sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
                sin->sin_addr = sin_a->sin_addr;
                fnd = 1;
                break;
            }
        }
        if (!fnd) {
            SCTP_FREE_SONAME(sin);
            SCTP_INP_RUNLOCK(inp);
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
            return ENOENT;
        }
    }
    SCTP_INP_RUNLOCK(inp);
    (*addr) = (struct sockaddr *)sin;
    return (0);
}
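
/*
 * Illustrative only (not compiled here): sctp_ingetaddr() above backs
 * getsockname(2).  For a wildcard-bound endpoint with no established
 * association the address comes back as INADDR_ANY; otherwise it is the
 * source address selected for the primary path, or the first bound IPv4
 * address for a non-bound-all endpoint.  Sketch (sd is a placeholder
 * descriptor):
 *
 *    struct sockaddr_in local;
 *    socklen_t len = sizeof(local);
 *
 *    if (getsockname(sd, (struct sockaddr *)&local, &len) == 0)
 *        printf("bound to %s:%u\n", inet_ntoa(local.sin_addr),
 *            ntohs(local.sin_port));
 */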

int
sctp_peeraddr(struct socket *so, struct sockaddr **addr)
{
    struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
    int fnd;
    struct sockaddr_in *sin_a;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;

    /* Do the malloc first in case it blocks. */
    inp = (struct sctp_inpcb *)so->so_pcb;
    if ((inp == NULL) ||
        ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
        /* UDP type and listeners will drop out here */
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
        return (ENOTCONN);
    }
    SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(*sin);

    /* We must recapture in case we blocked */
    inp = (struct sctp_inpcb *)so->so_pcb;
    if (!inp) {
        SCTP_FREE_SONAME(sin);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return ECONNRESET;
    }
    SCTP_INP_RLOCK(inp);
    stcb = LIST_FIRST(&inp->sctp_asoc_list);
    if (stcb) {
        SCTP_TCB_LOCK(stcb);
    }
    SCTP_INP_RUNLOCK(inp);
    if (stcb == NULL) {
        SCTP_FREE_SONAME(sin);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
        return ECONNRESET;
    }
    fnd = 0;
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sin_a = (struct sockaddr_in *)&net->ro._l_addr;
        if (sin_a->sin_family == AF_INET) {
            fnd = 1;
            sin->sin_port = stcb->rport;
            sin->sin_addr = sin_a->sin_addr;
            break;
        }
    }
    SCTP_TCB_UNLOCK(stcb);
    if (!fnd) {
        /* No IPv4 address */
        SCTP_FREE_SONAME(sin);
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
        return ENOENT;
    }
    (*addr) = (struct sockaddr *)sin;
    return (0);
}

struct pr_usrreqs sctp_usrreqs = {
    .pru_abort = sctp_abort,
    .pru_accept = sctp_accept,
    .pru_attach = sctp_attach,
    .pru_bind = sctp_bind,
    .pru_connect = sctp_connect,
    .pru_control = in_control,
    .pru_close = sctp_close,
    .pru_detach = sctp_close,
    .pru_sopoll = sopoll_generic,
    .pru_disconnect = sctp_disconnect,
    .pru_listen = sctp_listen,
    .pru_peeraddr = sctp_peeraddr,
    .pru_send = sctp_sendm,
    .pru_shutdown = sctp_shutdown,
    .pru_sockaddr = sctp_ingetaddr,
    .pru_sosend = sctp_sosend,
    .pru_soreceive = sctp_soreceive
};
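
/*
 * Illustrative only (not compiled here): the pr_usrreqs table above is what
 * wires the socket system calls to the handlers in this file; for example,
 * getpeername(2) reaches sctp_peeraddr() through .pru_peeraddr.  Userland
 * sketch (sd is a placeholder connected one-to-one descriptor):
 *
 *    struct sockaddr_in peer;
 *    socklen_t len = sizeof(peer);
 *
 *    if (getpeername(sd, (struct sockaddr *)&peer, &len) == 0)
 *        printf("peer %s:%u\n", inet_ntoa(peer.sin_addr),
 *            ntohs(peer.sin_port));
 */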