/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_var.h>
#if defined(INET6)
#endif
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_cc_functions.h>
#include <netinet/udp.h>
void
sctp_init(void)
{
	u_long sb_max_adj;

	bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));

	/* Initialize and modify the sysctled variables */
	sctp_init_sysctls();
	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
		SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
	/*
	 * Allow a user to take no more than 1/2 the number of clusters or
	 * the SB_MAX, whichever is smaller, for the send window.
	 */
	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
	SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
	/*
	 * Now for the recv window, should we take the same amount? Or
	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above? For
	 * now I will just copy.
	 */
	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);

	SCTP_BASE_VAR(first_time) = 0;
	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
	sctp_pcb_init();
#if defined(SCTP_PACKET_LOGGING)
	SCTP_BASE_VAR(packet_log_writers) = 0;
	SCTP_BASE_VAR(packet_log_end) = 0;
	bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
#endif
}

void
sctp_finish(void)
{
	sctp_pcb_finish();
}
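/*
 * Usage sketch (illustrative, not part of the protocol code): the
 * sctp_sendspace/sctp_recvspace defaults computed above are what
 * sctp_attach() later hands to SCTP_SORESERVE(). On FreeBSD they are
 * typically exposed as the net.inet.sctp.sendspace and
 * net.inet.sctp.recvspace sysctls, and an application can still override
 * them per socket with the generic buffer options, roughly:
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	int sz = 256 * 1024;
 *
 *	if (setsockopt(sd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz)) < 0)
 *		perror("setsockopt(SO_SNDBUF)");
 */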
void
sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint16_t nxtsz)
{
	struct sctp_tmit_chunk *chk;

	/* Adjust that too */
	stcb->asoc.smallest_mtu = nxtsz;
	/* now off to subtract IP_DF flag if needed */
#ifdef SCTP_PRINT_FOR_B_AND_M
	SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
	    inp, stcb, net, nxtsz);
#endif
	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			/*
			 * For this guy we also mark for immediate resend
			 * since we sent too big a chunk.
			 */
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->rec.data.doing_fast_retransmit = 0;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
				    chk->whoTo->flight_size,
				    chk->book_size,
				    (uintptr_t) chk->whoTo,
				    chk->rec.data.TSN_seq);
			}
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			sctp_flight_size_decrease(chk);
			sctp_total_flight_decrease(stcb, chk);
		}
	}
}

static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
		}
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not an unreachable message due to frag. */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	totsz = ip->ip_len;

	nxtsz = ntohs(icmph->icmp_nextmtu);
	if (nxtsz == 0) {
		/*
		 * old type router that does not tell us what the next MTU
		 * size is. Rats, we will have to guess (in an educated
		 * fashion of course).
		 */
		nxtsz = find_next_best_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
		if (net->port) {
			net->mtu -= sizeof(struct udphdr);
		}
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
#ifdef SCTP_PRINT_FOR_B_AND_M
		SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
		    nxtsz);
#endif
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}
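/*
 * Usage sketch (illustrative): the per-destination MTU adjusted above is
 * visible to applications through the SCTP_GET_PEER_ADDR_INFO socket
 * option, here assumed to be reached via the usual sctp_opt_info() wrapper:
 *
 *	struct sctp_paddrinfo pinfo;
 *	socklen_t len = sizeof(pinfo);
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	memcpy(&pinfo.spinfo_address, peer_sa, peer_sa_len);
 *	if (sctp_opt_info(sd, assoc_id, SCTP_GET_PEER_ADDR_INFO,
 *	    &pinfo, &len) == 0)
 *		printf("path MTU to peer: %u\n", pinfo.spinfo_mtu);
 */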
void
sctp_notify(struct sctp_inpcb *inp,
    struct ip *ip,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	/* protection */
	int reason;
	struct icmp *icmph;

	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {

		/*
		 * Hmm, reachability problems we must examine closely. If
		 * it's not reachable, we may have lost a network. Or if
		 * there is NO protocol at the other end named SCTP, well,
		 * we consider it an OOTB abort.
		 */
		if (net->dest_state & SCTP_ADDR_REACHABLE) {
			/* Ok that destination is NOT reachable */
			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
			    net->error_count,
			    net->failure_threshold,
			    net);

			net->dest_state &= ~SCTP_ADDR_REACHABLE;
			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
			/*
			 * JRS 5/14/07 - If a destination is unreachable,
			 * the PF bit is turned off. This allows an
			 * unambiguous use of the PF bit for destinations
			 * that are reachable but potentially failed. If the
			 * destination is set to the unreachable state, also
			 * set the destination to the PF state.
			 */
			/*
			 * Add debug message here if destination is not in
			 * PF state.
			 */
			/* Stop any running T3 timers here? */
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
				    net);
			}
			net->error_count = net->failure_threshold + 1;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
			    stcb, SCTP_FAILED_THRESHOLD,
			    (void *)net, SCTP_SO_NOT_LOCKED);
		}
		SCTP_TCB_UNLOCK(stcb);
	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
		/*
		 * Here the peer is either playing tricks on us, including
		 * an address that belongs to someone who does not support
		 * SCTP, OR it was a userland implementation that shut down
		 * and now is dead. In either case treat it like an OOTB
		 * abort with no TCB.
		 */
		reason = SCTP_PEER_FAULTY;
		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
#endif
		/* no need to unlock here, since the TCB is gone */
	} else {
		SCTP_TCB_UNLOCK(stcb);
	}
}
void
sctp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct sctphdr *sh;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;
	if (sa->sa_family != AF_INET ||
	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
		return;
	}
	if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}
	if (ip) {
		struct sctp_inpcb *inp = NULL;
		struct sctp_tcb *stcb = NULL;
		struct sctp_nets *net = NULL;
		struct sockaddr_in to, from;

		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		bzero(&to, sizeof(to));
		bzero(&from, sizeof(from));
		from.sin_family = to.sin_family = AF_INET;
		from.sin_len = to.sin_len = sizeof(to);
		from.sin_port = sh->src_port;
		from.sin_addr = ip->ip_src;
		to.sin_port = sh->dest_port;
		to.sin_addr = ip->ip_dst;

		/*
		 * 'to' holds the dest of the packet that failed to be sent.
		 * 'from' holds our local endpoint address. Thus we reverse
		 * the to and the from in the lookup.
		 */
		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
		    (struct sockaddr *)&to,
		    &inp, &net, 1, vrf_id);
		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
			if (cmd != PRC_MSGSIZE) {
				sctp_notify(inp, ip, sh,
				    (struct sockaddr *)&to, stcb,
				    net);
			} else {
				/* handle possible ICMP size messages */
				sctp_notify_mbuf(inp, stcb, net, ip, sh);
			}
		} else {
			if ((stcb == NULL) && (inp != NULL)) {
				/* reduce ref-count */
				SCTP_INP_WLOCK(inp);
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
			}
		}
	}
	return;
}

static int
sctp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct sctp_inpcb *inp;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;
	int error;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);

	if (error)
		return (error);

	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);

	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
	    sintosa(&addrs[1]),
	    &inp, &net, 1, vrf_id);
	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
		if ((inp != NULL) && (stcb == NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			goto cred_can_cont;
		}
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		error = ENOENT;
		goto out;
	}
	SCTP_TCB_UNLOCK(stcb);
	/*
	 * We use the write lock here, only since in the error leg we need
	 * it. If we used RLOCK, then we would have to
	 * wlock/decr/unlock/rlock. Which in theory could create a hole.
	 * Better to use higher wlock.
	 */
	SCTP_INP_WLOCK(inp);
cred_can_cont:
	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
	if (error) {
		SCTP_INP_WUNLOCK(inp);
		goto out;
	}
	cru2x(inp->sctp_socket->so_cred, &xuc);
	SCTP_INP_WUNLOCK(inp);
	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
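/*
 * Usage sketch (illustrative): the net.inet.sctp.getcred node declared above
 * takes two struct sockaddr_in values describing the connection's endpoints
 * and returns the owning socket's struct xucred. A privileged userland
 * caller might query it roughly like this (assumed usage, error handling and
 * address setup trimmed):
 *
 *	struct sockaddr_in addrs[2];	// the two endpoint addresses
 *	struct xucred xuc;
 *	size_t len = sizeof(xuc);
 *
 *	if (sysctlbyname("net.inet.sctp.getcred", &xuc, &len,
 *	    addrs, sizeof(addrs)) == 0)
 *		printf("owner uid: %u\n", (unsigned)xuc.cr_uid);
 */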
static void
sctp_abort(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		return;
	}
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
		sctp_log_closing(inp, NULL, 16);
#endif
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}

static int
sctp_attach(struct socket *so, int proto, struct thread *p)
{
	struct sctp_inpcb *inp;
	struct inpcb *ip_inp;
	int error;
	uint32_t vrf_id = SCTP_DEFAULT_VRFID;

#ifdef IPSEC
	uint32_t flags;

#endif

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp != 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
		if (error) {
			return error;
		}
	}
	error = sctp_inpcb_alloc(so, vrf_id);
	if (error) {
		return error;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	SCTP_INP_WLOCK(inp);
	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
	ip_inp = &inp->ip_inp.inp;
	ip_inp->inp_vflag |= INP_IPV4;
	ip_inp->inp_ip_ttl = MODULE_GLOBAL(MOD_INET, ip_defttl);
#ifdef IPSEC
	error = ipsec_init_policy(so, &ip_inp->inp_sp);
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (error != 0) {
		flags = inp->sctp_flags;
		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 15);
#endif
			SCTP_INP_WUNLOCK(inp);
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		} else {
			SCTP_INP_WUNLOCK(inp);
		}
		return error;
	}
#endif				/* IPSEC */
	SCTP_INP_WUNLOCK(inp);
	return 0;
}
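/*
 * Usage sketch (illustrative): sctp_attach() above runs when userland
 * creates an IPv4 SCTP socket; at that point the sendspace/recvspace
 * defaults from sctp_init() are reserved on the socket buffers. Either
 * socket style reaches it (error handling omitted in this sketch):
 *
 *	int one_to_one  = socket(AF_INET, SOCK_STREAM,    IPPROTO_SCTP);
 *	int one_to_many = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 */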
static int
sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	struct sctp_inpcb *inp = NULL;
	int error;

#ifdef INET6
	if (addr && addr->sa_family != AF_INET) {
		/* must be a v4 address! */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
#endif				/* INET6 */
	if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	error = sctp_inpcb_bind(so, addr, NULL, p);
	return error;
}

void
sctp_close(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return;

	/*
	 * Inform all the lower-layer assocs that we are done.
	 */
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
		    (so->so_rcv.sb_cc > 0)) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 13);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		} else {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 14);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		}
		/*
		 * The socket is now detached, no matter what the state of
		 * the SCTP association.
		 */
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}
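/*
 * Usage sketch (illustrative): as the SO_LINGER/so_linger == 0 test in
 * sctp_close() above shows, an application that wants close() to abort the
 * association rather than run the graceful SHUTDOWN sequence can arm a
 * zero-timeout linger first:
 *
 *	struct linger lng = { .l_onoff = 1, .l_linger = 0 };
 *
 *	if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &lng, sizeof(lng)) < 0)
 *		perror("setsockopt(SO_LINGER)");
 *	close(sd);	// sctp_close() takes the abortive path
 */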
int
sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *p);


int
sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *p)
{
	struct sctp_inpcb *inp;
	int error;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		sctp_m_freem(m);
		return EINVAL;
	}
	/* Got to have a to-address if we are NOT a connected socket */
	if ((addr == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
	    ) {
		goto connected_type;
	} else if (addr == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
		error = EDESTADDRREQ;
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		return (error);
	}
#ifdef INET6
	if (addr->sa_family != AF_INET) {
		/* must be a v4 address! */
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		error = EDESTADDRREQ;
		return EDESTADDRREQ;
	}
#endif				/* INET6 */
connected_type:
	/* now what about control */
	if (control) {
		if (inp->control) {
			SCTP_PRINTF("huh? control set?\n");
			sctp_m_freem(inp->control);
			inp->control = NULL;
		}
		inp->control = control;
	}
	/* Place the data */
	if (inp->pkt) {
		SCTP_BUF_NEXT(inp->pkt_last) = m;
		inp->pkt_last = m;
	} else {
		inp->pkt_last = inp->pkt = m;
	}
	if (
	/* FreeBSD uses a flag passed */
	    ((flags & PRUS_MORETOCOME) == 0)
	    ) {
		/*
		 * note with the current version this code will only be used
		 * by OpenBSD -- NetBSD, FreeBSD, and MacOS have methods for
		 * re-defining sosend to use the sctp_sosend. One can
		 * optionally switch back to this code (by changing back the
		 * definitions) but this is not advisable. This code is used
		 * by FreeBSD when sending a file with sendfile() though.
		 */
		int ret;

		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
		inp->pkt = NULL;
		inp->control = NULL;
		return (ret);
	} else {
		return (0);
	}
}
int
sctp_disconnect(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
		return (ENOTCONN);
	}
	SCTP_INP_RLOCK(inp);
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* No connection */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		} else {
			struct sctp_association *asoc;
			struct sctp_tcb *stcb;

			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				SCTP_INP_RUNLOCK(inp);
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				return (EINVAL);
			}
			SCTP_TCB_LOCK(stcb);
			asoc = &stcb->asoc;
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				/* We are about to be freed, out of here */
				SCTP_TCB_UNLOCK(stcb);
				SCTP_INP_RUNLOCK(inp);
				return (0);
			}
			if (((so->so_options & SO_LINGER) &&
			    (so->so_linger == 0)) ||
			    (so->so_rcv.sb_cc > 0)) {
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_COOKIE_WAIT) {
					/* Left with data unread */
					struct mbuf *err;

					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
					if (err) {
						/*
						 * Fill in the user
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;

						ph = mtod(err, struct sctp_paramhdr *);
						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length = htons(SCTP_BUF_LEN(err));
					}
#if defined(SCTP_PANIC_ON_ABORT)
					panic("disconnect does an abort");
#endif
					sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
				}
				SCTP_INP_RUNLOCK(inp);
				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
				/* No unlock tcb assoc is gone */
				return (0);
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->stream_queue_cnt == 0)) {
				/* there is nothing queued to send, so done */
				if (asoc->locked_on_sending) {
					goto abort_anyway;
				}
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/* only send SHUTDOWN 1st time thru */
					sctp_stop_timers_for_shutdown(stcb);
					sctp_send_shutdown(stcb,
					    stcb->asoc.primary_destination);
					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			} else {
				/*
				 * we still got (or just got) data to send,
				 * so set SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that SCTP_EOF
				 * should be sent with no data. Currently,
				 * we will allow user data to be sent first
				 * and move to SHUTDOWN-PENDING.
				 */
				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				if (asoc->locked_on_sending) {
					/* Locked to send out the data */
					struct sctp_stream_queue_pending *sp;

					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
					if (sp == NULL) {
						SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
						    asoc->locked_on_sending->stream_no);
					} else {
						if ((sp->length == 0) && (sp->msg_is_complete == 0))
							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
				if (TAILQ_EMPTY(&asoc->send_queue) &&
				    TAILQ_EMPTY(&asoc->sent_queue) &&
				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
					struct mbuf *op_err;

			abort_anyway:
					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (op_err) {
						/*
						 * Fill in the user
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(op_err) =
						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
						ph = mtod(op_err,
						    struct sctp_paramhdr *);
						ph->param_type = htons(
						    SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length = htons(SCTP_BUF_LEN(op_err));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
					}
#if defined(SCTP_PANIC_ON_ABORT)
					panic("disconnect does an abort");
#endif

					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
					sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_INP_RUNLOCK(inp);
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
					return (0);
				} else {
					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
				}
			}
			SCTP_TCB_UNLOCK(stcb);
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		/* not reached */
	} else {
		/* UDP model does not support this */
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
		return EOPNOTSUPP;
	}
}
int
sctp_flush(struct socket *so, int how)
{
	/*
	 * We will just clear out the values and let subsequent close clear
	 * out the data, if any. Note if the user did a shutdown(SHUT_RD)
	 * they will not be able to read the data, the socket will block
	 * that from happening.
	 */
	if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
		/*
		 * First make sure the sb will be happy, we don't use these
		 * except maybe the count
		 */
		so->so_rcv.sb_cc = 0;
		so->so_rcv.sb_mbcnt = 0;
		so->so_rcv.sb_mb = NULL;
	}
	if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
		/*
		 * First make sure the sb will be happy, we don't use these
		 * except maybe the count
		 */
		so->so_snd.sb_cc = 0;
		so->so_snd.sb_mbcnt = 0;
		so->so_snd.sb_mb = NULL;

	}
	return (0);
}

int
sctp_shutdown(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	SCTP_INP_RLOCK(inp);
	/* For the UDP model this is an invalid call */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		/* Restore the flags that the soshutdown took away. */
		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
		/* This proc will wakeup for read and do nothing (I hope) */
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	/*
	 * Ok, if we reach here it's the TCP model and it is either a
	 * SHUT_WR or SHUT_RDWR. This means we put the shutdown flag
	 * against it.
	 */
	{
		struct sctp_tcb *stcb;
		struct sctp_association *asoc;

		socantsendmore(so);

		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			/*
			 * Ok, we hit the case that the shutdown call was
			 * made after an abort or something. Nothing to do
			 * now.
			 */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		SCTP_TCB_LOCK(stcb);
		asoc = &stcb->asoc;
		if (TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->sent_queue) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->locked_on_sending) {
				goto abort_anyway;
			}
			/* there is nothing queued to send, so I'm done... */
			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
				/* only send SHUTDOWN the first time through */
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
			}
		} else {
			/*
			 * we still got (or just got) data to send, so set
			 * SHUTDOWN_PENDING
			 */
			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
			    asoc->primary_destination);

			if (asoc->locked_on_sending) {
				/* Locked to send out the data */
				struct sctp_stream_queue_pending *sp;

				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
				if (sp == NULL) {
					SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
					    asoc->locked_on_sending->stream_no);
				} else {
					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				struct mbuf *op_err;

		abort_anyway:
				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (op_err) {
					/* Fill in the user initiated abort */
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(op_err) =
					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
					ph = mtod(op_err,
					    struct sctp_paramhdr *);
					ph->param_type = htons(
					    SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(op_err));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
				}
#if defined(SCTP_PANIC_ON_ABORT)
				panic("shutdown does an abort");
#endif
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_RESPONSE_TO_USER_REQ,
				    op_err, SCTP_SO_LOCKED);
				goto skip_unlock;
			} else {
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
			}
		}
		SCTP_TCB_UNLOCK(stcb);
	}
skip_unlock:
	SCTP_INP_RUNLOCK(inp);
	return 0;
}
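/*
 * Usage sketch (illustrative): for a one-to-one style (TCP model) SCTP
 * socket, the path through sctp_shutdown() above is what a plain
 * shutdown(2) write-side close maps to; it starts the graceful SHUTDOWN
 * handshake while leaving the socket readable:
 *
 *	if (shutdown(sd, SHUT_WR) < 0)
 *		perror("shutdown");
 *	// keep reading until the peer finishes sending ...
 */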
/*
 * copies a "user" presentable address and removes embedded scope, etc.
 * returns 0 on success, 1 on error
 */
static uint32_t
sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
{
#ifdef INET6
	struct sockaddr_in6 lsa6;

	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
	    &lsa6);
#endif
	memcpy(ss, sa, sa->sa_len);
	return (0);
}
/*
 * NOTE: assumes addr lock is held
 */
static size_t
sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    size_t limit,
    struct sockaddr_storage *sas,
    uint32_t vrf_id)
{
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	size_t actual;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;

	actual = 0;
	if (limit <= 0)
		return (actual);

	if (stcb) {
		/* Turn on all the appropriate scope */
		loopback_scope = stcb->asoc.loopback_scope;
		ipv4_local_scope = stcb->asoc.ipv4_local_scope;
		local_scope = stcb->asoc.local_scope;
		site_scope = stcb->asoc.site_scope;
	} else {
		/* Turn on ALL scope, since we look at the EP */
		loopback_scope = ipv4_local_scope = local_scope =
		    site_scope = 1;
	}
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		return (0);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* Skip loopback if loopback_scope not set */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (stcb) {
					/*
					 * For the BOUND-ALL case, the list
					 * associated with a TCB is Always
					 * considered a reverse list.. i.e.
					 * it lists addresses that are NOT
					 * part of the association. If this
					 * is one of those we must skip it.
					 */
					if (sctp_is_addr_restricted(stcb,
					    sctp_ifa)) {
						continue;
					}
				}
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * we skip
							 * unspecified
							 * addresses
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
#ifdef INET6
						if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
							in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
							((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
							actual += sizeof(struct sockaddr_in6);
						} else {
#endif
							memcpy(sas, sin, sizeof(*sin));
							((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
							actual += sizeof(*sin);
#ifdef INET6
						}
#endif
						if (actual >= limit) {
							return (actual);
						}
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							/*
							 * we skip
							 * unspecified
							 * addresses
							 */
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						memcpy(sas, sin6, sizeof(*sin6));
						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
						actual += sizeof(*sin6);
						if (actual >= limit) {
							return (actual);
						}
					} else {
						continue;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (stcb) {
				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
					continue;
				}
			}
			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
				continue;

			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
			sas = (struct sockaddr_storage *)((caddr_t)sas +
			    laddr->ifa->address.sa.sa_len);
			actual += laddr->ifa->address.sa.sa_len;
			if (actual >= limit) {
				return (actual);
			}
		}
	}
	return (actual);
}

static size_t
sctp_fill_up_addresses(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    size_t limit,
    struct sockaddr_storage *sas)
{
	size_t size = 0;

	SCTP_IPI_ADDR_RLOCK();
	/* fill up addresses for the endpoint's default vrf */
	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
	    inp->def_vrf_id);
	SCTP_IPI_ADDR_RUNLOCK();
	return (size);
}
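/*
 * Usage sketch (illustrative): sctp_fill_up_addresses() above is the kernel
 * side of the SCTP_GET_LOCAL_ADDRESSES option handled later in sctp_getopt().
 * From userland it is normally reached through the (assumed) library
 * wrappers sctp_getladdrs()/sctp_freeladdrs():
 *
 *	struct sockaddr *laddrs;
 *	int n = sctp_getladdrs(sd, assoc_id, &laddrs);
 *
 *	if (n > 0) {
 *		// walk the packed sockaddr list, n entries ...
 *		sctp_freeladdrs(laddrs);
 *	}
 */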
/*
 * NOTE: assumes addr lock is held
 */
static int
sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
{
	int cnt = 0;
	struct sctp_vrf *vrf = NULL;

	/*
	 * In both the sub-set bound and bound-all cases we return the
	 * MAXIMUM number of addresses that you COULD get. In reality the
	 * sub-set bound may have an exclusion list for a given TCB, OR in
	 * the bound-all case a TCB may NOT include the loopback or other
	 * addresses as well.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		return (0);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifn *sctp_ifn;
		struct sctp_ifa *sctp_ifa;

		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* Count them if they are the right type */
				if (sctp_ifa->address.sa.sa_family == AF_INET) {
					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
						cnt += sizeof(struct sockaddr_in6);
					else
						cnt += sizeof(struct sockaddr_in);

				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
					cnt += sizeof(struct sockaddr_in6);
			}
		}
	} else {
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa->address.sa.sa_family == AF_INET) {
				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
					cnt += sizeof(struct sockaddr_in6);
				else
					cnt += sizeof(struct sockaddr_in);

			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
				cnt += sizeof(struct sockaddr_in6);
		}
	}
	return (cnt);
}

static int
sctp_count_max_addresses(struct sctp_inpcb *inp)
{
	int cnt = 0;

	SCTP_IPI_ADDR_RLOCK();
	/* count addresses for the endpoint's default VRF */
	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
	SCTP_IPI_ADDR_RUNLOCK();
	return (cnt);
}

static int
sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
    size_t optsize, void *p, int delay)
{
	int error = 0;
	int creat_lock_on = 0;
	struct sctp_tcb *stcb = NULL;
	struct sockaddr *sa;
	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
	int added = 0;
	uint32_t vrf_id;
	int bad_addresses = 0;
	sctp_assoc_t *a_id;

	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
		return (EADDRINUSE);
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	}
	if (stcb) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
		return (EALREADY);
	}
	SCTP_INP_INCR_REF(inp);
	SCTP_ASOC_CREATE_LOCK(inp);
	creat_lock_on = 1;
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
		error = EFAULT;
		goto out_now;
	}
	totaddrp = (int *)optval;
	totaddr = *totaddrp;
	sa = (struct sockaddr *)(totaddrp + 1);
	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4,
	    &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
	if ((stcb != NULL) || bad_addresses) {
		/* Already have or am bringing up an association */
		SCTP_ASOC_CREATE_UNLOCK(inp);
		creat_lock_on = 0;
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		if (bad_addresses == 0) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
			error = EALREADY;
		}
		goto out_now;
	}
#ifdef INET6
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
	    (num_v6 > 0)) {
		error = EINVAL;
		goto out_now;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
	    (num_v4 > 0)) {
		struct in6pcb *inp6;

		inp6 = (struct in6pcb *)inp;
		if (SCTP_IPV6_V6ONLY(inp6)) {
			/*
			 * if IPV6_V6ONLY flag, ignore connections destined
			 * to a v4 addr or v4-mapped addr
			 */
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			error = EINVAL;
			goto out_now;
		}
	}
#endif				/* INET6 */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind an ephemeral port */
		error = sctp_inpcb_bind(so, NULL, NULL, p);
		if (error) {
			goto out_now;
		}
	}
	/* FIX ME: do we want to pass in a vrf on the connect call? */
	vrf_id = inp->def_vrf_id;


	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id,
	    (struct thread *)p
	    );
	if (stcb == NULL) {
		/* Gak! no memory */
		goto out_now;
	}
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
	/* move to second address */
	if (sa->sa_family == AF_INET)
		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
	else
		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));

	error = 0;
	added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
	/* Fill in the return id */
	if (error) {
		(void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
		goto out_now;
	}
	a_id = (sctp_assoc_t *) optval;
	*a_id = sctp_get_associd(stcb);

	/* initialize authentication parameters for the assoc */
	sctp_initialize_auth_params(inp, stcb);

	if (delay) {
		/* doing delayed connection */
		stcb->asoc.delayed_connection = 1;
		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
	}
	SCTP_TCB_UNLOCK(stcb);
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		/* Set the connected flag so we can queue data */
		soisconnecting(so);
	}
out_now:
	if (creat_lock_on) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
	}
	SCTP_INP_DECR_REF(inp);
	return error;
}
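/*
 * Usage sketch (illustrative): sctp_do_connect_x() above backs the
 * connect-x style socket options; applications normally reach it through
 * the (assumed) library wrapper sctp_connectx(), handing in a packed array
 * of sockaddrs for a multi-homed peer:
 *
 *	struct sockaddr_in peers[2];	// filled in by the caller
 *	sctp_assoc_t aid;
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &aid) < 0)
 *		perror("sctp_connectx");
 */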
#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
		SCTP_INP_RLOCK(inp); \
		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
		if (stcb) { \
			SCTP_TCB_LOCK(stcb); \
		} \
		SCTP_INP_RUNLOCK(inp); \
	} else if (assoc_id != 0) { \
		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
		if (stcb == NULL) { \
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
			error = ENOENT; \
			break; \
		} \
	} else { \
		stcb = NULL; \
	} \
}


#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
	if (size < sizeof(type)) { \
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
		error = EINVAL; \
		break; \
	} else { \
		destp = (type *)srcp; \
	} \
}

static int
sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
    void *p)
{
	struct sctp_inpcb *inp = NULL;
	int error, val = 0;
	struct sctp_tcb *stcb = NULL;

	if (optval == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	error = 0;

	switch (optname) {
	case SCTP_NODELAY:
	case SCTP_AUTOCLOSE:
	case SCTP_EXPLICIT_EOR:
	case SCTP_AUTO_ASCONF:
	case SCTP_DISABLE_FRAGMENTS:
	case SCTP_I_WANT_MAPPED_V4_ADDR:
	case SCTP_USE_EXT_RCVINFO:
		SCTP_INP_RLOCK(inp);
		switch (optname) {
		case SCTP_DISABLE_FRAGMENTS:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
			break;
		case SCTP_I_WANT_MAPPED_V4_ADDR:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
			break;
		case SCTP_AUTO_ASCONF:
			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
				/* only valid for bound all sockets */
				val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				error = EINVAL;
				goto flags_out;
			}
			break;
		case SCTP_EXPLICIT_EOR:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
			break;
		case SCTP_NODELAY:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
			break;
		case SCTP_USE_EXT_RCVINFO:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
			break;
		case SCTP_AUTOCLOSE:
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
			else
				val = 0;
			break;

		default:
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
			error = ENOPROTOOPT;
		}		/* end switch (sopt->sopt_name) */
		if (optname != SCTP_AUTOCLOSE) {
			/* make it an "on/off" value */
			val = (val != 0);
		}
		if (*optsize < sizeof(val)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			error = EINVAL;
		}
flags_out:
		SCTP_INP_RUNLOCK(inp);
		if (error == 0) {
			/* return the option value */
			*(int *)optval = val;
			*optsize = sizeof(val);
		}
		break;
	case SCTP_GET_PACKET_LOG:
		{
#ifdef SCTP_PACKET_LOGGING
			uint8_t *target;
			int ret;

			SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
			ret = sctp_copy_out_packet_log(target, (int)*optsize);
			*optsize = ret;
#else
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
			error = EOPNOTSUPP;
#endif
			break;
		}
	case SCTP_REUSE_PORT:
		{
			uint32_t *value;

			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
				/* Can't do this for a 1-m socket */
				error = EINVAL;
				break;
			}
			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			*value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_PARTIAL_DELIVERY_POINT:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			*value = inp->partial_delivery_point;
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_FRAGMENT_INTERLEAVE:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
					*value = SCTP_FRAG_LEVEL_2;
				} else {
					*value = SCTP_FRAG_LEVEL_1;
				}
			} else {
				*value = SCTP_FRAG_LEVEL_0;
			}
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_CMT_ON_OFF:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
				if (stcb) {
					av->assoc_value = stcb->asoc.sctp_cmt_on_off;
					SCTP_TCB_UNLOCK(stcb);

				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
					error = ENOTCONN;
				}
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
				error = ENOPROTOOPT;
			}
			*optsize = sizeof(*av);
		}
		break;
		/* EY - set socket option for nr_sacks */
	case SCTP_NR_SACK_ON_OFF:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
				if (stcb) {
					av->assoc_value = stcb->asoc.sctp_nr_sack_on_off;
					SCTP_TCB_UNLOCK(stcb);

				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
					error = ENOTCONN;
				}
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
				error = ENOPROTOOPT;
			}
			*optsize = sizeof(*av);
		}
		break;
		/* JRS - Get socket option for pluggable congestion control */
	case SCTP_PLUGGABLE_CC:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
			if (stcb) {
				av->assoc_value = stcb->asoc.congestion_control_module;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
			}
			*optsize = sizeof(*av);
		}
		break;
	case SCTP_GET_ADDR_LEN:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			error = EINVAL;
#ifdef INET
			if (av->assoc_value == AF_INET) {
				av->assoc_value = sizeof(struct sockaddr_in);
				error = 0;
			}
#endif
#ifdef INET6
			if (av->assoc_value == AF_INET6) {
				av->assoc_value = sizeof(struct sockaddr_in6);
				error = 0;
			}
#endif
			if (error) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
			}
			*optsize = sizeof(*av);
		}
		break;
	case SCTP_GET_ASSOC_NUMBER:
		{
			uint32_t *value, cnt;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			cnt = 0;
			SCTP_INP_RLOCK(inp);
			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
				cnt++;
			}
			SCTP_INP_RUNLOCK(inp);
			*value = cnt;
			*optsize = sizeof(uint32_t);
		}
		break;

	case SCTP_GET_ASSOC_ID_LIST:
		{
			struct sctp_assoc_ids *ids;
			unsigned int at, limit;

			SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
			at = 0;
			limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
			SCTP_INP_RLOCK(inp);
			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
				if (at < limit) {
					ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
				} else {
					error = EINVAL;
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
			}
			SCTP_INP_RUNLOCK(inp);
			ids->gaids_number_of_ids = at;
			*optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
		}
		break;
	case SCTP_CONTEXT:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			SCTP_FIND_STCB(inp, stcb, av->assoc_id);

			if (stcb) {
				av->assoc_value = stcb->asoc.context;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_INP_RLOCK(inp);
				av->assoc_value = inp->sctp_context;
				SCTP_INP_RUNLOCK(inp);
			}
			*optsize = sizeof(*av);
		}
		break;
	case SCTP_VRF_ID:
		{
			uint32_t *default_vrfid;

			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
			*default_vrfid = inp->def_vrf_id;
			break;
		}
	case SCTP_GET_ASOC_VRF:
		{
			struct sctp_assoc_value *id;

			SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
			SCTP_FIND_STCB(inp, stcb, id->assoc_id);
			if (stcb == NULL) {
				error = EINVAL;
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
				break;
			}
			id->assoc_value = stcb->asoc.vrf_id;
			break;
		}
	case SCTP_GET_VRF_IDS:
		{
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
			error = EOPNOTSUPP;
			break;
		}
	case SCTP_GET_NONCE_VALUES:
		{
			struct sctp_get_nonce_values *gnv;

			SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
			SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);

			if (stcb) {
				gnv->gn_peers_tag = stcb->asoc.peer_vtag;
				gnv->gn_local_tag = stcb->asoc.my_vtag;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
				error = ENOTCONN;
			}
			*optsize = sizeof(*gnv);
		}
		break;
	case SCTP_DELAYED_SACK:
		{
			struct sctp_sack_info *sack;

			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
			if (stcb) {
				sack->sack_delay = stcb->asoc.delayed_ack;
				sack->sack_freq = stcb->asoc.sack_freq;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_INP_RLOCK(inp);
				sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
				sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
				SCTP_INP_RUNLOCK(inp);
			}
			*optsize = sizeof(*sack);
		}
		break;

	case SCTP_GET_SNDBUF_USE:
		{
			struct sctp_sockstat *ss;

			SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
			SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);

			if (stcb) {
				ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
				ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
				    stcb->asoc.size_on_all_streams);
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
				error = ENOTCONN;
			}
			*optsize = sizeof(struct sctp_sockstat);
		}
		break;
	case SCTP_MAX_BURST:
		{
			uint8_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);

			SCTP_INP_RLOCK(inp);
			*value = inp->sctp_ep.max_burst;
			SCTP_INP_RUNLOCK(inp);
			*optsize = sizeof(uint8_t);
		}
		break;
	case SCTP_MAXSEG:
		{
			struct sctp_assoc_value *av;
			int ovh;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			SCTP_FIND_STCB(inp, stcb, av->assoc_id);

			if (stcb) {
				av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_INP_RLOCK(inp);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
					ovh = SCTP_MED_OVERHEAD;
				} else {
					ovh = SCTP_MED_V4_OVERHEAD;
				}
				if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
					av->assoc_value = 0;
				else
					av->assoc_value = inp->sctp_frag_point - ovh;
				SCTP_INP_RUNLOCK(inp);
			}
			*optsize = sizeof(struct sctp_assoc_value);
		}
		break;
	case SCTP_GET_STAT_LOG:
		error = sctp_fill_stat_log(optval, optsize);
		break;
	case SCTP_EVENTS:
		{
			struct sctp_event_subscribe *events;

			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
			memset(events, 0, sizeof(*events));
			SCTP_INP_RLOCK(inp);
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
				events->sctp_data_io_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
				events->sctp_association_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
				events->sctp_address_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
				events->sctp_send_failure_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
				events->sctp_peer_error_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
				events->sctp_shutdown_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
				events->sctp_partial_delivery_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
				events->sctp_adaptation_layer_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
				events->sctp_authentication_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
				events->sctp_sender_dry_event = 1;

			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
				events->sctp_stream_reset_events = 1;
			SCTP_INP_RUNLOCK(inp);
			*optsize = sizeof(struct sctp_event_subscribe);
		}
		break;

	case SCTP_ADAPTATION_LAYER:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);

			SCTP_INP_RLOCK(inp);
			*value = inp->sctp_ep.adaptation_layer_indicator;
			SCTP_INP_RUNLOCK(inp);
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_SET_INITIAL_DBG_SEQ:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			SCTP_INP_RLOCK(inp);
			*value = inp->sctp_ep.initial_sequence_debug;
			SCTP_INP_RUNLOCK(inp);
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_GET_LOCAL_ADDR_SIZE:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			SCTP_INP_RLOCK(inp);
			*value = sctp_count_max_addresses(inp);
			SCTP_INP_RUNLOCK(inp);
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_GET_REMOTE_ADDR_SIZE:
		{
uint32_t *value; 2041 size_t size; 2042 struct sctp_nets *net; 2043 2044 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2045 /* FIXME MT: change to sctp_assoc_value? */ 2046 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 2047 2048 if (stcb) { 2049 size = 0; 2050 /* Count the sizes */ 2051 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2052 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2053 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2054 size += sizeof(struct sockaddr_in6); 2055 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2056 size += sizeof(struct sockaddr_in); 2057 } else { 2058 /* huh */ 2059 break; 2060 } 2061 } 2062 SCTP_TCB_UNLOCK(stcb); 2063 *value = (uint32_t) size; 2064 } else { 2065 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2066 error = ENOTCONN; 2067 } 2068 *optsize = sizeof(uint32_t); 2069 } 2070 break; 2071 case SCTP_GET_PEER_ADDRESSES: 2072 /* 2073 * Get the address information, an array is passed in to 2074 * fill up we pack it. 2075 */ 2076 { 2077 size_t cpsz, left; 2078 struct sockaddr_storage *sas; 2079 struct sctp_nets *net; 2080 struct sctp_getaddresses *saddr; 2081 2082 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2083 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2084 2085 if (stcb) { 2086 left = (*optsize) - sizeof(struct sctp_getaddresses); 2087 *optsize = sizeof(struct sctp_getaddresses); 2088 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2089 2090 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2091 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2092 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2093 cpsz = sizeof(struct sockaddr_in6); 2094 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2095 cpsz = sizeof(struct sockaddr_in); 2096 } else { 2097 /* huh */ 2098 break; 2099 } 2100 if (left < cpsz) { 2101 /* not enough room. 
*/ 2102 break; 2103 } 2104 #ifdef INET6 2105 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 2106 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 2107 /* Must map the address */ 2108 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 2109 (struct sockaddr_in6 *)sas); 2110 } else { 2111 #endif 2112 memcpy(sas, &net->ro._l_addr, cpsz); 2113 #ifdef INET6 2114 } 2115 #endif 2116 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 2117 2118 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 2119 left -= cpsz; 2120 *optsize += cpsz; 2121 } 2122 SCTP_TCB_UNLOCK(stcb); 2123 } else { 2124 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2125 error = ENOENT; 2126 } 2127 } 2128 break; 2129 case SCTP_GET_LOCAL_ADDRESSES: 2130 { 2131 size_t limit, actual; 2132 struct sockaddr_storage *sas; 2133 struct sctp_getaddresses *saddr; 2134 2135 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2136 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2137 2138 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2139 limit = *optsize - sizeof(sctp_assoc_t); 2140 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2141 if (stcb) { 2142 SCTP_TCB_UNLOCK(stcb); 2143 } 2144 *optsize = sizeof(struct sockaddr_storage) + actual; 2145 } 2146 break; 2147 case SCTP_PEER_ADDR_PARAMS: 2148 { 2149 struct sctp_paddrparams *paddrp; 2150 struct sctp_nets *net; 2151 2152 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2153 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2154 2155 net = NULL; 2156 if (stcb) { 2157 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2158 } else { 2159 /* 2160 * We increment here since 2161 * sctp_findassociation_ep_addr() wil do a 2162 * decrement if it finds the stcb as long as 2163 * the locked tcb (last argument) is NOT a 2164 * TCB.. aka NULL. 
				 */
				SCTP_INP_INCR_REF(inp);
				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
				if (stcb == NULL) {
					SCTP_INP_DECR_REF(inp);
				}
			}
			if (stcb && (net == NULL)) {
				struct sockaddr *sa;

				sa = (struct sockaddr *)&paddrp->spp_address;
				if (sa->sa_family == AF_INET) {
					struct sockaddr_in *sin;

					sin = (struct sockaddr_in *)sa;
					if (sin->sin_addr.s_addr) {
						error = EINVAL;
						SCTP_TCB_UNLOCK(stcb);
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
						break;
					}
				} else if (sa->sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6;

					sin6 = (struct sockaddr_in6 *)sa;
					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
						error = EINVAL;
						SCTP_TCB_UNLOCK(stcb);
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
						break;
					}
				} else {
					error = EAFNOSUPPORT;
					SCTP_TCB_UNLOCK(stcb);
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
			}
			if (stcb) {
				/* Applies to the specific association */
				paddrp->spp_flags = 0;
				if (net) {
					int ovh;

					if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
						ovh = SCTP_MED_OVERHEAD;
					} else {
						ovh = SCTP_MED_V4_OVERHEAD;
					}

					paddrp->spp_pathmaxrxt = net->failure_threshold;
					paddrp->spp_pathmtu = net->mtu - ovh;
					/* get flags for HB */
					if (net->dest_state & SCTP_ADDR_NOHB)
						paddrp->spp_flags |= SPP_HB_DISABLE;
					else
						paddrp->spp_flags |= SPP_HB_ENABLE;
					/* get flags for PMTU */
					if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
					} else {
						paddrp->spp_flags |= SPP_PMTUD_DISABLE;
					}
#ifdef INET
					if (net->ro._l_addr.sin.sin_family == AF_INET) {
						paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
						paddrp->spp_flags |= SPP_IPV4_TOS;
					}
#endif
#ifdef INET6
					if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
						paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
						paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
					}
#endif
				} else {
					/*
					 * No destination so return default
					 * value
					 */
					int cnt = 0;

					paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
					paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
#ifdef INET
					paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
					paddrp->spp_flags |= SPP_IPV4_TOS;
#endif
#ifdef INET6
					paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
#endif
					/* default settings should be these */
					if (stcb->asoc.hb_is_disabled == 0) {
						paddrp->spp_flags |= SPP_HB_ENABLE;
					} else {
						paddrp->spp_flags |= SPP_HB_DISABLE;
					}
					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
							cnt++;
						}
					}
					if (cnt) {
						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
					}
				}
				paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
				paddrp->spp_assoc_id = sctp_get_associd(stcb);
				SCTP_TCB_UNLOCK(stcb);
			} else {
				/* Use endpoint defaults */
				SCTP_INP_RLOCK(inp);
				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
				paddrp->spp_assoc_id = (sctp_assoc_t) 0;
				/* get
inp's default */ 2283 #ifdef INET 2284 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2285 paddrp->spp_flags |= SPP_IPV4_TOS; 2286 #endif 2287 #ifdef INET6 2288 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2289 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2290 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2291 } 2292 #endif 2293 /* can't return this */ 2294 paddrp->spp_pathmtu = 0; 2295 2296 /* default behavior, no stcb */ 2297 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2298 2299 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2300 paddrp->spp_flags |= SPP_HB_ENABLE; 2301 } else { 2302 paddrp->spp_flags |= SPP_HB_DISABLE; 2303 } 2304 SCTP_INP_RUNLOCK(inp); 2305 } 2306 *optsize = sizeof(struct sctp_paddrparams); 2307 } 2308 break; 2309 case SCTP_GET_PEER_ADDR_INFO: 2310 { 2311 struct sctp_paddrinfo *paddri; 2312 struct sctp_nets *net; 2313 2314 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2315 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2316 2317 net = NULL; 2318 if (stcb) { 2319 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2320 } else { 2321 /* 2322 * We increment here since 2323 * sctp_findassociation_ep_addr() wil do a 2324 * decrement if it finds the stcb as long as 2325 * the locked tcb (last argument) is NOT a 2326 * TCB.. aka NULL. 2327 */ 2328 SCTP_INP_INCR_REF(inp); 2329 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2330 if (stcb == NULL) { 2331 SCTP_INP_DECR_REF(inp); 2332 } 2333 } 2334 2335 if ((stcb) && (net)) { 2336 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2337 paddri->spinfo_cwnd = net->cwnd; 2338 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2339 paddri->spinfo_rto = net->RTO; 2340 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2341 SCTP_TCB_UNLOCK(stcb); 2342 } else { 2343 if (stcb) { 2344 SCTP_TCB_UNLOCK(stcb); 2345 } 2346 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2347 error = ENOENT; 2348 } 2349 *optsize = sizeof(struct sctp_paddrinfo); 2350 } 2351 break; 2352 case SCTP_PCB_STATUS: 2353 { 2354 struct sctp_pcbinfo *spcb; 2355 2356 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2357 sctp_fill_pcbinfo(spcb); 2358 *optsize = sizeof(struct sctp_pcbinfo); 2359 } 2360 break; 2361 2362 case SCTP_STATUS: 2363 { 2364 struct sctp_nets *net; 2365 struct sctp_status *sstat; 2366 2367 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2368 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2369 2370 if (stcb == NULL) { 2371 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2372 error = EINVAL; 2373 break; 2374 } 2375 /* 2376 * I think passing the state is fine since 2377 * sctp_constants.h will be available to the user 2378 * land. 2379 */ 2380 sstat->sstat_state = stcb->asoc.state; 2381 sstat->sstat_assoc_id = sctp_get_associd(stcb); 2382 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2383 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2384 /* 2385 * We can't include chunks that have been passed to 2386 * the socket layer. Only things in queue. 
2387 */ 2388 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2389 stcb->asoc.cnt_on_all_streams); 2390 2391 2392 sstat->sstat_instrms = stcb->asoc.streamincnt; 2393 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2394 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2395 memcpy(&sstat->sstat_primary.spinfo_address, 2396 &stcb->asoc.primary_destination->ro._l_addr, 2397 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2398 net = stcb->asoc.primary_destination; 2399 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2400 /* 2401 * Again the user can get info from sctp_constants.h 2402 * for what the state of the network is. 2403 */ 2404 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2405 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2406 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2407 sstat->sstat_primary.spinfo_rto = net->RTO; 2408 sstat->sstat_primary.spinfo_mtu = net->mtu; 2409 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2410 SCTP_TCB_UNLOCK(stcb); 2411 *optsize = sizeof(*sstat); 2412 } 2413 break; 2414 case SCTP_RTOINFO: 2415 { 2416 struct sctp_rtoinfo *srto; 2417 2418 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2419 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2420 2421 if (stcb) { 2422 srto->srto_initial = stcb->asoc.initial_rto; 2423 srto->srto_max = stcb->asoc.maxrto; 2424 srto->srto_min = stcb->asoc.minrto; 2425 SCTP_TCB_UNLOCK(stcb); 2426 } else { 2427 SCTP_INP_RLOCK(inp); 2428 srto->srto_initial = inp->sctp_ep.initial_rto; 2429 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2430 srto->srto_min = inp->sctp_ep.sctp_minrto; 2431 SCTP_INP_RUNLOCK(inp); 2432 } 2433 *optsize = sizeof(*srto); 2434 } 2435 break; 2436 case SCTP_ASSOCINFO: 2437 { 2438 struct sctp_assocparams *sasoc; 2439 uint32_t oldval; 2440 2441 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2442 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2443 2444 if (stcb) { 2445 oldval = sasoc->sasoc_cookie_life; 2446 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); 2447 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2448 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2449 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2450 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2451 SCTP_TCB_UNLOCK(stcb); 2452 } else { 2453 SCTP_INP_RLOCK(inp); 2454 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2455 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2456 sasoc->sasoc_number_peer_destinations = 0; 2457 sasoc->sasoc_peer_rwnd = 0; 2458 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2459 SCTP_INP_RUNLOCK(inp); 2460 } 2461 *optsize = sizeof(*sasoc); 2462 } 2463 break; 2464 case SCTP_DEFAULT_SEND_PARAM: 2465 { 2466 struct sctp_sndrcvinfo *s_info; 2467 2468 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2469 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2470 2471 if (stcb) { 2472 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2473 SCTP_TCB_UNLOCK(stcb); 2474 } else { 2475 SCTP_INP_RLOCK(inp); 2476 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2477 SCTP_INP_RUNLOCK(inp); 2478 } 2479 *optsize = sizeof(*s_info); 2480 } 2481 break; 2482 case SCTP_INITMSG: 2483 { 2484 struct sctp_initmsg *sinit; 2485 2486 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2487 SCTP_INP_RLOCK(inp); 2488 
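			/*
			 * The four fields copied out below are the endpoint-wide
			 * defaults applied to new associations.  A minimal userland
			 * sketch for reading them back (hedged; "fd" is an
			 * illustrative SCTP socket and error handling is omitted):
			 *
			 *	struct sctp_initmsg im;
			 *	socklen_t len = sizeof(im);
			 *	getsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, &len);
			 *	printf("ostreams %u instreams %u\n",
			 *	    im.sinit_num_ostreams, im.sinit_max_instreams);
			 */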
sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2489 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2490 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2491 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2492 SCTP_INP_RUNLOCK(inp); 2493 *optsize = sizeof(*sinit); 2494 } 2495 break; 2496 case SCTP_PRIMARY_ADDR: 2497 /* we allow a "get" operation on this */ 2498 { 2499 struct sctp_setprim *ssp; 2500 2501 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2502 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2503 2504 if (stcb) { 2505 /* simply copy out the sockaddr_storage... */ 2506 int len; 2507 2508 len = *optsize; 2509 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2510 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2511 2512 memcpy(&ssp->ssp_addr, 2513 &stcb->asoc.primary_destination->ro._l_addr, 2514 len); 2515 SCTP_TCB_UNLOCK(stcb); 2516 } else { 2517 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2518 error = EINVAL; 2519 } 2520 *optsize = sizeof(*ssp); 2521 } 2522 break; 2523 2524 case SCTP_HMAC_IDENT: 2525 { 2526 struct sctp_hmacalgo *shmac; 2527 sctp_hmaclist_t *hmaclist; 2528 uint32_t size; 2529 int i; 2530 2531 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2532 2533 SCTP_INP_RLOCK(inp); 2534 hmaclist = inp->sctp_ep.local_hmacs; 2535 if (hmaclist == NULL) { 2536 /* no HMACs to return */ 2537 *optsize = sizeof(*shmac); 2538 SCTP_INP_RUNLOCK(inp); 2539 break; 2540 } 2541 /* is there room for all of the hmac ids? */ 2542 size = sizeof(*shmac) + (hmaclist->num_algo * 2543 sizeof(shmac->shmac_idents[0])); 2544 if ((size_t)(*optsize) < size) { 2545 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2546 error = EINVAL; 2547 SCTP_INP_RUNLOCK(inp); 2548 break; 2549 } 2550 /* copy in the list */ 2551 shmac->shmac_number_of_idents = hmaclist->num_algo; 2552 for (i = 0; i < hmaclist->num_algo; i++) { 2553 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2554 } 2555 SCTP_INP_RUNLOCK(inp); 2556 *optsize = size; 2557 break; 2558 } 2559 case SCTP_AUTH_ACTIVE_KEY: 2560 { 2561 struct sctp_authkeyid *scact; 2562 2563 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2564 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2565 2566 if (stcb) { 2567 /* get the active key on the assoc */ 2568 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; 2569 SCTP_TCB_UNLOCK(stcb); 2570 } else { 2571 /* get the endpoint active key */ 2572 SCTP_INP_RLOCK(inp); 2573 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2574 SCTP_INP_RUNLOCK(inp); 2575 } 2576 *optsize = sizeof(*scact); 2577 break; 2578 } 2579 case SCTP_LOCAL_AUTH_CHUNKS: 2580 { 2581 struct sctp_authchunks *sac; 2582 sctp_auth_chklist_t *chklist = NULL; 2583 size_t size = 0; 2584 2585 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2586 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2587 2588 if (stcb) { 2589 /* get off the assoc */ 2590 chklist = stcb->asoc.local_auth_chunks; 2591 /* is there enough space? 
*/ 2592 size = sctp_auth_get_chklist_size(chklist); 2593 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2594 error = EINVAL; 2595 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2596 } else { 2597 /* copy in the chunks */ 2598 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2599 } 2600 SCTP_TCB_UNLOCK(stcb); 2601 } else { 2602 /* get off the endpoint */ 2603 SCTP_INP_RLOCK(inp); 2604 chklist = inp->sctp_ep.local_auth_chunks; 2605 /* is there enough space? */ 2606 size = sctp_auth_get_chklist_size(chklist); 2607 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2608 error = EINVAL; 2609 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2610 } else { 2611 /* copy in the chunks */ 2612 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2613 } 2614 SCTP_INP_RUNLOCK(inp); 2615 } 2616 *optsize = sizeof(struct sctp_authchunks) + size; 2617 break; 2618 } 2619 case SCTP_PEER_AUTH_CHUNKS: 2620 { 2621 struct sctp_authchunks *sac; 2622 sctp_auth_chklist_t *chklist = NULL; 2623 size_t size = 0; 2624 2625 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2626 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2627 2628 if (stcb) { 2629 /* get off the assoc */ 2630 chklist = stcb->asoc.peer_auth_chunks; 2631 /* is there enough space? */ 2632 size = sctp_auth_get_chklist_size(chklist); 2633 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2634 error = EINVAL; 2635 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2636 } else { 2637 /* copy in the chunks */ 2638 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2639 } 2640 SCTP_TCB_UNLOCK(stcb); 2641 } else { 2642 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2643 error = ENOENT; 2644 } 2645 *optsize = sizeof(struct sctp_authchunks) + size; 2646 break; 2647 } 2648 2649 2650 default: 2651 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2652 error = ENOPROTOOPT; 2653 *optsize = 0; 2654 break; 2655 } /* end switch (sopt->sopt_name) */ 2656 return (error); 2657 } 2658 2659 static int 2660 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2661 void *p) 2662 { 2663 int error, set_opt; 2664 uint32_t *mopt; 2665 struct sctp_tcb *stcb = NULL; 2666 struct sctp_inpcb *inp = NULL; 2667 uint32_t vrf_id; 2668 2669 if (optval == NULL) { 2670 SCTP_PRINTF("optval is NULL\n"); 2671 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2672 return (EINVAL); 2673 } 2674 inp = (struct sctp_inpcb *)so->so_pcb; 2675 if (inp == 0) { 2676 SCTP_PRINTF("inp is NULL?\n"); 2677 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2678 return EINVAL; 2679 } 2680 vrf_id = inp->def_vrf_id; 2681 2682 error = 0; 2683 switch (optname) { 2684 case SCTP_NODELAY: 2685 case SCTP_AUTOCLOSE: 2686 case SCTP_AUTO_ASCONF: 2687 case SCTP_EXPLICIT_EOR: 2688 case SCTP_DISABLE_FRAGMENTS: 2689 case SCTP_USE_EXT_RCVINFO: 2690 case SCTP_I_WANT_MAPPED_V4_ADDR: 2691 /* copy in the option value */ 2692 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2693 set_opt = 0; 2694 if (error) 2695 break; 2696 switch (optname) { 2697 case SCTP_DISABLE_FRAGMENTS: 2698 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2699 break; 2700 case SCTP_AUTO_ASCONF: 2701 /* 2702 * NOTE: we don't really support this flag 2703 */ 2704 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2705 /* only valid for bound all sockets */ 2706 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2707 } else { 2708 
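				/*
				 * The endpoint is subset-bound rather than bound-all,
				 * so automatic ASCONF cannot be enabled and the request
				 * is rejected below.  For reference, these boolean
				 * options are toggled from userland with a 32-bit on/off
				 * value, e.g. (hedged sketch, error handling omitted):
				 *
				 *	uint32_t on = 1;
				 *	setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY,
				 *	    &on, sizeof(on));
				 */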
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					return (EINVAL);
				}
				break;
			case SCTP_EXPLICIT_EOR:
				set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
				break;
			case SCTP_USE_EXT_RCVINFO:
				set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
				break;
			case SCTP_I_WANT_MAPPED_V4_ADDR:
				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
					set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					return (EINVAL);
				}
				break;
			case SCTP_NODELAY:
				set_opt = SCTP_PCB_FLAGS_NODELAY;
				break;
			case SCTP_AUTOCLOSE:
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					return (EINVAL);
				}
				set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
				/*
				 * The value is in ticks. Note this does not affect
				 * old associations, only new ones.
				 */
				inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
				break;
			}
			SCTP_INP_WLOCK(inp);
			if (*mopt != 0) {
				sctp_feature_on(inp, set_opt);
			} else {
				sctp_feature_off(inp, set_opt);
			}
			SCTP_INP_WUNLOCK(inp);
			break;
		case SCTP_REUSE_PORT:
			{
				SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
					/* Can't set it after we are bound */
					error = EINVAL;
					break;
				}
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
					/* Can't do this for a 1-m socket */
					error = EINVAL;
					break;
				}
				if (*mopt != 0)
					sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
				else
					sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
			}
			break;
		case SCTP_PARTIAL_DELIVERY_POINT:
			{
				uint32_t *value;

				SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
				if (*value > SCTP_SB_LIMIT_RCV(so)) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
					break;
				}
				inp->partial_delivery_point = *value;
			}
			break;
		case SCTP_FRAGMENT_INTERLEAVE:
			/* not yet until we re-write sctp_recvmsg() */
			{
				uint32_t *level;

				SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
				if (*level == SCTP_FRAG_LEVEL_2) {
					sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
					sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
				} else if (*level == SCTP_FRAG_LEVEL_1) {
					sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
					sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
				} else if (*level == SCTP_FRAG_LEVEL_0) {
					sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
					sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);

				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
				}
			}
			break;
		case SCTP_CMT_ON_OFF:
			{
				struct sctp_assoc_value *av;

				SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
					SCTP_FIND_STCB(inp, stcb, av->assoc_id);
					if (stcb) {
						stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
						SCTP_TCB_UNLOCK(stcb);
					} else {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
						error = ENOTCONN;
					}
				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
					error = ENOPROTOOPT;
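					/*
					 * CMT can only be configured per association once
					 * the global sctp_cmt_on_off sysctl has been
					 * enabled; on FreeBSD that knob is most likely
					 * exposed as net.inet.sctp.cmt_on_off (assumption),
					 * e.g.
					 *
					 *	sysctl net.inet.sctp.cmt_on_off=1
					 *
					 * Until then the option fails with ENOPROTOOPT, as
					 * set above.
					 */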
2822 } 2823 } 2824 break; 2825 /* EY nr_sack_on_off socket option */ 2826 case SCTP_NR_SACK_ON_OFF: 2827 { 2828 struct sctp_assoc_value *av; 2829 2830 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2831 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) { 2832 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2833 if (stcb) { 2834 stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value; 2835 SCTP_TCB_UNLOCK(stcb); 2836 } else { 2837 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2838 error = ENOTCONN; 2839 } 2840 } else { 2841 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2842 error = ENOPROTOOPT; 2843 } 2844 } 2845 break; 2846 /* JRS - Set socket option for pluggable congestion control */ 2847 case SCTP_PLUGGABLE_CC: 2848 { 2849 struct sctp_assoc_value *av; 2850 2851 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2852 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2853 if (stcb) { 2854 switch (av->assoc_value) { 2855 /* 2856 * JRS - Standard TCP congestion 2857 * control 2858 */ 2859 case SCTP_CC_RFC2581: 2860 { 2861 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 2862 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2863 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 2864 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 2865 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2866 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2867 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2868 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2869 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2870 SCTP_TCB_UNLOCK(stcb); 2871 break; 2872 } 2873 /* 2874 * JRS - High Speed TCP congestion 2875 * control (Floyd) 2876 */ 2877 case SCTP_CC_HSTCP: 2878 { 2879 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP; 2880 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2881 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack; 2882 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr; 2883 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2884 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2885 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2886 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2887 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2888 SCTP_TCB_UNLOCK(stcb); 2889 break; 2890 } 2891 /* JRS - HTCP congestion control */ 2892 case SCTP_CC_HTCP: 2893 { 2894 stcb->asoc.congestion_control_module = SCTP_CC_HTCP; 2895 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param; 2896 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack; 2897 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr; 2898 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout; 2899 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo; 2900 
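						/*
						 * Like the HSTCP case above, HTCP keeps the
						 * default handlers for the packet-dropped and
						 * output-accounting callbacks assigned below;
						 * only the FR-timer callback that follows is
						 * HTCP specific.  Selecting this module from
						 * userland is roughly (hedged sketch; "fd" and
						 * "assoc_id" are illustrative):
						 *
						 *	struct sctp_assoc_value av = {
						 *		.assoc_id = assoc_id,
						 *		.assoc_value = SCTP_CC_HTCP,
						 *	};
						 *	setsockopt(fd, IPPROTO_SCTP,
						 *	    SCTP_PLUGGABLE_CC, &av, sizeof(av));
						 */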
stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2901 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2902 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer; 2903 SCTP_TCB_UNLOCK(stcb); 2904 break; 2905 } 2906 /* 2907 * JRS - All other values are 2908 * invalid 2909 */ 2910 default: 2911 { 2912 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2913 error = EINVAL; 2914 SCTP_TCB_UNLOCK(stcb); 2915 break; 2916 } 2917 } 2918 } else { 2919 switch (av->assoc_value) { 2920 case SCTP_CC_RFC2581: 2921 case SCTP_CC_HSTCP: 2922 case SCTP_CC_HTCP: 2923 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 2924 break; 2925 default: 2926 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2927 error = EINVAL; 2928 break; 2929 }; 2930 } 2931 } 2932 break; 2933 case SCTP_CLR_STAT_LOG: 2934 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2935 error = EOPNOTSUPP; 2936 break; 2937 case SCTP_CONTEXT: 2938 { 2939 struct sctp_assoc_value *av; 2940 2941 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2942 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2943 2944 if (stcb) { 2945 stcb->asoc.context = av->assoc_value; 2946 SCTP_TCB_UNLOCK(stcb); 2947 } else { 2948 SCTP_INP_WLOCK(inp); 2949 inp->sctp_context = av->assoc_value; 2950 SCTP_INP_WUNLOCK(inp); 2951 } 2952 } 2953 break; 2954 case SCTP_VRF_ID: 2955 { 2956 uint32_t *default_vrfid; 2957 2958 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 2959 if (*default_vrfid > SCTP_MAX_VRF_ID) { 2960 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2961 error = EINVAL; 2962 break; 2963 } 2964 inp->def_vrf_id = *default_vrfid; 2965 break; 2966 } 2967 case SCTP_DEL_VRF_ID: 2968 { 2969 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2970 error = EOPNOTSUPP; 2971 break; 2972 } 2973 case SCTP_ADD_VRF_ID: 2974 { 2975 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2976 error = EOPNOTSUPP; 2977 break; 2978 } 2979 case SCTP_DELAYED_SACK: 2980 { 2981 struct sctp_sack_info *sack; 2982 2983 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 2984 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 2985 if (sack->sack_delay) { 2986 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 2987 sack->sack_delay = SCTP_MAX_SACK_DELAY; 2988 } 2989 if (stcb) { 2990 if (sack->sack_delay) { 2991 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 2992 sack->sack_delay = TICKS_TO_MSEC(1); 2993 } 2994 stcb->asoc.delayed_ack = sack->sack_delay; 2995 } 2996 if (sack->sack_freq) { 2997 stcb->asoc.sack_freq = sack->sack_freq; 2998 } 2999 SCTP_TCB_UNLOCK(stcb); 3000 } else { 3001 SCTP_INP_WLOCK(inp); 3002 if (sack->sack_delay) { 3003 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3004 sack->sack_delay = TICKS_TO_MSEC(1); 3005 } 3006 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 3007 } 3008 if (sack->sack_freq) { 3009 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 3010 } 3011 SCTP_INP_WUNLOCK(inp); 3012 } 3013 break; 3014 } 3015 case SCTP_AUTH_CHUNK: 3016 { 3017 struct sctp_authchunk *sauth; 3018 3019 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 3020 3021 SCTP_INP_WLOCK(inp); 3022 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 3023 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3024 error = EINVAL; 3025 } 
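			/*
			 * sctp_auth_add_chunk() refuses chunk types that can never be
			 * authenticated (e.g. INIT or AUTH itself), hence the EINVAL
			 * above.  A hedged userland sketch for requesting
			 * authentication of a chunk type (0xc1 is the ASCONF chunk
			 * type from the IANA registry, used here purely as an
			 * illustration):
			 *
			 *	struct sctp_authchunk ac;
			 *	ac.sauth_chunk = 0xc1;
			 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_CHUNK,
			 *	    &ac, sizeof(ac));
			 */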
3026 SCTP_INP_WUNLOCK(inp); 3027 break; 3028 } 3029 case SCTP_AUTH_KEY: 3030 { 3031 struct sctp_authkey *sca; 3032 struct sctp_keyhead *shared_keys; 3033 sctp_sharedkey_t *shared_key; 3034 sctp_key_t *key = NULL; 3035 size_t size; 3036 3037 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 3038 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 3039 size = optsize - sizeof(*sca); 3040 3041 if (stcb) { 3042 /* set it on the assoc */ 3043 shared_keys = &stcb->asoc.shared_keys; 3044 /* clear the cached keys for this key id */ 3045 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 3046 /* 3047 * create the new shared key and 3048 * insert/replace it 3049 */ 3050 if (size > 0) { 3051 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3052 if (key == NULL) { 3053 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3054 error = ENOMEM; 3055 SCTP_TCB_UNLOCK(stcb); 3056 break; 3057 } 3058 } 3059 shared_key = sctp_alloc_sharedkey(); 3060 if (shared_key == NULL) { 3061 sctp_free_key(key); 3062 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3063 error = ENOMEM; 3064 SCTP_TCB_UNLOCK(stcb); 3065 break; 3066 } 3067 shared_key->key = key; 3068 shared_key->keyid = sca->sca_keynumber; 3069 error = sctp_insert_sharedkey(shared_keys, shared_key); 3070 SCTP_TCB_UNLOCK(stcb); 3071 } else { 3072 /* set it on the endpoint */ 3073 SCTP_INP_WLOCK(inp); 3074 shared_keys = &inp->sctp_ep.shared_keys; 3075 /* 3076 * clear the cached keys on all assocs for 3077 * this key id 3078 */ 3079 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 3080 /* 3081 * create the new shared key and 3082 * insert/replace it 3083 */ 3084 if (size > 0) { 3085 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3086 if (key == NULL) { 3087 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3088 error = ENOMEM; 3089 SCTP_INP_WUNLOCK(inp); 3090 break; 3091 } 3092 } 3093 shared_key = sctp_alloc_sharedkey(); 3094 if (shared_key == NULL) { 3095 sctp_free_key(key); 3096 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3097 error = ENOMEM; 3098 SCTP_INP_WUNLOCK(inp); 3099 break; 3100 } 3101 shared_key->key = key; 3102 shared_key->keyid = sca->sca_keynumber; 3103 error = sctp_insert_sharedkey(shared_keys, shared_key); 3104 SCTP_INP_WUNLOCK(inp); 3105 } 3106 break; 3107 } 3108 case SCTP_HMAC_IDENT: 3109 { 3110 struct sctp_hmacalgo *shmac; 3111 sctp_hmaclist_t *hmaclist; 3112 uint16_t hmacid; 3113 uint32_t i; 3114 3115 size_t found; 3116 3117 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3118 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) { 3119 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3120 error = EINVAL; 3121 break; 3122 } 3123 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents); 3124 if (hmaclist == NULL) { 3125 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3126 error = ENOMEM; 3127 break; 3128 } 3129 for (i = 0; i < shmac->shmac_number_of_idents; i++) { 3130 hmacid = shmac->shmac_idents[i]; 3131 if (sctp_auth_add_hmacid(hmaclist, hmacid)) { 3132 /* invalid HMACs were found */ ; 3133 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3134 error = EINVAL; 3135 sctp_free_hmaclist(hmaclist); 3136 goto sctp_set_hmac_done; 3137 } 3138 } 3139 found = 0; 3140 for (i = 0; i < hmaclist->num_algo; i++) { 3141 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 3142 /* already in list */ 3143 found = 1; 3144 } 3145 } 3146 if 
(!found) { 3147 sctp_free_hmaclist(hmaclist); 3148 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3149 error = EINVAL; 3150 break; 3151 } 3152 /* set it on the endpoint */ 3153 SCTP_INP_WLOCK(inp); 3154 if (inp->sctp_ep.local_hmacs) 3155 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3156 inp->sctp_ep.local_hmacs = hmaclist; 3157 SCTP_INP_WUNLOCK(inp); 3158 sctp_set_hmac_done: 3159 break; 3160 } 3161 case SCTP_AUTH_ACTIVE_KEY: 3162 { 3163 struct sctp_authkeyid *scact; 3164 3165 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, 3166 optsize); 3167 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3168 3169 /* set the active key on the right place */ 3170 if (stcb) { 3171 /* set the active key on the assoc */ 3172 if (sctp_auth_setactivekey(stcb, 3173 scact->scact_keynumber)) { 3174 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3175 SCTP_FROM_SCTP_USRREQ, 3176 EINVAL); 3177 error = EINVAL; 3178 } 3179 SCTP_TCB_UNLOCK(stcb); 3180 } else { 3181 /* set the active key on the endpoint */ 3182 SCTP_INP_WLOCK(inp); 3183 if (sctp_auth_setactivekey_ep(inp, 3184 scact->scact_keynumber)) { 3185 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3186 SCTP_FROM_SCTP_USRREQ, 3187 EINVAL); 3188 error = EINVAL; 3189 } 3190 SCTP_INP_WUNLOCK(inp); 3191 } 3192 break; 3193 } 3194 case SCTP_AUTH_DELETE_KEY: 3195 { 3196 struct sctp_authkeyid *scdel; 3197 3198 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, 3199 optsize); 3200 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3201 3202 /* delete the key from the right place */ 3203 if (stcb) { 3204 if (sctp_delete_sharedkey(stcb, 3205 scdel->scact_keynumber)) { 3206 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3207 SCTP_FROM_SCTP_USRREQ, 3208 EINVAL); 3209 error = EINVAL; 3210 } 3211 SCTP_TCB_UNLOCK(stcb); 3212 } else { 3213 SCTP_INP_WLOCK(inp); 3214 if (sctp_delete_sharedkey_ep(inp, 3215 scdel->scact_keynumber)) { 3216 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3217 SCTP_FROM_SCTP_USRREQ, 3218 EINVAL); 3219 error = EINVAL; 3220 } 3221 SCTP_INP_WUNLOCK(inp); 3222 } 3223 break; 3224 } 3225 case SCTP_AUTH_DEACTIVATE_KEY: 3226 { 3227 struct sctp_authkeyid *keyid; 3228 3229 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, 3230 optsize); 3231 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); 3232 3233 /* deactivate the key from the right place */ 3234 if (stcb) { 3235 if (sctp_deact_sharedkey(stcb, 3236 keyid->scact_keynumber)) { 3237 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3238 SCTP_FROM_SCTP_USRREQ, 3239 EINVAL); 3240 error = EINVAL; 3241 } 3242 SCTP_TCB_UNLOCK(stcb); 3243 } else { 3244 SCTP_INP_WLOCK(inp); 3245 if (sctp_deact_sharedkey_ep(inp, 3246 keyid->scact_keynumber)) { 3247 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3248 SCTP_FROM_SCTP_USRREQ, 3249 EINVAL); 3250 error = EINVAL; 3251 } 3252 SCTP_INP_WUNLOCK(inp); 3253 } 3254 break; 3255 } 3256 3257 case SCTP_RESET_STREAMS: 3258 { 3259 struct sctp_stream_reset *strrst; 3260 uint8_t send_in = 0, send_tsn = 0, send_out = 0; 3261 int i; 3262 3263 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3264 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3265 3266 if (stcb == NULL) { 3267 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3268 error = ENOENT; 3269 break; 3270 } 3271 if (stcb->asoc.peer_supports_strreset == 0) { 3272 /* 3273 * Peer does not support it, we return 3274 * protocol not supported since this is true 3275 * for this feature and this peer, not the 3276 * socket request in general. 
3277 */ 3278 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3279 error = EPROTONOSUPPORT; 3280 SCTP_TCB_UNLOCK(stcb); 3281 break; 3282 } 3283 if (stcb->asoc.stream_reset_outstanding) { 3284 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3285 error = EALREADY; 3286 SCTP_TCB_UNLOCK(stcb); 3287 break; 3288 } 3289 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3290 send_in = 1; 3291 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3292 send_out = 1; 3293 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3294 send_in = 1; 3295 send_out = 1; 3296 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3297 send_tsn = 1; 3298 } else { 3299 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3300 error = EINVAL; 3301 SCTP_TCB_UNLOCK(stcb); 3302 break; 3303 } 3304 for (i = 0; i < strrst->strrst_num_streams; i++) { 3305 if ((send_in) && 3306 3307 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3308 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3309 error = EINVAL; 3310 goto get_out; 3311 } 3312 if ((send_out) && 3313 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3314 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3315 error = EINVAL; 3316 goto get_out; 3317 } 3318 } 3319 if (error) { 3320 get_out: 3321 SCTP_TCB_UNLOCK(stcb); 3322 break; 3323 } 3324 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3325 strrst->strrst_list, 3326 send_out, (stcb->asoc.str_reset_seq_in - 3), 3327 send_in, send_tsn); 3328 3329 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3330 SCTP_TCB_UNLOCK(stcb); 3331 } 3332 break; 3333 3334 case SCTP_CONNECT_X: 3335 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3336 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3337 error = EINVAL; 3338 break; 3339 } 3340 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3341 break; 3342 3343 case SCTP_CONNECT_X_DELAYED: 3344 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3345 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3346 error = EINVAL; 3347 break; 3348 } 3349 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3350 break; 3351 3352 case SCTP_CONNECT_X_COMPLETE: 3353 { 3354 struct sockaddr *sa; 3355 struct sctp_nets *net; 3356 3357 /* FIXME MT: check correct? */ 3358 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3359 3360 /* find tcb */ 3361 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3362 SCTP_INP_RLOCK(inp); 3363 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3364 if (stcb) { 3365 SCTP_TCB_LOCK(stcb); 3366 net = sctp_findnet(stcb, sa); 3367 } 3368 SCTP_INP_RUNLOCK(inp); 3369 } else { 3370 /* 3371 * We increment here since 3372 * sctp_findassociation_ep_addr() wil do a 3373 * decrement if it finds the stcb as long as 3374 * the locked tcb (last argument) is NOT a 3375 * TCB.. aka NULL. 
3376 */ 3377 SCTP_INP_INCR_REF(inp); 3378 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3379 if (stcb == NULL) { 3380 SCTP_INP_DECR_REF(inp); 3381 } 3382 } 3383 3384 if (stcb == NULL) { 3385 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3386 error = ENOENT; 3387 break; 3388 } 3389 if (stcb->asoc.delayed_connection == 1) { 3390 stcb->asoc.delayed_connection = 0; 3391 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3392 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3393 stcb->asoc.primary_destination, 3394 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3395 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3396 } else { 3397 /* 3398 * already expired or did not use delayed 3399 * connectx 3400 */ 3401 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3402 error = EALREADY; 3403 } 3404 SCTP_TCB_UNLOCK(stcb); 3405 } 3406 break; 3407 case SCTP_MAX_BURST: 3408 { 3409 uint8_t *burst; 3410 3411 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 3412 3413 SCTP_INP_WLOCK(inp); 3414 if (*burst) { 3415 inp->sctp_ep.max_burst = *burst; 3416 } 3417 SCTP_INP_WUNLOCK(inp); 3418 } 3419 break; 3420 case SCTP_MAXSEG: 3421 { 3422 struct sctp_assoc_value *av; 3423 int ovh; 3424 3425 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3426 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3427 3428 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3429 ovh = SCTP_MED_OVERHEAD; 3430 } else { 3431 ovh = SCTP_MED_V4_OVERHEAD; 3432 } 3433 if (stcb) { 3434 if (av->assoc_value) { 3435 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3436 } else { 3437 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3438 } 3439 SCTP_TCB_UNLOCK(stcb); 3440 } else { 3441 SCTP_INP_WLOCK(inp); 3442 /* 3443 * FIXME MT: I think this is not in tune 3444 * with the API ID 3445 */ 3446 if (av->assoc_value) { 3447 inp->sctp_frag_point = (av->assoc_value + ovh); 3448 } else { 3449 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3450 } 3451 SCTP_INP_WUNLOCK(inp); 3452 } 3453 } 3454 break; 3455 case SCTP_EVENTS: 3456 { 3457 struct sctp_event_subscribe *events; 3458 3459 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3460 3461 SCTP_INP_WLOCK(inp); 3462 if (events->sctp_data_io_event) { 3463 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3464 } else { 3465 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3466 } 3467 3468 if (events->sctp_association_event) { 3469 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3470 } else { 3471 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3472 } 3473 3474 if (events->sctp_address_event) { 3475 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3476 } else { 3477 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3478 } 3479 3480 if (events->sctp_send_failure_event) { 3481 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3482 } else { 3483 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3484 } 3485 3486 if (events->sctp_peer_error_event) { 3487 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3488 } else { 3489 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3490 } 3491 3492 if (events->sctp_shutdown_event) { 3493 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3494 } else { 3495 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3496 } 3497 3498 if (events->sctp_partial_delivery_event) { 3499 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3500 } else { 3501 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3502 } 3503 3504 if 
(events->sctp_adaptation_layer_event) { 3505 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3506 } else { 3507 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3508 } 3509 3510 if (events->sctp_authentication_event) { 3511 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3512 } else { 3513 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3514 } 3515 3516 if (events->sctp_sender_dry_event) { 3517 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); 3518 } else { 3519 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); 3520 } 3521 3522 if (events->sctp_stream_reset_events) { 3523 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3524 } else { 3525 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3526 } 3527 SCTP_INP_WUNLOCK(inp); 3528 } 3529 break; 3530 3531 case SCTP_ADAPTATION_LAYER: 3532 { 3533 struct sctp_setadaptation *adap_bits; 3534 3535 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3536 SCTP_INP_WLOCK(inp); 3537 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3538 SCTP_INP_WUNLOCK(inp); 3539 } 3540 break; 3541 #ifdef SCTP_DEBUG 3542 case SCTP_SET_INITIAL_DBG_SEQ: 3543 { 3544 uint32_t *vvv; 3545 3546 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3547 SCTP_INP_WLOCK(inp); 3548 inp->sctp_ep.initial_sequence_debug = *vvv; 3549 SCTP_INP_WUNLOCK(inp); 3550 } 3551 break; 3552 #endif 3553 case SCTP_DEFAULT_SEND_PARAM: 3554 { 3555 struct sctp_sndrcvinfo *s_info; 3556 3557 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3558 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3559 3560 if (stcb) { 3561 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3562 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3563 } else { 3564 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3565 error = EINVAL; 3566 } 3567 SCTP_TCB_UNLOCK(stcb); 3568 } else { 3569 SCTP_INP_WLOCK(inp); 3570 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3571 SCTP_INP_WUNLOCK(inp); 3572 } 3573 } 3574 break; 3575 case SCTP_PEER_ADDR_PARAMS: 3576 /* Applys to the specific association */ 3577 { 3578 struct sctp_paddrparams *paddrp; 3579 struct sctp_nets *net; 3580 3581 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3582 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3583 net = NULL; 3584 if (stcb) { 3585 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3586 } else { 3587 /* 3588 * We increment here since 3589 * sctp_findassociation_ep_addr() wil do a 3590 * decrement if it finds the stcb as long as 3591 * the locked tcb (last argument) is NOT a 3592 * TCB.. aka NULL. 
3593 */ 3594 SCTP_INP_INCR_REF(inp); 3595 stcb = sctp_findassociation_ep_addr(&inp, 3596 (struct sockaddr *)&paddrp->spp_address, 3597 &net, NULL, NULL); 3598 if (stcb == NULL) { 3599 SCTP_INP_DECR_REF(inp); 3600 } 3601 } 3602 if (stcb && (net == NULL)) { 3603 struct sockaddr *sa; 3604 3605 sa = (struct sockaddr *)&paddrp->spp_address; 3606 if (sa->sa_family == AF_INET) { 3607 struct sockaddr_in *sin; 3608 3609 sin = (struct sockaddr_in *)sa; 3610 if (sin->sin_addr.s_addr) { 3611 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3612 SCTP_TCB_UNLOCK(stcb); 3613 error = EINVAL; 3614 break; 3615 } 3616 } else if (sa->sa_family == AF_INET6) { 3617 struct sockaddr_in6 *sin6; 3618 3619 sin6 = (struct sockaddr_in6 *)sa; 3620 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3621 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3622 SCTP_TCB_UNLOCK(stcb); 3623 error = EINVAL; 3624 break; 3625 } 3626 } else { 3627 error = EAFNOSUPPORT; 3628 SCTP_TCB_UNLOCK(stcb); 3629 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3630 break; 3631 } 3632 } 3633 /* sanity checks */ 3634 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3635 if (stcb) 3636 SCTP_TCB_UNLOCK(stcb); 3637 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3638 return (EINVAL); 3639 } 3640 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3641 if (stcb) 3642 SCTP_TCB_UNLOCK(stcb); 3643 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3644 return (EINVAL); 3645 } 3646 if (stcb) { 3647 /************************TCB SPECIFIC SET ******************/ 3648 /* 3649 * do we change the timer for HB, we run 3650 * only one? 3651 */ 3652 int ovh = 0; 3653 3654 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3655 ovh = SCTP_MED_OVERHEAD; 3656 } else { 3657 ovh = SCTP_MED_V4_OVERHEAD; 3658 } 3659 3660 if (paddrp->spp_hbinterval) 3661 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3662 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3663 stcb->asoc.heart_beat_delay = 0; 3664 3665 /* network sets ? 
*/ 3666 if (net) { 3667 /************************NET SPECIFIC SET ******************/ 3668 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3669 /* on demand HB */ 3670 if (sctp_send_hb(stcb, 1, net) < 0) { 3671 /* asoc destroyed */ 3672 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3673 error = EINVAL; 3674 break; 3675 } 3676 } 3677 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3678 net->dest_state |= SCTP_ADDR_NOHB; 3679 } 3680 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3681 net->dest_state &= ~SCTP_ADDR_NOHB; 3682 } 3683 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3684 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3685 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3686 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3687 } 3688 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3689 net->mtu = paddrp->spp_pathmtu + ovh; 3690 if (net->mtu < stcb->asoc.smallest_mtu) { 3691 #ifdef SCTP_PRINT_FOR_B_AND_M 3692 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3693 net->mtu); 3694 #endif 3695 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3696 } 3697 } 3698 } 3699 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3700 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3701 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3702 } 3703 } 3704 if (paddrp->spp_pathmaxrxt) 3705 net->failure_threshold = paddrp->spp_pathmaxrxt; 3706 #ifdef INET 3707 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3708 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3709 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3710 } 3711 } 3712 #endif 3713 #ifdef INET6 3714 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3715 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3716 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3717 } 3718 } 3719 #endif 3720 } else { 3721 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3722 if (paddrp->spp_pathmaxrxt) 3723 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3724 3725 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3726 /* Turn back on the timer */ 3727 stcb->asoc.hb_is_disabled = 0; 3728 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3729 } 3730 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3731 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3732 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3733 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3734 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3735 } 3736 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3737 net->mtu = paddrp->spp_pathmtu + ovh; 3738 if (net->mtu < stcb->asoc.smallest_mtu) { 3739 #ifdef SCTP_PRINT_FOR_B_AND_M 3740 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3741 net->mtu); 3742 #endif 3743 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3744 } 3745 } 3746 } 3747 } 3748 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3749 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3750 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3751 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3752 } 3753 } 3754 } 3755 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3756 int cnt_of_unconf = 0; 3757 struct sctp_nets *lnet; 3758 3759 stcb->asoc.hb_is_disabled = 1; 3760 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3761 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3762 cnt_of_unconf++; 3763 } 3764 } 3765 /* 3766 * stop the timer ONLY if we 3767 * have no unconfirmed 3768 * 
addresses 3769 */ 3770 if (cnt_of_unconf == 0) { 3771 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3772 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 3773 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3774 } 3775 } 3776 } 3777 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3778 /* start up the timer. */ 3779 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3780 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3781 } 3782 } 3783 #ifdef INET 3784 if (paddrp->spp_flags & SPP_IPV4_TOS) 3785 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3786 #endif 3787 #ifdef INET6 3788 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3789 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3790 #endif 3791 3792 } 3793 SCTP_TCB_UNLOCK(stcb); 3794 } else { 3795 /************************NO TCB, SET TO default stuff ******************/ 3796 SCTP_INP_WLOCK(inp); 3797 /* 3798 * For the TOS/FLOWLABEL stuff you set it 3799 * with the options on the socket 3800 */ 3801 if (paddrp->spp_pathmaxrxt) { 3802 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3803 } 3804 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3805 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 3806 else if (paddrp->spp_hbinterval) { 3807 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 3808 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 3809 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3810 } 3811 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3812 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3813 3814 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3815 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3816 } 3817 SCTP_INP_WUNLOCK(inp); 3818 } 3819 } 3820 break; 3821 case SCTP_RTOINFO: 3822 { 3823 struct sctp_rtoinfo *srto; 3824 uint32_t new_init, new_min, new_max; 3825 3826 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3827 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3828 3829 if (stcb) { 3830 if (srto->srto_initial) 3831 new_init = srto->srto_initial; 3832 else 3833 new_init = stcb->asoc.initial_rto; 3834 if (srto->srto_max) 3835 new_max = srto->srto_max; 3836 else 3837 new_max = stcb->asoc.maxrto; 3838 if (srto->srto_min) 3839 new_min = srto->srto_min; 3840 else 3841 new_min = stcb->asoc.minrto; 3842 if ((new_min <= new_init) && (new_init <= new_max)) { 3843 stcb->asoc.initial_rto = new_init; 3844 stcb->asoc.maxrto = new_max; 3845 stcb->asoc.minrto = new_min; 3846 } else { 3847 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3848 error = EINVAL; 3849 } 3850 SCTP_TCB_UNLOCK(stcb); 3851 } else { 3852 SCTP_INP_WLOCK(inp); 3853 if (srto->srto_initial) 3854 new_init = srto->srto_initial; 3855 else 3856 new_init = inp->sctp_ep.initial_rto; 3857 if (srto->srto_max) 3858 new_max = srto->srto_max; 3859 else 3860 new_max = inp->sctp_ep.sctp_maxrto; 3861 if (srto->srto_min) 3862 new_min = srto->srto_min; 3863 else 3864 new_min = inp->sctp_ep.sctp_minrto; 3865 if ((new_min <= new_init) && (new_init <= new_max)) { 3866 inp->sctp_ep.initial_rto = new_init; 3867 inp->sctp_ep.sctp_maxrto = new_max; 3868 inp->sctp_ep.sctp_minrto = new_min; 3869 } else { 3870 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3871 error = EINVAL; 3872 } 3873 SCTP_INP_WUNLOCK(inp); 3874 } 3875 } 3876 break; 3877 case SCTP_ASSOCINFO: 3878 { 3879 struct sctp_assocparams *sasoc; 3880 3881 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 3882 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 3883 if 
(sasoc->sasoc_cookie_life) { 3884 /* boundary check the cookie life */ 3885 if (sasoc->sasoc_cookie_life < 1000) 3886 sasoc->sasoc_cookie_life = 1000; 3887 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 3888 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; 3889 } 3890 } 3891 if (stcb) { 3892 if (sasoc->sasoc_asocmaxrxt) 3893 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 3894 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 3895 sasoc->sasoc_peer_rwnd = 0; 3896 sasoc->sasoc_local_rwnd = 0; 3897 if (sasoc->sasoc_cookie_life) { 3898 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 3899 } 3900 SCTP_TCB_UNLOCK(stcb); 3901 } else { 3902 SCTP_INP_WLOCK(inp); 3903 if (sasoc->sasoc_asocmaxrxt) 3904 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 3905 sasoc->sasoc_number_peer_destinations = 0; 3906 sasoc->sasoc_peer_rwnd = 0; 3907 sasoc->sasoc_local_rwnd = 0; 3908 if (sasoc->sasoc_cookie_life) { 3909 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 3910 } 3911 SCTP_INP_WUNLOCK(inp); 3912 } 3913 } 3914 break; 3915 case SCTP_INITMSG: 3916 { 3917 struct sctp_initmsg *sinit; 3918 3919 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 3920 SCTP_INP_WLOCK(inp); 3921 if (sinit->sinit_num_ostreams) 3922 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 3923 3924 if (sinit->sinit_max_instreams) 3925 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 3926 3927 if (sinit->sinit_max_attempts) 3928 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 3929 3930 if (sinit->sinit_max_init_timeo) 3931 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 3932 SCTP_INP_WUNLOCK(inp); 3933 } 3934 break; 3935 case SCTP_PRIMARY_ADDR: 3936 { 3937 struct sctp_setprim *spa; 3938 struct sctp_nets *net, *lnet; 3939 3940 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 3941 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 3942 3943 net = NULL; 3944 if (stcb) { 3945 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 3946 } else { 3947 /* 3948 * We increment here since 3949 * sctp_findassociation_ep_addr() wil do a 3950 * decrement if it finds the stcb as long as 3951 * the locked tcb (last argument) is NOT a 3952 * TCB.. aka NULL. 3953 */ 3954 SCTP_INP_INCR_REF(inp); 3955 stcb = sctp_findassociation_ep_addr(&inp, 3956 (struct sockaddr *)&spa->ssp_addr, 3957 &net, NULL, NULL); 3958 if (stcb == NULL) { 3959 SCTP_INP_DECR_REF(inp); 3960 } 3961 } 3962 3963 if ((stcb) && (net)) { 3964 if ((net != stcb->asoc.primary_destination) && 3965 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 3966 /* Ok we need to set it */ 3967 lnet = stcb->asoc.primary_destination; 3968 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 3969 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 3970 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 3971 } 3972 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 3973 } 3974 } 3975 } else { 3976 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3977 error = EINVAL; 3978 } 3979 if (stcb) { 3980 SCTP_TCB_UNLOCK(stcb); 3981 } 3982 } 3983 break; 3984 case SCTP_SET_DYNAMIC_PRIMARY: 3985 { 3986 union sctp_sockstore *ss; 3987 3988 error = priv_check(curthread, 3989 PRIV_NETINET_RESERVEDPORT); 3990 if (error) 3991 break; 3992 3993 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 3994 /* SUPER USER CHECK? 
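		 * The priv_check(PRIV_NETINET_RESERVEDPORT) call above already
		 * restricts this option to privileged threads, so no separate
		 * superuser check appears to be needed at this point.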
*/ 3995 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 3996 } 3997 break; 3998 case SCTP_SET_PEER_PRIMARY_ADDR: 3999 { 4000 struct sctp_setpeerprim *sspp; 4001 4002 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 4003 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 4004 if (stcb != NULL) { 4005 struct sctp_ifa *ifa; 4006 4007 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 4008 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 4009 if (ifa == NULL) { 4010 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4011 error = EINVAL; 4012 goto out_of_it; 4013 } 4014 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4015 /* 4016 * Must validate the ifa found is in 4017 * our ep 4018 */ 4019 struct sctp_laddr *laddr; 4020 int found = 0; 4021 4022 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4023 if (laddr->ifa == NULL) { 4024 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 4025 __FUNCTION__); 4026 continue; 4027 } 4028 if (laddr->ifa == ifa) { 4029 found = 1; 4030 break; 4031 } 4032 } 4033 if (!found) { 4034 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4035 error = EINVAL; 4036 goto out_of_it; 4037 } 4038 } 4039 if (sctp_set_primary_ip_address_sa(stcb, 4040 (struct sockaddr *)&sspp->sspp_addr) != 0) { 4041 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4042 error = EINVAL; 4043 } 4044 out_of_it: 4045 SCTP_TCB_UNLOCK(stcb); 4046 } else { 4047 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4048 error = EINVAL; 4049 } 4050 4051 } 4052 break; 4053 case SCTP_BINDX_ADD_ADDR: 4054 { 4055 struct sctp_getaddresses *addrs; 4056 size_t sz; 4057 struct thread *td; 4058 4059 td = (struct thread *)p; 4060 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 4061 optsize); 4062 if (addrs->addr->sa_family == AF_INET) { 4063 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4064 if (optsize < sz) { 4065 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4066 error = EINVAL; 4067 break; 4068 } 4069 if (td != NULL && prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr))) { 4070 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL); 4071 error = EADDRNOTAVAIL; 4072 break; 4073 } 4074 #ifdef INET6 4075 } else if (addrs->addr->sa_family == AF_INET6) { 4076 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4077 if (optsize < sz) { 4078 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4079 error = EINVAL; 4080 break; 4081 } 4082 if (td != NULL && prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4083 (SCTP_IPV6_V6ONLY(inp) != 0)) != 0) { 4084 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL); 4085 error = EADDRNOTAVAIL; 4086 break; 4087 } 4088 #endif 4089 } else { 4090 error = EAFNOSUPPORT; 4091 break; 4092 } 4093 sctp_bindx_add_address(so, inp, addrs->addr, 4094 addrs->sget_assoc_id, vrf_id, 4095 &error, p); 4096 } 4097 break; 4098 case SCTP_BINDX_REM_ADDR: 4099 { 4100 struct sctp_getaddresses *addrs; 4101 size_t sz; 4102 struct thread *td; 4103 4104 td = (struct thread *)p; 4105 4106 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 4107 if (addrs->addr->sa_family == AF_INET) { 4108 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4109 if (optsize < sz) { 4110 
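				/*
				 * The caller's option buffer is too small to
				 * hold a struct sockaddr_in after the
				 * sctp_getaddresses header, so the request is
				 * rejected with EINVAL just below.
				 */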
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4111 error = EINVAL; 4112 break; 4113 } 4114 if (td != NULL && prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr))) { 4115 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL); 4116 error = EADDRNOTAVAIL; 4117 break; 4118 } 4119 #ifdef INET6 4120 } else if (addrs->addr->sa_family == AF_INET6) { 4121 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4122 if (optsize < sz) { 4123 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4124 error = EINVAL; 4125 break; 4126 } 4127 if (td != NULL && prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4128 (SCTP_IPV6_V6ONLY(inp) != 0)) != 0) { 4129 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL); 4130 error = EADDRNOTAVAIL; 4131 break; 4132 } 4133 #endif 4134 } else { 4135 error = EAFNOSUPPORT; 4136 break; 4137 } 4138 sctp_bindx_delete_address(so, inp, addrs->addr, 4139 addrs->sget_assoc_id, vrf_id, 4140 &error); 4141 } 4142 break; 4143 default: 4144 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 4145 error = ENOPROTOOPT; 4146 break; 4147 } /* end switch (opt) */ 4148 return (error); 4149 } 4150 4151 int 4152 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 4153 { 4154 void *optval = NULL; 4155 size_t optsize = 0; 4156 struct sctp_inpcb *inp; 4157 void *p; 4158 int error = 0; 4159 4160 inp = (struct sctp_inpcb *)so->so_pcb; 4161 if (inp == 0) { 4162 /* I made the same as TCP since we are not setup? */ 4163 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4164 return (ECONNRESET); 4165 } 4166 if (sopt->sopt_level != IPPROTO_SCTP) { 4167 /* wrong proto level... send back up to IP */ 4168 #ifdef INET6 4169 if (INP_CHECK_SOCKAF(so, AF_INET6)) 4170 error = ip6_ctloutput(so, sopt); 4171 else 4172 #endif /* INET6 */ 4173 error = ip_ctloutput(so, sopt); 4174 return (error); 4175 } 4176 optsize = sopt->sopt_valsize; 4177 if (optsize) { 4178 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); 4179 if (optval == NULL) { 4180 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); 4181 return (ENOBUFS); 4182 } 4183 error = sooptcopyin(sopt, optval, optsize, optsize); 4184 if (error) { 4185 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4186 goto out; 4187 } 4188 } 4189 p = (void *)sopt->sopt_td; 4190 if (sopt->sopt_dir == SOPT_SET) { 4191 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 4192 } else if (sopt->sopt_dir == SOPT_GET) { 4193 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 4194 } else { 4195 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4196 error = EINVAL; 4197 } 4198 if ((error == 0) && (optval != NULL)) { 4199 error = sooptcopyout(sopt, optval, optsize); 4200 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4201 } else if (optval != NULL) { 4202 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4203 } 4204 out: 4205 return (error); 4206 } 4207 4208 4209 static int 4210 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 4211 { 4212 int error = 0; 4213 int create_lock_on = 0; 4214 uint32_t vrf_id; 4215 struct sctp_inpcb *inp; 4216 struct sctp_tcb *stcb = NULL; 4217 4218 inp = (struct sctp_inpcb *)so->so_pcb; 4219 if (inp == 0) { 4220 /* I made the same as TCP since we are not setup? 
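		 * Note that EINVAL is what gets logged here while ECONNRESET
		 * is what is actually returned to the caller.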
*/ 4221 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4222 return (ECONNRESET); 4223 } 4224 if (addr == NULL) { 4225 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4226 return EINVAL; 4227 } 4228 #ifdef INET6 4229 if (addr->sa_family == AF_INET6) { 4230 struct sockaddr_in6 *sin6p; 4231 4232 if (addr->sa_len != sizeof(struct sockaddr_in6)) { 4233 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4234 return (EINVAL); 4235 } 4236 sin6p = (struct sockaddr_in6 *)addr; 4237 if (p != NULL && prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr) != 0) { 4238 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4239 return (EINVAL); 4240 } 4241 } else 4242 #endif 4243 if (addr->sa_family == AF_INET) { 4244 struct sockaddr_in *sinp; 4245 4246 if (addr->sa_len != sizeof(struct sockaddr_in)) { 4247 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4248 return (EINVAL); 4249 } 4250 sinp = (struct sockaddr_in *)addr; 4251 if (p != NULL && prison_remote_ip4(p->td_ucred, &sinp->sin_addr) != 0) { 4252 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4253 return (EINVAL); 4254 } 4255 } else { 4256 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); 4257 return (EAFNOSUPPORT); 4258 } 4259 SCTP_INP_INCR_REF(inp); 4260 SCTP_ASOC_CREATE_LOCK(inp); 4261 create_lock_on = 1; 4262 4263 4264 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4265 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4266 /* Should I really unlock ? */ 4267 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 4268 error = EFAULT; 4269 goto out_now; 4270 } 4271 #ifdef INET6 4272 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 4273 (addr->sa_family == AF_INET6)) { 4274 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4275 error = EINVAL; 4276 goto out_now; 4277 } 4278 #endif /* INET6 */ 4279 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 4280 SCTP_PCB_FLAGS_UNBOUND) { 4281 /* Bind a ephemeral port */ 4282 error = sctp_inpcb_bind(so, NULL, NULL, p); 4283 if (error) { 4284 goto out_now; 4285 } 4286 } 4287 /* Now do we connect? */ 4288 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 4289 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { 4290 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4291 error = EINVAL; 4292 goto out_now; 4293 } 4294 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4295 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4296 /* We are already connected AND the TCP model */ 4297 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4298 error = EADDRINUSE; 4299 goto out_now; 4300 } 4301 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4302 SCTP_INP_RLOCK(inp); 4303 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4304 SCTP_INP_RUNLOCK(inp); 4305 } else { 4306 /* 4307 * We increment here since sctp_findassociation_ep_addr() 4308 * will do a decrement if it finds the stcb as long as the 4309 * locked tcb (last argument) is NOT a TCB.. aka NULL. 
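		 * In other words: hold an extra reference on the inp so it
		 * cannot go away during the lookup; if no association is
		 * found, that reference is dropped right below, otherwise
		 * the lookup has already balanced it.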
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_DECR_REF(inp);
		} else {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	if (stcb != NULL) {
		/* We already have, or are bringing up, an association */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
		error = EALREADY;
		goto out_now;
	}
	vrf_id = inp->def_vrf_id;
	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
	if (stcb == NULL) {
		/* Gak! no memory */
		goto out_now;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		/* Set the connected flag so we can queue data */
		soisconnecting(so);
	}
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);

	/* initialize authentication parameters for the assoc */
	sctp_initialize_auth_params(inp, stcb);

	sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
	SCTP_TCB_UNLOCK(stcb);
out_now:
	if (create_lock_on) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
	}
	SCTP_INP_DECR_REF(inp);
	return error;
}

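/*
 * A minimal userland sketch (illustrative only: the peer address, port and
 * error handling are assumptions, not part of this file) of how a one-to-one
 * style SCTP socket reaches sctp_connect() above:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in peer;
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.sin_family = AF_INET;
 *	peer.sin_len = sizeof(peer);
 *	peer.sin_port = htons(5001);
 *	peer.sin_addr.s_addr = inet_addr("192.0.2.1");
 *	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *		err(1, "connect");
 *
 * The connect(2) call is dispatched through pru_connect to sctp_connect(),
 * which allocates the association, enters COOKIE_WAIT and sends the INIT.
 */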
4395 */ 4396 SCTP_INP_DECR_REF(tinp); 4397 return (EADDRINUSE); 4398 } else if (tinp) { 4399 SCTP_INP_DECR_REF(tinp); 4400 } 4401 } 4402 } else { 4403 /* Setup a local addr bound all */ 4404 memset(&store, 0, sizeof(store)); 4405 store.sin.sin_port = inp->sctp_lport; 4406 #ifdef INET6 4407 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4408 store.sa.sa_family = AF_INET6; 4409 store.sa.sa_len = sizeof(struct sockaddr_in6); 4410 } 4411 #endif 4412 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 4413 store.sa.sa_family = AF_INET; 4414 store.sa.sa_len = sizeof(struct sockaddr_in); 4415 } 4416 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4417 if (tinp && (tinp != inp) && 4418 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4419 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4420 (tinp->sctp_socket->so_qlimit)) { 4421 /* 4422 * we have a listener already and its not 4423 * this inp. 4424 */ 4425 SCTP_INP_DECR_REF(tinp); 4426 return (EADDRINUSE); 4427 } else if (tinp) { 4428 SCTP_INP_DECR_REF(inp); 4429 } 4430 } 4431 } 4432 SCTP_INP_RLOCK(inp); 4433 #ifdef SCTP_LOCK_LOGGING 4434 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { 4435 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4436 } 4437 #endif 4438 SOCK_LOCK(so); 4439 error = solisten_proto_check(so); 4440 if (error) { 4441 SOCK_UNLOCK(so); 4442 SCTP_INP_RUNLOCK(inp); 4443 return (error); 4444 } 4445 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 4446 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4447 /* 4448 * The unlucky case - We are in the tcp pool with this guy. 4449 * - Someone else is in the main inp slot. - We must move 4450 * this guy (the listener) to the main slot - We must then 4451 * move the guy that was listener to the TCP Pool. 4452 */ 4453 if (sctp_swap_inpcb_for_listen(inp)) { 4454 goto in_use; 4455 } 4456 } 4457 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4458 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4459 /* We are already connected AND the TCP model */ 4460 in_use: 4461 SCTP_INP_RUNLOCK(inp); 4462 SOCK_UNLOCK(so); 4463 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4464 return (EADDRINUSE); 4465 } 4466 SCTP_INP_RUNLOCK(inp); 4467 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 4468 /* We must do a bind. */ 4469 SOCK_UNLOCK(so); 4470 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { 4471 /* bind error, probably perm */ 4472 return (error); 4473 } 4474 SOCK_LOCK(so); 4475 } 4476 /* It appears for 7.0 and on, we must always call this. 
*/ 4477 solisten_proto(so, backlog); 4478 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4479 /* remove the ACCEPTCONN flag for one-to-many sockets */ 4480 so->so_options &= ~SO_ACCEPTCONN; 4481 } 4482 if (backlog == 0) { 4483 /* turning off listen */ 4484 so->so_options &= ~SO_ACCEPTCONN; 4485 } 4486 SOCK_UNLOCK(so); 4487 return (error); 4488 } 4489 4490 static int sctp_defered_wakeup_cnt = 0; 4491 4492 int 4493 sctp_accept(struct socket *so, struct sockaddr **addr) 4494 { 4495 struct sctp_tcb *stcb; 4496 struct sctp_inpcb *inp; 4497 union sctp_sockstore store; 4498 4499 #ifdef INET6 4500 int error; 4501 4502 #endif 4503 inp = (struct sctp_inpcb *)so->so_pcb; 4504 4505 if (inp == 0) { 4506 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4507 return (ECONNRESET); 4508 } 4509 SCTP_INP_RLOCK(inp); 4510 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4511 SCTP_INP_RUNLOCK(inp); 4512 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 4513 return (EOPNOTSUPP); 4514 } 4515 if (so->so_state & SS_ISDISCONNECTED) { 4516 SCTP_INP_RUNLOCK(inp); 4517 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); 4518 return (ECONNABORTED); 4519 } 4520 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4521 if (stcb == NULL) { 4522 SCTP_INP_RUNLOCK(inp); 4523 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4524 return (ECONNRESET); 4525 } 4526 SCTP_TCB_LOCK(stcb); 4527 SCTP_INP_RUNLOCK(inp); 4528 store = stcb->asoc.primary_destination->ro._l_addr; 4529 SCTP_TCB_UNLOCK(stcb); 4530 switch (store.sa.sa_family) { 4531 case AF_INET: 4532 { 4533 struct sockaddr_in *sin; 4534 4535 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4536 sin->sin_family = AF_INET; 4537 sin->sin_len = sizeof(*sin); 4538 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 4539 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 4540 *addr = (struct sockaddr *)sin; 4541 break; 4542 } 4543 #ifdef INET6 4544 case AF_INET6: 4545 { 4546 struct sockaddr_in6 *sin6; 4547 4548 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 4549 sin6->sin6_family = AF_INET6; 4550 sin6->sin6_len = sizeof(*sin6); 4551 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 4552 4553 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 4554 if ((error = sa6_recoverscope(sin6)) != 0) { 4555 SCTP_FREE_SONAME(sin6); 4556 return (error); 4557 } 4558 *addr = (struct sockaddr *)sin6; 4559 break; 4560 } 4561 #endif 4562 default: 4563 /* TSNH */ 4564 break; 4565 } 4566 /* Wake any delayed sleep action */ 4567 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 4568 SCTP_INP_WLOCK(inp); 4569 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 4570 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 4571 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 4572 SCTP_INP_WUNLOCK(inp); 4573 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 4574 if (sowriteable(inp->sctp_socket)) { 4575 sowwakeup_locked(inp->sctp_socket); 4576 } else { 4577 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 4578 } 4579 SCTP_INP_WLOCK(inp); 4580 } 4581 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 4582 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 4583 SCTP_INP_WUNLOCK(inp); 4584 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 4585 if (soreadable(inp->sctp_socket)) { 4586 sctp_defered_wakeup_cnt++; 4587 sorwakeup_locked(inp->sctp_socket); 4588 } else { 4589 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 4590 } 4591 SCTP_INP_WLOCK(inp); 4592 } 4593 SCTP_INP_WUNLOCK(inp); 4594 } 4595 return (0); 4596 } 4597 4598 
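/*
 * A minimal userland sketch of the one-to-one accept path (the port number
 * and the omitted error handling are illustrative assumptions only):
 *
 *	int lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in local, peer;
 *	socklen_t plen = sizeof(peer);
 *
 *	memset(&local, 0, sizeof(local));
 *	local.sin_family = AF_INET;
 *	local.sin_len = sizeof(local);
 *	local.sin_port = htons(5001);
 *	bind(lfd, (struct sockaddr *)&local, sizeof(local));
 *	listen(lfd, 5);
 *	int cfd = accept(lfd, (struct sockaddr *)&peer, &plen);
 *
 * accept(2) is dispatched through pru_accept to sctp_accept() above, which
 * reports the peer's primary destination address back to the caller.
 */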
int 4599 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 4600 { 4601 struct sockaddr_in *sin; 4602 uint32_t vrf_id; 4603 struct sctp_inpcb *inp; 4604 struct sctp_ifa *sctp_ifa; 4605 4606 /* 4607 * Do the malloc first in case it blocks. 4608 */ 4609 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4610 sin->sin_family = AF_INET; 4611 sin->sin_len = sizeof(*sin); 4612 inp = (struct sctp_inpcb *)so->so_pcb; 4613 if (!inp) { 4614 SCTP_FREE_SONAME(sin); 4615 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4616 return ECONNRESET; 4617 } 4618 SCTP_INP_RLOCK(inp); 4619 sin->sin_port = inp->sctp_lport; 4620 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 4621 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4622 struct sctp_tcb *stcb; 4623 struct sockaddr_in *sin_a; 4624 struct sctp_nets *net; 4625 int fnd; 4626 4627 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4628 if (stcb == NULL) { 4629 goto notConn; 4630 } 4631 fnd = 0; 4632 sin_a = NULL; 4633 SCTP_TCB_LOCK(stcb); 4634 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4635 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4636 if (sin_a == NULL) 4637 /* this will make coverity happy */ 4638 continue; 4639 4640 if (sin_a->sin_family == AF_INET) { 4641 fnd = 1; 4642 break; 4643 } 4644 } 4645 if ((!fnd) || (sin_a == NULL)) { 4646 /* punt */ 4647 SCTP_TCB_UNLOCK(stcb); 4648 goto notConn; 4649 } 4650 vrf_id = inp->def_vrf_id; 4651 sctp_ifa = sctp_source_address_selection(inp, 4652 stcb, 4653 (sctp_route_t *) & net->ro, 4654 net, 0, vrf_id); 4655 if (sctp_ifa) { 4656 sin->sin_addr = sctp_ifa->address.sin.sin_addr; 4657 sctp_free_ifa(sctp_ifa); 4658 } 4659 SCTP_TCB_UNLOCK(stcb); 4660 } else { 4661 /* For the bound all case you get back 0 */ 4662 notConn: 4663 sin->sin_addr.s_addr = 0; 4664 } 4665 4666 } else { 4667 /* Take the first IPv4 address in the list */ 4668 struct sctp_laddr *laddr; 4669 int fnd = 0; 4670 4671 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4672 if (laddr->ifa->address.sa.sa_family == AF_INET) { 4673 struct sockaddr_in *sin_a; 4674 4675 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa; 4676 sin->sin_addr = sin_a->sin_addr; 4677 fnd = 1; 4678 break; 4679 } 4680 } 4681 if (!fnd) { 4682 SCTP_FREE_SONAME(sin); 4683 SCTP_INP_RUNLOCK(inp); 4684 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 4685 return ENOENT; 4686 } 4687 } 4688 SCTP_INP_RUNLOCK(inp); 4689 (*addr) = (struct sockaddr *)sin; 4690 return (0); 4691 } 4692 4693 int 4694 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 4695 { 4696 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 4697 int fnd; 4698 struct sockaddr_in *sin_a; 4699 struct sctp_inpcb *inp; 4700 struct sctp_tcb *stcb; 4701 struct sctp_nets *net; 4702 4703 /* Do the malloc first in case it blocks. 
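	 * (In this routine the PCB and connected-state checks actually run
	 * before the allocation; the allocation may still sleep, which is
	 * why the inp pointer is re-fetched once it returns.)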
	 */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if ((inp == NULL) ||
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
		/* UDP type and listeners will drop out here */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
		return (ENOTCONN);
	}
	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);

	/* We must recapture the inp in case the malloc blocked */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (!inp) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return ECONNRESET;
	}
	SCTP_INP_RLOCK(inp);
	stcb = LIST_FIRST(&inp->sctp_asoc_list);
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
	}
	SCTP_INP_RUNLOCK(inp);
	if (stcb == NULL) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return ECONNRESET;
	}
	fnd = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
		if (sin_a->sin_family == AF_INET) {
			fnd = 1;
			sin->sin_port = stcb->rport;
			sin->sin_addr = sin_a->sin_addr;
			break;
		}
	}
	SCTP_TCB_UNLOCK(stcb);
	if (!fnd) {
		/* No IPv4 address */
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		return ENOENT;
	}
	(*addr) = (struct sockaddr *)sin;
	return (0);
}

struct pr_usrreqs sctp_usrreqs = {
	.pru_abort = sctp_abort,
	.pru_accept = sctp_accept,
	.pru_attach = sctp_attach,
	.pru_bind = sctp_bind,
	.pru_connect = sctp_connect,
	.pru_control = in_control,
	.pru_close = sctp_close,
	.pru_detach = sctp_close,
	.pru_sopoll = sopoll_generic,
	.pru_flush = sctp_flush,
	.pru_disconnect = sctp_disconnect,
	.pru_listen = sctp_listen,
	.pru_peeraddr = sctp_peeraddr,
	.pru_send = sctp_sendm,
	.pru_shutdown = sctp_shutdown,
	.pru_sockaddr = sctp_ingetaddr,
	.pru_sosend = sctp_sosend,
	.pru_soreceive = sctp_soreceive
};
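
/*
 * sctp_usrreqs is what ties these routines into the generic socket layer:
 * pru_connect maps connect(2) onto sctp_connect(), pru_listen maps
 * listen(2) onto sctp_listen(), and so on.  Both one-to-one (SOCK_STREAM)
 * and one-to-many (SOCK_SEQPACKET) SCTP sockets dispatch through this
 * table.  A minimal one-to-many sketch (address and port are illustrative
 * assumptions only):
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in peer;
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.sin_family = AF_INET;
 *	peer.sin_len = sizeof(peer);
 *	peer.sin_port = htons(5001);
 *	peer.sin_addr.s_addr = inet_addr("192.0.2.1");
 *	(void)sendto(fd, "hi", 2, 0, (struct sockaddr *)&peer, sizeof(peer));
 *
 * Such a sendto(2) sets up the association implicitly via the
 * pru_sosend/pru_send entry points rather than via sctp_connect().
 */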