1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 #include <netinet/sctp_os.h> 36 #include <sys/proc.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctp_header.h> 39 #include <netinet/sctp_var.h> 40 #if defined(INET6) 41 #endif 42 #include <netinet/sctp_sysctl.h> 43 #include <netinet/sctp_output.h> 44 #include <netinet/sctp_uio.h> 45 #include <netinet/sctp_asconf.h> 46 #include <netinet/sctputil.h> 47 #include <netinet/sctp_indata.h> 48 #include <netinet/sctp_timer.h> 49 #include <netinet/sctp_auth.h> 50 #include <netinet/sctp_bsd_addr.h> 51 #include <netinet/sctp_cc_functions.h> 52 53 54 55 56 void 57 sctp_init(void) 58 { 59 u_long sb_max_adj; 60 61 bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat)); 62 63 /* Initialize and modify the sysctled variables */ 64 sctp_init_sysctls(); 65 if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) 66 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8); 67 /* 68 * Allow a user to take no more than 1/2 the number of clusters or 69 * the SB_MAX whichever is smaller for the send window. 70 */ 71 sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); 72 SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj, 73 (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT)); 74 /* 75 * Now for the recv window, should we take the same amount? or 76 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For 77 * now I will just copy. 
78 */ 79 SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace); 80 81 SCTP_BASE_VAR(first_time) = 0; 82 SCTP_BASE_VAR(sctp_pcb_initialized) = 0; 83 sctp_pcb_init(); 84 #if defined(SCTP_PACKET_LOGGING) 85 SCTP_BASE_VAR(packet_log_writers) = 0; 86 SCTP_BASE_VAR(packet_log_end) = 0; 87 bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE); 88 #endif 89 90 91 } 92 93 void 94 sctp_finish(void) 95 { 96 sctp_pcb_finish(); 97 } 98 99 100 101 void 102 sctp_pathmtu_adjustment(struct sctp_inpcb *inp, 103 struct sctp_tcb *stcb, 104 struct sctp_nets *net, 105 uint16_t nxtsz) 106 { 107 struct sctp_tmit_chunk *chk; 108 109 /* Adjust that too */ 110 stcb->asoc.smallest_mtu = nxtsz; 111 /* now off to subtract IP_DF flag if needed */ 112 #ifdef SCTP_PRINT_FOR_B_AND_M 113 SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n", 114 inp, stcb, net, nxtsz); 115 #endif 116 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) { 117 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) { 118 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 119 } 120 } 121 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 122 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) { 123 /* 124 * For this guy we also mark for immediate resend 125 * since we sent to big of chunk 126 */ 127 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 128 if (chk->sent != SCTP_DATAGRAM_RESEND) { 129 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 130 } 131 chk->sent = SCTP_DATAGRAM_RESEND; 132 chk->rec.data.doing_fast_retransmit = 0; 133 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 134 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU, 135 chk->whoTo->flight_size, 136 chk->book_size, 137 (uintptr_t) chk->whoTo, 138 chk->rec.data.TSN_seq); 139 } 140 /* Clear any time so NO RTT is being done */ 141 chk->do_rtt = 0; 142 sctp_flight_size_decrease(chk); 143 sctp_total_flight_decrease(stcb, chk); 144 } 145 } 146 } 147 148 static void 149 sctp_notify_mbuf(struct sctp_inpcb *inp, 150 struct sctp_tcb *stcb, 151 struct sctp_nets *net, 152 struct ip *ip, 153 struct sctphdr *sh) 154 { 155 struct icmp *icmph; 156 int totsz, tmr_stopped = 0; 157 uint16_t nxtsz; 158 159 /* protection */ 160 if ((inp == NULL) || (stcb == NULL) || (net == NULL) || 161 (ip == NULL) || (sh == NULL)) { 162 if (stcb != NULL) { 163 SCTP_TCB_UNLOCK(stcb); 164 } 165 return; 166 } 167 /* First job is to verify the vtag matches what I would send */ 168 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) { 169 SCTP_TCB_UNLOCK(stcb); 170 return; 171 } 172 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) - 173 sizeof(struct ip))); 174 if (icmph->icmp_type != ICMP_UNREACH) { 175 /* We only care about unreachable */ 176 SCTP_TCB_UNLOCK(stcb); 177 return; 178 } 179 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) { 180 /* not a unreachable message due to frag. */ 181 SCTP_TCB_UNLOCK(stcb); 182 return; 183 } 184 totsz = ip->ip_len; 185 186 nxtsz = ntohs(icmph->icmp_nextmtu); 187 if (nxtsz == 0) { 188 /* 189 * old type router that does not tell us what the next size 190 * mtu is. Rats we will have to guess (in a educated fashion 191 * of course) 192 */ 193 nxtsz = find_next_best_mtu(totsz); 194 } 195 /* Stop any PMTU timer */ 196 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 197 tmr_stopped = 1; 198 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 199 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1); 200 } 201 /* Adjust destination size limit */ 202 if (net->mtu > nxtsz) { 203 net->mtu = nxtsz; 204 } 205 /* now what about the ep? 
*/ 206 if (stcb->asoc.smallest_mtu > nxtsz) { 207 #ifdef SCTP_PRINT_FOR_B_AND_M 208 SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n", 209 nxtsz); 210 #endif 211 sctp_pathmtu_adjustment(inp, stcb, net, nxtsz); 212 } 213 if (tmr_stopped) 214 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 215 216 SCTP_TCB_UNLOCK(stcb); 217 } 218 219 220 void 221 sctp_notify(struct sctp_inpcb *inp, 222 struct ip *ip, 223 struct sctphdr *sh, 224 struct sockaddr *to, 225 struct sctp_tcb *stcb, 226 struct sctp_nets *net) 227 { 228 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 229 struct socket *so; 230 231 #endif 232 /* protection */ 233 int reason; 234 struct icmp *icmph; 235 236 237 if ((inp == NULL) || (stcb == NULL) || (net == NULL) || 238 (sh == NULL) || (to == NULL)) { 239 if (stcb) 240 SCTP_TCB_UNLOCK(stcb); 241 return; 242 } 243 /* First job is to verify the vtag matches what I would send */ 244 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) { 245 SCTP_TCB_UNLOCK(stcb); 246 return; 247 } 248 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) - 249 sizeof(struct ip))); 250 if (icmph->icmp_type != ICMP_UNREACH) { 251 /* We only care about unreachable */ 252 SCTP_TCB_UNLOCK(stcb); 253 return; 254 } 255 if ((icmph->icmp_code == ICMP_UNREACH_NET) || 256 (icmph->icmp_code == ICMP_UNREACH_HOST) || 257 (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) || 258 (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) || 259 (icmph->icmp_code == ICMP_UNREACH_ISOLATED) || 260 (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) || 261 (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) || 262 (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) { 263 264 /* 265 * Hmm reachablity problems we must examine closely. If its 266 * not reachable, we may have lost a network. Or if there is 267 * NO protocol at the other end named SCTP. well we consider 268 * it a OOTB abort. 269 */ 270 if (net->dest_state & SCTP_ADDR_REACHABLE) { 271 /* Ok that destination is NOT reachable */ 272 SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n", 273 net->error_count, 274 net->failure_threshold, 275 net); 276 277 net->dest_state &= ~SCTP_ADDR_REACHABLE; 278 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 279 /* 280 * JRS 5/14/07 - If a destination is unreachable, 281 * the PF bit is turned off. This allows an 282 * unambiguous use of the PF bit for destinations 283 * that are reachable but potentially failed. If the 284 * destination is set to the unreachable state, also 285 * set the destination to the PF state. 286 */ 287 /* 288 * Add debug message here if destination is not in 289 * PF state. 290 */ 291 /* Stop any running T3 timers here? */ 292 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 293 net->dest_state &= ~SCTP_ADDR_PF; 294 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 295 net); 296 } 297 net->error_count = net->failure_threshold + 1; 298 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 299 stcb, SCTP_FAILED_THRESHOLD, 300 (void *)net, SCTP_SO_NOT_LOCKED); 301 } 302 SCTP_TCB_UNLOCK(stcb); 303 } else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) || 304 (icmph->icmp_code == ICMP_UNREACH_PORT)) { 305 /* 306 * Here the peer is either playing tricks on us, including 307 * an address that belongs to someone who does not support 308 * SCTP OR was a userland implementation that shutdown and 309 * now is dead. 
In either case treat it like a OOTB abort 310 * with no TCB 311 */ 312 reason = SCTP_PEER_FAULTY; 313 sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED); 314 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 315 so = SCTP_INP_SO(inp); 316 atomic_add_int(&stcb->asoc.refcnt, 1); 317 SCTP_TCB_UNLOCK(stcb); 318 SCTP_SOCKET_LOCK(so, 1); 319 SCTP_TCB_LOCK(stcb); 320 atomic_subtract_int(&stcb->asoc.refcnt, 1); 321 #endif 322 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); 323 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 324 SCTP_SOCKET_UNLOCK(so, 1); 325 /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */ 326 #endif 327 /* no need to unlock here, since the TCB is gone */ 328 } else { 329 SCTP_TCB_UNLOCK(stcb); 330 } 331 } 332 333 void 334 sctp_ctlinput(cmd, sa, vip) 335 int cmd; 336 struct sockaddr *sa; 337 void *vip; 338 { 339 struct ip *ip = vip; 340 struct sctphdr *sh; 341 uint32_t vrf_id; 342 343 /* FIX, for non-bsd is this right? */ 344 vrf_id = SCTP_DEFAULT_VRFID; 345 if (sa->sa_family != AF_INET || 346 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) { 347 return; 348 } 349 if (PRC_IS_REDIRECT(cmd)) { 350 ip = 0; 351 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { 352 return; 353 } 354 if (ip) { 355 struct sctp_inpcb *inp = NULL; 356 struct sctp_tcb *stcb = NULL; 357 struct sctp_nets *net = NULL; 358 struct sockaddr_in to, from; 359 360 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 361 bzero(&to, sizeof(to)); 362 bzero(&from, sizeof(from)); 363 from.sin_family = to.sin_family = AF_INET; 364 from.sin_len = to.sin_len = sizeof(to); 365 from.sin_port = sh->src_port; 366 from.sin_addr = ip->ip_src; 367 to.sin_port = sh->dest_port; 368 to.sin_addr = ip->ip_dst; 369 370 /* 371 * 'to' holds the dest of the packet that failed to be sent. 372 * 'from' holds our local endpoint address. Thus we reverse 373 * the to and the from in the lookup. 374 */ 375 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from, 376 (struct sockaddr *)&to, 377 &inp, &net, 1, vrf_id); 378 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) { 379 if (cmd != PRC_MSGSIZE) { 380 sctp_notify(inp, ip, sh, 381 (struct sockaddr *)&to, stcb, 382 net); 383 } else { 384 /* handle possible ICMP size messages */ 385 sctp_notify_mbuf(inp, stcb, net, ip, sh); 386 } 387 } else { 388 if ((stcb == NULL) && (inp != NULL)) { 389 /* reduce ref-count */ 390 SCTP_INP_WLOCK(inp); 391 SCTP_INP_DECR_REF(inp); 392 SCTP_INP_WUNLOCK(inp); 393 } 394 } 395 } 396 return; 397 } 398 399 static int 400 sctp_getcred(SYSCTL_HANDLER_ARGS) 401 { 402 struct xucred xuc; 403 struct sockaddr_in addrs[2]; 404 struct sctp_inpcb *inp; 405 struct sctp_nets *net; 406 struct sctp_tcb *stcb; 407 int error; 408 uint32_t vrf_id; 409 410 /* FIX, for non-bsd is this right? 
*/ 411 vrf_id = SCTP_DEFAULT_VRFID; 412 413 error = priv_check(req->td, PRIV_NETINET_GETCRED); 414 415 if (error) 416 return (error); 417 418 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 419 if (error) 420 return (error); 421 422 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]), 423 sintosa(&addrs[1]), 424 &inp, &net, 1, vrf_id); 425 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { 426 if ((inp != NULL) && (stcb == NULL)) { 427 /* reduce ref-count */ 428 SCTP_INP_WLOCK(inp); 429 SCTP_INP_DECR_REF(inp); 430 goto cred_can_cont; 431 } 432 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 433 error = ENOENT; 434 goto out; 435 } 436 SCTP_TCB_UNLOCK(stcb); 437 /* 438 * We use the write lock here, only since in the error leg we need 439 * it. If we used RLOCK, then we would have to 440 * wlock/decr/unlock/rlock. Which in theory could create a hole. 441 * Better to use higher wlock. 442 */ 443 SCTP_INP_WLOCK(inp); 444 cred_can_cont: 445 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); 446 if (error) { 447 SCTP_INP_WUNLOCK(inp); 448 goto out; 449 } 450 cru2x(inp->sctp_socket->so_cred, &xuc); 451 SCTP_INP_WUNLOCK(inp); 452 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 453 out: 454 return (error); 455 } 456 457 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 458 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); 459 460 461 static void 462 sctp_abort(struct socket *so) 463 { 464 struct sctp_inpcb *inp; 465 uint32_t flags; 466 467 inp = (struct sctp_inpcb *)so->so_pcb; 468 if (inp == 0) { 469 return; 470 } 471 sctp_must_try_again: 472 flags = inp->sctp_flags; 473 #ifdef SCTP_LOG_CLOSING 474 sctp_log_closing(inp, NULL, 17); 475 #endif 476 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 477 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 478 #ifdef SCTP_LOG_CLOSING 479 sctp_log_closing(inp, NULL, 16); 480 #endif 481 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 482 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 483 SOCK_LOCK(so); 484 SCTP_SB_CLEAR(so->so_snd); 485 /* 486 * same for the rcv ones, they are only here for the 487 * accounting/select. 488 */ 489 SCTP_SB_CLEAR(so->so_rcv); 490 491 /* Now null out the reference, we are completely detached. */ 492 so->so_pcb = NULL; 493 SOCK_UNLOCK(so); 494 } else { 495 flags = inp->sctp_flags; 496 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 497 goto sctp_must_try_again; 498 } 499 } 500 return; 501 } 502 503 static int 504 sctp_attach(struct socket *so, int proto, struct thread *p) 505 { 506 struct sctp_inpcb *inp; 507 struct inpcb *ip_inp; 508 int error; 509 uint32_t vrf_id = SCTP_DEFAULT_VRFID; 510 511 #ifdef IPSEC 512 uint32_t flags; 513 514 #endif 515 inp = (struct sctp_inpcb *)so->so_pcb; 516 if (inp != 0) { 517 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 518 return EINVAL; 519 } 520 error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); 521 if (error) { 522 return error; 523 } 524 error = sctp_inpcb_alloc(so, vrf_id); 525 if (error) { 526 return error; 527 } 528 inp = (struct sctp_inpcb *)so->so_pcb; 529 SCTP_INP_WLOCK(inp); 530 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! 
*/ 531 ip_inp = &inp->ip_inp.inp; 532 ip_inp->inp_vflag |= INP_IPV4; 533 ip_inp->inp_ip_ttl = MODULE_GLOBAL(MOD_INET, ip_defttl); 534 #ifdef IPSEC 535 error = ipsec_init_policy(so, &ip_inp->inp_sp); 536 #ifdef SCTP_LOG_CLOSING 537 sctp_log_closing(inp, NULL, 17); 538 #endif 539 if (error != 0) { 540 flags = inp->sctp_flags; 541 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 542 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 543 #ifdef SCTP_LOG_CLOSING 544 sctp_log_closing(inp, NULL, 15); 545 #endif 546 SCTP_INP_WUNLOCK(inp); 547 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 548 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 549 } else { 550 SCTP_INP_WUNLOCK(inp); 551 } 552 return error; 553 } 554 #endif /* IPSEC */ 555 SCTP_INP_WUNLOCK(inp); 556 return 0; 557 } 558 559 static int 560 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 561 { 562 struct sctp_inpcb *inp = NULL; 563 int error; 564 565 #ifdef INET6 566 if (addr && addr->sa_family != AF_INET) { 567 /* must be a v4 address! */ 568 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 569 return EINVAL; 570 } 571 #endif /* INET6 */ 572 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) { 573 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 574 return EINVAL; 575 } 576 inp = (struct sctp_inpcb *)so->so_pcb; 577 if (inp == 0) { 578 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 579 return EINVAL; 580 } 581 error = sctp_inpcb_bind(so, addr, NULL, p); 582 return error; 583 } 584 585 void 586 sctp_close(struct socket *so) 587 { 588 struct sctp_inpcb *inp; 589 uint32_t flags; 590 591 inp = (struct sctp_inpcb *)so->so_pcb; 592 if (inp == 0) 593 return; 594 595 /* 596 * Inform all the lower layer assoc that we are done. 597 */ 598 sctp_must_try_again: 599 flags = inp->sctp_flags; 600 #ifdef SCTP_LOG_CLOSING 601 sctp_log_closing(inp, NULL, 17); 602 #endif 603 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 604 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 605 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || 606 (so->so_rcv.sb_cc > 0)) { 607 #ifdef SCTP_LOG_CLOSING 608 sctp_log_closing(inp, NULL, 13); 609 #endif 610 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 611 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 612 } else { 613 #ifdef SCTP_LOG_CLOSING 614 sctp_log_closing(inp, NULL, 14); 615 #endif 616 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, 617 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 618 } 619 /* 620 * The socket is now detached, no matter what the state of 621 * the SCTP association. 622 */ 623 SOCK_LOCK(so); 624 SCTP_SB_CLEAR(so->so_snd); 625 /* 626 * same for the rcv ones, they are only here for the 627 * accounting/select. 628 */ 629 SCTP_SB_CLEAR(so->so_rcv); 630 631 /* Now null out the reference, we are completely detached. 
*/ 632 so->so_pcb = NULL; 633 SOCK_UNLOCK(so); 634 } else { 635 flags = inp->sctp_flags; 636 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 637 goto sctp_must_try_again; 638 } 639 } 640 return; 641 } 642 643 644 int 645 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 646 struct mbuf *control, struct thread *p); 647 648 649 int 650 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 651 struct mbuf *control, struct thread *p) 652 { 653 struct sctp_inpcb *inp; 654 int error; 655 656 inp = (struct sctp_inpcb *)so->so_pcb; 657 if (inp == 0) { 658 if (control) { 659 sctp_m_freem(control); 660 control = NULL; 661 } 662 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 663 sctp_m_freem(m); 664 return EINVAL; 665 } 666 /* Got to have an to address if we are NOT a connected socket */ 667 if ((addr == NULL) && 668 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 669 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) 670 ) { 671 goto connected_type; 672 } else if (addr == NULL) { 673 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 674 error = EDESTADDRREQ; 675 sctp_m_freem(m); 676 if (control) { 677 sctp_m_freem(control); 678 control = NULL; 679 } 680 return (error); 681 } 682 #ifdef INET6 683 if (addr->sa_family != AF_INET) { 684 /* must be a v4 address! */ 685 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 686 sctp_m_freem(m); 687 if (control) { 688 sctp_m_freem(control); 689 control = NULL; 690 } 691 error = EDESTADDRREQ; 692 return EDESTADDRREQ; 693 } 694 #endif /* INET6 */ 695 connected_type: 696 /* now what about control */ 697 if (control) { 698 if (inp->control) { 699 SCTP_PRINTF("huh? control set?\n"); 700 sctp_m_freem(inp->control); 701 inp->control = NULL; 702 } 703 inp->control = control; 704 } 705 /* Place the data */ 706 if (inp->pkt) { 707 SCTP_BUF_NEXT(inp->pkt_last) = m; 708 inp->pkt_last = m; 709 } else { 710 inp->pkt_last = inp->pkt = m; 711 } 712 if ( 713 /* FreeBSD uses a flag passed */ 714 ((flags & PRUS_MORETOCOME) == 0) 715 ) { 716 /* 717 * note with the current version this code will only be used 718 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for 719 * re-defining sosend to use the sctp_sosend. One can 720 * optionally switch back to this code (by changing back the 721 * definitions) but this is not advisable. This code is used 722 * by FreeBSD when sending a file with sendfile() though. 
723 */ 724 int ret; 725 726 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); 727 inp->pkt = NULL; 728 inp->control = NULL; 729 return (ret); 730 } else { 731 return (0); 732 } 733 } 734 735 int 736 sctp_disconnect(struct socket *so) 737 { 738 struct sctp_inpcb *inp; 739 740 inp = (struct sctp_inpcb *)so->so_pcb; 741 if (inp == NULL) { 742 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 743 return (ENOTCONN); 744 } 745 SCTP_INP_RLOCK(inp); 746 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 747 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 748 if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) { 749 /* No connection */ 750 SCTP_INP_RUNLOCK(inp); 751 return (0); 752 } else { 753 struct sctp_association *asoc; 754 struct sctp_tcb *stcb; 755 756 stcb = LIST_FIRST(&inp->sctp_asoc_list); 757 if (stcb == NULL) { 758 SCTP_INP_RUNLOCK(inp); 759 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 760 return (EINVAL); 761 } 762 SCTP_TCB_LOCK(stcb); 763 asoc = &stcb->asoc; 764 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 765 /* We are about to be freed, out of here */ 766 SCTP_TCB_UNLOCK(stcb); 767 SCTP_INP_RUNLOCK(inp); 768 return (0); 769 } 770 if (((so->so_options & SO_LINGER) && 771 (so->so_linger == 0)) || 772 (so->so_rcv.sb_cc > 0)) { 773 if (SCTP_GET_STATE(asoc) != 774 SCTP_STATE_COOKIE_WAIT) { 775 /* Left with Data unread */ 776 struct mbuf *err; 777 778 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 779 if (err) { 780 /* 781 * Fill in the user 782 * initiated abort 783 */ 784 struct sctp_paramhdr *ph; 785 786 ph = mtod(err, struct sctp_paramhdr *); 787 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr); 788 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 789 ph->param_length = htons(SCTP_BUF_LEN(err)); 790 } 791 #if defined(SCTP_PANIC_ON_ABORT) 792 panic("disconnect does an abort"); 793 #endif 794 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED); 795 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 796 } 797 SCTP_INP_RUNLOCK(inp); 798 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 799 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 800 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 801 } 802 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); 803 /* No unlock tcb assoc is gone */ 804 return (0); 805 } 806 if (TAILQ_EMPTY(&asoc->send_queue) && 807 TAILQ_EMPTY(&asoc->sent_queue) && 808 (asoc->stream_queue_cnt == 0)) { 809 /* there is nothing queued to send, so done */ 810 if (asoc->locked_on_sending) { 811 goto abort_anyway; 812 } 813 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 814 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 815 /* only send SHUTDOWN 1st time thru */ 816 sctp_stop_timers_for_shutdown(stcb); 817 sctp_send_shutdown(stcb, 818 stcb->asoc.primary_destination); 819 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 820 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 821 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 822 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 823 } 824 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 825 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 826 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 827 stcb->sctp_ep, stcb, 828 asoc->primary_destination); 829 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 830 stcb->sctp_ep, stcb, 831 asoc->primary_destination); 832 } 833 } else { 834 /* 835 * we still got (or just got) data to send, 836 * so set 
SHUTDOWN_PENDING 837 */ 838 /* 839 * XXX sockets draft says that SCTP_EOF 840 * should be sent with no data. currently, 841 * we will allow user data to be sent first 842 * and move to SHUTDOWN-PENDING 843 */ 844 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 845 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 846 asoc->primary_destination); 847 if (asoc->locked_on_sending) { 848 /* Locked to send out the data */ 849 struct sctp_stream_queue_pending *sp; 850 851 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 852 if (sp == NULL) { 853 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 854 asoc->locked_on_sending->stream_no); 855 } else { 856 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 857 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 858 } 859 } 860 if (TAILQ_EMPTY(&asoc->send_queue) && 861 TAILQ_EMPTY(&asoc->sent_queue) && 862 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 863 struct mbuf *op_err; 864 865 abort_anyway: 866 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 867 0, M_DONTWAIT, 1, MT_DATA); 868 if (op_err) { 869 /* 870 * Fill in the user 871 * initiated abort 872 */ 873 struct sctp_paramhdr *ph; 874 uint32_t *ippp; 875 876 SCTP_BUF_LEN(op_err) = 877 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); 878 ph = mtod(op_err, 879 struct sctp_paramhdr *); 880 ph->param_type = htons( 881 SCTP_CAUSE_USER_INITIATED_ABT); 882 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 883 ippp = (uint32_t *) (ph + 1); 884 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4); 885 } 886 #if defined(SCTP_PANIC_ON_ABORT) 887 panic("disconnect does an abort"); 888 #endif 889 890 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; 891 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 892 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 893 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 894 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 895 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 896 } 897 SCTP_INP_RUNLOCK(inp); 898 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); 899 return (0); 900 } else { 901 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 902 } 903 } 904 SCTP_TCB_UNLOCK(stcb); 905 SCTP_INP_RUNLOCK(inp); 906 return (0); 907 } 908 /* not reached */ 909 } else { 910 /* UDP model does not support this */ 911 SCTP_INP_RUNLOCK(inp); 912 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 913 return EOPNOTSUPP; 914 } 915 } 916 917 int 918 sctp_flush(struct socket *so, int how) 919 { 920 /* 921 * We will just clear out the values and let subsequent close clear 922 * out the data, if any. Note if the user did a shutdown(SHUT_RD) 923 * they will not be able to read the data, the socket will block 924 * that from happening. 
925 */ 926 if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) { 927 /* 928 * First make sure the sb will be happy, we don't use these 929 * except maybe the count 930 */ 931 so->so_rcv.sb_cc = 0; 932 so->so_rcv.sb_mbcnt = 0; 933 so->so_rcv.sb_mb = NULL; 934 } 935 if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) { 936 /* 937 * First make sure the sb will be happy, we don't use these 938 * except maybe the count 939 */ 940 so->so_snd.sb_cc = 0; 941 so->so_snd.sb_mbcnt = 0; 942 so->so_snd.sb_mb = NULL; 943 944 } 945 return (0); 946 } 947 948 int 949 sctp_shutdown(struct socket *so) 950 { 951 struct sctp_inpcb *inp; 952 953 inp = (struct sctp_inpcb *)so->so_pcb; 954 if (inp == 0) { 955 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 956 return EINVAL; 957 } 958 SCTP_INP_RLOCK(inp); 959 /* For UDP model this is a invalid call */ 960 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 961 /* Restore the flags that the soshutdown took away. */ 962 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 963 /* This proc will wakeup for read and do nothing (I hope) */ 964 SCTP_INP_RUNLOCK(inp); 965 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 966 return (EOPNOTSUPP); 967 } 968 /* 969 * Ok if we reach here its the TCP model and it is either a SHUT_WR 970 * or SHUT_RDWR. This means we put the shutdown flag against it. 971 */ 972 { 973 struct sctp_tcb *stcb; 974 struct sctp_association *asoc; 975 976 socantsendmore(so); 977 978 stcb = LIST_FIRST(&inp->sctp_asoc_list); 979 if (stcb == NULL) { 980 /* 981 * Ok we hit the case that the shutdown call was 982 * made after an abort or something. Nothing to do 983 * now. 984 */ 985 SCTP_INP_RUNLOCK(inp); 986 return (0); 987 } 988 SCTP_TCB_LOCK(stcb); 989 asoc = &stcb->asoc; 990 if (TAILQ_EMPTY(&asoc->send_queue) && 991 TAILQ_EMPTY(&asoc->sent_queue) && 992 (asoc->stream_queue_cnt == 0)) { 993 if (asoc->locked_on_sending) { 994 goto abort_anyway; 995 } 996 /* there is nothing queued to send, so I'm done... 
*/ 997 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 998 /* only send SHUTDOWN the first time through */ 999 sctp_stop_timers_for_shutdown(stcb); 1000 sctp_send_shutdown(stcb, 1001 stcb->asoc.primary_destination); 1002 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 1003 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1004 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1005 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1006 } 1007 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1008 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1009 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1010 stcb->sctp_ep, stcb, 1011 asoc->primary_destination); 1012 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1013 stcb->sctp_ep, stcb, 1014 asoc->primary_destination); 1015 } 1016 } else { 1017 /* 1018 * we still got (or just got) data to send, so set 1019 * SHUTDOWN_PENDING 1020 */ 1021 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1022 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1023 asoc->primary_destination); 1024 1025 if (asoc->locked_on_sending) { 1026 /* Locked to send out the data */ 1027 struct sctp_stream_queue_pending *sp; 1028 1029 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1030 if (sp == NULL) { 1031 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 1032 asoc->locked_on_sending->stream_no); 1033 } else { 1034 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 1035 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1036 } 1037 } 1038 } 1039 if (TAILQ_EMPTY(&asoc->send_queue) && 1040 TAILQ_EMPTY(&asoc->sent_queue) && 1041 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1042 struct mbuf *op_err; 1043 1044 abort_anyway: 1045 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1046 0, M_DONTWAIT, 1, MT_DATA); 1047 if (op_err) { 1048 /* Fill in the user initiated abort */ 1049 struct sctp_paramhdr *ph; 1050 uint32_t *ippp; 1051 1052 SCTP_BUF_LEN(op_err) = 1053 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 1054 ph = mtod(op_err, 1055 struct sctp_paramhdr *); 1056 ph->param_type = htons( 1057 SCTP_CAUSE_USER_INITIATED_ABT); 1058 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1059 ippp = (uint32_t *) (ph + 1); 1060 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1061 } 1062 #if defined(SCTP_PANIC_ON_ABORT) 1063 panic("shutdown does an abort"); 1064 #endif 1065 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; 1066 sctp_abort_an_association(stcb->sctp_ep, stcb, 1067 SCTP_RESPONSE_TO_USER_REQ, 1068 op_err, SCTP_SO_LOCKED); 1069 goto skip_unlock; 1070 } else { 1071 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 1072 } 1073 } 1074 SCTP_TCB_UNLOCK(stcb); 1075 } 1076 skip_unlock: 1077 SCTP_INP_RUNLOCK(inp); 1078 return 0; 1079 } 1080 1081 /* 1082 * copies a "user" presentable address and removes embedded scope, etc. 
1083 * returns 0 on success, 1 on error 1084 */ 1085 static uint32_t 1086 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 1087 { 1088 #ifdef INET6 1089 struct sockaddr_in6 lsa6; 1090 1091 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 1092 &lsa6); 1093 #endif 1094 memcpy(ss, sa, sa->sa_len); 1095 return (0); 1096 } 1097 1098 1099 1100 /* 1101 * NOTE: assumes addr lock is held 1102 */ 1103 static size_t 1104 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, 1105 struct sctp_tcb *stcb, 1106 size_t limit, 1107 struct sockaddr_storage *sas, 1108 uint32_t vrf_id) 1109 { 1110 struct sctp_ifn *sctp_ifn; 1111 struct sctp_ifa *sctp_ifa; 1112 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1113 size_t actual; 1114 int ipv4_addr_legal, ipv6_addr_legal; 1115 struct sctp_vrf *vrf; 1116 1117 actual = 0; 1118 if (limit <= 0) 1119 return (actual); 1120 1121 if (stcb) { 1122 /* Turn on all the appropriate scope */ 1123 loopback_scope = stcb->asoc.loopback_scope; 1124 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1125 local_scope = stcb->asoc.local_scope; 1126 site_scope = stcb->asoc.site_scope; 1127 } else { 1128 /* Turn on ALL scope, since we look at the EP */ 1129 loopback_scope = ipv4_local_scope = local_scope = 1130 site_scope = 1; 1131 } 1132 ipv4_addr_legal = ipv6_addr_legal = 0; 1133 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1134 ipv6_addr_legal = 1; 1135 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1136 ipv4_addr_legal = 1; 1137 } 1138 } else { 1139 ipv4_addr_legal = 1; 1140 } 1141 vrf = sctp_find_vrf(vrf_id); 1142 if (vrf == NULL) { 1143 return (0); 1144 } 1145 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1146 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1147 if ((loopback_scope == 0) && 1148 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1149 /* Skip loopback if loopback_scope not set */ 1150 continue; 1151 } 1152 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1153 if (stcb) { 1154 /* 1155 * For the BOUND-ALL case, the list 1156 * associated with a TCB is Always 1157 * considered a reverse list.. i.e. 1158 * it lists addresses that are NOT 1159 * part of the association. If this 1160 * is one of those we must skip it. 
1161 */ 1162 if (sctp_is_addr_restricted(stcb, 1163 sctp_ifa)) { 1164 continue; 1165 } 1166 } 1167 switch (sctp_ifa->address.sa.sa_family) { 1168 case AF_INET: 1169 if (ipv4_addr_legal) { 1170 struct sockaddr_in *sin; 1171 1172 sin = (struct sockaddr_in *)&sctp_ifa->address.sa; 1173 if (sin->sin_addr.s_addr == 0) { 1174 /* 1175 * we skip 1176 * unspecifed 1177 * addresses 1178 */ 1179 continue; 1180 } 1181 if ((ipv4_local_scope == 0) && 1182 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1183 continue; 1184 } 1185 #ifdef INET6 1186 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 1187 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); 1188 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1189 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); 1190 actual += sizeof(struct sockaddr_in6); 1191 } else { 1192 #endif 1193 memcpy(sas, sin, sizeof(*sin)); 1194 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; 1195 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); 1196 actual += sizeof(*sin); 1197 #ifdef INET6 1198 } 1199 #endif 1200 if (actual >= limit) { 1201 return (actual); 1202 } 1203 } else { 1204 continue; 1205 } 1206 break; 1207 #ifdef INET6 1208 case AF_INET6: 1209 if (ipv6_addr_legal) { 1210 struct sockaddr_in6 *sin6; 1211 1212 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 1213 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1214 /* 1215 * we skip 1216 * unspecifed 1217 * addresses 1218 */ 1219 continue; 1220 } 1221 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1222 if (local_scope == 0) 1223 continue; 1224 if (sin6->sin6_scope_id == 0) { 1225 if (sa6_recoverscope(sin6) != 0) 1226 /* 1227 * 1228 * bad 1229 * 1230 * li 1231 * nk 1232 * 1233 * loc 1234 * al 1235 * 1236 * add 1237 * re 1238 * ss 1239 * */ 1240 continue; 1241 } 1242 } 1243 if ((site_scope == 0) && 1244 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1245 continue; 1246 } 1247 memcpy(sas, sin6, sizeof(*sin6)); 1248 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1249 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); 1250 actual += sizeof(*sin6); 1251 if (actual >= limit) { 1252 return (actual); 1253 } 1254 } else { 1255 continue; 1256 } 1257 break; 1258 #endif 1259 default: 1260 /* TSNH */ 1261 break; 1262 } 1263 } 1264 } 1265 } else { 1266 struct sctp_laddr *laddr; 1267 1268 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1269 if (stcb) { 1270 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 1271 continue; 1272 } 1273 } 1274 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) 1275 continue; 1276 1277 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1278 sas = (struct sockaddr_storage *)((caddr_t)sas + 1279 laddr->ifa->address.sa.sa_len); 1280 actual += laddr->ifa->address.sa.sa_len; 1281 if (actual >= limit) { 1282 return (actual); 1283 } 1284 } 1285 } 1286 return (actual); 1287 } 1288 1289 static size_t 1290 sctp_fill_up_addresses(struct sctp_inpcb *inp, 1291 struct sctp_tcb *stcb, 1292 size_t limit, 1293 struct sockaddr_storage *sas) 1294 { 1295 size_t size = 0; 1296 1297 SCTP_IPI_ADDR_RLOCK(); 1298 /* fill up addresses for the endpoint's default vrf */ 1299 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, 1300 inp->def_vrf_id); 1301 SCTP_IPI_ADDR_RUNLOCK(); 1302 return (size); 1303 } 1304 1305 /* 1306 * NOTE: assumes addr lock is held 1307 */ 1308 static int 1309 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) 1310 { 1311 int cnt = 0; 1312 struct sctp_vrf *vrf = 
NULL; 1313 1314 /* 1315 * In both sub-set bound an bound_all cases we return the MAXIMUM 1316 * number of addresses that you COULD get. In reality the sub-set 1317 * bound may have an exclusion list for a given TCB OR in the 1318 * bound-all case a TCB may NOT include the loopback or other 1319 * addresses as well. 1320 */ 1321 vrf = sctp_find_vrf(vrf_id); 1322 if (vrf == NULL) { 1323 return (0); 1324 } 1325 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1326 struct sctp_ifn *sctp_ifn; 1327 struct sctp_ifa *sctp_ifa; 1328 1329 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1330 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1331 /* Count them if they are the right type */ 1332 if (sctp_ifa->address.sa.sa_family == AF_INET) { 1333 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1334 cnt += sizeof(struct sockaddr_in6); 1335 else 1336 cnt += sizeof(struct sockaddr_in); 1337 1338 } else if (sctp_ifa->address.sa.sa_family == AF_INET6) 1339 cnt += sizeof(struct sockaddr_in6); 1340 } 1341 } 1342 } else { 1343 struct sctp_laddr *laddr; 1344 1345 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1346 if (laddr->ifa->address.sa.sa_family == AF_INET) { 1347 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1348 cnt += sizeof(struct sockaddr_in6); 1349 else 1350 cnt += sizeof(struct sockaddr_in); 1351 1352 } else if (laddr->ifa->address.sa.sa_family == AF_INET6) 1353 cnt += sizeof(struct sockaddr_in6); 1354 } 1355 } 1356 return (cnt); 1357 } 1358 1359 static int 1360 sctp_count_max_addresses(struct sctp_inpcb *inp) 1361 { 1362 int cnt = 0; 1363 1364 SCTP_IPI_ADDR_RLOCK(); 1365 /* count addresses for the endpoint's default VRF */ 1366 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id); 1367 SCTP_IPI_ADDR_RUNLOCK(); 1368 return (cnt); 1369 } 1370 1371 static int 1372 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, 1373 size_t optsize, void *p, int delay) 1374 { 1375 int error = 0; 1376 int creat_lock_on = 0; 1377 struct sctp_tcb *stcb = NULL; 1378 struct sockaddr *sa; 1379 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; 1380 int added = 0; 1381 uint32_t vrf_id; 1382 int bad_addresses = 0; 1383 sctp_assoc_t *a_id; 1384 1385 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n"); 1386 1387 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1388 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 1389 /* We are already connected AND the TCP model */ 1390 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 1391 return (EADDRINUSE); 1392 } 1393 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 1394 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1395 return (EINVAL); 1396 } 1397 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 1398 SCTP_INP_RLOCK(inp); 1399 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1400 SCTP_INP_RUNLOCK(inp); 1401 } 1402 if (stcb) { 1403 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1404 return (EALREADY); 1405 } 1406 SCTP_INP_INCR_REF(inp); 1407 SCTP_ASOC_CREATE_LOCK(inp); 1408 creat_lock_on = 1; 1409 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1410 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1411 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 1412 error = EFAULT; 1413 goto out_now; 1414 } 1415 totaddrp = (int *)optval; 1416 totaddr = *totaddrp; 1417 sa = (struct sockaddr *)(totaddrp + 1); 1418 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses); 1419 if 
((stcb != NULL) || bad_addresses) { 1420 /* Already have or am bring up an association */ 1421 SCTP_ASOC_CREATE_UNLOCK(inp); 1422 creat_lock_on = 0; 1423 if (stcb) 1424 SCTP_TCB_UNLOCK(stcb); 1425 if (bad_addresses == 0) { 1426 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1427 error = EALREADY; 1428 } 1429 goto out_now; 1430 } 1431 #ifdef INET6 1432 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1433 (num_v6 > 0)) { 1434 error = EINVAL; 1435 goto out_now; 1436 } 1437 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1438 (num_v4 > 0)) { 1439 struct in6pcb *inp6; 1440 1441 inp6 = (struct in6pcb *)inp; 1442 if (SCTP_IPV6_V6ONLY(inp6)) { 1443 /* 1444 * if IPV6_V6ONLY flag, ignore connections destined 1445 * to a v4 addr or v4-mapped addr 1446 */ 1447 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1448 error = EINVAL; 1449 goto out_now; 1450 } 1451 } 1452 #endif /* INET6 */ 1453 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 1454 SCTP_PCB_FLAGS_UNBOUND) { 1455 /* Bind a ephemeral port */ 1456 error = sctp_inpcb_bind(so, NULL, NULL, p); 1457 if (error) { 1458 goto out_now; 1459 } 1460 } 1461 /* FIX ME: do we want to pass in a vrf on the connect call? */ 1462 vrf_id = inp->def_vrf_id; 1463 1464 /* We are GOOD to go */ 1465 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id, 1466 (struct thread *)p 1467 ); 1468 if (stcb == NULL) { 1469 /* Gak! no memory */ 1470 goto out_now; 1471 } 1472 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 1473 /* move to second address */ 1474 if (sa->sa_family == AF_INET) 1475 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); 1476 else 1477 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); 1478 1479 error = 0; 1480 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error); 1481 /* Fill in the return id */ 1482 if (error) { 1483 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12); 1484 goto out_now; 1485 } 1486 a_id = (sctp_assoc_t *) optval; 1487 *a_id = sctp_get_associd(stcb); 1488 1489 /* initialize authentication parameters for the assoc */ 1490 sctp_initialize_auth_params(inp, stcb); 1491 1492 if (delay) { 1493 /* doing delayed connection */ 1494 stcb->asoc.delayed_connection = 1; 1495 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); 1496 } else { 1497 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1498 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 1499 } 1500 SCTP_TCB_UNLOCK(stcb); 1501 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1502 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1503 /* Set the connected flag so we can queue data */ 1504 soisconnecting(so); 1505 } 1506 out_now: 1507 if (creat_lock_on) { 1508 SCTP_ASOC_CREATE_UNLOCK(inp); 1509 } 1510 SCTP_INP_DECR_REF(inp); 1511 return error; 1512 } 1513 1514 #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \ 1515 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\ 1516 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \ 1517 SCTP_INP_RLOCK(inp); \ 1518 stcb = LIST_FIRST(&inp->sctp_asoc_list); \ 1519 if (stcb) { \ 1520 SCTP_TCB_LOCK(stcb); \ 1521 } \ 1522 SCTP_INP_RUNLOCK(inp); \ 1523 } else if (assoc_id != 0) { \ 1524 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ 1525 if (stcb == NULL) { \ 1526 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \ 1527 error = ENOENT; \ 1528 break; \ 1529 } \ 1530 } else { \ 1531 stcb = NULL; \ 1532 } \ 1533 } 1534 1535 1536 #define 
SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ 1537 if (size < sizeof(type)) { \ 1538 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \ 1539 error = EINVAL; \ 1540 break; \ 1541 } else { \ 1542 destp = (type *)srcp; \ 1543 } \ 1544 } 1545 1546 static int 1547 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, 1548 void *p) 1549 { 1550 struct sctp_inpcb *inp = NULL; 1551 int error, val = 0; 1552 struct sctp_tcb *stcb = NULL; 1553 1554 if (optval == NULL) { 1555 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1556 return (EINVAL); 1557 } 1558 inp = (struct sctp_inpcb *)so->so_pcb; 1559 if (inp == 0) { 1560 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1561 return EINVAL; 1562 } 1563 error = 0; 1564 1565 switch (optname) { 1566 case SCTP_NODELAY: 1567 case SCTP_AUTOCLOSE: 1568 case SCTP_EXPLICIT_EOR: 1569 case SCTP_AUTO_ASCONF: 1570 case SCTP_DISABLE_FRAGMENTS: 1571 case SCTP_I_WANT_MAPPED_V4_ADDR: 1572 case SCTP_USE_EXT_RCVINFO: 1573 SCTP_INP_RLOCK(inp); 1574 switch (optname) { 1575 case SCTP_DISABLE_FRAGMENTS: 1576 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1577 break; 1578 case SCTP_I_WANT_MAPPED_V4_ADDR: 1579 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1580 break; 1581 case SCTP_AUTO_ASCONF: 1582 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1583 /* only valid for bound all sockets */ 1584 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1585 } else { 1586 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1587 error = EINVAL; 1588 goto flags_out; 1589 } 1590 break; 1591 case SCTP_EXPLICIT_EOR: 1592 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1593 break; 1594 case SCTP_NODELAY: 1595 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1596 break; 1597 case SCTP_USE_EXT_RCVINFO: 1598 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1599 break; 1600 case SCTP_AUTOCLOSE: 1601 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1602 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1603 else 1604 val = 0; 1605 break; 1606 1607 default: 1608 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1609 error = ENOPROTOOPT; 1610 } /* end switch (sopt->sopt_name) */ 1611 if (optname != SCTP_AUTOCLOSE) { 1612 /* make it an "on/off" value */ 1613 val = (val != 0); 1614 } 1615 if (*optsize < sizeof(val)) { 1616 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1617 error = EINVAL; 1618 } 1619 flags_out: 1620 SCTP_INP_RUNLOCK(inp); 1621 if (error == 0) { 1622 /* return the option value */ 1623 *(int *)optval = val; 1624 *optsize = sizeof(val); 1625 } 1626 break; 1627 case SCTP_GET_PACKET_LOG: 1628 { 1629 #ifdef SCTP_PACKET_LOGGING 1630 uint8_t *target; 1631 int ret; 1632 1633 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); 1634 ret = sctp_copy_out_packet_log(target, (int)*optsize); 1635 *optsize = ret; 1636 #else 1637 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1638 error = EOPNOTSUPP; 1639 #endif 1640 break; 1641 } 1642 case SCTP_PARTIAL_DELIVERY_POINT: 1643 { 1644 uint32_t *value; 1645 1646 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1647 *value = inp->partial_delivery_point; 1648 *optsize = sizeof(uint32_t); 1649 } 1650 break; 1651 case SCTP_FRAGMENT_INTERLEAVE: 1652 { 1653 uint32_t *value; 1654 1655 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1656 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { 1657 if 
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { 1658 *value = SCTP_FRAG_LEVEL_2; 1659 } else { 1660 *value = SCTP_FRAG_LEVEL_1; 1661 } 1662 } else { 1663 *value = SCTP_FRAG_LEVEL_0; 1664 } 1665 *optsize = sizeof(uint32_t); 1666 } 1667 break; 1668 case SCTP_CMT_ON_OFF: 1669 { 1670 struct sctp_assoc_value *av; 1671 1672 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1673 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 1674 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1675 if (stcb) { 1676 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 1677 SCTP_TCB_UNLOCK(stcb); 1678 1679 } else { 1680 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1681 error = ENOTCONN; 1682 } 1683 } else { 1684 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1685 error = ENOPROTOOPT; 1686 } 1687 *optsize = sizeof(*av); 1688 } 1689 break; 1690 /* JRS - Get socket option for pluggable congestion control */ 1691 case SCTP_PLUGGABLE_CC: 1692 { 1693 struct sctp_assoc_value *av; 1694 1695 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1696 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1697 if (stcb) { 1698 av->assoc_value = stcb->asoc.congestion_control_module; 1699 SCTP_TCB_UNLOCK(stcb); 1700 } else { 1701 av->assoc_value = inp->sctp_ep.sctp_default_cc_module; 1702 } 1703 *optsize = sizeof(*av); 1704 } 1705 break; 1706 case SCTP_GET_ADDR_LEN: 1707 { 1708 struct sctp_assoc_value *av; 1709 1710 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1711 error = EINVAL; 1712 #ifdef INET 1713 if (av->assoc_value == AF_INET) { 1714 av->assoc_value = sizeof(struct sockaddr_in); 1715 error = 0; 1716 } 1717 #endif 1718 #ifdef INET6 1719 if (av->assoc_value == AF_INET6) { 1720 av->assoc_value = sizeof(struct sockaddr_in6); 1721 error = 0; 1722 } 1723 #endif 1724 if (error) { 1725 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1726 } 1727 *optsize = sizeof(*av); 1728 } 1729 break; 1730 case SCTP_GET_ASSOC_NUMBER: 1731 { 1732 uint32_t *value, cnt; 1733 1734 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1735 cnt = 0; 1736 SCTP_INP_RLOCK(inp); 1737 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1738 cnt++; 1739 } 1740 SCTP_INP_RUNLOCK(inp); 1741 *value = cnt; 1742 *optsize = sizeof(uint32_t); 1743 } 1744 break; 1745 1746 case SCTP_GET_ASSOC_ID_LIST: 1747 { 1748 struct sctp_assoc_ids *ids; 1749 unsigned int at, limit; 1750 1751 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 1752 at = 0; 1753 limit = *optsize / sizeof(sctp_assoc_t); 1754 SCTP_INP_RLOCK(inp); 1755 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1756 if (at < limit) { 1757 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); 1758 } else { 1759 error = EINVAL; 1760 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1761 break; 1762 } 1763 } 1764 SCTP_INP_RUNLOCK(inp); 1765 *optsize = at * sizeof(sctp_assoc_t); 1766 } 1767 break; 1768 case SCTP_CONTEXT: 1769 { 1770 struct sctp_assoc_value *av; 1771 1772 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1773 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1774 1775 if (stcb) { 1776 av->assoc_value = stcb->asoc.context; 1777 SCTP_TCB_UNLOCK(stcb); 1778 } else { 1779 SCTP_INP_RLOCK(inp); 1780 av->assoc_value = inp->sctp_context; 1781 SCTP_INP_RUNLOCK(inp); 1782 } 1783 *optsize = sizeof(*av); 1784 } 1785 break; 1786 case SCTP_VRF_ID: 1787 { 1788 uint32_t *default_vrfid; 1789 1790 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, 
*optsize); 1791 *default_vrfid = inp->def_vrf_id; 1792 break; 1793 } 1794 case SCTP_GET_ASOC_VRF: 1795 { 1796 struct sctp_assoc_value *id; 1797 1798 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); 1799 SCTP_FIND_STCB(inp, stcb, id->assoc_id); 1800 if (stcb == NULL) { 1801 error = EINVAL; 1802 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1803 break; 1804 } 1805 id->assoc_value = stcb->asoc.vrf_id; 1806 break; 1807 } 1808 case SCTP_GET_VRF_IDS: 1809 { 1810 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1811 error = EOPNOTSUPP; 1812 break; 1813 } 1814 case SCTP_GET_NONCE_VALUES: 1815 { 1816 struct sctp_get_nonce_values *gnv; 1817 1818 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 1819 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 1820 1821 if (stcb) { 1822 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 1823 gnv->gn_local_tag = stcb->asoc.my_vtag; 1824 SCTP_TCB_UNLOCK(stcb); 1825 } else { 1826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1827 error = ENOTCONN; 1828 } 1829 *optsize = sizeof(*gnv); 1830 } 1831 break; 1832 case SCTP_DELAYED_SACK: 1833 { 1834 struct sctp_sack_info *sack; 1835 1836 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); 1837 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 1838 if (stcb) { 1839 sack->sack_delay = stcb->asoc.delayed_ack; 1840 sack->sack_freq = stcb->asoc.sack_freq; 1841 SCTP_TCB_UNLOCK(stcb); 1842 } else { 1843 SCTP_INP_RLOCK(inp); 1844 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1845 sack->sack_freq = inp->sctp_ep.sctp_sack_freq; 1846 SCTP_INP_RUNLOCK(inp); 1847 } 1848 *optsize = sizeof(*sack); 1849 } 1850 break; 1851 1852 case SCTP_GET_SNDBUF_USE: 1853 { 1854 struct sctp_sockstat *ss; 1855 1856 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 1857 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 1858 1859 if (stcb) { 1860 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 1861 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 1862 stcb->asoc.size_on_all_streams); 1863 SCTP_TCB_UNLOCK(stcb); 1864 } else { 1865 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1866 error = ENOTCONN; 1867 } 1868 *optsize = sizeof(struct sctp_sockstat); 1869 } 1870 break; 1871 case SCTP_MAX_BURST: 1872 { 1873 uint8_t *value; 1874 1875 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); 1876 1877 SCTP_INP_RLOCK(inp); 1878 *value = inp->sctp_ep.max_burst; 1879 SCTP_INP_RUNLOCK(inp); 1880 *optsize = sizeof(uint8_t); 1881 } 1882 break; 1883 case SCTP_MAXSEG: 1884 { 1885 struct sctp_assoc_value *av; 1886 int ovh; 1887 1888 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1889 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1890 1891 if (stcb) { 1892 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 1893 SCTP_TCB_UNLOCK(stcb); 1894 } else { 1895 SCTP_INP_RLOCK(inp); 1896 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1897 ovh = SCTP_MED_OVERHEAD; 1898 } else { 1899 ovh = SCTP_MED_V4_OVERHEAD; 1900 } 1901 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) 1902 av->assoc_value = 0; 1903 else 1904 av->assoc_value = inp->sctp_frag_point - ovh; 1905 SCTP_INP_RUNLOCK(inp); 1906 } 1907 *optsize = sizeof(struct sctp_assoc_value); 1908 } 1909 break; 1910 case SCTP_GET_STAT_LOG: 1911 error = sctp_fill_stat_log(optval, optsize); 1912 break; 1913 case SCTP_EVENTS: 1914 { 1915 struct sctp_event_subscribe *events; 1916 1917 SCTP_CHECK_AND_CAST(events, 
optval, struct sctp_event_subscribe, *optsize); 1918 memset(events, 0, sizeof(*events)); 1919 SCTP_INP_RLOCK(inp); 1920 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 1921 events->sctp_data_io_event = 1; 1922 1923 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 1924 events->sctp_association_event = 1; 1925 1926 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 1927 events->sctp_address_event = 1; 1928 1929 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 1930 events->sctp_send_failure_event = 1; 1931 1932 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 1933 events->sctp_peer_error_event = 1; 1934 1935 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 1936 events->sctp_shutdown_event = 1; 1937 1938 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 1939 events->sctp_partial_delivery_event = 1; 1940 1941 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 1942 events->sctp_adaptation_layer_event = 1; 1943 1944 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 1945 events->sctp_authentication_event = 1; 1946 1947 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 1948 events->sctp_stream_reset_events = 1; 1949 SCTP_INP_RUNLOCK(inp); 1950 *optsize = sizeof(struct sctp_event_subscribe); 1951 } 1952 break; 1953 1954 case SCTP_ADAPTATION_LAYER: 1955 { 1956 uint32_t *value; 1957 1958 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1959 1960 SCTP_INP_RLOCK(inp); 1961 *value = inp->sctp_ep.adaptation_layer_indicator; 1962 SCTP_INP_RUNLOCK(inp); 1963 *optsize = sizeof(uint32_t); 1964 } 1965 break; 1966 case SCTP_SET_INITIAL_DBG_SEQ: 1967 { 1968 uint32_t *value; 1969 1970 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1971 SCTP_INP_RLOCK(inp); 1972 *value = inp->sctp_ep.initial_sequence_debug; 1973 SCTP_INP_RUNLOCK(inp); 1974 *optsize = sizeof(uint32_t); 1975 } 1976 break; 1977 case SCTP_GET_LOCAL_ADDR_SIZE: 1978 { 1979 uint32_t *value; 1980 1981 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1982 SCTP_INP_RLOCK(inp); 1983 *value = sctp_count_max_addresses(inp); 1984 SCTP_INP_RUNLOCK(inp); 1985 *optsize = sizeof(uint32_t); 1986 } 1987 break; 1988 case SCTP_GET_REMOTE_ADDR_SIZE: 1989 { 1990 uint32_t *value; 1991 size_t size; 1992 struct sctp_nets *net; 1993 1994 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1995 /* FIXME MT: change to sctp_assoc_value? */ 1996 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 1997 1998 if (stcb) { 1999 size = 0; 2000 /* Count the sizes */ 2001 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2002 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2003 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2004 size += sizeof(struct sockaddr_in6); 2005 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2006 size += sizeof(struct sockaddr_in); 2007 } else { 2008 /* huh */ 2009 break; 2010 } 2011 } 2012 SCTP_TCB_UNLOCK(stcb); 2013 *value = (uint32_t) size; 2014 } else { 2015 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2016 error = ENOTCONN; 2017 } 2018 *optsize = sizeof(uint32_t); 2019 } 2020 break; 2021 case SCTP_GET_PEER_ADDRESSES: 2022 /* 2023 * Get the address information, an array is passed in to 2024 * fill up we pack it. 
2025 */ 2026 { 2027 size_t cpsz, left; 2028 struct sockaddr_storage *sas; 2029 struct sctp_nets *net; 2030 struct sctp_getaddresses *saddr; 2031 2032 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2033 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2034 2035 if (stcb) { 2036 left = (*optsize) - sizeof(struct sctp_getaddresses); 2037 *optsize = sizeof(struct sctp_getaddresses); 2038 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2039 2040 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2041 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2042 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2043 cpsz = sizeof(struct sockaddr_in6); 2044 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2045 cpsz = sizeof(struct sockaddr_in); 2046 } else { 2047 /* huh */ 2048 break; 2049 } 2050 if (left < cpsz) { 2051 /* not enough room. */ 2052 break; 2053 } 2054 #ifdef INET6 2055 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 2056 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 2057 /* Must map the address */ 2058 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 2059 (struct sockaddr_in6 *)sas); 2060 } else { 2061 #endif 2062 memcpy(sas, &net->ro._l_addr, cpsz); 2063 #ifdef INET6 2064 } 2065 #endif 2066 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 2067 2068 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 2069 left -= cpsz; 2070 *optsize += cpsz; 2071 } 2072 SCTP_TCB_UNLOCK(stcb); 2073 } else { 2074 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2075 error = ENOENT; 2076 } 2077 } 2078 break; 2079 case SCTP_GET_LOCAL_ADDRESSES: 2080 { 2081 size_t limit, actual; 2082 struct sockaddr_storage *sas; 2083 struct sctp_getaddresses *saddr; 2084 2085 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2086 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2087 2088 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2089 limit = *optsize - sizeof(sctp_assoc_t); 2090 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2091 if (stcb) { 2092 SCTP_TCB_UNLOCK(stcb); 2093 } 2094 *optsize = sizeof(struct sockaddr_storage) + actual; 2095 } 2096 break; 2097 case SCTP_PEER_ADDR_PARAMS: 2098 { 2099 struct sctp_paddrparams *paddrp; 2100 struct sctp_nets *net; 2101 2102 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2103 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2104 2105 net = NULL; 2106 if (stcb) { 2107 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2108 } else { 2109 /* 2110 * We increment here since 2111 * sctp_findassociation_ep_addr() wil do a 2112 * decrement if it finds the stcb as long as 2113 * the locked tcb (last argument) is NOT a 2114 * TCB.. aka NULL. 
2115 */ 2116 SCTP_INP_INCR_REF(inp); 2117 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2118 if (stcb == NULL) { 2119 SCTP_INP_DECR_REF(inp); 2120 } 2121 } 2122 if (stcb && (net == NULL)) { 2123 struct sockaddr *sa; 2124 2125 sa = (struct sockaddr *)&paddrp->spp_address; 2126 if (sa->sa_family == AF_INET) { 2127 struct sockaddr_in *sin; 2128 2129 sin = (struct sockaddr_in *)sa; 2130 if (sin->sin_addr.s_addr) { 2131 error = EINVAL; 2132 SCTP_TCB_UNLOCK(stcb); 2133 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2134 break; 2135 } 2136 } else if (sa->sa_family == AF_INET6) { 2137 struct sockaddr_in6 *sin6; 2138 2139 sin6 = (struct sockaddr_in6 *)sa; 2140 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2141 error = EINVAL; 2142 SCTP_TCB_UNLOCK(stcb); 2143 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2144 break; 2145 } 2146 } else { 2147 error = EAFNOSUPPORT; 2148 SCTP_TCB_UNLOCK(stcb); 2149 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2150 break; 2151 } 2152 } 2153 if (stcb) { 2154 /* Applys to the specific association */ 2155 paddrp->spp_flags = 0; 2156 if (net) { 2157 int ovh; 2158 2159 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2160 ovh = SCTP_MED_OVERHEAD; 2161 } else { 2162 ovh = SCTP_MED_V4_OVERHEAD; 2163 } 2164 2165 2166 paddrp->spp_pathmaxrxt = net->failure_threshold; 2167 paddrp->spp_pathmtu = net->mtu - ovh; 2168 /* get flags for HB */ 2169 if (net->dest_state & SCTP_ADDR_NOHB) 2170 paddrp->spp_flags |= SPP_HB_DISABLE; 2171 else 2172 paddrp->spp_flags |= SPP_HB_ENABLE; 2173 /* get flags for PMTU */ 2174 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2175 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2176 } else { 2177 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2178 } 2179 #ifdef INET 2180 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2181 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2182 paddrp->spp_flags |= SPP_IPV4_TOS; 2183 } 2184 #endif 2185 #ifdef INET6 2186 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2187 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2188 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2189 } 2190 #endif 2191 } else { 2192 /* 2193 * No destination so return default 2194 * value 2195 */ 2196 int cnt = 0; 2197 2198 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2199 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2200 #ifdef INET 2201 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2202 paddrp->spp_flags |= SPP_IPV4_TOS; 2203 #endif 2204 #ifdef INET6 2205 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2206 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2207 #endif 2208 /* default settings should be these */ 2209 if (stcb->asoc.hb_is_disabled == 0) { 2210 paddrp->spp_flags |= SPP_HB_ENABLE; 2211 } else { 2212 paddrp->spp_flags |= SPP_HB_DISABLE; 2213 } 2214 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2215 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2216 cnt++; 2217 } 2218 } 2219 if (cnt) { 2220 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2221 } 2222 } 2223 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2224 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2225 SCTP_TCB_UNLOCK(stcb); 2226 } else { 2227 /* Use endpoint defaults */ 2228 SCTP_INP_RLOCK(inp); 2229 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2230 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2231 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2232 /* get 
inp's default */ 2233 #ifdef INET 2234 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2235 paddrp->spp_flags |= SPP_IPV4_TOS; 2236 #endif 2237 #ifdef INET6 2238 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2239 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2240 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2241 } 2242 #endif 2243 /* can't return this */ 2244 paddrp->spp_pathmtu = 0; 2245 2246 /* default behavior, no stcb */ 2247 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2248 2249 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2250 paddrp->spp_flags |= SPP_HB_ENABLE; 2251 } else { 2252 paddrp->spp_flags |= SPP_HB_DISABLE; 2253 } 2254 SCTP_INP_RUNLOCK(inp); 2255 } 2256 *optsize = sizeof(struct sctp_paddrparams); 2257 } 2258 break; 2259 case SCTP_GET_PEER_ADDR_INFO: 2260 { 2261 struct sctp_paddrinfo *paddri; 2262 struct sctp_nets *net; 2263 2264 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2265 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2266 2267 net = NULL; 2268 if (stcb) { 2269 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2270 } else { 2271 /* 2272 * We increment here since 2273 * sctp_findassociation_ep_addr() wil do a 2274 * decrement if it finds the stcb as long as 2275 * the locked tcb (last argument) is NOT a 2276 * TCB.. aka NULL. 2277 */ 2278 SCTP_INP_INCR_REF(inp); 2279 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2280 if (stcb == NULL) { 2281 SCTP_INP_DECR_REF(inp); 2282 } 2283 } 2284 2285 if ((stcb) && (net)) { 2286 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2287 paddri->spinfo_cwnd = net->cwnd; 2288 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2289 paddri->spinfo_rto = net->RTO; 2290 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2291 SCTP_TCB_UNLOCK(stcb); 2292 } else { 2293 if (stcb) { 2294 SCTP_TCB_UNLOCK(stcb); 2295 } 2296 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2297 error = ENOENT; 2298 } 2299 *optsize = sizeof(struct sctp_paddrinfo); 2300 } 2301 break; 2302 case SCTP_PCB_STATUS: 2303 { 2304 struct sctp_pcbinfo *spcb; 2305 2306 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2307 sctp_fill_pcbinfo(spcb); 2308 *optsize = sizeof(struct sctp_pcbinfo); 2309 } 2310 break; 2311 2312 case SCTP_STATUS: 2313 { 2314 struct sctp_nets *net; 2315 struct sctp_status *sstat; 2316 2317 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2318 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2319 2320 if (stcb == NULL) { 2321 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2322 error = EINVAL; 2323 break; 2324 } 2325 /* 2326 * I think passing the state is fine since 2327 * sctp_constants.h will be available to the user 2328 * land. 2329 */ 2330 sstat->sstat_state = stcb->asoc.state; 2331 sstat->sstat_assoc_id = sctp_get_associd(stcb); 2332 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2333 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2334 /* 2335 * We can't include chunks that have been passed to 2336 * the socket layer. Only things in queue. 
2337 */ 2338 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2339 stcb->asoc.cnt_on_all_streams); 2340 2341 2342 sstat->sstat_instrms = stcb->asoc.streamincnt; 2343 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2344 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2345 memcpy(&sstat->sstat_primary.spinfo_address, 2346 &stcb->asoc.primary_destination->ro._l_addr, 2347 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2348 net = stcb->asoc.primary_destination; 2349 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2350 /* 2351 * Again the user can get info from sctp_constants.h 2352 * for what the state of the network is. 2353 */ 2354 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2355 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2356 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2357 sstat->sstat_primary.spinfo_rto = net->RTO; 2358 sstat->sstat_primary.spinfo_mtu = net->mtu; 2359 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2360 SCTP_TCB_UNLOCK(stcb); 2361 *optsize = sizeof(*sstat); 2362 } 2363 break; 2364 case SCTP_RTOINFO: 2365 { 2366 struct sctp_rtoinfo *srto; 2367 2368 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2369 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2370 2371 if (stcb) { 2372 srto->srto_initial = stcb->asoc.initial_rto; 2373 srto->srto_max = stcb->asoc.maxrto; 2374 srto->srto_min = stcb->asoc.minrto; 2375 SCTP_TCB_UNLOCK(stcb); 2376 } else { 2377 SCTP_INP_RLOCK(inp); 2378 srto->srto_initial = inp->sctp_ep.initial_rto; 2379 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2380 srto->srto_min = inp->sctp_ep.sctp_minrto; 2381 SCTP_INP_RUNLOCK(inp); 2382 } 2383 *optsize = sizeof(*srto); 2384 } 2385 break; 2386 case SCTP_ASSOCINFO: 2387 { 2388 struct sctp_assocparams *sasoc; 2389 uint32_t oldval; 2390 2391 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2392 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2393 2394 if (stcb) { 2395 oldval = sasoc->sasoc_cookie_life; 2396 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); 2397 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2398 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2399 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2400 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2401 SCTP_TCB_UNLOCK(stcb); 2402 } else { 2403 SCTP_INP_RLOCK(inp); 2404 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2405 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2406 sasoc->sasoc_number_peer_destinations = 0; 2407 sasoc->sasoc_peer_rwnd = 0; 2408 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2409 SCTP_INP_RUNLOCK(inp); 2410 } 2411 *optsize = sizeof(*sasoc); 2412 } 2413 break; 2414 case SCTP_DEFAULT_SEND_PARAM: 2415 { 2416 struct sctp_sndrcvinfo *s_info; 2417 2418 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2419 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2420 2421 if (stcb) { 2422 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2423 SCTP_TCB_UNLOCK(stcb); 2424 } else { 2425 SCTP_INP_RLOCK(inp); 2426 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2427 SCTP_INP_RUNLOCK(inp); 2428 } 2429 *optsize = sizeof(*s_info); 2430 } 2431 break; 2432 case SCTP_INITMSG: 2433 { 2434 struct sctp_initmsg *sinit; 2435 2436 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2437 SCTP_INP_RLOCK(inp); 2438 
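/*
 * Example: SCTP_STATUS above snapshots the association and its primary path
 * into a struct sctp_status.  A minimal userland sketch of the corresponding
 * query, under the same assumptions as the other examples (socket descriptor
 * "sd", association id "assoc_id", <netinet/sctp.h> declarations):
 *
 *	struct sctp_status st;
 *	socklen_t len = sizeof(st);
 *
 *	memset(&st, 0, sizeof(st));
 *	st.sstat_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
 *		printf("state %d, rwnd %u, unacked %u, streams in/out %u/%u\n",
 *		    st.sstat_state, st.sstat_rwnd, st.sstat_unackdata,
 *		    st.sstat_instrms, st.sstat_outstrms);
 *
 * As coded above, the request fails with EINVAL when no matching TCB exists.
 */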
sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2439 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2440 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2441 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2442 SCTP_INP_RUNLOCK(inp); 2443 *optsize = sizeof(*sinit); 2444 } 2445 break; 2446 case SCTP_PRIMARY_ADDR: 2447 /* we allow a "get" operation on this */ 2448 { 2449 struct sctp_setprim *ssp; 2450 2451 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2452 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2453 2454 if (stcb) { 2455 /* simply copy out the sockaddr_storage... */ 2456 int len; 2457 2458 len = *optsize; 2459 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2460 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2461 2462 memcpy(&ssp->ssp_addr, 2463 &stcb->asoc.primary_destination->ro._l_addr, 2464 len); 2465 SCTP_TCB_UNLOCK(stcb); 2466 } else { 2467 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2468 error = EINVAL; 2469 } 2470 *optsize = sizeof(*ssp); 2471 } 2472 break; 2473 2474 case SCTP_HMAC_IDENT: 2475 { 2476 struct sctp_hmacalgo *shmac; 2477 sctp_hmaclist_t *hmaclist; 2478 uint32_t size; 2479 int i; 2480 2481 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2482 2483 SCTP_INP_RLOCK(inp); 2484 hmaclist = inp->sctp_ep.local_hmacs; 2485 if (hmaclist == NULL) { 2486 /* no HMACs to return */ 2487 *optsize = sizeof(*shmac); 2488 SCTP_INP_RUNLOCK(inp); 2489 break; 2490 } 2491 /* is there room for all of the hmac ids? */ 2492 size = sizeof(*shmac) + (hmaclist->num_algo * 2493 sizeof(shmac->shmac_idents[0])); 2494 if ((size_t)(*optsize) < size) { 2495 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2496 error = EINVAL; 2497 SCTP_INP_RUNLOCK(inp); 2498 break; 2499 } 2500 /* copy in the list */ 2501 for (i = 0; i < hmaclist->num_algo; i++) 2502 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2503 SCTP_INP_RUNLOCK(inp); 2504 *optsize = size; 2505 break; 2506 } 2507 case SCTP_AUTH_ACTIVE_KEY: 2508 { 2509 struct sctp_authkeyid *scact; 2510 2511 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2512 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2513 2514 if (stcb) { 2515 /* get the active key on the assoc */ 2516 scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid; 2517 SCTP_TCB_UNLOCK(stcb); 2518 } else { 2519 /* get the endpoint active key */ 2520 SCTP_INP_RLOCK(inp); 2521 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2522 SCTP_INP_RUNLOCK(inp); 2523 } 2524 *optsize = sizeof(*scact); 2525 break; 2526 } 2527 case SCTP_LOCAL_AUTH_CHUNKS: 2528 { 2529 struct sctp_authchunks *sac; 2530 sctp_auth_chklist_t *chklist = NULL; 2531 size_t size = 0; 2532 2533 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2534 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2535 2536 if (stcb) { 2537 /* get off the assoc */ 2538 chklist = stcb->asoc.local_auth_chunks; 2539 /* is there enough space? */ 2540 size = sctp_auth_get_chklist_size(chklist); 2541 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2542 error = EINVAL; 2543 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2544 } else { 2545 /* copy in the chunks */ 2546 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2547 } 2548 SCTP_TCB_UNLOCK(stcb); 2549 } else { 2550 /* get off the endpoint */ 2551 SCTP_INP_RLOCK(inp); 2552 chklist = inp->sctp_ep.local_auth_chunks; 2553 /* is there enough space? 
*/ 2554 size = sctp_auth_get_chklist_size(chklist); 2555 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2556 error = EINVAL; 2557 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2558 } else { 2559 /* copy in the chunks */ 2560 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2561 } 2562 SCTP_INP_RUNLOCK(inp); 2563 } 2564 *optsize = sizeof(struct sctp_authchunks) + size; 2565 break; 2566 } 2567 case SCTP_PEER_AUTH_CHUNKS: 2568 { 2569 struct sctp_authchunks *sac; 2570 sctp_auth_chklist_t *chklist = NULL; 2571 size_t size = 0; 2572 2573 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2574 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2575 2576 if (stcb) { 2577 /* get off the assoc */ 2578 chklist = stcb->asoc.peer_auth_chunks; 2579 /* is there enough space? */ 2580 size = sctp_auth_get_chklist_size(chklist); 2581 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2582 error = EINVAL; 2583 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2584 } else { 2585 /* copy in the chunks */ 2586 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2587 } 2588 SCTP_TCB_UNLOCK(stcb); 2589 } else { 2590 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2591 error = ENOENT; 2592 } 2593 *optsize = sizeof(struct sctp_authchunks) + size; 2594 break; 2595 } 2596 2597 2598 default: 2599 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2600 error = ENOPROTOOPT; 2601 *optsize = 0; 2602 break; 2603 } /* end switch (sopt->sopt_name) */ 2604 return (error); 2605 } 2606 2607 static int 2608 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2609 void *p) 2610 { 2611 int error, set_opt; 2612 uint32_t *mopt; 2613 struct sctp_tcb *stcb = NULL; 2614 struct sctp_inpcb *inp = NULL; 2615 uint32_t vrf_id; 2616 2617 if (optval == NULL) { 2618 SCTP_PRINTF("optval is NULL\n"); 2619 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2620 return (EINVAL); 2621 } 2622 inp = (struct sctp_inpcb *)so->so_pcb; 2623 if (inp == 0) { 2624 SCTP_PRINTF("inp is NULL?\n"); 2625 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2626 return EINVAL; 2627 } 2628 vrf_id = inp->def_vrf_id; 2629 2630 error = 0; 2631 switch (optname) { 2632 case SCTP_NODELAY: 2633 case SCTP_AUTOCLOSE: 2634 case SCTP_AUTO_ASCONF: 2635 case SCTP_EXPLICIT_EOR: 2636 case SCTP_DISABLE_FRAGMENTS: 2637 case SCTP_USE_EXT_RCVINFO: 2638 case SCTP_I_WANT_MAPPED_V4_ADDR: 2639 /* copy in the option value */ 2640 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2641 set_opt = 0; 2642 if (error) 2643 break; 2644 switch (optname) { 2645 case SCTP_DISABLE_FRAGMENTS: 2646 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2647 break; 2648 case SCTP_AUTO_ASCONF: 2649 /* 2650 * NOTE: we don't really support this flag 2651 */ 2652 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2653 /* only valid for bound all sockets */ 2654 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2655 } else { 2656 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2657 return (EINVAL); 2658 } 2659 break; 2660 case SCTP_EXPLICIT_EOR: 2661 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2662 break; 2663 case SCTP_USE_EXT_RCVINFO: 2664 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2665 break; 2666 case SCTP_I_WANT_MAPPED_V4_ADDR: 2667 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2668 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2669 } else { 2670 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2671 return 
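/*
 * Example: the case labels grouped above (SCTP_NODELAY, SCTP_AUTOCLOSE,
 * SCTP_AUTO_ASCONF, SCTP_EXPLICIT_EOR, SCTP_DISABLE_FRAGMENTS,
 * SCTP_USE_EXT_RCVINFO, SCTP_I_WANT_MAPPED_V4_ADDR) all take a single
 * uint32_t treated as on/off; SCTP_AUTOCLOSE additionally passes the value
 * through SEC_TO_TICKS(), i.e. it is an idle time in seconds that applies to
 * new associations only.  A minimal userland sketch ("sd" is an assumed
 * SCTP socket descriptor):
 *
 *	uint32_t on = 1;
 *	uint32_t secs = 30;	// assumed idle time, in seconds
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *		perror("SCTP_NODELAY");
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs)) < 0)
 *		perror("SCTP_AUTOCLOSE");	// rejected on TCP-model sockets
 */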
(EINVAL); 2672 } 2673 break; 2674 case SCTP_NODELAY: 2675 set_opt = SCTP_PCB_FLAGS_NODELAY; 2676 break; 2677 case SCTP_AUTOCLOSE: 2678 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2679 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2680 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2681 return (EINVAL); 2682 } 2683 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2684 /* 2685 * The value is in ticks. Note this does not effect 2686 * old associations, only new ones. 2687 */ 2688 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2689 break; 2690 } 2691 SCTP_INP_WLOCK(inp); 2692 if (*mopt != 0) { 2693 sctp_feature_on(inp, set_opt); 2694 } else { 2695 sctp_feature_off(inp, set_opt); 2696 } 2697 SCTP_INP_WUNLOCK(inp); 2698 break; 2699 case SCTP_PARTIAL_DELIVERY_POINT: 2700 { 2701 uint32_t *value; 2702 2703 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2704 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2705 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2706 error = EINVAL; 2707 break; 2708 } 2709 inp->partial_delivery_point = *value; 2710 } 2711 break; 2712 case SCTP_FRAGMENT_INTERLEAVE: 2713 /* not yet until we re-write sctp_recvmsg() */ 2714 { 2715 uint32_t *level; 2716 2717 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); 2718 if (*level == SCTP_FRAG_LEVEL_2) { 2719 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2720 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2721 } else if (*level == SCTP_FRAG_LEVEL_1) { 2722 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2723 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2724 } else if (*level == SCTP_FRAG_LEVEL_0) { 2725 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2726 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2727 2728 } else { 2729 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2730 error = EINVAL; 2731 } 2732 } 2733 break; 2734 case SCTP_CMT_ON_OFF: 2735 { 2736 struct sctp_assoc_value *av; 2737 2738 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2739 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 2740 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2741 if (stcb) { 2742 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; 2743 SCTP_TCB_UNLOCK(stcb); 2744 } else { 2745 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2746 error = ENOTCONN; 2747 } 2748 } else { 2749 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2750 error = ENOPROTOOPT; 2751 } 2752 } 2753 break; 2754 /* JRS - Set socket option for pluggable congestion control */ 2755 case SCTP_PLUGGABLE_CC: 2756 { 2757 struct sctp_assoc_value *av; 2758 2759 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2760 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2761 if (stcb) { 2762 switch (av->assoc_value) { 2763 /* 2764 * JRS - Standard TCP congestion 2765 * control 2766 */ 2767 case SCTP_CC_RFC2581: 2768 { 2769 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 2770 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2771 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 2772 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 2773 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2774 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2775 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = 
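/*
 * Example: SCTP_FRAGMENT_INTERLEAVE above maps the three API levels onto the
 * two PCB feature flags: level 0 clears both, level 1 sets FRAG_INTERLEAVE
 * only, level 2 sets FRAG_INTERLEAVE and INTERLEAVE_STRMS; any other value is
 * rejected with EINVAL.  A minimal userland sketch, same assumptions as the
 * other examples ("sd" is an SCTP socket):
 *
 *	uint32_t level = SCTP_FRAG_LEVEL_2;	// full message interleaving
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
 *	    &level, sizeof(level)) < 0)
 *		perror("SCTP_FRAGMENT_INTERLEAVE");
 */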
&sctp_cwnd_update_after_packet_dropped; 2776 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2777 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2778 SCTP_TCB_UNLOCK(stcb); 2779 break; 2780 } 2781 /* 2782 * JRS - High Speed TCP congestion 2783 * control (Floyd) 2784 */ 2785 case SCTP_CC_HSTCP: 2786 { 2787 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP; 2788 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2789 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack; 2790 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr; 2791 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2792 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2793 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2794 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2795 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2796 SCTP_TCB_UNLOCK(stcb); 2797 break; 2798 } 2799 /* JRS - HTCP congestion control */ 2800 case SCTP_CC_HTCP: 2801 { 2802 stcb->asoc.congestion_control_module = SCTP_CC_HTCP; 2803 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param; 2804 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack; 2805 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr; 2806 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout; 2807 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo; 2808 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2809 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2810 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer; 2811 SCTP_TCB_UNLOCK(stcb); 2812 break; 2813 } 2814 /* 2815 * JRS - All other values are 2816 * invalid 2817 */ 2818 default: 2819 { 2820 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2821 error = EINVAL; 2822 SCTP_TCB_UNLOCK(stcb); 2823 break; 2824 } 2825 } 2826 } else { 2827 switch (av->assoc_value) { 2828 case SCTP_CC_RFC2581: 2829 case SCTP_CC_HSTCP: 2830 case SCTP_CC_HTCP: 2831 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 2832 break; 2833 default: 2834 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2835 error = EINVAL; 2836 break; 2837 }; 2838 } 2839 } 2840 break; 2841 case SCTP_CLR_STAT_LOG: 2842 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2843 error = EOPNOTSUPP; 2844 break; 2845 case SCTP_CONTEXT: 2846 { 2847 struct sctp_assoc_value *av; 2848 2849 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2850 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2851 2852 if (stcb) { 2853 stcb->asoc.context = av->assoc_value; 2854 SCTP_TCB_UNLOCK(stcb); 2855 } else { 2856 SCTP_INP_WLOCK(inp); 2857 inp->sctp_context = av->assoc_value; 2858 SCTP_INP_WUNLOCK(inp); 2859 } 2860 } 2861 break; 2862 case SCTP_VRF_ID: 2863 { 2864 uint32_t *default_vrfid; 2865 2866 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 2867 if (*default_vrfid > SCTP_MAX_VRF_ID) { 2868 
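/*
 * Example: SCTP_PLUGGABLE_CC (this handler) installs one of the three
 * congestion control function tables (SCTP_CC_RFC2581, SCTP_CC_HSTCP,
 * SCTP_CC_HTCP) on an existing association, or records the default module on
 * the endpoint when no TCB is found.  A minimal userland sketch; "sd" and
 * "assoc_id" are assumed:
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = assoc_id;		// no match => endpoint default is set
 *	av.assoc_value = SCTP_CC_HTCP;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC,
 *	    &av, sizeof(av)) < 0)
 *		perror("SCTP_PLUGGABLE_CC");
 *
 * Note the option is only honored when the sctp_cmt_on_off sysctl path for
 * pluggable CC is compiled in; otherwise the sysctl default module applies.
 */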
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2869 error = EINVAL; 2870 break; 2871 } 2872 inp->def_vrf_id = *default_vrfid; 2873 break; 2874 } 2875 case SCTP_DEL_VRF_ID: 2876 { 2877 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2878 error = EOPNOTSUPP; 2879 break; 2880 } 2881 case SCTP_ADD_VRF_ID: 2882 { 2883 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2884 error = EOPNOTSUPP; 2885 break; 2886 } 2887 case SCTP_DELAYED_SACK: 2888 { 2889 struct sctp_sack_info *sack; 2890 2891 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 2892 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 2893 if (sack->sack_delay) { 2894 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 2895 sack->sack_delay = SCTP_MAX_SACK_DELAY; 2896 } 2897 if (stcb) { 2898 if (sack->sack_delay) { 2899 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 2900 sack->sack_delay = TICKS_TO_MSEC(1); 2901 } 2902 stcb->asoc.delayed_ack = sack->sack_delay; 2903 } 2904 if (sack->sack_freq) { 2905 stcb->asoc.sack_freq = sack->sack_freq; 2906 } 2907 SCTP_TCB_UNLOCK(stcb); 2908 } else { 2909 SCTP_INP_WLOCK(inp); 2910 if (sack->sack_delay) { 2911 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 2912 sack->sack_delay = TICKS_TO_MSEC(1); 2913 } 2914 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 2915 } 2916 if (sack->sack_freq) { 2917 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 2918 } 2919 SCTP_INP_WUNLOCK(inp); 2920 } 2921 break; 2922 } 2923 case SCTP_AUTH_CHUNK: 2924 { 2925 struct sctp_authchunk *sauth; 2926 2927 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 2928 2929 SCTP_INP_WLOCK(inp); 2930 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 2931 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2932 error = EINVAL; 2933 } 2934 SCTP_INP_WUNLOCK(inp); 2935 break; 2936 } 2937 case SCTP_AUTH_KEY: 2938 { 2939 struct sctp_authkey *sca; 2940 struct sctp_keyhead *shared_keys; 2941 sctp_sharedkey_t *shared_key; 2942 sctp_key_t *key = NULL; 2943 size_t size; 2944 2945 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 2946 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 2947 size = optsize - sizeof(*sca); 2948 2949 if (stcb) { 2950 /* set it on the assoc */ 2951 shared_keys = &stcb->asoc.shared_keys; 2952 /* clear the cached keys for this key id */ 2953 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 2954 /* 2955 * create the new shared key and 2956 * insert/replace it 2957 */ 2958 if (size > 0) { 2959 key = sctp_set_key(sca->sca_key, (uint32_t) size); 2960 if (key == NULL) { 2961 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2962 error = ENOMEM; 2963 SCTP_TCB_UNLOCK(stcb); 2964 break; 2965 } 2966 } 2967 shared_key = sctp_alloc_sharedkey(); 2968 if (shared_key == NULL) { 2969 sctp_free_key(key); 2970 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2971 error = ENOMEM; 2972 SCTP_TCB_UNLOCK(stcb); 2973 break; 2974 } 2975 shared_key->key = key; 2976 shared_key->keyid = sca->sca_keynumber; 2977 sctp_insert_sharedkey(shared_keys, shared_key); 2978 SCTP_TCB_UNLOCK(stcb); 2979 } else { 2980 /* set it on the endpoint */ 2981 SCTP_INP_WLOCK(inp); 2982 shared_keys = &inp->sctp_ep.shared_keys; 2983 /* 2984 * clear the cached keys on all assocs for 2985 * this key id 2986 */ 2987 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 2988 /* 2989 * create the new shared key and 2990 * insert/replace it 2991 */ 2992 if (size > 0) { 
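/*
 * Example: this SCTP_AUTH_KEY handler installs (or replaces) a shared key on
 * the association named by sca_assoc_id, or on the endpoint, after clearing
 * any cached keys for that key number.  The option buffer is a struct
 * sctp_authkey followed immediately by the raw key bytes, which is why the
 * handler computes the key length as optsize minus sizeof(*sca).  A minimal
 * userland sketch; the key bytes, "sd" and "assoc_id" are placeholders and
 * error handling is omitted:
 *
 *	const uint8_t keybytes[] = { 0x01, 0x02, 0x03, 0x04 };
 *	size_t len = sizeof(struct sctp_authkey) + sizeof(keybytes);
 *	struct sctp_authkey *ak = calloc(1, len);
 *
 *	ak->sca_assoc_id = assoc_id;	// no match => endpoint-level key
 *	ak->sca_keynumber = 1;
 *	memcpy(ak->sca_key, keybytes, sizeof(keybytes));
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, len) < 0)
 *		perror("SCTP_AUTH_KEY");
 *	free(ak);
 */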
2993 key = sctp_set_key(sca->sca_key, (uint32_t) size); 2994 if (key == NULL) { 2995 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 2996 error = ENOMEM; 2997 SCTP_INP_WUNLOCK(inp); 2998 break; 2999 } 3000 } 3001 shared_key = sctp_alloc_sharedkey(); 3002 if (shared_key == NULL) { 3003 sctp_free_key(key); 3004 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3005 error = ENOMEM; 3006 SCTP_INP_WUNLOCK(inp); 3007 break; 3008 } 3009 shared_key->key = key; 3010 shared_key->keyid = sca->sca_keynumber; 3011 sctp_insert_sharedkey(shared_keys, shared_key); 3012 SCTP_INP_WUNLOCK(inp); 3013 } 3014 break; 3015 } 3016 case SCTP_HMAC_IDENT: 3017 { 3018 struct sctp_hmacalgo *shmac; 3019 sctp_hmaclist_t *hmaclist; 3020 uint32_t hmacid; 3021 size_t size, i, found; 3022 3023 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3024 size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]); 3025 hmaclist = sctp_alloc_hmaclist(size); 3026 if (hmaclist == NULL) { 3027 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3028 error = ENOMEM; 3029 break; 3030 } 3031 for (i = 0; i < size; i++) { 3032 hmacid = shmac->shmac_idents[i]; 3033 if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) { 3034 /* invalid HMACs were found */ ; 3035 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3036 error = EINVAL; 3037 sctp_free_hmaclist(hmaclist); 3038 goto sctp_set_hmac_done; 3039 } 3040 } 3041 found = 0; 3042 for (i = 0; i < hmaclist->num_algo; i++) { 3043 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 3044 /* already in list */ 3045 found = 1; 3046 } 3047 } 3048 if (!found) { 3049 sctp_free_hmaclist(hmaclist); 3050 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3051 error = EINVAL; 3052 break; 3053 } 3054 /* set it on the endpoint */ 3055 SCTP_INP_WLOCK(inp); 3056 if (inp->sctp_ep.local_hmacs) 3057 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3058 inp->sctp_ep.local_hmacs = hmaclist; 3059 SCTP_INP_WUNLOCK(inp); 3060 sctp_set_hmac_done: 3061 break; 3062 } 3063 case SCTP_AUTH_ACTIVE_KEY: 3064 { 3065 struct sctp_authkeyid *scact; 3066 3067 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize); 3068 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3069 3070 /* set the active key on the right place */ 3071 if (stcb) { 3072 /* set the active key on the assoc */ 3073 if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) { 3074 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3075 error = EINVAL; 3076 } 3077 SCTP_TCB_UNLOCK(stcb); 3078 } else { 3079 /* set the active key on the endpoint */ 3080 SCTP_INP_WLOCK(inp); 3081 if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) { 3082 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3083 error = EINVAL; 3084 } 3085 SCTP_INP_WUNLOCK(inp); 3086 } 3087 break; 3088 } 3089 case SCTP_AUTH_DELETE_KEY: 3090 { 3091 struct sctp_authkeyid *scdel; 3092 3093 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize); 3094 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3095 3096 /* delete the key from the right place */ 3097 if (stcb) { 3098 if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) { 3099 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3100 error = EINVAL; 3101 } 3102 SCTP_TCB_UNLOCK(stcb); 3103 } else { 3104 SCTP_INP_WLOCK(inp); 3105 if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) { 3106 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, 
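/*
 * Example: SCTP_AUTH_ACTIVE_KEY above selects which previously installed
 * shared key (see SCTP_AUTH_KEY) becomes the active one, either for a single
 * association or for the endpoint as a whole.  A minimal userland sketch;
 * "sd" and "assoc_id" are assumed:
 *
 *	struct sctp_authkeyid ak;
 *
 *	memset(&ak, 0, sizeof(ak));
 *	ak.scact_assoc_id = assoc_id;
 *	ak.scact_keynumber = 1;		// key number used with SCTP_AUTH_KEY
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *	    &ak, sizeof(ak)) < 0)
 *		perror("SCTP_AUTH_ACTIVE_KEY");
 */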
EINVAL); 3107 error = EINVAL; 3108 } 3109 SCTP_INP_WUNLOCK(inp); 3110 } 3111 break; 3112 } 3113 3114 case SCTP_RESET_STREAMS: 3115 { 3116 struct sctp_stream_reset *strrst; 3117 uint8_t send_in = 0, send_tsn = 0, send_out = 0; 3118 int i; 3119 3120 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3121 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3122 3123 if (stcb == NULL) { 3124 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3125 error = ENOENT; 3126 break; 3127 } 3128 if (stcb->asoc.peer_supports_strreset == 0) { 3129 /* 3130 * Peer does not support it, we return 3131 * protocol not supported since this is true 3132 * for this feature and this peer, not the 3133 * socket request in general. 3134 */ 3135 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3136 error = EPROTONOSUPPORT; 3137 SCTP_TCB_UNLOCK(stcb); 3138 break; 3139 } 3140 if (stcb->asoc.stream_reset_outstanding) { 3141 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3142 error = EALREADY; 3143 SCTP_TCB_UNLOCK(stcb); 3144 break; 3145 } 3146 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3147 send_in = 1; 3148 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3149 send_out = 1; 3150 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3151 send_in = 1; 3152 send_out = 1; 3153 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3154 send_tsn = 1; 3155 } else { 3156 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3157 error = EINVAL; 3158 SCTP_TCB_UNLOCK(stcb); 3159 break; 3160 } 3161 for (i = 0; i < strrst->strrst_num_streams; i++) { 3162 if ((send_in) && 3163 3164 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3165 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3166 error = EINVAL; 3167 goto get_out; 3168 } 3169 if ((send_out) && 3170 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3171 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3172 error = EINVAL; 3173 goto get_out; 3174 } 3175 } 3176 if (error) { 3177 get_out: 3178 SCTP_TCB_UNLOCK(stcb); 3179 break; 3180 } 3181 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3182 strrst->strrst_list, 3183 send_out, (stcb->asoc.str_reset_seq_in - 3), 3184 send_in, send_tsn); 3185 3186 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3187 SCTP_TCB_UNLOCK(stcb); 3188 } 3189 break; 3190 3191 case SCTP_CONNECT_X: 3192 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3193 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3194 error = EINVAL; 3195 break; 3196 } 3197 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3198 break; 3199 3200 case SCTP_CONNECT_X_DELAYED: 3201 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3202 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3203 error = EINVAL; 3204 break; 3205 } 3206 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3207 break; 3208 3209 case SCTP_CONNECT_X_COMPLETE: 3210 { 3211 struct sockaddr *sa; 3212 struct sctp_nets *net; 3213 3214 /* FIXME MT: check correct? 
*/ 3215 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3216 3217 /* find tcb */ 3218 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3219 SCTP_INP_RLOCK(inp); 3220 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3221 if (stcb) { 3222 SCTP_TCB_LOCK(stcb); 3223 net = sctp_findnet(stcb, sa); 3224 } 3225 SCTP_INP_RUNLOCK(inp); 3226 } else { 3227 /* 3228 * We increment here since 3229 * sctp_findassociation_ep_addr() wil do a 3230 * decrement if it finds the stcb as long as 3231 * the locked tcb (last argument) is NOT a 3232 * TCB.. aka NULL. 3233 */ 3234 SCTP_INP_INCR_REF(inp); 3235 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3236 if (stcb == NULL) { 3237 SCTP_INP_DECR_REF(inp); 3238 } 3239 } 3240 3241 if (stcb == NULL) { 3242 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3243 error = ENOENT; 3244 break; 3245 } 3246 if (stcb->asoc.delayed_connection == 1) { 3247 stcb->asoc.delayed_connection = 0; 3248 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3249 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3250 stcb->asoc.primary_destination, 3251 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3252 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3253 } else { 3254 /* 3255 * already expired or did not use delayed 3256 * connectx 3257 */ 3258 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3259 error = EALREADY; 3260 } 3261 SCTP_TCB_UNLOCK(stcb); 3262 } 3263 break; 3264 case SCTP_MAX_BURST: 3265 { 3266 uint8_t *burst; 3267 3268 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 3269 3270 SCTP_INP_WLOCK(inp); 3271 if (*burst) { 3272 inp->sctp_ep.max_burst = *burst; 3273 } 3274 SCTP_INP_WUNLOCK(inp); 3275 } 3276 break; 3277 case SCTP_MAXSEG: 3278 { 3279 struct sctp_assoc_value *av; 3280 int ovh; 3281 3282 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3283 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3284 3285 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3286 ovh = SCTP_MED_OVERHEAD; 3287 } else { 3288 ovh = SCTP_MED_V4_OVERHEAD; 3289 } 3290 if (stcb) { 3291 if (av->assoc_value) { 3292 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3293 } else { 3294 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3295 } 3296 SCTP_TCB_UNLOCK(stcb); 3297 } else { 3298 SCTP_INP_WLOCK(inp); 3299 /* 3300 * FIXME MT: I think this is not in tune 3301 * with the API ID 3302 */ 3303 if (av->assoc_value) { 3304 inp->sctp_frag_point = (av->assoc_value + ovh); 3305 } else { 3306 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3307 } 3308 SCTP_INP_WUNLOCK(inp); 3309 } 3310 } 3311 break; 3312 case SCTP_EVENTS: 3313 { 3314 struct sctp_event_subscribe *events; 3315 3316 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3317 3318 SCTP_INP_WLOCK(inp); 3319 if (events->sctp_data_io_event) { 3320 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3321 } else { 3322 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3323 } 3324 3325 if (events->sctp_association_event) { 3326 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3327 } else { 3328 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3329 } 3330 3331 if (events->sctp_address_event) { 3332 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3333 } else { 3334 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3335 } 3336 3337 if (events->sctp_send_failure_event) { 3338 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3339 } else { 3340 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3341 } 3342 3343 if 
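/*
 * Example: the SCTP_EVENTS handler (continued below) simply translates each
 * nonzero field of struct sctp_event_subscribe into the corresponding PCB
 * feature flag, and clears the flag when the field is zero.  A minimal
 * userland sketch that subscribes to data I/O and association change
 * notifications only ("sd" is an assumed SCTP socket):
 *
 *	struct sctp_event_subscribe ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sctp_data_io_event = 1;
 *	ev.sctp_association_event = 1;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)) < 0)
 *		perror("SCTP_EVENTS");
 */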
(events->sctp_peer_error_event) { 3344 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3345 } else { 3346 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3347 } 3348 3349 if (events->sctp_shutdown_event) { 3350 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3351 } else { 3352 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3353 } 3354 3355 if (events->sctp_partial_delivery_event) { 3356 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3357 } else { 3358 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3359 } 3360 3361 if (events->sctp_adaptation_layer_event) { 3362 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3363 } else { 3364 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3365 } 3366 3367 if (events->sctp_authentication_event) { 3368 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3369 } else { 3370 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3371 } 3372 3373 if (events->sctp_stream_reset_events) { 3374 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3375 } else { 3376 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3377 } 3378 SCTP_INP_WUNLOCK(inp); 3379 } 3380 break; 3381 3382 case SCTP_ADAPTATION_LAYER: 3383 { 3384 struct sctp_setadaptation *adap_bits; 3385 3386 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3387 SCTP_INP_WLOCK(inp); 3388 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3389 SCTP_INP_WUNLOCK(inp); 3390 } 3391 break; 3392 #ifdef SCTP_DEBUG 3393 case SCTP_SET_INITIAL_DBG_SEQ: 3394 { 3395 uint32_t *vvv; 3396 3397 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3398 SCTP_INP_WLOCK(inp); 3399 inp->sctp_ep.initial_sequence_debug = *vvv; 3400 SCTP_INP_WUNLOCK(inp); 3401 } 3402 break; 3403 #endif 3404 case SCTP_DEFAULT_SEND_PARAM: 3405 { 3406 struct sctp_sndrcvinfo *s_info; 3407 3408 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3409 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3410 3411 if (stcb) { 3412 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3413 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3414 } else { 3415 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3416 error = EINVAL; 3417 } 3418 SCTP_TCB_UNLOCK(stcb); 3419 } else { 3420 SCTP_INP_WLOCK(inp); 3421 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3422 SCTP_INP_WUNLOCK(inp); 3423 } 3424 } 3425 break; 3426 case SCTP_PEER_ADDR_PARAMS: 3427 /* Applys to the specific association */ 3428 { 3429 struct sctp_paddrparams *paddrp; 3430 struct sctp_nets *net; 3431 3432 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3433 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3434 net = NULL; 3435 if (stcb) { 3436 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3437 } else { 3438 /* 3439 * We increment here since 3440 * sctp_findassociation_ep_addr() wil do a 3441 * decrement if it finds the stcb as long as 3442 * the locked tcb (last argument) is NOT a 3443 * TCB.. aka NULL. 
3444 */ 3445 SCTP_INP_INCR_REF(inp); 3446 stcb = sctp_findassociation_ep_addr(&inp, 3447 (struct sockaddr *)&paddrp->spp_address, 3448 &net, NULL, NULL); 3449 if (stcb == NULL) { 3450 SCTP_INP_DECR_REF(inp); 3451 } 3452 } 3453 if (stcb && (net == NULL)) { 3454 struct sockaddr *sa; 3455 3456 sa = (struct sockaddr *)&paddrp->spp_address; 3457 if (sa->sa_family == AF_INET) { 3458 struct sockaddr_in *sin; 3459 3460 sin = (struct sockaddr_in *)sa; 3461 if (sin->sin_addr.s_addr) { 3462 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3463 SCTP_TCB_UNLOCK(stcb); 3464 error = EINVAL; 3465 break; 3466 } 3467 } else if (sa->sa_family == AF_INET6) { 3468 struct sockaddr_in6 *sin6; 3469 3470 sin6 = (struct sockaddr_in6 *)sa; 3471 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3472 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3473 SCTP_TCB_UNLOCK(stcb); 3474 error = EINVAL; 3475 break; 3476 } 3477 } else { 3478 error = EAFNOSUPPORT; 3479 SCTP_TCB_UNLOCK(stcb); 3480 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3481 break; 3482 } 3483 } 3484 /* sanity checks */ 3485 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3486 if (stcb) 3487 SCTP_TCB_UNLOCK(stcb); 3488 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3489 return (EINVAL); 3490 } 3491 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3492 if (stcb) 3493 SCTP_TCB_UNLOCK(stcb); 3494 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3495 return (EINVAL); 3496 } 3497 if (stcb) { 3498 /************************TCB SPECIFIC SET ******************/ 3499 /* 3500 * do we change the timer for HB, we run 3501 * only one? 3502 */ 3503 int ovh = 0; 3504 3505 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3506 ovh = SCTP_MED_OVERHEAD; 3507 } else { 3508 ovh = SCTP_MED_V4_OVERHEAD; 3509 } 3510 3511 if (paddrp->spp_hbinterval) 3512 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3513 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3514 stcb->asoc.heart_beat_delay = 0; 3515 3516 /* network sets ? 
*/ 3517 if (net) { 3518 /************************NET SPECIFIC SET ******************/ 3519 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3520 /* on demand HB */ 3521 if (sctp_send_hb(stcb, 1, net) < 0) { 3522 /* asoc destroyed */ 3523 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3524 error = EINVAL; 3525 break; 3526 } 3527 } 3528 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3529 net->dest_state |= SCTP_ADDR_NOHB; 3530 } 3531 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3532 net->dest_state &= ~SCTP_ADDR_NOHB; 3533 } 3534 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3535 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3536 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3537 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3538 } 3539 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3540 net->mtu = paddrp->spp_pathmtu + ovh; 3541 if (net->mtu < stcb->asoc.smallest_mtu) { 3542 #ifdef SCTP_PRINT_FOR_B_AND_M 3543 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3544 net->mtu); 3545 #endif 3546 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3547 } 3548 } 3549 } 3550 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3551 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3552 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3553 } 3554 } 3555 if (paddrp->spp_pathmaxrxt) 3556 net->failure_threshold = paddrp->spp_pathmaxrxt; 3557 #ifdef INET 3558 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3559 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3560 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3561 } 3562 } 3563 #endif 3564 #ifdef INET6 3565 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3566 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3567 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3568 } 3569 } 3570 #endif 3571 } else { 3572 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3573 if (paddrp->spp_pathmaxrxt) 3574 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3575 3576 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3577 /* Turn back on the timer */ 3578 stcb->asoc.hb_is_disabled = 0; 3579 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3580 } 3581 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3582 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3583 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3584 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3585 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3586 } 3587 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3588 net->mtu = paddrp->spp_pathmtu + ovh; 3589 if (net->mtu < stcb->asoc.smallest_mtu) { 3590 #ifdef SCTP_PRINT_FOR_B_AND_M 3591 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3592 net->mtu); 3593 #endif 3594 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3595 } 3596 } 3597 } 3598 } 3599 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3600 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3601 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3602 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3603 } 3604 } 3605 } 3606 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3607 int cnt_of_unconf = 0; 3608 struct sctp_nets *lnet; 3609 3610 stcb->asoc.hb_is_disabled = 1; 3611 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3612 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3613 cnt_of_unconf++; 3614 } 3615 } 3616 /* 3617 * stop the timer ONLY if we 3618 * have no unconfirmed 3619 * 
addresses 3620 */ 3621 if (cnt_of_unconf == 0) { 3622 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3623 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 3624 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3625 } 3626 } 3627 } 3628 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3629 /* start up the timer. */ 3630 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3631 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3632 } 3633 } 3634 #ifdef INET 3635 if (paddrp->spp_flags & SPP_IPV4_TOS) 3636 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3637 #endif 3638 #ifdef INET6 3639 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3640 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3641 #endif 3642 3643 } 3644 SCTP_TCB_UNLOCK(stcb); 3645 } else { 3646 /************************NO TCB, SET TO default stuff ******************/ 3647 SCTP_INP_WLOCK(inp); 3648 /* 3649 * For the TOS/FLOWLABEL stuff you set it 3650 * with the options on the socket 3651 */ 3652 if (paddrp->spp_pathmaxrxt) { 3653 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3654 } 3655 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3656 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 3657 else if (paddrp->spp_hbinterval) { 3658 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 3659 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 3660 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3661 } 3662 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3663 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3664 3665 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3666 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3667 } 3668 SCTP_INP_WUNLOCK(inp); 3669 } 3670 } 3671 break; 3672 case SCTP_RTOINFO: 3673 { 3674 struct sctp_rtoinfo *srto; 3675 uint32_t new_init, new_min, new_max; 3676 3677 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3678 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3679 3680 if (stcb) { 3681 if (srto->srto_initial) 3682 new_init = srto->srto_initial; 3683 else 3684 new_init = stcb->asoc.initial_rto; 3685 if (srto->srto_max) 3686 new_max = srto->srto_max; 3687 else 3688 new_max = stcb->asoc.maxrto; 3689 if (srto->srto_min) 3690 new_min = srto->srto_min; 3691 else 3692 new_min = stcb->asoc.minrto; 3693 if ((new_min <= new_init) && (new_init <= new_max)) { 3694 stcb->asoc.initial_rto = new_init; 3695 stcb->asoc.maxrto = new_max; 3696 stcb->asoc.minrto = new_min; 3697 } else { 3698 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3699 error = EINVAL; 3700 } 3701 SCTP_TCB_UNLOCK(stcb); 3702 } else { 3703 SCTP_INP_WLOCK(inp); 3704 if (srto->srto_initial) 3705 new_init = srto->srto_initial; 3706 else 3707 new_init = inp->sctp_ep.initial_rto; 3708 if (srto->srto_max) 3709 new_max = srto->srto_max; 3710 else 3711 new_max = inp->sctp_ep.sctp_maxrto; 3712 if (srto->srto_min) 3713 new_min = srto->srto_min; 3714 else 3715 new_min = inp->sctp_ep.sctp_minrto; 3716 if ((new_min <= new_init) && (new_init <= new_max)) { 3717 inp->sctp_ep.initial_rto = new_init; 3718 inp->sctp_ep.sctp_maxrto = new_max; 3719 inp->sctp_ep.sctp_minrto = new_min; 3720 } else { 3721 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3722 error = EINVAL; 3723 } 3724 SCTP_INP_WUNLOCK(inp); 3725 } 3726 } 3727 break; 3728 case SCTP_ASSOCINFO: 3729 { 3730 struct sctp_assocparams *sasoc; 3731 3732 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 3733 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 3734 if 
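/*
 * Example: the SCTP_PEER_ADDR_PARAMS handler above rejects requests that set
 * both halves of a flag pair (SPP_HB_ENABLE with SPP_HB_DISABLE,
 * SPP_PMTUD_ENABLE with SPP_PMTUD_DISABLE) and otherwise applies the values
 * per destination, per association, or as endpoint defaults, depending on
 * what spp_address and spp_assoc_id resolve to.  A minimal userland sketch
 * that enables heartbeats and pins the path MTU for every destination of one
 * association ("sd" and "assoc_id" are assumed):
 *
 *	struct sctp_paddrparams pp;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	pp.spp_assoc_id = assoc_id;	// spp_address left unspecified: all paths
 *	pp.spp_flags = SPP_HB_ENABLE | SPP_PMTUD_DISABLE;
 *	pp.spp_hbinterval = 5000;	// milliseconds
 *	pp.spp_pathmtu = 1200;		// must be at least SCTP_SMALLEST_PMTU
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *	    &pp, sizeof(pp)) < 0)
 *		perror("SCTP_PEER_ADDR_PARAMS");
 */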
(sasoc->sasoc_cookie_life) { 3735 /* boundary check the cookie life */ 3736 if (sasoc->sasoc_cookie_life < 1000) 3737 sasoc->sasoc_cookie_life = 1000; 3738 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 3739 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; 3740 } 3741 } 3742 if (stcb) { 3743 if (sasoc->sasoc_asocmaxrxt) 3744 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 3745 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 3746 sasoc->sasoc_peer_rwnd = 0; 3747 sasoc->sasoc_local_rwnd = 0; 3748 if (sasoc->sasoc_cookie_life) { 3749 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 3750 } 3751 SCTP_TCB_UNLOCK(stcb); 3752 } else { 3753 SCTP_INP_WLOCK(inp); 3754 if (sasoc->sasoc_asocmaxrxt) 3755 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 3756 sasoc->sasoc_number_peer_destinations = 0; 3757 sasoc->sasoc_peer_rwnd = 0; 3758 sasoc->sasoc_local_rwnd = 0; 3759 if (sasoc->sasoc_cookie_life) { 3760 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 3761 } 3762 SCTP_INP_WUNLOCK(inp); 3763 } 3764 } 3765 break; 3766 case SCTP_INITMSG: 3767 { 3768 struct sctp_initmsg *sinit; 3769 3770 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 3771 SCTP_INP_WLOCK(inp); 3772 if (sinit->sinit_num_ostreams) 3773 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 3774 3775 if (sinit->sinit_max_instreams) 3776 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 3777 3778 if (sinit->sinit_max_attempts) 3779 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 3780 3781 if (sinit->sinit_max_init_timeo) 3782 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 3783 SCTP_INP_WUNLOCK(inp); 3784 } 3785 break; 3786 case SCTP_PRIMARY_ADDR: 3787 { 3788 struct sctp_setprim *spa; 3789 struct sctp_nets *net, *lnet; 3790 3791 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 3792 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 3793 3794 net = NULL; 3795 if (stcb) { 3796 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 3797 } else { 3798 /* 3799 * We increment here since 3800 * sctp_findassociation_ep_addr() wil do a 3801 * decrement if it finds the stcb as long as 3802 * the locked tcb (last argument) is NOT a 3803 * TCB.. aka NULL. 3804 */ 3805 SCTP_INP_INCR_REF(inp); 3806 stcb = sctp_findassociation_ep_addr(&inp, 3807 (struct sockaddr *)&spa->ssp_addr, 3808 &net, NULL, NULL); 3809 if (stcb == NULL) { 3810 SCTP_INP_DECR_REF(inp); 3811 } 3812 } 3813 3814 if ((stcb) && (net)) { 3815 if ((net != stcb->asoc.primary_destination) && 3816 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 3817 /* Ok we need to set it */ 3818 lnet = stcb->asoc.primary_destination; 3819 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 3820 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 3821 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 3822 } 3823 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 3824 } 3825 } 3826 } else { 3827 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3828 error = EINVAL; 3829 } 3830 if (stcb) { 3831 SCTP_TCB_UNLOCK(stcb); 3832 } 3833 } 3834 break; 3835 case SCTP_SET_DYNAMIC_PRIMARY: 3836 { 3837 union sctp_sockstore *ss; 3838 3839 error = priv_check(curthread, 3840 PRIV_NETINET_RESERVEDPORT); 3841 if (error) 3842 break; 3843 3844 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 3845 /* SUPER USER CHECK? 
*/ 3846 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 3847 } 3848 break; 3849 case SCTP_SET_PEER_PRIMARY_ADDR: 3850 { 3851 struct sctp_setpeerprim *sspp; 3852 3853 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 3854 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 3855 if (stcb != NULL) { 3856 struct sctp_ifa *ifa; 3857 3858 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 3859 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 3860 if (ifa == NULL) { 3861 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3862 error = EINVAL; 3863 goto out_of_it; 3864 } 3865 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 3866 /* 3867 * Must validate the ifa found is in 3868 * our ep 3869 */ 3870 struct sctp_laddr *laddr; 3871 int found = 0; 3872 3873 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 3874 if (laddr->ifa == NULL) { 3875 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 3876 __FUNCTION__); 3877 continue; 3878 } 3879 if (laddr->ifa == ifa) { 3880 found = 1; 3881 break; 3882 } 3883 } 3884 if (!found) { 3885 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3886 error = EINVAL; 3887 goto out_of_it; 3888 } 3889 } 3890 if (sctp_set_primary_ip_address_sa(stcb, 3891 (struct sockaddr *)&sspp->sspp_addr) != 0) { 3892 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3893 error = EINVAL; 3894 } 3895 out_of_it: 3896 SCTP_TCB_UNLOCK(stcb); 3897 } else { 3898 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3899 error = EINVAL; 3900 } 3901 3902 } 3903 break; 3904 case SCTP_BINDX_ADD_ADDR: 3905 { 3906 struct sctp_getaddresses *addrs; 3907 size_t sz; 3908 struct thread *td; 3909 int prison = 0; 3910 3911 td = (struct thread *)p; 3912 if (jailed(td->td_ucred)) { 3913 prison = 1; 3914 } 3915 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 3916 optsize); 3917 if (addrs->addr->sa_family == AF_INET) { 3918 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 3919 if (optsize < sz) { 3920 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3921 error = EINVAL; 3922 break; 3923 } 3924 if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) { 3925 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL); 3926 error = EADDRNOTAVAIL; 3927 } 3928 } else if (addrs->addr->sa_family == AF_INET6) { 3929 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 3930 if (optsize < sz) { 3931 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3932 error = EINVAL; 3933 break; 3934 } 3935 /* JAIL XXXX Add else here for V6 */ 3936 } 3937 sctp_bindx_add_address(so, inp, addrs->addr, 3938 addrs->sget_assoc_id, vrf_id, 3939 &error, p); 3940 } 3941 break; 3942 case SCTP_BINDX_REM_ADDR: 3943 { 3944 struct sctp_getaddresses *addrs; 3945 size_t sz; 3946 struct thread *td; 3947 int prison = 0; 3948 3949 td = (struct thread *)p; 3950 if (jailed(td->td_ucred)) { 3951 prison = 1; 3952 } 3953 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 3954 if (addrs->addr->sa_family == AF_INET) { 3955 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 3956 if (optsize < sz) { 3957 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3958 error = EINVAL; 3959 break; 3960 } 3961 if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in 
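/*
 * Example: SCTP_BINDX_ADD_ADDR above (and SCTP_BINDX_REM_ADDR below) carry a
 * struct sctp_getaddresses holding a single sockaddr; the handlers size-check
 * it per address family and, for jailed processes, validate the IPv4 address
 * against the jail before calling sctp_bindx_add_address() or
 * sctp_bindx_delete_address().  Userland normally reaches this through the
 * sctp_bindx(3) wrapper rather than a raw setsockopt(2); a minimal sketch,
 * assuming that wrapper is available and using placeholder port and
 * documentation address values:
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_port = htons(5001);
 *	sin.sin_addr.s_addr = inet_addr("192.0.2.1");
 *	if (sctp_bindx(sd, (struct sockaddr *)&sin, 1, SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 */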
*)(addrs->addr))->sin_addr.s_addr))) { 3962 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL); 3963 error = EADDRNOTAVAIL; 3964 } 3965 } else if (addrs->addr->sa_family == AF_INET6) { 3966 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 3967 if (optsize < sz) { 3968 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3969 error = EINVAL; 3970 break; 3971 } 3972 /* JAIL XXXX Add else here for V6 */ 3973 } 3974 sctp_bindx_delete_address(so, inp, addrs->addr, 3975 addrs->sget_assoc_id, vrf_id, 3976 &error); 3977 } 3978 break; 3979 default: 3980 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 3981 error = ENOPROTOOPT; 3982 break; 3983 } /* end switch (opt) */ 3984 return (error); 3985 } 3986 3987 int 3988 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 3989 { 3990 void *optval = NULL; 3991 size_t optsize = 0; 3992 struct sctp_inpcb *inp; 3993 void *p; 3994 int error = 0; 3995 3996 inp = (struct sctp_inpcb *)so->so_pcb; 3997 if (inp == 0) { 3998 /* I made the same as TCP since we are not setup? */ 3999 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4000 return (ECONNRESET); 4001 } 4002 if (sopt->sopt_level != IPPROTO_SCTP) { 4003 /* wrong proto level... send back up to IP */ 4004 #ifdef INET6 4005 if (INP_CHECK_SOCKAF(so, AF_INET6)) 4006 error = ip6_ctloutput(so, sopt); 4007 else 4008 #endif /* INET6 */ 4009 error = ip_ctloutput(so, sopt); 4010 return (error); 4011 } 4012 optsize = sopt->sopt_valsize; 4013 if (optsize) { 4014 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); 4015 if (optval == NULL) { 4016 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); 4017 return (ENOBUFS); 4018 } 4019 error = sooptcopyin(sopt, optval, optsize, optsize); 4020 if (error) { 4021 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4022 goto out; 4023 } 4024 } 4025 p = (void *)sopt->sopt_td; 4026 if (sopt->sopt_dir == SOPT_SET) { 4027 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 4028 } else if (sopt->sopt_dir == SOPT_GET) { 4029 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 4030 } else { 4031 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4032 error = EINVAL; 4033 } 4034 if ((error == 0) && (optval != NULL)) { 4035 error = sooptcopyout(sopt, optval, optsize); 4036 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4037 } else if (optval != NULL) { 4038 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4039 } 4040 out: 4041 return (error); 4042 } 4043 4044 4045 static int 4046 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 4047 { 4048 int error = 0; 4049 int create_lock_on = 0; 4050 uint32_t vrf_id; 4051 struct sctp_inpcb *inp; 4052 struct sctp_tcb *stcb = NULL; 4053 4054 inp = (struct sctp_inpcb *)so->so_pcb; 4055 if (inp == 0) { 4056 /* I made the same as TCP since we are not setup? 
	 */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	if (addr == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	if ((addr->sa_family == AF_INET6) && (addr->sa_len != sizeof(struct sockaddr_in6))) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
	if ((addr->sa_family == AF_INET) && (addr->sa_len != sizeof(struct sockaddr_in))) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
	SCTP_INP_INCR_REF(inp);
	SCTP_ASOC_CREATE_LOCK(inp);
	create_lock_on = 1;


	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Should I really unlock ? */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
		error = EFAULT;
		goto out_now;
	}
#ifdef INET6
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
	    (addr->sa_family == AF_INET6)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		error = EINVAL;
		goto out_now;
	}
#endif				/* INET6 */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind an ephemeral port */
		error = sctp_inpcb_bind(so, NULL, NULL, p);
		if (error) {
			goto out_now;
		}
	}
	/* Now do we connect? */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
		error = EADDRINUSE;
		goto out_now;
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	} else {
		/*
		 * We increment here since sctp_findassociation_ep_addr()
		 * will do a decrement if it finds the stcb as long as the
		 * locked tcb (last argument) is NOT a TCB, aka NULL.
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_DECR_REF(inp);
		} else {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	if (stcb != NULL) {
		/* Already have or am bringing up an association */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
		error = EALREADY;
		goto out_now;
	}
	vrf_id = inp->def_vrf_id;
	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
	if (stcb == NULL) {
		/* Gak!
no memory */ 4142 goto out_now; 4143 } 4144 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 4145 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 4146 /* Set the connected flag so we can queue data */ 4147 soisconnecting(so); 4148 } 4149 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 4150 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 4151 4152 /* initialize authentication parameters for the assoc */ 4153 sctp_initialize_auth_params(inp, stcb); 4154 4155 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 4156 SCTP_TCB_UNLOCK(stcb); 4157 out_now: 4158 if (create_lock_on) { 4159 SCTP_ASOC_CREATE_UNLOCK(inp); 4160 } 4161 SCTP_INP_DECR_REF(inp); 4162 return error; 4163 } 4164 4165 int 4166 sctp_listen(struct socket *so, int backlog, struct thread *p) 4167 { 4168 /* 4169 * Note this module depends on the protocol processing being called 4170 * AFTER any socket level flags and backlog are applied to the 4171 * socket. The traditional way that the socket flags are applied is 4172 * AFTER protocol processing. We have made a change to the 4173 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 4174 * place if the socket API for SCTP is to work properly. 4175 */ 4176 4177 int error = 0; 4178 struct sctp_inpcb *inp; 4179 4180 inp = (struct sctp_inpcb *)so->so_pcb; 4181 if (inp == 0) { 4182 /* I made the same as TCP since we are not setup? */ 4183 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4184 return (ECONNRESET); 4185 } 4186 SCTP_INP_RLOCK(inp); 4187 #ifdef SCTP_LOCK_LOGGING 4188 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { 4189 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4190 } 4191 #endif 4192 SOCK_LOCK(so); 4193 error = solisten_proto_check(so); 4194 if (error) { 4195 SOCK_UNLOCK(so); 4196 SCTP_INP_RUNLOCK(inp); 4197 return (error); 4198 } 4199 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4200 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4201 /* We are already connected AND the TCP model */ 4202 SCTP_INP_RUNLOCK(inp); 4203 SOCK_UNLOCK(so); 4204 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4205 return (EADDRINUSE); 4206 } 4207 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 4208 /* We must do a bind. */ 4209 SOCK_UNLOCK(so); 4210 SCTP_INP_RUNLOCK(inp); 4211 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { 4212 /* bind error, probably perm */ 4213 return (error); 4214 } 4215 SOCK_LOCK(so); 4216 } else { 4217 if (backlog != 0) { 4218 inp->sctp_flags |= SCTP_PCB_FLAGS_LISTENING; 4219 } else { 4220 inp->sctp_flags &= ~SCTP_PCB_FLAGS_LISTENING; 4221 } 4222 SCTP_INP_RUNLOCK(inp); 4223 } 4224 /* It appears for 7.0 and on, we must always call this. 
*/ 4225 solisten_proto(so, backlog); 4226 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4227 /* remove the ACCEPTCONN flag for one-to-many sockets */ 4228 so->so_options &= ~SO_ACCEPTCONN; 4229 } 4230 if (backlog == 0) { 4231 /* turning off listen */ 4232 so->so_options &= ~SO_ACCEPTCONN; 4233 } 4234 SOCK_UNLOCK(so); 4235 return (error); 4236 } 4237 4238 static int sctp_defered_wakeup_cnt = 0; 4239 4240 int 4241 sctp_accept(struct socket *so, struct sockaddr **addr) 4242 { 4243 struct sctp_tcb *stcb; 4244 struct sctp_inpcb *inp; 4245 union sctp_sockstore store; 4246 4247 #ifdef INET6 4248 int error; 4249 4250 #endif 4251 inp = (struct sctp_inpcb *)so->so_pcb; 4252 4253 if (inp == 0) { 4254 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4255 return (ECONNRESET); 4256 } 4257 SCTP_INP_RLOCK(inp); 4258 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4259 SCTP_INP_RUNLOCK(inp); 4260 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 4261 return (EOPNOTSUPP); 4262 } 4263 if (so->so_state & SS_ISDISCONNECTED) { 4264 SCTP_INP_RUNLOCK(inp); 4265 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); 4266 return (ECONNABORTED); 4267 } 4268 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4269 if (stcb == NULL) { 4270 SCTP_INP_RUNLOCK(inp); 4271 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4272 return (ECONNRESET); 4273 } 4274 SCTP_TCB_LOCK(stcb); 4275 SCTP_INP_RUNLOCK(inp); 4276 store = stcb->asoc.primary_destination->ro._l_addr; 4277 SCTP_TCB_UNLOCK(stcb); 4278 switch (store.sa.sa_family) { 4279 case AF_INET: 4280 { 4281 struct sockaddr_in *sin; 4282 4283 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4284 sin->sin_family = AF_INET; 4285 sin->sin_len = sizeof(*sin); 4286 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 4287 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 4288 *addr = (struct sockaddr *)sin; 4289 break; 4290 } 4291 #ifdef INET6 4292 case AF_INET6: 4293 { 4294 struct sockaddr_in6 *sin6; 4295 4296 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 4297 sin6->sin6_family = AF_INET6; 4298 sin6->sin6_len = sizeof(*sin6); 4299 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 4300 4301 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 4302 if ((error = sa6_recoverscope(sin6)) != 0) { 4303 SCTP_FREE_SONAME(sin6); 4304 return (error); 4305 } 4306 *addr = (struct sockaddr *)sin6; 4307 break; 4308 } 4309 #endif 4310 default: 4311 /* TSNH */ 4312 break; 4313 } 4314 /* Wake any delayed sleep action */ 4315 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 4316 SCTP_INP_WLOCK(inp); 4317 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 4318 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 4319 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 4320 SCTP_INP_WUNLOCK(inp); 4321 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 4322 if (sowriteable(inp->sctp_socket)) { 4323 sowwakeup_locked(inp->sctp_socket); 4324 } else { 4325 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 4326 } 4327 SCTP_INP_WLOCK(inp); 4328 } 4329 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 4330 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 4331 SCTP_INP_WUNLOCK(inp); 4332 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 4333 if (soreadable(inp->sctp_socket)) { 4334 sctp_defered_wakeup_cnt++; 4335 sorwakeup_locked(inp->sctp_socket); 4336 } else { 4337 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 4338 } 4339 SCTP_INP_WLOCK(inp); 4340 } 4341 SCTP_INP_WUNLOCK(inp); 4342 } 4343 return (0); 4344 } 4345 4346 
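/*
 * Illustrative user-space sketch (not part of this module): how the
 * one-to-one style listen()/accept() path implemented by sctp_listen()
 * and sctp_accept() above is typically driven.  Note that sctp_accept()
 * rejects one-to-many (SOCK_SEQPACKET) sockets with EOPNOTSUPP, and a
 * backlog of 0 passed to listen() turns listening back off.  The helper
 * name, port handling and error handling below are assumptions of the
 * sketch only.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int
example_one_to_one_server(uint16_t port)
{
	struct sockaddr_in sin;
	int lfd, cfd;

	lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (lfd < 0)
		return (-1);
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(lfd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(lfd, 5) < 0) {
		close(lfd);
		return (-1);
	}
	/* accept() reports the peer's primary destination address. */
	cfd = accept(lfd, NULL, NULL);
	return (cfd);
}
#endif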
int 4347 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 4348 { 4349 struct sockaddr_in *sin; 4350 uint32_t vrf_id; 4351 struct sctp_inpcb *inp; 4352 struct sctp_ifa *sctp_ifa; 4353 4354 /* 4355 * Do the malloc first in case it blocks. 4356 */ 4357 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4358 sin->sin_family = AF_INET; 4359 sin->sin_len = sizeof(*sin); 4360 inp = (struct sctp_inpcb *)so->so_pcb; 4361 if (!inp) { 4362 SCTP_FREE_SONAME(sin); 4363 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4364 return ECONNRESET; 4365 } 4366 SCTP_INP_RLOCK(inp); 4367 sin->sin_port = inp->sctp_lport; 4368 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 4369 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4370 struct sctp_tcb *stcb; 4371 struct sockaddr_in *sin_a; 4372 struct sctp_nets *net; 4373 int fnd; 4374 4375 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4376 if (stcb == NULL) { 4377 goto notConn; 4378 } 4379 fnd = 0; 4380 sin_a = NULL; 4381 SCTP_TCB_LOCK(stcb); 4382 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4383 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4384 if (sin_a == NULL) 4385 /* this will make coverity happy */ 4386 continue; 4387 4388 if (sin_a->sin_family == AF_INET) { 4389 fnd = 1; 4390 break; 4391 } 4392 } 4393 if ((!fnd) || (sin_a == NULL)) { 4394 /* punt */ 4395 SCTP_TCB_UNLOCK(stcb); 4396 goto notConn; 4397 } 4398 vrf_id = inp->def_vrf_id; 4399 sctp_ifa = sctp_source_address_selection(inp, 4400 stcb, 4401 (sctp_route_t *) & net->ro, 4402 net, 0, vrf_id); 4403 if (sctp_ifa) { 4404 sin->sin_addr = sctp_ifa->address.sin.sin_addr; 4405 sctp_free_ifa(sctp_ifa); 4406 } 4407 SCTP_TCB_UNLOCK(stcb); 4408 } else { 4409 /* For the bound all case you get back 0 */ 4410 notConn: 4411 sin->sin_addr.s_addr = 0; 4412 } 4413 4414 } else { 4415 /* Take the first IPv4 address in the list */ 4416 struct sctp_laddr *laddr; 4417 int fnd = 0; 4418 4419 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4420 if (laddr->ifa->address.sa.sa_family == AF_INET) { 4421 struct sockaddr_in *sin_a; 4422 4423 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa; 4424 sin->sin_addr = sin_a->sin_addr; 4425 fnd = 1; 4426 break; 4427 } 4428 } 4429 if (!fnd) { 4430 SCTP_FREE_SONAME(sin); 4431 SCTP_INP_RUNLOCK(inp); 4432 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 4433 return ENOENT; 4434 } 4435 } 4436 SCTP_INP_RUNLOCK(inp); 4437 (*addr) = (struct sockaddr *)sin; 4438 return (0); 4439 } 4440 4441 int 4442 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 4443 { 4444 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 4445 int fnd; 4446 struct sockaddr_in *sin_a; 4447 struct sctp_inpcb *inp; 4448 struct sctp_tcb *stcb; 4449 struct sctp_nets *net; 4450 4451 /* Do the malloc first in case it blocks. 
	 */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if ((inp == NULL) ||
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
		/* UDP type and listeners will drop out here */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
		return (ENOTCONN);
	}
	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);

	/* We must recapture in case we blocked */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (!inp) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return ECONNRESET;
	}
	SCTP_INP_RLOCK(inp);
	stcb = LIST_FIRST(&inp->sctp_asoc_list);
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
	}
	SCTP_INP_RUNLOCK(inp);
	if (stcb == NULL) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return ECONNRESET;
	}
	fnd = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
		if (sin_a->sin_family == AF_INET) {
			fnd = 1;
			sin->sin_port = stcb->rport;
			sin->sin_addr = sin_a->sin_addr;
			break;
		}
	}
	SCTP_TCB_UNLOCK(stcb);
	if (!fnd) {
		/* No IPv4 address */
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		return ENOENT;
	}
	(*addr) = (struct sockaddr *)sin;
	return (0);
}

struct pr_usrreqs sctp_usrreqs = {
	.pru_abort = sctp_abort,
	.pru_accept = sctp_accept,
	.pru_attach = sctp_attach,
	.pru_bind = sctp_bind,
	.pru_connect = sctp_connect,
	.pru_control = in_control,
	.pru_close = sctp_close,
	.pru_detach = sctp_close,
	.pru_sopoll = sopoll_generic,
	.pru_flush = sctp_flush,
	.pru_disconnect = sctp_disconnect,
	.pru_listen = sctp_listen,
	.pru_peeraddr = sctp_peeraddr,
	.pru_send = sctp_sendm,
	.pru_shutdown = sctp_shutdown,
	.pru_sockaddr = sctp_ingetaddr,
	.pru_sosend = sctp_sosend,
	.pru_soreceive = sctp_soreceive
};
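/*
 * Illustrative user-space sketch (not part of this module): exercising
 * the IPPROTO_SCTP option handling above through setsockopt() and
 * getsockname().  As the SCTP_INITMSG case in sctp_setopt() shows,
 * fields left at zero are not applied, so individual INIT parameters
 * can be updated in isolation.  getsockname() on an SCTP socket is
 * serviced by sctp_ingetaddr().  The helper name and the values used
 * below are assumptions of the sketch only.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>

static int
example_tune_initmsg(int fd)
{
	struct sctp_initmsg init;
	struct sockaddr_in local;
	socklen_t len;

	memset(&init, 0, sizeof(init));
	init.sinit_num_ostreams = 10;	/* only non-zero fields take effect */
	init.sinit_max_attempts = 4;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG,
	    &init, sizeof(init)) < 0)
		return (-1);

	len = sizeof(local);
	if (getsockname(fd, (struct sockaddr *)&local, &len) < 0)
		return (-1);
	return (0);
}
#endif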