1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 #include <netinet/sctp_os.h> 36 #include <sys/proc.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctp_header.h> 39 #include <netinet/sctp_var.h> 40 #if defined(INET6) 41 #endif 42 #include <netinet/sctp_sysctl.h> 43 #include <netinet/sctp_output.h> 44 #include <netinet/sctp_uio.h> 45 #include <netinet/sctp_asconf.h> 46 #include <netinet/sctputil.h> 47 #include <netinet/sctp_indata.h> 48 #include <netinet/sctp_timer.h> 49 #include <netinet/sctp_auth.h> 50 #include <netinet/sctp_bsd_addr.h> 51 #include <netinet/sctp_cc_functions.h> 52 #include <netinet/udp.h> 53 54 55 56 57 void 58 sctp_init(void) 59 { 60 u_long sb_max_adj; 61 62 bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat)); 63 64 /* Initialize and modify the sysctled variables */ 65 sctp_init_sysctls(); 66 if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) 67 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8); 68 /* 69 * Allow a user to take no more than 1/2 the number of clusters or 70 * the SB_MAX whichever is smaller for the send window. 71 */ 72 sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); 73 SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj, 74 (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT)); 75 /* 76 * Now for the recv window, should we take the same amount? or 77 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For 78 * now I will just copy. 
79 */ 80 SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace); 81 82 SCTP_BASE_VAR(first_time) = 0; 83 SCTP_BASE_VAR(sctp_pcb_initialized) = 0; 84 sctp_pcb_init(); 85 #if defined(SCTP_PACKET_LOGGING) 86 SCTP_BASE_VAR(packet_log_writers) = 0; 87 SCTP_BASE_VAR(packet_log_end) = 0; 88 bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE); 89 #endif 90 91 92 } 93 94 void 95 sctp_finish(void) 96 { 97 sctp_pcb_finish(); 98 } 99 100 101 102 void 103 sctp_pathmtu_adjustment(struct sctp_inpcb *inp, 104 struct sctp_tcb *stcb, 105 struct sctp_nets *net, 106 uint16_t nxtsz) 107 { 108 struct sctp_tmit_chunk *chk; 109 110 /* Adjust that too */ 111 stcb->asoc.smallest_mtu = nxtsz; 112 /* now off to subtract IP_DF flag if needed */ 113 #ifdef SCTP_PRINT_FOR_B_AND_M 114 SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n", 115 inp, stcb, net, nxtsz); 116 #endif 117 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) { 118 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) { 119 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 120 } 121 } 122 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 123 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) { 124 /* 125 * For this guy we also mark for immediate resend 126 * since we sent to big of chunk 127 */ 128 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 129 if (chk->sent != SCTP_DATAGRAM_RESEND) { 130 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 131 } 132 chk->sent = SCTP_DATAGRAM_RESEND; 133 chk->rec.data.doing_fast_retransmit = 0; 134 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 135 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU, 136 chk->whoTo->flight_size, 137 chk->book_size, 138 (uintptr_t) chk->whoTo, 139 chk->rec.data.TSN_seq); 140 } 141 /* Clear any time so NO RTT is being done */ 142 chk->do_rtt = 0; 143 sctp_flight_size_decrease(chk); 144 sctp_total_flight_decrease(stcb, chk); 145 } 146 } 147 } 148 149 static void 150 sctp_notify_mbuf(struct sctp_inpcb *inp, 151 struct sctp_tcb *stcb, 152 struct sctp_nets *net, 153 struct ip *ip, 154 struct sctphdr *sh) 155 { 156 struct icmp *icmph; 157 int totsz, tmr_stopped = 0; 158 uint16_t nxtsz; 159 160 /* protection */ 161 if ((inp == NULL) || (stcb == NULL) || (net == NULL) || 162 (ip == NULL) || (sh == NULL)) { 163 if (stcb != NULL) { 164 SCTP_TCB_UNLOCK(stcb); 165 } 166 return; 167 } 168 /* First job is to verify the vtag matches what I would send */ 169 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) { 170 SCTP_TCB_UNLOCK(stcb); 171 return; 172 } 173 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) - 174 sizeof(struct ip))); 175 if (icmph->icmp_type != ICMP_UNREACH) { 176 /* We only care about unreachable */ 177 SCTP_TCB_UNLOCK(stcb); 178 return; 179 } 180 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) { 181 /* not a unreachable message due to frag. */ 182 SCTP_TCB_UNLOCK(stcb); 183 return; 184 } 185 totsz = ip->ip_len; 186 187 nxtsz = ntohs(icmph->icmp_nextmtu); 188 if (nxtsz == 0) { 189 /* 190 * old type router that does not tell us what the next size 191 * mtu is. 
Rats we will have to guess (in a educated fashion 192 * of course) 193 */ 194 nxtsz = find_next_best_mtu(totsz); 195 } 196 /* Stop any PMTU timer */ 197 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 198 tmr_stopped = 1; 199 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 200 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1); 201 } 202 /* Adjust destination size limit */ 203 if (net->mtu > nxtsz) { 204 net->mtu = nxtsz; 205 if (net->port) { 206 net->mtu -= sizeof(struct udphdr); 207 } 208 } 209 /* now what about the ep? */ 210 if (stcb->asoc.smallest_mtu > nxtsz) { 211 #ifdef SCTP_PRINT_FOR_B_AND_M 212 SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n", 213 nxtsz); 214 #endif 215 sctp_pathmtu_adjustment(inp, stcb, net, nxtsz); 216 } 217 if (tmr_stopped) 218 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 219 220 SCTP_TCB_UNLOCK(stcb); 221 } 222 223 224 void 225 sctp_notify(struct sctp_inpcb *inp, 226 struct ip *ip, 227 struct sctphdr *sh, 228 struct sockaddr *to, 229 struct sctp_tcb *stcb, 230 struct sctp_nets *net) 231 { 232 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 233 struct socket *so; 234 235 #endif 236 /* protection */ 237 int reason; 238 struct icmp *icmph; 239 240 241 if ((inp == NULL) || (stcb == NULL) || (net == NULL) || 242 (sh == NULL) || (to == NULL)) { 243 if (stcb) 244 SCTP_TCB_UNLOCK(stcb); 245 return; 246 } 247 /* First job is to verify the vtag matches what I would send */ 248 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) { 249 SCTP_TCB_UNLOCK(stcb); 250 return; 251 } 252 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) - 253 sizeof(struct ip))); 254 if (icmph->icmp_type != ICMP_UNREACH) { 255 /* We only care about unreachable */ 256 SCTP_TCB_UNLOCK(stcb); 257 return; 258 } 259 if ((icmph->icmp_code == ICMP_UNREACH_NET) || 260 (icmph->icmp_code == ICMP_UNREACH_HOST) || 261 (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) || 262 (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) || 263 (icmph->icmp_code == ICMP_UNREACH_ISOLATED) || 264 (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) || 265 (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) || 266 (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) { 267 268 /* 269 * Hmm reachablity problems we must examine closely. If its 270 * not reachable, we may have lost a network. Or if there is 271 * NO protocol at the other end named SCTP. well we consider 272 * it a OOTB abort. 273 */ 274 if (net->dest_state & SCTP_ADDR_REACHABLE) { 275 /* Ok that destination is NOT reachable */ 276 SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n", 277 net->error_count, 278 net->failure_threshold, 279 net); 280 281 net->dest_state &= ~SCTP_ADDR_REACHABLE; 282 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 283 /* 284 * JRS 5/14/07 - If a destination is unreachable, 285 * the PF bit is turned off. This allows an 286 * unambiguous use of the PF bit for destinations 287 * that are reachable but potentially failed. If the 288 * destination is set to the unreachable state, also 289 * set the destination to the PF state. 290 */ 291 /* 292 * Add debug message here if destination is not in 293 * PF state. 294 */ 295 /* Stop any running T3 timers here? 
*/ 296 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 297 net->dest_state &= ~SCTP_ADDR_PF; 298 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 299 net); 300 } 301 net->error_count = net->failure_threshold + 1; 302 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 303 stcb, SCTP_FAILED_THRESHOLD, 304 (void *)net, SCTP_SO_NOT_LOCKED); 305 } 306 SCTP_TCB_UNLOCK(stcb); 307 } else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) || 308 (icmph->icmp_code == ICMP_UNREACH_PORT)) { 309 /* 310 * Here the peer is either playing tricks on us, including 311 * an address that belongs to someone who does not support 312 * SCTP OR was a userland implementation that shutdown and 313 * now is dead. In either case treat it like a OOTB abort 314 * with no TCB 315 */ 316 reason = SCTP_PEER_FAULTY; 317 sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED); 318 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 319 so = SCTP_INP_SO(inp); 320 atomic_add_int(&stcb->asoc.refcnt, 1); 321 SCTP_TCB_UNLOCK(stcb); 322 SCTP_SOCKET_LOCK(so, 1); 323 SCTP_TCB_LOCK(stcb); 324 atomic_subtract_int(&stcb->asoc.refcnt, 1); 325 #endif 326 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); 327 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 328 SCTP_SOCKET_UNLOCK(so, 1); 329 /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */ 330 #endif 331 /* no need to unlock here, since the TCB is gone */ 332 } else { 333 SCTP_TCB_UNLOCK(stcb); 334 } 335 } 336 337 void 338 sctp_ctlinput(cmd, sa, vip) 339 int cmd; 340 struct sockaddr *sa; 341 void *vip; 342 { 343 struct ip *ip = vip; 344 struct sctphdr *sh; 345 uint32_t vrf_id; 346 347 /* FIX, for non-bsd is this right? */ 348 vrf_id = SCTP_DEFAULT_VRFID; 349 if (sa->sa_family != AF_INET || 350 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) { 351 return; 352 } 353 if (PRC_IS_REDIRECT(cmd)) { 354 ip = 0; 355 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { 356 return; 357 } 358 if (ip) { 359 struct sctp_inpcb *inp = NULL; 360 struct sctp_tcb *stcb = NULL; 361 struct sctp_nets *net = NULL; 362 struct sockaddr_in to, from; 363 364 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 365 bzero(&to, sizeof(to)); 366 bzero(&from, sizeof(from)); 367 from.sin_family = to.sin_family = AF_INET; 368 from.sin_len = to.sin_len = sizeof(to); 369 from.sin_port = sh->src_port; 370 from.sin_addr = ip->ip_src; 371 to.sin_port = sh->dest_port; 372 to.sin_addr = ip->ip_dst; 373 374 /* 375 * 'to' holds the dest of the packet that failed to be sent. 376 * 'from' holds our local endpoint address. Thus we reverse 377 * the to and the from in the lookup. 
378 */ 379 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from, 380 (struct sockaddr *)&to, 381 &inp, &net, 1, vrf_id); 382 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) { 383 if (cmd != PRC_MSGSIZE) { 384 sctp_notify(inp, ip, sh, 385 (struct sockaddr *)&to, stcb, 386 net); 387 } else { 388 /* handle possible ICMP size messages */ 389 sctp_notify_mbuf(inp, stcb, net, ip, sh); 390 } 391 } else { 392 if ((stcb == NULL) && (inp != NULL)) { 393 /* reduce ref-count */ 394 SCTP_INP_WLOCK(inp); 395 SCTP_INP_DECR_REF(inp); 396 SCTP_INP_WUNLOCK(inp); 397 } 398 } 399 } 400 return; 401 } 402 403 static int 404 sctp_getcred(SYSCTL_HANDLER_ARGS) 405 { 406 struct xucred xuc; 407 struct sockaddr_in addrs[2]; 408 struct sctp_inpcb *inp; 409 struct sctp_nets *net; 410 struct sctp_tcb *stcb; 411 int error; 412 uint32_t vrf_id; 413 414 /* FIX, for non-bsd is this right? */ 415 vrf_id = SCTP_DEFAULT_VRFID; 416 417 error = priv_check(req->td, PRIV_NETINET_GETCRED); 418 419 if (error) 420 return (error); 421 422 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 423 if (error) 424 return (error); 425 426 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]), 427 sintosa(&addrs[1]), 428 &inp, &net, 1, vrf_id); 429 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { 430 if ((inp != NULL) && (stcb == NULL)) { 431 /* reduce ref-count */ 432 SCTP_INP_WLOCK(inp); 433 SCTP_INP_DECR_REF(inp); 434 goto cred_can_cont; 435 } 436 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 437 error = ENOENT; 438 goto out; 439 } 440 SCTP_TCB_UNLOCK(stcb); 441 /* 442 * We use the write lock here, only since in the error leg we need 443 * it. If we used RLOCK, then we would have to 444 * wlock/decr/unlock/rlock. Which in theory could create a hole. 445 * Better to use higher wlock. 446 */ 447 SCTP_INP_WLOCK(inp); 448 cred_can_cont: 449 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); 450 if (error) { 451 SCTP_INP_WUNLOCK(inp); 452 goto out; 453 } 454 cru2x(inp->sctp_socket->so_cred, &xuc); 455 SCTP_INP_WUNLOCK(inp); 456 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 457 out: 458 return (error); 459 } 460 461 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 462 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); 463 464 465 static void 466 sctp_abort(struct socket *so) 467 { 468 struct sctp_inpcb *inp; 469 uint32_t flags; 470 471 inp = (struct sctp_inpcb *)so->so_pcb; 472 if (inp == 0) { 473 return; 474 } 475 sctp_must_try_again: 476 flags = inp->sctp_flags; 477 #ifdef SCTP_LOG_CLOSING 478 sctp_log_closing(inp, NULL, 17); 479 #endif 480 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 481 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 482 #ifdef SCTP_LOG_CLOSING 483 sctp_log_closing(inp, NULL, 16); 484 #endif 485 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 486 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 487 SOCK_LOCK(so); 488 SCTP_SB_CLEAR(so->so_snd); 489 /* 490 * same for the rcv ones, they are only here for the 491 * accounting/select. 492 */ 493 SCTP_SB_CLEAR(so->so_rcv); 494 495 /* Now null out the reference, we are completely detached. 
*/ 496 so->so_pcb = NULL; 497 SOCK_UNLOCK(so); 498 } else { 499 flags = inp->sctp_flags; 500 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 501 goto sctp_must_try_again; 502 } 503 } 504 return; 505 } 506 507 static int 508 sctp_attach(struct socket *so, int proto, struct thread *p) 509 { 510 struct sctp_inpcb *inp; 511 struct inpcb *ip_inp; 512 int error; 513 uint32_t vrf_id = SCTP_DEFAULT_VRFID; 514 515 #ifdef IPSEC 516 uint32_t flags; 517 518 #endif 519 520 inp = (struct sctp_inpcb *)so->so_pcb; 521 if (inp != 0) { 522 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 523 return EINVAL; 524 } 525 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { 526 error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); 527 if (error) { 528 return error; 529 } 530 } 531 error = sctp_inpcb_alloc(so, vrf_id); 532 if (error) { 533 return error; 534 } 535 inp = (struct sctp_inpcb *)so->so_pcb; 536 SCTP_INP_WLOCK(inp); 537 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ 538 ip_inp = &inp->ip_inp.inp; 539 ip_inp->inp_vflag |= INP_IPV4; 540 ip_inp->inp_ip_ttl = MODULE_GLOBAL(MOD_INET, ip_defttl); 541 #ifdef IPSEC 542 error = ipsec_init_policy(so, &ip_inp->inp_sp); 543 #ifdef SCTP_LOG_CLOSING 544 sctp_log_closing(inp, NULL, 17); 545 #endif 546 if (error != 0) { 547 flags = inp->sctp_flags; 548 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 549 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 550 #ifdef SCTP_LOG_CLOSING 551 sctp_log_closing(inp, NULL, 15); 552 #endif 553 SCTP_INP_WUNLOCK(inp); 554 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 555 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 556 } else { 557 SCTP_INP_WUNLOCK(inp); 558 } 559 return error; 560 } 561 #endif /* IPSEC */ 562 SCTP_INP_WUNLOCK(inp); 563 return 0; 564 } 565 566 static int 567 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 568 { 569 struct sctp_inpcb *inp = NULL; 570 int error; 571 572 #ifdef INET6 573 if (addr && addr->sa_family != AF_INET) { 574 /* must be a v4 address! */ 575 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 576 return EINVAL; 577 } 578 #endif /* INET6 */ 579 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) { 580 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 581 return EINVAL; 582 } 583 inp = (struct sctp_inpcb *)so->so_pcb; 584 if (inp == 0) { 585 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 586 return EINVAL; 587 } 588 error = sctp_inpcb_bind(so, addr, NULL, p); 589 return error; 590 } 591 592 void 593 sctp_close(struct socket *so) 594 { 595 struct sctp_inpcb *inp; 596 uint32_t flags; 597 598 inp = (struct sctp_inpcb *)so->so_pcb; 599 if (inp == 0) 600 return; 601 602 /* 603 * Inform all the lower layer assoc that we are done. 
604 */ 605 sctp_must_try_again: 606 flags = inp->sctp_flags; 607 #ifdef SCTP_LOG_CLOSING 608 sctp_log_closing(inp, NULL, 17); 609 #endif 610 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 611 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 612 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || 613 (so->so_rcv.sb_cc > 0)) { 614 #ifdef SCTP_LOG_CLOSING 615 sctp_log_closing(inp, NULL, 13); 616 #endif 617 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 618 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 619 } else { 620 #ifdef SCTP_LOG_CLOSING 621 sctp_log_closing(inp, NULL, 14); 622 #endif 623 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, 624 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 625 } 626 /* 627 * The socket is now detached, no matter what the state of 628 * the SCTP association. 629 */ 630 SOCK_LOCK(so); 631 SCTP_SB_CLEAR(so->so_snd); 632 /* 633 * same for the rcv ones, they are only here for the 634 * accounting/select. 635 */ 636 SCTP_SB_CLEAR(so->so_rcv); 637 638 /* Now null out the reference, we are completely detached. */ 639 so->so_pcb = NULL; 640 SOCK_UNLOCK(so); 641 } else { 642 flags = inp->sctp_flags; 643 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 644 goto sctp_must_try_again; 645 } 646 } 647 return; 648 } 649 650 651 int 652 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 653 struct mbuf *control, struct thread *p); 654 655 656 int 657 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 658 struct mbuf *control, struct thread *p) 659 { 660 struct sctp_inpcb *inp; 661 int error; 662 663 inp = (struct sctp_inpcb *)so->so_pcb; 664 if (inp == 0) { 665 if (control) { 666 sctp_m_freem(control); 667 control = NULL; 668 } 669 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 670 sctp_m_freem(m); 671 return EINVAL; 672 } 673 /* Got to have an to address if we are NOT a connected socket */ 674 if ((addr == NULL) && 675 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 676 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) 677 ) { 678 goto connected_type; 679 } else if (addr == NULL) { 680 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 681 error = EDESTADDRREQ; 682 sctp_m_freem(m); 683 if (control) { 684 sctp_m_freem(control); 685 control = NULL; 686 } 687 return (error); 688 } 689 #ifdef INET6 690 if (addr->sa_family != AF_INET) { 691 /* must be a v4 address! */ 692 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 693 sctp_m_freem(m); 694 if (control) { 695 sctp_m_freem(control); 696 control = NULL; 697 } 698 error = EDESTADDRREQ; 699 return EDESTADDRREQ; 700 } 701 #endif /* INET6 */ 702 connected_type: 703 /* now what about control */ 704 if (control) { 705 if (inp->control) { 706 SCTP_PRINTF("huh? control set?\n"); 707 sctp_m_freem(inp->control); 708 inp->control = NULL; 709 } 710 inp->control = control; 711 } 712 /* Place the data */ 713 if (inp->pkt) { 714 SCTP_BUF_NEXT(inp->pkt_last) = m; 715 inp->pkt_last = m; 716 } else { 717 inp->pkt_last = inp->pkt = m; 718 } 719 if ( 720 /* FreeBSD uses a flag passed */ 721 ((flags & PRUS_MORETOCOME) == 0) 722 ) { 723 /* 724 * note with the current version this code will only be used 725 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for 726 * re-defining sosend to use the sctp_sosend. One can 727 * optionally switch back to this code (by changing back the 728 * definitions) but this is not advisable. 
This code is used 729 * by FreeBSD when sending a file with sendfile() though. 730 */ 731 int ret; 732 733 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); 734 inp->pkt = NULL; 735 inp->control = NULL; 736 return (ret); 737 } else { 738 return (0); 739 } 740 } 741 742 int 743 sctp_disconnect(struct socket *so) 744 { 745 struct sctp_inpcb *inp; 746 747 inp = (struct sctp_inpcb *)so->so_pcb; 748 if (inp == NULL) { 749 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 750 return (ENOTCONN); 751 } 752 SCTP_INP_RLOCK(inp); 753 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 754 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 755 if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) { 756 /* No connection */ 757 SCTP_INP_RUNLOCK(inp); 758 return (0); 759 } else { 760 struct sctp_association *asoc; 761 struct sctp_tcb *stcb; 762 763 stcb = LIST_FIRST(&inp->sctp_asoc_list); 764 if (stcb == NULL) { 765 SCTP_INP_RUNLOCK(inp); 766 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 767 return (EINVAL); 768 } 769 SCTP_TCB_LOCK(stcb); 770 asoc = &stcb->asoc; 771 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 772 /* We are about to be freed, out of here */ 773 SCTP_TCB_UNLOCK(stcb); 774 SCTP_INP_RUNLOCK(inp); 775 return (0); 776 } 777 if (((so->so_options & SO_LINGER) && 778 (so->so_linger == 0)) || 779 (so->so_rcv.sb_cc > 0)) { 780 if (SCTP_GET_STATE(asoc) != 781 SCTP_STATE_COOKIE_WAIT) { 782 /* Left with Data unread */ 783 struct mbuf *err; 784 785 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 786 if (err) { 787 /* 788 * Fill in the user 789 * initiated abort 790 */ 791 struct sctp_paramhdr *ph; 792 793 ph = mtod(err, struct sctp_paramhdr *); 794 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr); 795 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 796 ph->param_length = htons(SCTP_BUF_LEN(err)); 797 } 798 #if defined(SCTP_PANIC_ON_ABORT) 799 panic("disconnect does an abort"); 800 #endif 801 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED); 802 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 803 } 804 SCTP_INP_RUNLOCK(inp); 805 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 806 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 807 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 808 } 809 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); 810 /* No unlock tcb assoc is gone */ 811 return (0); 812 } 813 if (TAILQ_EMPTY(&asoc->send_queue) && 814 TAILQ_EMPTY(&asoc->sent_queue) && 815 (asoc->stream_queue_cnt == 0)) { 816 /* there is nothing queued to send, so done */ 817 if (asoc->locked_on_sending) { 818 goto abort_anyway; 819 } 820 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 821 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 822 /* only send SHUTDOWN 1st time thru */ 823 sctp_stop_timers_for_shutdown(stcb); 824 sctp_send_shutdown(stcb, 825 stcb->asoc.primary_destination); 826 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 827 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 828 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 829 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 830 } 831 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 832 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 833 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 834 stcb->sctp_ep, stcb, 835 asoc->primary_destination); 836 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 837 stcb->sctp_ep, stcb, 838 asoc->primary_destination); 839 } 
840 } else { 841 /* 842 * we still got (or just got) data to send, 843 * so set SHUTDOWN_PENDING 844 */ 845 /* 846 * XXX sockets draft says that SCTP_EOF 847 * should be sent with no data. currently, 848 * we will allow user data to be sent first 849 * and move to SHUTDOWN-PENDING 850 */ 851 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 852 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 853 asoc->primary_destination); 854 if (asoc->locked_on_sending) { 855 /* Locked to send out the data */ 856 struct sctp_stream_queue_pending *sp; 857 858 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 859 if (sp == NULL) { 860 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 861 asoc->locked_on_sending->stream_no); 862 } else { 863 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 864 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 865 } 866 } 867 if (TAILQ_EMPTY(&asoc->send_queue) && 868 TAILQ_EMPTY(&asoc->sent_queue) && 869 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 870 struct mbuf *op_err; 871 872 abort_anyway: 873 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 874 0, M_DONTWAIT, 1, MT_DATA); 875 if (op_err) { 876 /* 877 * Fill in the user 878 * initiated abort 879 */ 880 struct sctp_paramhdr *ph; 881 uint32_t *ippp; 882 883 SCTP_BUF_LEN(op_err) = 884 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); 885 ph = mtod(op_err, 886 struct sctp_paramhdr *); 887 ph->param_type = htons( 888 SCTP_CAUSE_USER_INITIATED_ABT); 889 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 890 ippp = (uint32_t *) (ph + 1); 891 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4); 892 } 893 #if defined(SCTP_PANIC_ON_ABORT) 894 panic("disconnect does an abort"); 895 #endif 896 897 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; 898 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 899 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 900 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 901 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 902 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 903 } 904 SCTP_INP_RUNLOCK(inp); 905 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); 906 return (0); 907 } else { 908 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 909 } 910 } 911 soisdisconnecting(so); 912 SCTP_TCB_UNLOCK(stcb); 913 SCTP_INP_RUNLOCK(inp); 914 return (0); 915 } 916 /* not reached */ 917 } else { 918 /* UDP model does not support this */ 919 SCTP_INP_RUNLOCK(inp); 920 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 921 return EOPNOTSUPP; 922 } 923 } 924 925 int 926 sctp_flush(struct socket *so, int how) 927 { 928 /* 929 * We will just clear out the values and let subsequent close clear 930 * out the data, if any. Note if the user did a shutdown(SHUT_RD) 931 * they will not be able to read the data, the socket will block 932 * that from happening. 
933 */ 934 if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) { 935 /* 936 * First make sure the sb will be happy, we don't use these 937 * except maybe the count 938 */ 939 so->so_rcv.sb_cc = 0; 940 so->so_rcv.sb_mbcnt = 0; 941 so->so_rcv.sb_mb = NULL; 942 } 943 if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) { 944 /* 945 * First make sure the sb will be happy, we don't use these 946 * except maybe the count 947 */ 948 so->so_snd.sb_cc = 0; 949 so->so_snd.sb_mbcnt = 0; 950 so->so_snd.sb_mb = NULL; 951 952 } 953 return (0); 954 } 955 956 int 957 sctp_shutdown(struct socket *so) 958 { 959 struct sctp_inpcb *inp; 960 961 inp = (struct sctp_inpcb *)so->so_pcb; 962 if (inp == 0) { 963 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 964 return EINVAL; 965 } 966 SCTP_INP_RLOCK(inp); 967 /* For UDP model this is a invalid call */ 968 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 969 /* Restore the flags that the soshutdown took away. */ 970 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 971 /* This proc will wakeup for read and do nothing (I hope) */ 972 SCTP_INP_RUNLOCK(inp); 973 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 974 return (EOPNOTSUPP); 975 } 976 /* 977 * Ok if we reach here its the TCP model and it is either a SHUT_WR 978 * or SHUT_RDWR. This means we put the shutdown flag against it. 979 */ 980 { 981 struct sctp_tcb *stcb; 982 struct sctp_association *asoc; 983 984 if ((so->so_state & 985 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { 986 SCTP_INP_RUNLOCK(inp); 987 return (ENOTCONN); 988 } 989 socantsendmore(so); 990 991 stcb = LIST_FIRST(&inp->sctp_asoc_list); 992 if (stcb == NULL) { 993 /* 994 * Ok we hit the case that the shutdown call was 995 * made after an abort or something. Nothing to do 996 * now. 997 */ 998 SCTP_INP_RUNLOCK(inp); 999 return (0); 1000 } 1001 SCTP_TCB_LOCK(stcb); 1002 asoc = &stcb->asoc; 1003 if (TAILQ_EMPTY(&asoc->send_queue) && 1004 TAILQ_EMPTY(&asoc->sent_queue) && 1005 (asoc->stream_queue_cnt == 0)) { 1006 if (asoc->locked_on_sending) { 1007 goto abort_anyway; 1008 } 1009 /* there is nothing queued to send, so I'm done... 
*/ 1010 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1011 /* only send SHUTDOWN the first time through */ 1012 sctp_stop_timers_for_shutdown(stcb); 1013 sctp_send_shutdown(stcb, 1014 stcb->asoc.primary_destination); 1015 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 1016 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1017 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1018 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1019 } 1020 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1021 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1022 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1023 stcb->sctp_ep, stcb, 1024 asoc->primary_destination); 1025 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1026 stcb->sctp_ep, stcb, 1027 asoc->primary_destination); 1028 } 1029 } else { 1030 /* 1031 * we still got (or just got) data to send, so set 1032 * SHUTDOWN_PENDING 1033 */ 1034 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1035 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1036 asoc->primary_destination); 1037 1038 if (asoc->locked_on_sending) { 1039 /* Locked to send out the data */ 1040 struct sctp_stream_queue_pending *sp; 1041 1042 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1043 if (sp == NULL) { 1044 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 1045 asoc->locked_on_sending->stream_no); 1046 } else { 1047 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 1048 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1049 } 1050 } 1051 } 1052 if (TAILQ_EMPTY(&asoc->send_queue) && 1053 TAILQ_EMPTY(&asoc->sent_queue) && 1054 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1055 struct mbuf *op_err; 1056 1057 abort_anyway: 1058 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1059 0, M_DONTWAIT, 1, MT_DATA); 1060 if (op_err) { 1061 /* Fill in the user initiated abort */ 1062 struct sctp_paramhdr *ph; 1063 uint32_t *ippp; 1064 1065 SCTP_BUF_LEN(op_err) = 1066 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 1067 ph = mtod(op_err, 1068 struct sctp_paramhdr *); 1069 ph->param_type = htons( 1070 SCTP_CAUSE_USER_INITIATED_ABT); 1071 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1072 ippp = (uint32_t *) (ph + 1); 1073 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1074 } 1075 #if defined(SCTP_PANIC_ON_ABORT) 1076 panic("shutdown does an abort"); 1077 #endif 1078 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; 1079 sctp_abort_an_association(stcb->sctp_ep, stcb, 1080 SCTP_RESPONSE_TO_USER_REQ, 1081 op_err, SCTP_SO_LOCKED); 1082 goto skip_unlock; 1083 } else { 1084 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 1085 } 1086 } 1087 SCTP_TCB_UNLOCK(stcb); 1088 } 1089 skip_unlock: 1090 SCTP_INP_RUNLOCK(inp); 1091 return 0; 1092 } 1093 1094 /* 1095 * copies a "user" presentable address and removes embedded scope, etc. 
1096 * returns 0 on success, 1 on error 1097 */ 1098 static uint32_t 1099 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 1100 { 1101 #ifdef INET6 1102 struct sockaddr_in6 lsa6; 1103 1104 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 1105 &lsa6); 1106 #endif 1107 memcpy(ss, sa, sa->sa_len); 1108 return (0); 1109 } 1110 1111 1112 1113 /* 1114 * NOTE: assumes addr lock is held 1115 */ 1116 static size_t 1117 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, 1118 struct sctp_tcb *stcb, 1119 size_t limit, 1120 struct sockaddr_storage *sas, 1121 uint32_t vrf_id) 1122 { 1123 struct sctp_ifn *sctp_ifn; 1124 struct sctp_ifa *sctp_ifa; 1125 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1126 size_t actual; 1127 int ipv4_addr_legal, ipv6_addr_legal; 1128 struct sctp_vrf *vrf; 1129 1130 actual = 0; 1131 if (limit <= 0) 1132 return (actual); 1133 1134 if (stcb) { 1135 /* Turn on all the appropriate scope */ 1136 loopback_scope = stcb->asoc.loopback_scope; 1137 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1138 local_scope = stcb->asoc.local_scope; 1139 site_scope = stcb->asoc.site_scope; 1140 } else { 1141 /* Turn on ALL scope, since we look at the EP */ 1142 loopback_scope = ipv4_local_scope = local_scope = 1143 site_scope = 1; 1144 } 1145 ipv4_addr_legal = ipv6_addr_legal = 0; 1146 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1147 ipv6_addr_legal = 1; 1148 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1149 ipv4_addr_legal = 1; 1150 } 1151 } else { 1152 ipv4_addr_legal = 1; 1153 } 1154 vrf = sctp_find_vrf(vrf_id); 1155 if (vrf == NULL) { 1156 return (0); 1157 } 1158 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1159 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1160 if ((loopback_scope == 0) && 1161 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1162 /* Skip loopback if loopback_scope not set */ 1163 continue; 1164 } 1165 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1166 if (stcb) { 1167 /* 1168 * For the BOUND-ALL case, the list 1169 * associated with a TCB is Always 1170 * considered a reverse list.. i.e. 1171 * it lists addresses that are NOT 1172 * part of the association. If this 1173 * is one of those we must skip it. 
1174 */ 1175 if (sctp_is_addr_restricted(stcb, 1176 sctp_ifa)) { 1177 continue; 1178 } 1179 } 1180 switch (sctp_ifa->address.sa.sa_family) { 1181 case AF_INET: 1182 if (ipv4_addr_legal) { 1183 struct sockaddr_in *sin; 1184 1185 sin = (struct sockaddr_in *)&sctp_ifa->address.sa; 1186 if (sin->sin_addr.s_addr == 0) { 1187 /* 1188 * we skip 1189 * unspecifed 1190 * addresses 1191 */ 1192 continue; 1193 } 1194 if ((ipv4_local_scope == 0) && 1195 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1196 continue; 1197 } 1198 #ifdef INET6 1199 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 1200 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); 1201 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1202 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); 1203 actual += sizeof(struct sockaddr_in6); 1204 } else { 1205 #endif 1206 memcpy(sas, sin, sizeof(*sin)); 1207 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; 1208 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); 1209 actual += sizeof(*sin); 1210 #ifdef INET6 1211 } 1212 #endif 1213 if (actual >= limit) { 1214 return (actual); 1215 } 1216 } else { 1217 continue; 1218 } 1219 break; 1220 #ifdef INET6 1221 case AF_INET6: 1222 if (ipv6_addr_legal) { 1223 struct sockaddr_in6 *sin6; 1224 1225 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 1226 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1227 /* 1228 * we skip 1229 * unspecifed 1230 * addresses 1231 */ 1232 continue; 1233 } 1234 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1235 if (local_scope == 0) 1236 continue; 1237 if (sin6->sin6_scope_id == 0) { 1238 if (sa6_recoverscope(sin6) != 0) 1239 /* 1240 * 1241 * bad 1242 * 1243 * li 1244 * nk 1245 * 1246 * loc 1247 * al 1248 * 1249 * add 1250 * re 1251 * ss 1252 * */ 1253 continue; 1254 } 1255 } 1256 if ((site_scope == 0) && 1257 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1258 continue; 1259 } 1260 memcpy(sas, sin6, sizeof(*sin6)); 1261 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1262 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); 1263 actual += sizeof(*sin6); 1264 if (actual >= limit) { 1265 return (actual); 1266 } 1267 } else { 1268 continue; 1269 } 1270 break; 1271 #endif 1272 default: 1273 /* TSNH */ 1274 break; 1275 } 1276 } 1277 } 1278 } else { 1279 struct sctp_laddr *laddr; 1280 1281 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1282 if (stcb) { 1283 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 1284 continue; 1285 } 1286 } 1287 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) 1288 continue; 1289 1290 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1291 sas = (struct sockaddr_storage *)((caddr_t)sas + 1292 laddr->ifa->address.sa.sa_len); 1293 actual += laddr->ifa->address.sa.sa_len; 1294 if (actual >= limit) { 1295 return (actual); 1296 } 1297 } 1298 } 1299 return (actual); 1300 } 1301 1302 static size_t 1303 sctp_fill_up_addresses(struct sctp_inpcb *inp, 1304 struct sctp_tcb *stcb, 1305 size_t limit, 1306 struct sockaddr_storage *sas) 1307 { 1308 size_t size = 0; 1309 1310 SCTP_IPI_ADDR_RLOCK(); 1311 /* fill up addresses for the endpoint's default vrf */ 1312 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, 1313 inp->def_vrf_id); 1314 SCTP_IPI_ADDR_RUNLOCK(); 1315 return (size); 1316 } 1317 1318 /* 1319 * NOTE: assumes addr lock is held 1320 */ 1321 static int 1322 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) 1323 { 1324 int cnt = 0; 1325 struct sctp_vrf *vrf = 
NULL; 1326 1327 /* 1328 * In both sub-set bound an bound_all cases we return the MAXIMUM 1329 * number of addresses that you COULD get. In reality the sub-set 1330 * bound may have an exclusion list for a given TCB OR in the 1331 * bound-all case a TCB may NOT include the loopback or other 1332 * addresses as well. 1333 */ 1334 vrf = sctp_find_vrf(vrf_id); 1335 if (vrf == NULL) { 1336 return (0); 1337 } 1338 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1339 struct sctp_ifn *sctp_ifn; 1340 struct sctp_ifa *sctp_ifa; 1341 1342 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1343 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1344 /* Count them if they are the right type */ 1345 if (sctp_ifa->address.sa.sa_family == AF_INET) { 1346 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1347 cnt += sizeof(struct sockaddr_in6); 1348 else 1349 cnt += sizeof(struct sockaddr_in); 1350 1351 } else if (sctp_ifa->address.sa.sa_family == AF_INET6) 1352 cnt += sizeof(struct sockaddr_in6); 1353 } 1354 } 1355 } else { 1356 struct sctp_laddr *laddr; 1357 1358 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1359 if (laddr->ifa->address.sa.sa_family == AF_INET) { 1360 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1361 cnt += sizeof(struct sockaddr_in6); 1362 else 1363 cnt += sizeof(struct sockaddr_in); 1364 1365 } else if (laddr->ifa->address.sa.sa_family == AF_INET6) 1366 cnt += sizeof(struct sockaddr_in6); 1367 } 1368 } 1369 return (cnt); 1370 } 1371 1372 static int 1373 sctp_count_max_addresses(struct sctp_inpcb *inp) 1374 { 1375 int cnt = 0; 1376 1377 SCTP_IPI_ADDR_RLOCK(); 1378 /* count addresses for the endpoint's default VRF */ 1379 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id); 1380 SCTP_IPI_ADDR_RUNLOCK(); 1381 return (cnt); 1382 } 1383 1384 static int 1385 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, 1386 size_t optsize, void *p, int delay) 1387 { 1388 int error = 0; 1389 int creat_lock_on = 0; 1390 struct sctp_tcb *stcb = NULL; 1391 struct sockaddr *sa; 1392 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; 1393 int added = 0; 1394 uint32_t vrf_id; 1395 int bad_addresses = 0; 1396 sctp_assoc_t *a_id; 1397 1398 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n"); 1399 1400 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1401 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 1402 /* We are already connected AND the TCP model */ 1403 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 1404 return (EADDRINUSE); 1405 } 1406 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 1407 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { 1408 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1409 return (EINVAL); 1410 } 1411 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 1412 SCTP_INP_RLOCK(inp); 1413 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1414 SCTP_INP_RUNLOCK(inp); 1415 } 1416 if (stcb) { 1417 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1418 return (EALREADY); 1419 } 1420 SCTP_INP_INCR_REF(inp); 1421 SCTP_ASOC_CREATE_LOCK(inp); 1422 creat_lock_on = 1; 1423 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1424 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1425 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 1426 error = EFAULT; 1427 goto out_now; 1428 } 1429 totaddrp = (int *)optval; 1430 totaddr = *totaddrp; 1431 sa = (struct sockaddr *)(totaddrp + 1); 1432 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, 
&num_v6, &error, (optsize - sizeof(int)), &bad_addresses); 1433 if ((stcb != NULL) || bad_addresses) { 1434 /* Already have or am bring up an association */ 1435 SCTP_ASOC_CREATE_UNLOCK(inp); 1436 creat_lock_on = 0; 1437 if (stcb) 1438 SCTP_TCB_UNLOCK(stcb); 1439 if (bad_addresses == 0) { 1440 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1441 error = EALREADY; 1442 } 1443 goto out_now; 1444 } 1445 #ifdef INET6 1446 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1447 (num_v6 > 0)) { 1448 error = EINVAL; 1449 goto out_now; 1450 } 1451 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1452 (num_v4 > 0)) { 1453 struct in6pcb *inp6; 1454 1455 inp6 = (struct in6pcb *)inp; 1456 if (SCTP_IPV6_V6ONLY(inp6)) { 1457 /* 1458 * if IPV6_V6ONLY flag, ignore connections destined 1459 * to a v4 addr or v4-mapped addr 1460 */ 1461 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1462 error = EINVAL; 1463 goto out_now; 1464 } 1465 } 1466 #endif /* INET6 */ 1467 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 1468 SCTP_PCB_FLAGS_UNBOUND) { 1469 /* Bind a ephemeral port */ 1470 error = sctp_inpcb_bind(so, NULL, NULL, p); 1471 if (error) { 1472 goto out_now; 1473 } 1474 } 1475 /* FIX ME: do we want to pass in a vrf on the connect call? */ 1476 vrf_id = inp->def_vrf_id; 1477 1478 1479 /* We are GOOD to go */ 1480 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id, 1481 (struct thread *)p 1482 ); 1483 if (stcb == NULL) { 1484 /* Gak! no memory */ 1485 goto out_now; 1486 } 1487 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 1488 /* move to second address */ 1489 if (sa->sa_family == AF_INET) 1490 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); 1491 else 1492 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); 1493 1494 error = 0; 1495 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error); 1496 /* Fill in the return id */ 1497 if (error) { 1498 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12); 1499 goto out_now; 1500 } 1501 a_id = (sctp_assoc_t *) optval; 1502 *a_id = sctp_get_associd(stcb); 1503 1504 /* initialize authentication parameters for the assoc */ 1505 sctp_initialize_auth_params(inp, stcb); 1506 1507 if (delay) { 1508 /* doing delayed connection */ 1509 stcb->asoc.delayed_connection = 1; 1510 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); 1511 } else { 1512 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1513 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 1514 } 1515 SCTP_TCB_UNLOCK(stcb); 1516 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1517 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1518 /* Set the connected flag so we can queue data */ 1519 soisconnecting(so); 1520 } 1521 out_now: 1522 if (creat_lock_on) { 1523 SCTP_ASOC_CREATE_UNLOCK(inp); 1524 } 1525 SCTP_INP_DECR_REF(inp); 1526 return error; 1527 } 1528 1529 #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \ 1530 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\ 1531 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \ 1532 SCTP_INP_RLOCK(inp); \ 1533 stcb = LIST_FIRST(&inp->sctp_asoc_list); \ 1534 if (stcb) { \ 1535 SCTP_TCB_LOCK(stcb); \ 1536 } \ 1537 SCTP_INP_RUNLOCK(inp); \ 1538 } else if (assoc_id != 0) { \ 1539 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ 1540 if (stcb == NULL) { \ 1541 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \ 1542 error = ENOENT; \ 1543 break; \ 1544 } \ 1545 } 
else { \ 1546 stcb = NULL; \ 1547 } \ 1548 } 1549 1550 1551 #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ 1552 if (size < sizeof(type)) { \ 1553 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \ 1554 error = EINVAL; \ 1555 break; \ 1556 } else { \ 1557 destp = (type *)srcp; \ 1558 } \ 1559 } 1560 1561 static int 1562 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, 1563 void *p) 1564 { 1565 struct sctp_inpcb *inp = NULL; 1566 int error, val = 0; 1567 struct sctp_tcb *stcb = NULL; 1568 1569 if (optval == NULL) { 1570 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1571 return (EINVAL); 1572 } 1573 inp = (struct sctp_inpcb *)so->so_pcb; 1574 if (inp == 0) { 1575 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1576 return EINVAL; 1577 } 1578 error = 0; 1579 1580 switch (optname) { 1581 case SCTP_NODELAY: 1582 case SCTP_AUTOCLOSE: 1583 case SCTP_EXPLICIT_EOR: 1584 case SCTP_AUTO_ASCONF: 1585 case SCTP_DISABLE_FRAGMENTS: 1586 case SCTP_I_WANT_MAPPED_V4_ADDR: 1587 case SCTP_USE_EXT_RCVINFO: 1588 SCTP_INP_RLOCK(inp); 1589 switch (optname) { 1590 case SCTP_DISABLE_FRAGMENTS: 1591 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1592 break; 1593 case SCTP_I_WANT_MAPPED_V4_ADDR: 1594 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1595 break; 1596 case SCTP_AUTO_ASCONF: 1597 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1598 /* only valid for bound all sockets */ 1599 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1600 } else { 1601 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1602 error = EINVAL; 1603 goto flags_out; 1604 } 1605 break; 1606 case SCTP_EXPLICIT_EOR: 1607 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1608 break; 1609 case SCTP_NODELAY: 1610 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1611 break; 1612 case SCTP_USE_EXT_RCVINFO: 1613 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1614 break; 1615 case SCTP_AUTOCLOSE: 1616 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1617 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1618 else 1619 val = 0; 1620 break; 1621 1622 default: 1623 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1624 error = ENOPROTOOPT; 1625 } /* end switch (sopt->sopt_name) */ 1626 if (optname != SCTP_AUTOCLOSE) { 1627 /* make it an "on/off" value */ 1628 val = (val != 0); 1629 } 1630 if (*optsize < sizeof(val)) { 1631 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1632 error = EINVAL; 1633 } 1634 flags_out: 1635 SCTP_INP_RUNLOCK(inp); 1636 if (error == 0) { 1637 /* return the option value */ 1638 *(int *)optval = val; 1639 *optsize = sizeof(val); 1640 } 1641 break; 1642 case SCTP_GET_PACKET_LOG: 1643 { 1644 #ifdef SCTP_PACKET_LOGGING 1645 uint8_t *target; 1646 int ret; 1647 1648 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); 1649 ret = sctp_copy_out_packet_log(target, (int)*optsize); 1650 *optsize = ret; 1651 #else 1652 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1653 error = EOPNOTSUPP; 1654 #endif 1655 break; 1656 } 1657 case SCTP_REUSE_PORT: 1658 { 1659 uint32_t *value; 1660 1661 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 1662 /* Can't do this for a 1-m socket */ 1663 error = EINVAL; 1664 break; 1665 } 1666 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1667 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 1668 *optsize = sizeof(uint32_t); 1669 } 
1670 break; 1671 case SCTP_PARTIAL_DELIVERY_POINT: 1672 { 1673 uint32_t *value; 1674 1675 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1676 *value = inp->partial_delivery_point; 1677 *optsize = sizeof(uint32_t); 1678 } 1679 break; 1680 case SCTP_FRAGMENT_INTERLEAVE: 1681 { 1682 uint32_t *value; 1683 1684 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1685 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { 1686 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { 1687 *value = SCTP_FRAG_LEVEL_2; 1688 } else { 1689 *value = SCTP_FRAG_LEVEL_1; 1690 } 1691 } else { 1692 *value = SCTP_FRAG_LEVEL_0; 1693 } 1694 *optsize = sizeof(uint32_t); 1695 } 1696 break; 1697 case SCTP_CMT_ON_OFF: 1698 { 1699 struct sctp_assoc_value *av; 1700 1701 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1702 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 1703 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1704 if (stcb) { 1705 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 1706 SCTP_TCB_UNLOCK(stcb); 1707 1708 } else { 1709 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1710 error = ENOTCONN; 1711 } 1712 } else { 1713 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1714 error = ENOPROTOOPT; 1715 } 1716 *optsize = sizeof(*av); 1717 } 1718 break; 1719 /* EY - set socket option for nr_sacks */ 1720 case SCTP_NR_SACK_ON_OFF: 1721 { 1722 struct sctp_assoc_value *av; 1723 1724 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1725 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) { 1726 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1727 if (stcb) { 1728 av->assoc_value = stcb->asoc.sctp_nr_sack_on_off; 1729 SCTP_TCB_UNLOCK(stcb); 1730 1731 } else { 1732 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1733 error = ENOTCONN; 1734 } 1735 } else { 1736 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1737 error = ENOPROTOOPT; 1738 } 1739 *optsize = sizeof(*av); 1740 } 1741 break; 1742 /* JRS - Get socket option for pluggable congestion control */ 1743 case SCTP_PLUGGABLE_CC: 1744 { 1745 struct sctp_assoc_value *av; 1746 1747 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1748 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1749 if (stcb) { 1750 av->assoc_value = stcb->asoc.congestion_control_module; 1751 SCTP_TCB_UNLOCK(stcb); 1752 } else { 1753 av->assoc_value = inp->sctp_ep.sctp_default_cc_module; 1754 } 1755 *optsize = sizeof(*av); 1756 } 1757 break; 1758 case SCTP_GET_ADDR_LEN: 1759 { 1760 struct sctp_assoc_value *av; 1761 1762 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1763 error = EINVAL; 1764 #ifdef INET 1765 if (av->assoc_value == AF_INET) { 1766 av->assoc_value = sizeof(struct sockaddr_in); 1767 error = 0; 1768 } 1769 #endif 1770 #ifdef INET6 1771 if (av->assoc_value == AF_INET6) { 1772 av->assoc_value = sizeof(struct sockaddr_in6); 1773 error = 0; 1774 } 1775 #endif 1776 if (error) { 1777 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1778 } 1779 *optsize = sizeof(*av); 1780 } 1781 break; 1782 case SCTP_GET_ASSOC_NUMBER: 1783 { 1784 uint32_t *value, cnt; 1785 1786 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1787 cnt = 0; 1788 SCTP_INP_RLOCK(inp); 1789 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1790 cnt++; 1791 } 1792 SCTP_INP_RUNLOCK(inp); 1793 *value = cnt; 1794 *optsize = sizeof(uint32_t); 1795 } 1796 break; 1797 1798 case SCTP_GET_ASSOC_ID_LIST: 1799 { 1800 struct sctp_assoc_ids 
*ids; 1801 unsigned int at, limit; 1802 1803 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 1804 at = 0; 1805 limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t); 1806 SCTP_INP_RLOCK(inp); 1807 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1808 if (at < limit) { 1809 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); 1810 } else { 1811 error = EINVAL; 1812 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1813 break; 1814 } 1815 } 1816 SCTP_INP_RUNLOCK(inp); 1817 ids->gaids_number_of_ids = at; 1818 *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t)); 1819 } 1820 break; 1821 case SCTP_CONTEXT: 1822 { 1823 struct sctp_assoc_value *av; 1824 1825 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1826 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1827 1828 if (stcb) { 1829 av->assoc_value = stcb->asoc.context; 1830 SCTP_TCB_UNLOCK(stcb); 1831 } else { 1832 SCTP_INP_RLOCK(inp); 1833 av->assoc_value = inp->sctp_context; 1834 SCTP_INP_RUNLOCK(inp); 1835 } 1836 *optsize = sizeof(*av); 1837 } 1838 break; 1839 case SCTP_VRF_ID: 1840 { 1841 uint32_t *default_vrfid; 1842 1843 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize); 1844 *default_vrfid = inp->def_vrf_id; 1845 break; 1846 } 1847 case SCTP_GET_ASOC_VRF: 1848 { 1849 struct sctp_assoc_value *id; 1850 1851 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); 1852 SCTP_FIND_STCB(inp, stcb, id->assoc_id); 1853 if (stcb == NULL) { 1854 error = EINVAL; 1855 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1856 break; 1857 } 1858 id->assoc_value = stcb->asoc.vrf_id; 1859 break; 1860 } 1861 case SCTP_GET_VRF_IDS: 1862 { 1863 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1864 error = EOPNOTSUPP; 1865 break; 1866 } 1867 case SCTP_GET_NONCE_VALUES: 1868 { 1869 struct sctp_get_nonce_values *gnv; 1870 1871 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 1872 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 1873 1874 if (stcb) { 1875 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 1876 gnv->gn_local_tag = stcb->asoc.my_vtag; 1877 SCTP_TCB_UNLOCK(stcb); 1878 } else { 1879 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1880 error = ENOTCONN; 1881 } 1882 *optsize = sizeof(*gnv); 1883 } 1884 break; 1885 case SCTP_DELAYED_SACK: 1886 { 1887 struct sctp_sack_info *sack; 1888 1889 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); 1890 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 1891 if (stcb) { 1892 sack->sack_delay = stcb->asoc.delayed_ack; 1893 sack->sack_freq = stcb->asoc.sack_freq; 1894 SCTP_TCB_UNLOCK(stcb); 1895 } else { 1896 SCTP_INP_RLOCK(inp); 1897 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1898 sack->sack_freq = inp->sctp_ep.sctp_sack_freq; 1899 SCTP_INP_RUNLOCK(inp); 1900 } 1901 *optsize = sizeof(*sack); 1902 } 1903 break; 1904 1905 case SCTP_GET_SNDBUF_USE: 1906 { 1907 struct sctp_sockstat *ss; 1908 1909 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 1910 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 1911 1912 if (stcb) { 1913 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 1914 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 1915 stcb->asoc.size_on_all_streams); 1916 SCTP_TCB_UNLOCK(stcb); 1917 } else { 1918 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1919 error = ENOTCONN; 1920 } 1921 *optsize = sizeof(struct sctp_sockstat); 1922 } 1923 
break; 1924 case SCTP_MAX_BURST: 1925 { 1926 uint8_t *value; 1927 1928 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); 1929 1930 SCTP_INP_RLOCK(inp); 1931 *value = inp->sctp_ep.max_burst; 1932 SCTP_INP_RUNLOCK(inp); 1933 *optsize = sizeof(uint8_t); 1934 } 1935 break; 1936 case SCTP_MAXSEG: 1937 { 1938 struct sctp_assoc_value *av; 1939 int ovh; 1940 1941 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1942 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1943 1944 if (stcb) { 1945 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 1946 SCTP_TCB_UNLOCK(stcb); 1947 } else { 1948 SCTP_INP_RLOCK(inp); 1949 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1950 ovh = SCTP_MED_OVERHEAD; 1951 } else { 1952 ovh = SCTP_MED_V4_OVERHEAD; 1953 } 1954 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) 1955 av->assoc_value = 0; 1956 else 1957 av->assoc_value = inp->sctp_frag_point - ovh; 1958 SCTP_INP_RUNLOCK(inp); 1959 } 1960 *optsize = sizeof(struct sctp_assoc_value); 1961 } 1962 break; 1963 case SCTP_GET_STAT_LOG: 1964 error = sctp_fill_stat_log(optval, optsize); 1965 break; 1966 case SCTP_EVENTS: 1967 { 1968 struct sctp_event_subscribe *events; 1969 1970 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 1971 memset(events, 0, sizeof(*events)); 1972 SCTP_INP_RLOCK(inp); 1973 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 1974 events->sctp_data_io_event = 1; 1975 1976 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 1977 events->sctp_association_event = 1; 1978 1979 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 1980 events->sctp_address_event = 1; 1981 1982 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 1983 events->sctp_send_failure_event = 1; 1984 1985 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 1986 events->sctp_peer_error_event = 1; 1987 1988 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 1989 events->sctp_shutdown_event = 1; 1990 1991 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 1992 events->sctp_partial_delivery_event = 1; 1993 1994 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 1995 events->sctp_adaptation_layer_event = 1; 1996 1997 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 1998 events->sctp_authentication_event = 1; 1999 2000 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT)) 2001 events->sctp_sender_dry_event = 1; 2002 2003 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2004 events->sctp_stream_reset_events = 1; 2005 SCTP_INP_RUNLOCK(inp); 2006 *optsize = sizeof(struct sctp_event_subscribe); 2007 } 2008 break; 2009 2010 case SCTP_ADAPTATION_LAYER: 2011 { 2012 uint32_t *value; 2013 2014 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2015 2016 SCTP_INP_RLOCK(inp); 2017 *value = inp->sctp_ep.adaptation_layer_indicator; 2018 SCTP_INP_RUNLOCK(inp); 2019 *optsize = sizeof(uint32_t); 2020 } 2021 break; 2022 case SCTP_SET_INITIAL_DBG_SEQ: 2023 { 2024 uint32_t *value; 2025 2026 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2027 SCTP_INP_RLOCK(inp); 2028 *value = inp->sctp_ep.initial_sequence_debug; 2029 SCTP_INP_RUNLOCK(inp); 2030 *optsize = sizeof(uint32_t); 2031 } 2032 break; 2033 case SCTP_GET_LOCAL_ADDR_SIZE: 2034 { 2035 uint32_t *value; 2036 2037 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2038 SCTP_INP_RLOCK(inp); 2039 *value = sctp_count_max_addresses(inp); 2040 SCTP_INP_RUNLOCK(inp); 2041 *optsize = sizeof(uint32_t); 2042 } 2043 break; 2044 case SCTP_GET_REMOTE_ADDR_SIZE: 2045 { 2046 
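/*
 * Illustrative userland sketch, kept under "#if 0": the SCTP_EVENTS getter
 * above reports the per-endpoint notification subscriptions as a struct
 * sctp_event_subscribe, and the SCTP_EVENTS setter later in this file takes
 * the same structure.  A typical read-modify-write from an application looks
 * like the following; the socket setup and the particular events chosen are
 * assumptions for the example.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

static int
enable_basic_events(int fd)
{
	struct sctp_event_subscribe ev;
	socklen_t len = sizeof(ev);

	/* Fetch the current subscription flags... */
	if (getsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, &len) < 0)
		return (-1);
	/* ...turn on the notifications we care about... */
	ev.sctp_data_io_event = 1;	/* per-message sctp_sndrcvinfo */
	ev.sctp_association_event = 1;	/* association up/down changes */
	ev.sctp_shutdown_event = 1;	/* peer initiated shutdown */
	/* ...and write the whole structure back. */
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)));
}
#endif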
uint32_t *value; 2047 size_t size; 2048 struct sctp_nets *net; 2049 2050 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2051 /* FIXME MT: change to sctp_assoc_value? */ 2052 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 2053 2054 if (stcb) { 2055 size = 0; 2056 /* Count the sizes */ 2057 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2058 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2059 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2060 size += sizeof(struct sockaddr_in6); 2061 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2062 size += sizeof(struct sockaddr_in); 2063 } else { 2064 /* huh */ 2065 break; 2066 } 2067 } 2068 SCTP_TCB_UNLOCK(stcb); 2069 *value = (uint32_t) size; 2070 } else { 2071 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2072 error = ENOTCONN; 2073 } 2074 *optsize = sizeof(uint32_t); 2075 } 2076 break; 2077 case SCTP_GET_PEER_ADDRESSES: 2078 /* 2079 * Get the address information, an array is passed in to 2080 * fill up we pack it. 2081 */ 2082 { 2083 size_t cpsz, left; 2084 struct sockaddr_storage *sas; 2085 struct sctp_nets *net; 2086 struct sctp_getaddresses *saddr; 2087 2088 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2089 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2090 2091 if (stcb) { 2092 left = (*optsize) - sizeof(struct sctp_getaddresses); 2093 *optsize = sizeof(struct sctp_getaddresses); 2094 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2095 2096 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2097 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2098 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2099 cpsz = sizeof(struct sockaddr_in6); 2100 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2101 cpsz = sizeof(struct sockaddr_in); 2102 } else { 2103 /* huh */ 2104 break; 2105 } 2106 if (left < cpsz) { 2107 /* not enough room. 
*/ 2108 break; 2109 } 2110 #ifdef INET6 2111 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 2112 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 2113 /* Must map the address */ 2114 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 2115 (struct sockaddr_in6 *)sas); 2116 } else { 2117 #endif 2118 memcpy(sas, &net->ro._l_addr, cpsz); 2119 #ifdef INET6 2120 } 2121 #endif 2122 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 2123 2124 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 2125 left -= cpsz; 2126 *optsize += cpsz; 2127 } 2128 SCTP_TCB_UNLOCK(stcb); 2129 } else { 2130 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2131 error = ENOENT; 2132 } 2133 } 2134 break; 2135 case SCTP_GET_LOCAL_ADDRESSES: 2136 { 2137 size_t limit, actual; 2138 struct sockaddr_storage *sas; 2139 struct sctp_getaddresses *saddr; 2140 2141 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2142 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2143 2144 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2145 limit = *optsize - sizeof(sctp_assoc_t); 2146 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2147 if (stcb) { 2148 SCTP_TCB_UNLOCK(stcb); 2149 } 2150 *optsize = sizeof(struct sockaddr_storage) + actual; 2151 } 2152 break; 2153 case SCTP_PEER_ADDR_PARAMS: 2154 { 2155 struct sctp_paddrparams *paddrp; 2156 struct sctp_nets *net; 2157 2158 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2159 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2160 2161 net = NULL; 2162 if (stcb) { 2163 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2164 } else { 2165 /* 2166 * We increment here since 2167 * sctp_findassociation_ep_addr() wil do a 2168 * decrement if it finds the stcb as long as 2169 * the locked tcb (last argument) is NOT a 2170 * TCB.. aka NULL. 
2171 */ 2172 SCTP_INP_INCR_REF(inp); 2173 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2174 if (stcb == NULL) { 2175 SCTP_INP_DECR_REF(inp); 2176 } 2177 } 2178 if (stcb && (net == NULL)) { 2179 struct sockaddr *sa; 2180 2181 sa = (struct sockaddr *)&paddrp->spp_address; 2182 if (sa->sa_family == AF_INET) { 2183 struct sockaddr_in *sin; 2184 2185 sin = (struct sockaddr_in *)sa; 2186 if (sin->sin_addr.s_addr) { 2187 error = EINVAL; 2188 SCTP_TCB_UNLOCK(stcb); 2189 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2190 break; 2191 } 2192 } else if (sa->sa_family == AF_INET6) { 2193 struct sockaddr_in6 *sin6; 2194 2195 sin6 = (struct sockaddr_in6 *)sa; 2196 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2197 error = EINVAL; 2198 SCTP_TCB_UNLOCK(stcb); 2199 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2200 break; 2201 } 2202 } else { 2203 error = EAFNOSUPPORT; 2204 SCTP_TCB_UNLOCK(stcb); 2205 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2206 break; 2207 } 2208 } 2209 if (stcb) { 2210 /* Applys to the specific association */ 2211 paddrp->spp_flags = 0; 2212 if (net) { 2213 int ovh; 2214 2215 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2216 ovh = SCTP_MED_OVERHEAD; 2217 } else { 2218 ovh = SCTP_MED_V4_OVERHEAD; 2219 } 2220 2221 2222 paddrp->spp_pathmaxrxt = net->failure_threshold; 2223 paddrp->spp_pathmtu = net->mtu - ovh; 2224 /* get flags for HB */ 2225 if (net->dest_state & SCTP_ADDR_NOHB) 2226 paddrp->spp_flags |= SPP_HB_DISABLE; 2227 else 2228 paddrp->spp_flags |= SPP_HB_ENABLE; 2229 /* get flags for PMTU */ 2230 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2231 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2232 } else { 2233 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2234 } 2235 #ifdef INET 2236 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2237 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2238 paddrp->spp_flags |= SPP_IPV4_TOS; 2239 } 2240 #endif 2241 #ifdef INET6 2242 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2243 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2244 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2245 } 2246 #endif 2247 } else { 2248 /* 2249 * No destination so return default 2250 * value 2251 */ 2252 int cnt = 0; 2253 2254 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2255 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2256 #ifdef INET 2257 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2258 paddrp->spp_flags |= SPP_IPV4_TOS; 2259 #endif 2260 #ifdef INET6 2261 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2262 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2263 #endif 2264 /* default settings should be these */ 2265 if (stcb->asoc.hb_is_disabled == 0) { 2266 paddrp->spp_flags |= SPP_HB_ENABLE; 2267 } else { 2268 paddrp->spp_flags |= SPP_HB_DISABLE; 2269 } 2270 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2271 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2272 cnt++; 2273 } 2274 } 2275 if (cnt) { 2276 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2277 } 2278 } 2279 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2280 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2281 SCTP_TCB_UNLOCK(stcb); 2282 } else { 2283 /* Use endpoint defaults */ 2284 SCTP_INP_RLOCK(inp); 2285 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2286 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2287 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2288 /* get 
inp's default */ 2289 #ifdef INET 2290 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2291 paddrp->spp_flags |= SPP_IPV4_TOS; 2292 #endif 2293 #ifdef INET6 2294 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2295 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2296 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2297 } 2298 #endif 2299 /* can't return this */ 2300 paddrp->spp_pathmtu = 0; 2301 2302 /* default behavior, no stcb */ 2303 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2304 2305 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2306 paddrp->spp_flags |= SPP_HB_ENABLE; 2307 } else { 2308 paddrp->spp_flags |= SPP_HB_DISABLE; 2309 } 2310 SCTP_INP_RUNLOCK(inp); 2311 } 2312 *optsize = sizeof(struct sctp_paddrparams); 2313 } 2314 break; 2315 case SCTP_GET_PEER_ADDR_INFO: 2316 { 2317 struct sctp_paddrinfo *paddri; 2318 struct sctp_nets *net; 2319 2320 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2321 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2322 2323 net = NULL; 2324 if (stcb) { 2325 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2326 } else { 2327 /* 2328 * We increment here since 2329 * sctp_findassociation_ep_addr() wil do a 2330 * decrement if it finds the stcb as long as 2331 * the locked tcb (last argument) is NOT a 2332 * TCB.. aka NULL. 2333 */ 2334 SCTP_INP_INCR_REF(inp); 2335 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2336 if (stcb == NULL) { 2337 SCTP_INP_DECR_REF(inp); 2338 } 2339 } 2340 2341 if ((stcb) && (net)) { 2342 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2343 paddri->spinfo_cwnd = net->cwnd; 2344 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2345 paddri->spinfo_rto = net->RTO; 2346 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2347 SCTP_TCB_UNLOCK(stcb); 2348 } else { 2349 if (stcb) { 2350 SCTP_TCB_UNLOCK(stcb); 2351 } 2352 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2353 error = ENOENT; 2354 } 2355 *optsize = sizeof(struct sctp_paddrinfo); 2356 } 2357 break; 2358 case SCTP_PCB_STATUS: 2359 { 2360 struct sctp_pcbinfo *spcb; 2361 2362 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2363 sctp_fill_pcbinfo(spcb); 2364 *optsize = sizeof(struct sctp_pcbinfo); 2365 } 2366 break; 2367 2368 case SCTP_STATUS: 2369 { 2370 struct sctp_nets *net; 2371 struct sctp_status *sstat; 2372 2373 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2374 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2375 2376 if (stcb == NULL) { 2377 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2378 error = EINVAL; 2379 break; 2380 } 2381 /* 2382 * I think passing the state is fine since 2383 * sctp_constants.h will be available to the user 2384 * land. 2385 */ 2386 sstat->sstat_state = stcb->asoc.state; 2387 sstat->sstat_assoc_id = sctp_get_associd(stcb); 2388 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2389 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2390 /* 2391 * We can't include chunks that have been passed to 2392 * the socket layer. Only things in queue. 
2393 */ 2394 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2395 stcb->asoc.cnt_on_all_streams); 2396 2397 2398 sstat->sstat_instrms = stcb->asoc.streamincnt; 2399 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2400 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2401 memcpy(&sstat->sstat_primary.spinfo_address, 2402 &stcb->asoc.primary_destination->ro._l_addr, 2403 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2404 net = stcb->asoc.primary_destination; 2405 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2406 /* 2407 * Again the user can get info from sctp_constants.h 2408 * for what the state of the network is. 2409 */ 2410 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2411 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2412 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2413 sstat->sstat_primary.spinfo_rto = net->RTO; 2414 sstat->sstat_primary.spinfo_mtu = net->mtu; 2415 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2416 SCTP_TCB_UNLOCK(stcb); 2417 *optsize = sizeof(*sstat); 2418 } 2419 break; 2420 case SCTP_RTOINFO: 2421 { 2422 struct sctp_rtoinfo *srto; 2423 2424 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2425 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2426 2427 if (stcb) { 2428 srto->srto_initial = stcb->asoc.initial_rto; 2429 srto->srto_max = stcb->asoc.maxrto; 2430 srto->srto_min = stcb->asoc.minrto; 2431 SCTP_TCB_UNLOCK(stcb); 2432 } else { 2433 SCTP_INP_RLOCK(inp); 2434 srto->srto_initial = inp->sctp_ep.initial_rto; 2435 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2436 srto->srto_min = inp->sctp_ep.sctp_minrto; 2437 SCTP_INP_RUNLOCK(inp); 2438 } 2439 *optsize = sizeof(*srto); 2440 } 2441 break; 2442 case SCTP_ASSOCINFO: 2443 { 2444 struct sctp_assocparams *sasoc; 2445 uint32_t oldval; 2446 2447 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2448 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2449 2450 if (stcb) { 2451 oldval = sasoc->sasoc_cookie_life; 2452 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); 2453 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2454 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2455 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2456 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2457 SCTP_TCB_UNLOCK(stcb); 2458 } else { 2459 SCTP_INP_RLOCK(inp); 2460 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2461 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2462 sasoc->sasoc_number_peer_destinations = 0; 2463 sasoc->sasoc_peer_rwnd = 0; 2464 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2465 SCTP_INP_RUNLOCK(inp); 2466 } 2467 *optsize = sizeof(*sasoc); 2468 } 2469 break; 2470 case SCTP_DEFAULT_SEND_PARAM: 2471 { 2472 struct sctp_sndrcvinfo *s_info; 2473 2474 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2475 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2476 2477 if (stcb) { 2478 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2479 SCTP_TCB_UNLOCK(stcb); 2480 } else { 2481 SCTP_INP_RLOCK(inp); 2482 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2483 SCTP_INP_RUNLOCK(inp); 2484 } 2485 *optsize = sizeof(*s_info); 2486 } 2487 break; 2488 case SCTP_INITMSG: 2489 { 2490 struct sctp_initmsg *sinit; 2491 2492 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2493 SCTP_INP_RLOCK(inp); 2494 
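/*
 * Illustrative userland sketch, kept under "#if 0": the SCTP_INITMSG getter
 * here (its field copies follow) reports the endpoint defaults used when new
 * associations are created: requested outbound streams, advertised maximum
 * inbound streams, INIT retransmission limit and maximum INIT timeout.  A
 * matching setsockopt() is the usual way to change them before connecting;
 * that setter is not shown in this excerpt, and the concrete values below
 * are arbitrary examples.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <stdint.h>
#include <string.h>

static int
request_streams(int fd, uint16_t ostreams, uint16_t istreams)
{
	struct sctp_initmsg init;

	memset(&init, 0, sizeof(init));
	init.sinit_num_ostreams = ostreams;	/* outbound streams we ask for */
	init.sinit_max_instreams = istreams;	/* inbound streams we accept */
	init.sinit_max_attempts = 4;		/* INIT retransmissions */
	init.sinit_max_init_timeo = 8000;	/* cap on the INIT RTO, in ms */
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &init, sizeof(init)));
}
#endif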
sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2495 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2496 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2497 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2498 SCTP_INP_RUNLOCK(inp); 2499 *optsize = sizeof(*sinit); 2500 } 2501 break; 2502 case SCTP_PRIMARY_ADDR: 2503 /* we allow a "get" operation on this */ 2504 { 2505 struct sctp_setprim *ssp; 2506 2507 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2508 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2509 2510 if (stcb) { 2511 /* simply copy out the sockaddr_storage... */ 2512 int len; 2513 2514 len = *optsize; 2515 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2516 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2517 2518 memcpy(&ssp->ssp_addr, 2519 &stcb->asoc.primary_destination->ro._l_addr, 2520 len); 2521 SCTP_TCB_UNLOCK(stcb); 2522 } else { 2523 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2524 error = EINVAL; 2525 } 2526 *optsize = sizeof(*ssp); 2527 } 2528 break; 2529 2530 case SCTP_HMAC_IDENT: 2531 { 2532 struct sctp_hmacalgo *shmac; 2533 sctp_hmaclist_t *hmaclist; 2534 uint32_t size; 2535 int i; 2536 2537 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2538 2539 SCTP_INP_RLOCK(inp); 2540 hmaclist = inp->sctp_ep.local_hmacs; 2541 if (hmaclist == NULL) { 2542 /* no HMACs to return */ 2543 *optsize = sizeof(*shmac); 2544 SCTP_INP_RUNLOCK(inp); 2545 break; 2546 } 2547 /* is there room for all of the hmac ids? */ 2548 size = sizeof(*shmac) + (hmaclist->num_algo * 2549 sizeof(shmac->shmac_idents[0])); 2550 if ((size_t)(*optsize) < size) { 2551 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2552 error = EINVAL; 2553 SCTP_INP_RUNLOCK(inp); 2554 break; 2555 } 2556 /* copy in the list */ 2557 shmac->shmac_number_of_idents = hmaclist->num_algo; 2558 for (i = 0; i < hmaclist->num_algo; i++) { 2559 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2560 } 2561 SCTP_INP_RUNLOCK(inp); 2562 *optsize = size; 2563 break; 2564 } 2565 case SCTP_AUTH_ACTIVE_KEY: 2566 { 2567 struct sctp_authkeyid *scact; 2568 2569 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2570 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2571 2572 if (stcb) { 2573 /* get the active key on the assoc */ 2574 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; 2575 SCTP_TCB_UNLOCK(stcb); 2576 } else { 2577 /* get the endpoint active key */ 2578 SCTP_INP_RLOCK(inp); 2579 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2580 SCTP_INP_RUNLOCK(inp); 2581 } 2582 *optsize = sizeof(*scact); 2583 break; 2584 } 2585 case SCTP_LOCAL_AUTH_CHUNKS: 2586 { 2587 struct sctp_authchunks *sac; 2588 sctp_auth_chklist_t *chklist = NULL; 2589 size_t size = 0; 2590 2591 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2592 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2593 2594 if (stcb) { 2595 /* get off the assoc */ 2596 chklist = stcb->asoc.local_auth_chunks; 2597 /* is there enough space? 
*/ 2598 size = sctp_auth_get_chklist_size(chklist); 2599 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2600 error = EINVAL; 2601 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2602 } else { 2603 /* copy in the chunks */ 2604 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2605 } 2606 SCTP_TCB_UNLOCK(stcb); 2607 } else { 2608 /* get off the endpoint */ 2609 SCTP_INP_RLOCK(inp); 2610 chklist = inp->sctp_ep.local_auth_chunks; 2611 /* is there enough space? */ 2612 size = sctp_auth_get_chklist_size(chklist); 2613 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2614 error = EINVAL; 2615 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2616 } else { 2617 /* copy in the chunks */ 2618 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2619 } 2620 SCTP_INP_RUNLOCK(inp); 2621 } 2622 *optsize = sizeof(struct sctp_authchunks) + size; 2623 break; 2624 } 2625 case SCTP_PEER_AUTH_CHUNKS: 2626 { 2627 struct sctp_authchunks *sac; 2628 sctp_auth_chklist_t *chklist = NULL; 2629 size_t size = 0; 2630 2631 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2632 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2633 2634 if (stcb) { 2635 /* get off the assoc */ 2636 chklist = stcb->asoc.peer_auth_chunks; 2637 /* is there enough space? */ 2638 size = sctp_auth_get_chklist_size(chklist); 2639 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2640 error = EINVAL; 2641 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2642 } else { 2643 /* copy in the chunks */ 2644 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2645 } 2646 SCTP_TCB_UNLOCK(stcb); 2647 } else { 2648 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2649 error = ENOENT; 2650 } 2651 *optsize = sizeof(struct sctp_authchunks) + size; 2652 break; 2653 } 2654 2655 2656 default: 2657 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2658 error = ENOPROTOOPT; 2659 *optsize = 0; 2660 break; 2661 } /* end switch (sopt->sopt_name) */ 2662 return (error); 2663 } 2664 2665 static int 2666 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2667 void *p) 2668 { 2669 int error, set_opt; 2670 uint32_t *mopt; 2671 struct sctp_tcb *stcb = NULL; 2672 struct sctp_inpcb *inp = NULL; 2673 uint32_t vrf_id; 2674 2675 if (optval == NULL) { 2676 SCTP_PRINTF("optval is NULL\n"); 2677 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2678 return (EINVAL); 2679 } 2680 inp = (struct sctp_inpcb *)so->so_pcb; 2681 if (inp == 0) { 2682 SCTP_PRINTF("inp is NULL?\n"); 2683 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2684 return EINVAL; 2685 } 2686 vrf_id = inp->def_vrf_id; 2687 2688 error = 0; 2689 switch (optname) { 2690 case SCTP_NODELAY: 2691 case SCTP_AUTOCLOSE: 2692 case SCTP_AUTO_ASCONF: 2693 case SCTP_EXPLICIT_EOR: 2694 case SCTP_DISABLE_FRAGMENTS: 2695 case SCTP_USE_EXT_RCVINFO: 2696 case SCTP_I_WANT_MAPPED_V4_ADDR: 2697 /* copy in the option value */ 2698 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2699 set_opt = 0; 2700 if (error) 2701 break; 2702 switch (optname) { 2703 case SCTP_DISABLE_FRAGMENTS: 2704 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2705 break; 2706 case SCTP_AUTO_ASCONF: 2707 /* 2708 * NOTE: we don't really support this flag 2709 */ 2710 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2711 /* only valid for bound all sockets */ 2712 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2713 } else { 2714 
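/*
 * Illustrative userland sketch, kept under "#if 0": the flag options grouped
 * in this arm (SCTP_NODELAY, SCTP_AUTOCLOSE, SCTP_EXPLICIT_EOR,
 * SCTP_DISABLE_FRAGMENTS, SCTP_USE_EXT_RCVINFO, SCTP_I_WANT_MAPPED_V4_ADDR,
 * SCTP_AUTO_ASCONF) all take a uint32_t: any non-zero value turns the
 * corresponding endpoint feature on, zero turns it off, and for
 * SCTP_AUTOCLOSE the value doubles as the idle time in seconds (see the
 * SEC_TO_TICKS() conversion below).  The wrapper below is an assumed
 * convenience helper, not an interface of this file.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdint.h>

static int
sctp_set_flag_option(int fd, int optname, uint32_t value)
{
	return (setsockopt(fd, IPPROTO_SCTP, optname, &value, sizeof(value)));
}

/*
 * Example use:
 *	sctp_set_flag_option(fd, SCTP_NODELAY, 1);	(send without bundling delay)
 *	sctp_set_flag_option(fd, SCTP_AUTOCLOSE, 30);	(close idle assocs after 30s;
 *		rejected with EINVAL on one-to-one style sockets, as the
 *		SCTP_AUTOCLOSE arm below enforces)
 */
#endif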
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2715 return (EINVAL); 2716 } 2717 break; 2718 case SCTP_EXPLICIT_EOR: 2719 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2720 break; 2721 case SCTP_USE_EXT_RCVINFO: 2722 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2723 break; 2724 case SCTP_I_WANT_MAPPED_V4_ADDR: 2725 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2726 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2727 } else { 2728 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2729 return (EINVAL); 2730 } 2731 break; 2732 case SCTP_NODELAY: 2733 set_opt = SCTP_PCB_FLAGS_NODELAY; 2734 break; 2735 case SCTP_AUTOCLOSE: 2736 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2737 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2738 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2739 return (EINVAL); 2740 } 2741 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2742 /* 2743 * The value is in ticks. Note this does not effect 2744 * old associations, only new ones. 2745 */ 2746 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2747 break; 2748 } 2749 SCTP_INP_WLOCK(inp); 2750 if (*mopt != 0) { 2751 sctp_feature_on(inp, set_opt); 2752 } else { 2753 sctp_feature_off(inp, set_opt); 2754 } 2755 SCTP_INP_WUNLOCK(inp); 2756 break; 2757 case SCTP_REUSE_PORT: 2758 { 2759 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2760 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 2761 /* Can't set it after we are bound */ 2762 error = EINVAL; 2763 break; 2764 } 2765 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2766 /* Can't do this for a 1-m socket */ 2767 error = EINVAL; 2768 break; 2769 } 2770 if (optval) 2771 sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 2772 else 2773 sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE); 2774 } 2775 break; 2776 case SCTP_PARTIAL_DELIVERY_POINT: 2777 { 2778 uint32_t *value; 2779 2780 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2781 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2782 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2783 error = EINVAL; 2784 break; 2785 } 2786 inp->partial_delivery_point = *value; 2787 } 2788 break; 2789 case SCTP_FRAGMENT_INTERLEAVE: 2790 /* not yet until we re-write sctp_recvmsg() */ 2791 { 2792 uint32_t *level; 2793 2794 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); 2795 if (*level == SCTP_FRAG_LEVEL_2) { 2796 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2797 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2798 } else if (*level == SCTP_FRAG_LEVEL_1) { 2799 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2800 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2801 } else if (*level == SCTP_FRAG_LEVEL_0) { 2802 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2803 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2804 2805 } else { 2806 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2807 error = EINVAL; 2808 } 2809 } 2810 break; 2811 case SCTP_CMT_ON_OFF: 2812 { 2813 struct sctp_assoc_value *av; 2814 2815 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2816 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 2817 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2818 if (stcb) { 2819 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; 2820 SCTP_TCB_UNLOCK(stcb); 2821 } else { 2822 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2823 error = ENOTCONN; 2824 } 2825 } else { 2826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2827 error = ENOPROTOOPT; 
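/*
 * Illustrative userland sketch, kept under "#if 0": the SCTP_CMT_ON_OFF arm
 * here and the SCTP_NR_SACK_ON_OFF and SCTP_PLUGGABLE_CC arms that follow
 * all take a struct sctp_assoc_value.  CMT and NR-SACK can only be toggled
 * when the matching kernel sysctl is enabled (ENOPROTOOPT otherwise) and
 * only on an existing association (ENOTCONN otherwise), as the error paths
 * above show; the congestion-control module can also be set as an endpoint
 * default when no association is named.  The sysctl name in the comment is
 * the usual FreeBSD one and is an assumption, not taken from this file.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <string.h>

static int
enable_cmt(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_assoc_value av;

	/* Needs e.g. "sysctl net.inet.sctp.cmt_on_off=1" beforehand. */
	memset(&av, 0, sizeof(av));
	av.assoc_id = assoc_id;		/* must name an existing association */
	av.assoc_value = 1;		/* non-zero: turn CMT on */
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_CMT_ON_OFF, &av, sizeof(av)));
}

static int
use_htcp_cc(int fd)
{
	struct sctp_assoc_value av;

	memset(&av, 0, sizeof(av));
	av.assoc_id = 0;		/* no association: set the endpoint default */
	av.assoc_value = SCTP_CC_HTCP;	/* or SCTP_CC_RFC2581 / SCTP_CC_HSTCP */
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC, &av, sizeof(av)));
}
#endif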
2828 } 2829 } 2830 break; 2831 /* EY nr_sack_on_off socket option */ 2832 case SCTP_NR_SACK_ON_OFF: 2833 { 2834 struct sctp_assoc_value *av; 2835 2836 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2837 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) { 2838 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2839 if (stcb) { 2840 stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value; 2841 SCTP_TCB_UNLOCK(stcb); 2842 } else { 2843 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2844 error = ENOTCONN; 2845 } 2846 } else { 2847 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2848 error = ENOPROTOOPT; 2849 } 2850 } 2851 break; 2852 /* JRS - Set socket option for pluggable congestion control */ 2853 case SCTP_PLUGGABLE_CC: 2854 { 2855 struct sctp_assoc_value *av; 2856 2857 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2858 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2859 if (stcb) { 2860 switch (av->assoc_value) { 2861 /* 2862 * JRS - Standard TCP congestion 2863 * control 2864 */ 2865 case SCTP_CC_RFC2581: 2866 { 2867 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 2868 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2869 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 2870 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 2871 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2872 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2873 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2874 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2875 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2876 SCTP_TCB_UNLOCK(stcb); 2877 break; 2878 } 2879 /* 2880 * JRS - High Speed TCP congestion 2881 * control (Floyd) 2882 */ 2883 case SCTP_CC_HSTCP: 2884 { 2885 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP; 2886 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2887 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack; 2888 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr; 2889 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2890 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2891 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2892 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2893 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2894 SCTP_TCB_UNLOCK(stcb); 2895 break; 2896 } 2897 /* JRS - HTCP congestion control */ 2898 case SCTP_CC_HTCP: 2899 { 2900 stcb->asoc.congestion_control_module = SCTP_CC_HTCP; 2901 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param; 2902 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack; 2903 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr; 2904 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout; 2905 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo; 2906 
stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2907 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2908 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer; 2909 SCTP_TCB_UNLOCK(stcb); 2910 break; 2911 } 2912 /* 2913 * JRS - All other values are 2914 * invalid 2915 */ 2916 default: 2917 { 2918 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2919 error = EINVAL; 2920 SCTP_TCB_UNLOCK(stcb); 2921 break; 2922 } 2923 } 2924 } else { 2925 switch (av->assoc_value) { 2926 case SCTP_CC_RFC2581: 2927 case SCTP_CC_HSTCP: 2928 case SCTP_CC_HTCP: 2929 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 2930 break; 2931 default: 2932 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2933 error = EINVAL; 2934 break; 2935 }; 2936 } 2937 } 2938 break; 2939 case SCTP_CLR_STAT_LOG: 2940 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2941 error = EOPNOTSUPP; 2942 break; 2943 case SCTP_CONTEXT: 2944 { 2945 struct sctp_assoc_value *av; 2946 2947 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2948 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2949 2950 if (stcb) { 2951 stcb->asoc.context = av->assoc_value; 2952 SCTP_TCB_UNLOCK(stcb); 2953 } else { 2954 SCTP_INP_WLOCK(inp); 2955 inp->sctp_context = av->assoc_value; 2956 SCTP_INP_WUNLOCK(inp); 2957 } 2958 } 2959 break; 2960 case SCTP_VRF_ID: 2961 { 2962 uint32_t *default_vrfid; 2963 2964 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 2965 if (*default_vrfid > SCTP_MAX_VRF_ID) { 2966 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2967 error = EINVAL; 2968 break; 2969 } 2970 inp->def_vrf_id = *default_vrfid; 2971 break; 2972 } 2973 case SCTP_DEL_VRF_ID: 2974 { 2975 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2976 error = EOPNOTSUPP; 2977 break; 2978 } 2979 case SCTP_ADD_VRF_ID: 2980 { 2981 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2982 error = EOPNOTSUPP; 2983 break; 2984 } 2985 case SCTP_DELAYED_SACK: 2986 { 2987 struct sctp_sack_info *sack; 2988 2989 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 2990 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 2991 if (sack->sack_delay) { 2992 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 2993 sack->sack_delay = SCTP_MAX_SACK_DELAY; 2994 } 2995 if (stcb) { 2996 if (sack->sack_delay) { 2997 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 2998 sack->sack_delay = TICKS_TO_MSEC(1); 2999 } 3000 stcb->asoc.delayed_ack = sack->sack_delay; 3001 } 3002 if (sack->sack_freq) { 3003 stcb->asoc.sack_freq = sack->sack_freq; 3004 } 3005 SCTP_TCB_UNLOCK(stcb); 3006 } else { 3007 SCTP_INP_WLOCK(inp); 3008 if (sack->sack_delay) { 3009 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3010 sack->sack_delay = TICKS_TO_MSEC(1); 3011 } 3012 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 3013 } 3014 if (sack->sack_freq) { 3015 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 3016 } 3017 SCTP_INP_WUNLOCK(inp); 3018 } 3019 break; 3020 } 3021 case SCTP_AUTH_CHUNK: 3022 { 3023 struct sctp_authchunk *sauth; 3024 3025 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 3026 3027 SCTP_INP_WLOCK(inp); 3028 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 3029 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3030 error = EINVAL; 3031 } 
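/*
 * Illustrative userland sketch, kept under "#if 0", for the SCTP-AUTH
 * options: SCTP_AUTH_CHUNK above marks a chunk type as requiring
 * authentication, SCTP_AUTH_KEY below installs a shared key whose length the
 * kernel derives from the option length (optsize - sizeof(struct
 * sctp_authkey)), and SCTP_AUTH_ACTIVE_KEY selects the key number used for
 * sending.  The key bytes and key number below are placeholders, and
 * treating assoc id 0 as "endpoint wide" follows the no-stcb branches of
 * these handlers.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int
install_auth_key(int fd, uint16_t keynumber, const uint8_t *key, size_t keylen)
{
	struct sctp_authkey *sca;
	struct sctp_authkeyid scact;
	socklen_t optlen;
	int ret;

	/* The key material is carried right after the fixed header. */
	optlen = (socklen_t)(sizeof(*sca) + keylen);
	sca = calloc(1, optlen);
	if (sca == NULL)
		return (-1);
	sca->sca_assoc_id = 0;		/* 0: install on the endpoint */
	sca->sca_keynumber = keynumber;
	memcpy(sca->sca_key, key, keylen);
	ret = setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_KEY, sca, optlen);
	free(sca);
	if (ret != 0)
		return (ret);
	/* Make the freshly installed key the active send key. */
	memset(&scact, 0, sizeof(scact));
	scact.scact_keynumber = keynumber;
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
	    &scact, sizeof(scact)));
}
#endif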
3032 SCTP_INP_WUNLOCK(inp); 3033 break; 3034 } 3035 case SCTP_AUTH_KEY: 3036 { 3037 struct sctp_authkey *sca; 3038 struct sctp_keyhead *shared_keys; 3039 sctp_sharedkey_t *shared_key; 3040 sctp_key_t *key = NULL; 3041 size_t size; 3042 3043 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 3044 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 3045 size = optsize - sizeof(*sca); 3046 3047 if (stcb) { 3048 /* set it on the assoc */ 3049 shared_keys = &stcb->asoc.shared_keys; 3050 /* clear the cached keys for this key id */ 3051 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 3052 /* 3053 * create the new shared key and 3054 * insert/replace it 3055 */ 3056 if (size > 0) { 3057 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3058 if (key == NULL) { 3059 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3060 error = ENOMEM; 3061 SCTP_TCB_UNLOCK(stcb); 3062 break; 3063 } 3064 } 3065 shared_key = sctp_alloc_sharedkey(); 3066 if (shared_key == NULL) { 3067 sctp_free_key(key); 3068 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3069 error = ENOMEM; 3070 SCTP_TCB_UNLOCK(stcb); 3071 break; 3072 } 3073 shared_key->key = key; 3074 shared_key->keyid = sca->sca_keynumber; 3075 error = sctp_insert_sharedkey(shared_keys, shared_key); 3076 SCTP_TCB_UNLOCK(stcb); 3077 } else { 3078 /* set it on the endpoint */ 3079 SCTP_INP_WLOCK(inp); 3080 shared_keys = &inp->sctp_ep.shared_keys; 3081 /* 3082 * clear the cached keys on all assocs for 3083 * this key id 3084 */ 3085 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 3086 /* 3087 * create the new shared key and 3088 * insert/replace it 3089 */ 3090 if (size > 0) { 3091 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3092 if (key == NULL) { 3093 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3094 error = ENOMEM; 3095 SCTP_INP_WUNLOCK(inp); 3096 break; 3097 } 3098 } 3099 shared_key = sctp_alloc_sharedkey(); 3100 if (shared_key == NULL) { 3101 sctp_free_key(key); 3102 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3103 error = ENOMEM; 3104 SCTP_INP_WUNLOCK(inp); 3105 break; 3106 } 3107 shared_key->key = key; 3108 shared_key->keyid = sca->sca_keynumber; 3109 error = sctp_insert_sharedkey(shared_keys, shared_key); 3110 SCTP_INP_WUNLOCK(inp); 3111 } 3112 break; 3113 } 3114 case SCTP_HMAC_IDENT: 3115 { 3116 struct sctp_hmacalgo *shmac; 3117 sctp_hmaclist_t *hmaclist; 3118 uint16_t hmacid; 3119 uint32_t i; 3120 3121 size_t found; 3122 3123 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3124 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) { 3125 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3126 error = EINVAL; 3127 break; 3128 } 3129 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents); 3130 if (hmaclist == NULL) { 3131 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3132 error = ENOMEM; 3133 break; 3134 } 3135 for (i = 0; i < shmac->shmac_number_of_idents; i++) { 3136 hmacid = shmac->shmac_idents[i]; 3137 if (sctp_auth_add_hmacid(hmaclist, hmacid)) { 3138 /* invalid HMACs were found */ ; 3139 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3140 error = EINVAL; 3141 sctp_free_hmaclist(hmaclist); 3142 goto sctp_set_hmac_done; 3143 } 3144 } 3145 found = 0; 3146 for (i = 0; i < hmaclist->num_algo; i++) { 3147 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 3148 /* already in list */ 3149 found = 1; 3150 } 3151 } 3152 if 
(!found) { 3153 sctp_free_hmaclist(hmaclist); 3154 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3155 error = EINVAL; 3156 break; 3157 } 3158 /* set it on the endpoint */ 3159 SCTP_INP_WLOCK(inp); 3160 if (inp->sctp_ep.local_hmacs) 3161 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3162 inp->sctp_ep.local_hmacs = hmaclist; 3163 SCTP_INP_WUNLOCK(inp); 3164 sctp_set_hmac_done: 3165 break; 3166 } 3167 case SCTP_AUTH_ACTIVE_KEY: 3168 { 3169 struct sctp_authkeyid *scact; 3170 3171 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, 3172 optsize); 3173 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3174 3175 /* set the active key on the right place */ 3176 if (stcb) { 3177 /* set the active key on the assoc */ 3178 if (sctp_auth_setactivekey(stcb, 3179 scact->scact_keynumber)) { 3180 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3181 SCTP_FROM_SCTP_USRREQ, 3182 EINVAL); 3183 error = EINVAL; 3184 } 3185 SCTP_TCB_UNLOCK(stcb); 3186 } else { 3187 /* set the active key on the endpoint */ 3188 SCTP_INP_WLOCK(inp); 3189 if (sctp_auth_setactivekey_ep(inp, 3190 scact->scact_keynumber)) { 3191 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3192 SCTP_FROM_SCTP_USRREQ, 3193 EINVAL); 3194 error = EINVAL; 3195 } 3196 SCTP_INP_WUNLOCK(inp); 3197 } 3198 break; 3199 } 3200 case SCTP_AUTH_DELETE_KEY: 3201 { 3202 struct sctp_authkeyid *scdel; 3203 3204 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, 3205 optsize); 3206 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3207 3208 /* delete the key from the right place */ 3209 if (stcb) { 3210 if (sctp_delete_sharedkey(stcb, 3211 scdel->scact_keynumber)) { 3212 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3213 SCTP_FROM_SCTP_USRREQ, 3214 EINVAL); 3215 error = EINVAL; 3216 } 3217 SCTP_TCB_UNLOCK(stcb); 3218 } else { 3219 SCTP_INP_WLOCK(inp); 3220 if (sctp_delete_sharedkey_ep(inp, 3221 scdel->scact_keynumber)) { 3222 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3223 SCTP_FROM_SCTP_USRREQ, 3224 EINVAL); 3225 error = EINVAL; 3226 } 3227 SCTP_INP_WUNLOCK(inp); 3228 } 3229 break; 3230 } 3231 case SCTP_AUTH_DEACTIVATE_KEY: 3232 { 3233 struct sctp_authkeyid *keyid; 3234 3235 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, 3236 optsize); 3237 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); 3238 3239 /* deactivate the key from the right place */ 3240 if (stcb) { 3241 if (sctp_deact_sharedkey(stcb, 3242 keyid->scact_keynumber)) { 3243 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3244 SCTP_FROM_SCTP_USRREQ, 3245 EINVAL); 3246 error = EINVAL; 3247 } 3248 SCTP_TCB_UNLOCK(stcb); 3249 } else { 3250 SCTP_INP_WLOCK(inp); 3251 if (sctp_deact_sharedkey_ep(inp, 3252 keyid->scact_keynumber)) { 3253 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3254 SCTP_FROM_SCTP_USRREQ, 3255 EINVAL); 3256 error = EINVAL; 3257 } 3258 SCTP_INP_WUNLOCK(inp); 3259 } 3260 break; 3261 } 3262 3263 case SCTP_RESET_STREAMS: 3264 { 3265 struct sctp_stream_reset *strrst; 3266 uint8_t send_in = 0, send_tsn = 0, send_out = 0, 3267 addstream = 0; 3268 uint16_t addstrmcnt = 0; 3269 int i; 3270 3271 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3272 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3273 3274 if (stcb == NULL) { 3275 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3276 error = ENOENT; 3277 break; 3278 } 3279 if (stcb->asoc.peer_supports_strreset == 0) { 3280 /* 3281 * Peer does not support it, we return 3282 * protocol not supported since this is true 3283 * for this feature and this peer, not the 3284 * socket request in general. 
3285 */ 3286 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3287 error = EPROTONOSUPPORT; 3288 SCTP_TCB_UNLOCK(stcb); 3289 break; 3290 } 3291 if (stcb->asoc.stream_reset_outstanding) { 3292 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3293 error = EALREADY; 3294 SCTP_TCB_UNLOCK(stcb); 3295 break; 3296 } 3297 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3298 send_in = 1; 3299 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3300 send_out = 1; 3301 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3302 send_in = 1; 3303 send_out = 1; 3304 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3305 send_tsn = 1; 3306 } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) { 3307 if (send_tsn || 3308 send_in || 3309 send_out) { 3310 /* We can't do that and add streams */ 3311 error = EINVAL; 3312 goto skip_stuff; 3313 } 3314 if (stcb->asoc.stream_reset_outstanding) { 3315 error = EBUSY; 3316 goto skip_stuff; 3317 } 3318 addstream = 1; 3319 /* We allocate here */ 3320 addstrmcnt = strrst->strrst_num_streams; 3321 if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) { 3322 /* You can't have more than 64k */ 3323 error = EINVAL; 3324 goto skip_stuff; 3325 } 3326 if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) { 3327 /* Need to allocate more */ 3328 struct sctp_stream_out *oldstream; 3329 struct sctp_stream_queue_pending *sp; 3330 int removed; 3331 3332 oldstream = stcb->asoc.strmout; 3333 /* get some more */ 3334 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, 3335 ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)), 3336 SCTP_M_STRMO); 3337 if (stcb->asoc.strmout == NULL) { 3338 stcb->asoc.strmout = oldstream; 3339 error = ENOMEM; 3340 goto skip_stuff; 3341 } 3342 /* 3343 * Ok now we proceed with copying 3344 * the old out stuff and 3345 * initializing the new stuff. 3346 */ 3347 SCTP_TCB_SEND_LOCK(stcb); 3348 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3349 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3350 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent; 3351 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; 3352 stcb->asoc.strmout[i].stream_no = i; 3353 if (oldstream[i].next_spoke.tqe_next) { 3354 sctp_remove_from_wheel(stcb, &stcb->asoc, &oldstream[i], 1); 3355 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3356 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3357 removed = 1; 3358 } else { 3359 /* not on out wheel */ 3360 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3361 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3362 removed = 0; 3363 } 3364 /* 3365 * now anything on those 3366 * queues? 3367 */ 3368 while (TAILQ_EMPTY(&oldstream[i].outqueue) == 0) { 3369 sp = TAILQ_FIRST(&oldstream[i].outqueue); 3370 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); 3371 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); 3372 } 3373 /* Did we disrupt the wheel? 
*/ 3374 if (removed) { 3375 sctp_insert_on_wheel(stcb, 3376 &stcb->asoc, 3377 &stcb->asoc.strmout[i], 3378 1); 3379 } 3380 /* 3381 * Now move assoc pointers 3382 * too 3383 */ 3384 if (stcb->asoc.last_out_stream == &oldstream[i]) { 3385 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i]; 3386 } 3387 if (stcb->asoc.locked_on_sending == &oldstream[i]) { 3388 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i]; 3389 } 3390 } 3391 /* now the new streams */ 3392 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) { 3393 stcb->asoc.strmout[i].next_sequence_sent = 0x0; 3394 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3395 stcb->asoc.strmout[i].stream_no = i; 3396 stcb->asoc.strmout[i].last_msg_incomplete = 0; 3397 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3398 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3399 } 3400 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt; 3401 SCTP_FREE(oldstream, SCTP_M_STRMO); 3402 } 3403 SCTP_TCB_SEND_UNLOCK(stcb); 3404 goto skip_stuff; 3405 } else { 3406 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3407 error = EINVAL; 3408 SCTP_TCB_UNLOCK(stcb); 3409 break; 3410 } 3411 for (i = 0; i < strrst->strrst_num_streams; i++) { 3412 if ((send_in) && 3413 3414 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3415 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3416 error = EINVAL; 3417 goto get_out; 3418 } 3419 if ((send_out) && 3420 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3421 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3422 error = EINVAL; 3423 goto get_out; 3424 } 3425 } 3426 skip_stuff: 3427 if (error) { 3428 get_out: 3429 SCTP_TCB_UNLOCK(stcb); 3430 break; 3431 } 3432 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3433 strrst->strrst_list, 3434 send_out, (stcb->asoc.str_reset_seq_in - 3), 3435 send_in, send_tsn, addstream, addstrmcnt); 3436 3437 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3438 SCTP_TCB_UNLOCK(stcb); 3439 } 3440 break; 3441 3442 case SCTP_CONNECT_X: 3443 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3444 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3445 error = EINVAL; 3446 break; 3447 } 3448 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3449 break; 3450 3451 case SCTP_CONNECT_X_DELAYED: 3452 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3453 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3454 error = EINVAL; 3455 break; 3456 } 3457 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3458 break; 3459 3460 case SCTP_CONNECT_X_COMPLETE: 3461 { 3462 struct sockaddr *sa; 3463 struct sctp_nets *net; 3464 3465 /* FIXME MT: check correct? */ 3466 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3467 3468 /* find tcb */ 3469 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3470 SCTP_INP_RLOCK(inp); 3471 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3472 if (stcb) { 3473 SCTP_TCB_LOCK(stcb); 3474 net = sctp_findnet(stcb, sa); 3475 } 3476 SCTP_INP_RUNLOCK(inp); 3477 } else { 3478 /* 3479 * We increment here since 3480 * sctp_findassociation_ep_addr() wil do a 3481 * decrement if it finds the stcb as long as 3482 * the locked tcb (last argument) is NOT a 3483 * TCB.. aka NULL. 
3484 */ 3485 SCTP_INP_INCR_REF(inp); 3486 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3487 if (stcb == NULL) { 3488 SCTP_INP_DECR_REF(inp); 3489 } 3490 } 3491 3492 if (stcb == NULL) { 3493 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3494 error = ENOENT; 3495 break; 3496 } 3497 if (stcb->asoc.delayed_connection == 1) { 3498 stcb->asoc.delayed_connection = 0; 3499 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3500 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3501 stcb->asoc.primary_destination, 3502 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3503 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3504 } else { 3505 /* 3506 * already expired or did not use delayed 3507 * connectx 3508 */ 3509 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3510 error = EALREADY; 3511 } 3512 SCTP_TCB_UNLOCK(stcb); 3513 } 3514 break; 3515 case SCTP_MAX_BURST: 3516 { 3517 uint8_t *burst; 3518 3519 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 3520 3521 SCTP_INP_WLOCK(inp); 3522 if (*burst) { 3523 inp->sctp_ep.max_burst = *burst; 3524 } 3525 SCTP_INP_WUNLOCK(inp); 3526 } 3527 break; 3528 case SCTP_MAXSEG: 3529 { 3530 struct sctp_assoc_value *av; 3531 int ovh; 3532 3533 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3534 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3535 3536 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3537 ovh = SCTP_MED_OVERHEAD; 3538 } else { 3539 ovh = SCTP_MED_V4_OVERHEAD; 3540 } 3541 if (stcb) { 3542 if (av->assoc_value) { 3543 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3544 } else { 3545 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3546 } 3547 SCTP_TCB_UNLOCK(stcb); 3548 } else { 3549 SCTP_INP_WLOCK(inp); 3550 /* 3551 * FIXME MT: I think this is not in tune 3552 * with the API ID 3553 */ 3554 if (av->assoc_value) { 3555 inp->sctp_frag_point = (av->assoc_value + ovh); 3556 } else { 3557 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3558 } 3559 SCTP_INP_WUNLOCK(inp); 3560 } 3561 } 3562 break; 3563 case SCTP_EVENTS: 3564 { 3565 struct sctp_event_subscribe *events; 3566 3567 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3568 3569 SCTP_INP_WLOCK(inp); 3570 if (events->sctp_data_io_event) { 3571 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3572 } else { 3573 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3574 } 3575 3576 if (events->sctp_association_event) { 3577 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3578 } else { 3579 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3580 } 3581 3582 if (events->sctp_address_event) { 3583 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3584 } else { 3585 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3586 } 3587 3588 if (events->sctp_send_failure_event) { 3589 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3590 } else { 3591 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3592 } 3593 3594 if (events->sctp_peer_error_event) { 3595 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3596 } else { 3597 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3598 } 3599 3600 if (events->sctp_shutdown_event) { 3601 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3602 } else { 3603 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3604 } 3605 3606 if (events->sctp_partial_delivery_event) { 3607 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3608 } else { 3609 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3610 } 3611 3612 if 
(events->sctp_adaptation_layer_event) { 3613 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3614 } else { 3615 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3616 } 3617 3618 if (events->sctp_authentication_event) { 3619 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3620 } else { 3621 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3622 } 3623 3624 if (events->sctp_sender_dry_event) { 3625 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); 3626 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3627 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3628 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3629 if (stcb) { 3630 SCTP_TCB_LOCK(stcb); 3631 } 3632 if (stcb && 3633 TAILQ_EMPTY(&stcb->asoc.send_queue) && 3634 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 3635 (stcb->asoc.stream_queue_cnt == 0)) { 3636 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); 3637 } 3638 if (stcb) { 3639 SCTP_TCB_UNLOCK(stcb); 3640 } 3641 } 3642 } else { 3643 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); 3644 } 3645 3646 if (events->sctp_stream_reset_events) { 3647 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3648 } else { 3649 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3650 } 3651 SCTP_INP_WUNLOCK(inp); 3652 } 3653 break; 3654 3655 case SCTP_ADAPTATION_LAYER: 3656 { 3657 struct sctp_setadaptation *adap_bits; 3658 3659 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3660 SCTP_INP_WLOCK(inp); 3661 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3662 SCTP_INP_WUNLOCK(inp); 3663 } 3664 break; 3665 #ifdef SCTP_DEBUG 3666 case SCTP_SET_INITIAL_DBG_SEQ: 3667 { 3668 uint32_t *vvv; 3669 3670 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3671 SCTP_INP_WLOCK(inp); 3672 inp->sctp_ep.initial_sequence_debug = *vvv; 3673 SCTP_INP_WUNLOCK(inp); 3674 } 3675 break; 3676 #endif 3677 case SCTP_DEFAULT_SEND_PARAM: 3678 { 3679 struct sctp_sndrcvinfo *s_info; 3680 3681 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3682 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3683 3684 if (stcb) { 3685 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3686 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3687 } else { 3688 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3689 error = EINVAL; 3690 } 3691 SCTP_TCB_UNLOCK(stcb); 3692 } else { 3693 SCTP_INP_WLOCK(inp); 3694 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3695 SCTP_INP_WUNLOCK(inp); 3696 } 3697 } 3698 break; 3699 case SCTP_PEER_ADDR_PARAMS: 3700 /* Applys to the specific association */ 3701 { 3702 struct sctp_paddrparams *paddrp; 3703 struct sctp_nets *net; 3704 3705 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3706 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3707 net = NULL; 3708 if (stcb) { 3709 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3710 } else { 3711 /* 3712 * We increment here since 3713 * sctp_findassociation_ep_addr() wil do a 3714 * decrement if it finds the stcb as long as 3715 * the locked tcb (last argument) is NOT a 3716 * TCB.. aka NULL. 
3717 */ 3718 SCTP_INP_INCR_REF(inp); 3719 stcb = sctp_findassociation_ep_addr(&inp, 3720 (struct sockaddr *)&paddrp->spp_address, 3721 &net, NULL, NULL); 3722 if (stcb == NULL) { 3723 SCTP_INP_DECR_REF(inp); 3724 } 3725 } 3726 if (stcb && (net == NULL)) { 3727 struct sockaddr *sa; 3728 3729 sa = (struct sockaddr *)&paddrp->spp_address; 3730 if (sa->sa_family == AF_INET) { 3731 struct sockaddr_in *sin; 3732 3733 sin = (struct sockaddr_in *)sa; 3734 if (sin->sin_addr.s_addr) { 3735 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3736 SCTP_TCB_UNLOCK(stcb); 3737 error = EINVAL; 3738 break; 3739 } 3740 } else if (sa->sa_family == AF_INET6) { 3741 struct sockaddr_in6 *sin6; 3742 3743 sin6 = (struct sockaddr_in6 *)sa; 3744 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3745 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3746 SCTP_TCB_UNLOCK(stcb); 3747 error = EINVAL; 3748 break; 3749 } 3750 } else { 3751 error = EAFNOSUPPORT; 3752 SCTP_TCB_UNLOCK(stcb); 3753 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3754 break; 3755 } 3756 } 3757 /* sanity checks */ 3758 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3759 if (stcb) 3760 SCTP_TCB_UNLOCK(stcb); 3761 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3762 return (EINVAL); 3763 } 3764 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3765 if (stcb) 3766 SCTP_TCB_UNLOCK(stcb); 3767 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3768 return (EINVAL); 3769 } 3770 if (stcb) { 3771 /************************TCB SPECIFIC SET ******************/ 3772 /* 3773 * do we change the timer for HB, we run 3774 * only one? 3775 */ 3776 int ovh = 0; 3777 3778 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3779 ovh = SCTP_MED_OVERHEAD; 3780 } else { 3781 ovh = SCTP_MED_V4_OVERHEAD; 3782 } 3783 3784 if (paddrp->spp_hbinterval) 3785 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3786 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3787 stcb->asoc.heart_beat_delay = 0; 3788 3789 /* network sets ? 
*/ 3790 if (net) { 3791 /************************NET SPECIFIC SET ******************/ 3792 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3793 /* on demand HB */ 3794 if (sctp_send_hb(stcb, 1, net) < 0) { 3795 /* asoc destroyed */ 3796 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3797 error = EINVAL; 3798 break; 3799 } 3800 } 3801 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3802 net->dest_state |= SCTP_ADDR_NOHB; 3803 } 3804 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3805 net->dest_state &= ~SCTP_ADDR_NOHB; 3806 } 3807 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3808 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3809 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3810 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3811 } 3812 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3813 net->mtu = paddrp->spp_pathmtu + ovh; 3814 if (net->mtu < stcb->asoc.smallest_mtu) { 3815 #ifdef SCTP_PRINT_FOR_B_AND_M 3816 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3817 net->mtu); 3818 #endif 3819 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3820 } 3821 } 3822 } 3823 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3824 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3825 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3826 } 3827 } 3828 if (paddrp->spp_pathmaxrxt) 3829 net->failure_threshold = paddrp->spp_pathmaxrxt; 3830 #ifdef INET 3831 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3832 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3833 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3834 } 3835 } 3836 #endif 3837 #ifdef INET6 3838 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3839 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3840 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3841 } 3842 } 3843 #endif 3844 } else { 3845 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3846 if (paddrp->spp_pathmaxrxt) 3847 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3848 3849 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3850 /* Turn back on the timer */ 3851 stcb->asoc.hb_is_disabled = 0; 3852 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3853 } 3854 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3855 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3856 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3857 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3858 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3859 } 3860 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3861 net->mtu = paddrp->spp_pathmtu + ovh; 3862 if (net->mtu < stcb->asoc.smallest_mtu) { 3863 #ifdef SCTP_PRINT_FOR_B_AND_M 3864 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3865 net->mtu); 3866 #endif 3867 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3868 } 3869 } 3870 } 3871 } 3872 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3873 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3874 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3875 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3876 } 3877 } 3878 } 3879 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3880 int cnt_of_unconf = 0; 3881 struct sctp_nets *lnet; 3882 3883 stcb->asoc.hb_is_disabled = 1; 3884 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3885 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3886 cnt_of_unconf++; 3887 } 3888 } 3889 /* 3890 * stop the timer ONLY if we 3891 * have no unconfirmed 3892 * 
addresses 3893 */ 3894 if (cnt_of_unconf == 0) { 3895 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3896 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 3897 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3898 } 3899 } 3900 } 3901 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3902 /* start up the timer. */ 3903 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3904 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3905 } 3906 } 3907 #ifdef INET 3908 if (paddrp->spp_flags & SPP_IPV4_TOS) 3909 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3910 #endif 3911 #ifdef INET6 3912 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3913 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3914 #endif 3915 3916 } 3917 SCTP_TCB_UNLOCK(stcb); 3918 } else { 3919 /************************NO TCB, SET TO default stuff ******************/ 3920 SCTP_INP_WLOCK(inp); 3921 /* 3922 * For the TOS/FLOWLABEL stuff you set it 3923 * with the options on the socket 3924 */ 3925 if (paddrp->spp_pathmaxrxt) { 3926 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3927 } 3928 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3929 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 3930 else if (paddrp->spp_hbinterval) { 3931 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 3932 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 3933 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3934 } 3935 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3936 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3937 3938 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3939 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3940 } 3941 SCTP_INP_WUNLOCK(inp); 3942 } 3943 } 3944 break; 3945 case SCTP_RTOINFO: 3946 { 3947 struct sctp_rtoinfo *srto; 3948 uint32_t new_init, new_min, new_max; 3949 3950 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3951 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3952 3953 if (stcb) { 3954 if (srto->srto_initial) 3955 new_init = srto->srto_initial; 3956 else 3957 new_init = stcb->asoc.initial_rto; 3958 if (srto->srto_max) 3959 new_max = srto->srto_max; 3960 else 3961 new_max = stcb->asoc.maxrto; 3962 if (srto->srto_min) 3963 new_min = srto->srto_min; 3964 else 3965 new_min = stcb->asoc.minrto; 3966 if ((new_min <= new_init) && (new_init <= new_max)) { 3967 stcb->asoc.initial_rto = new_init; 3968 stcb->asoc.maxrto = new_max; 3969 stcb->asoc.minrto = new_min; 3970 } else { 3971 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3972 error = EINVAL; 3973 } 3974 SCTP_TCB_UNLOCK(stcb); 3975 } else { 3976 SCTP_INP_WLOCK(inp); 3977 if (srto->srto_initial) 3978 new_init = srto->srto_initial; 3979 else 3980 new_init = inp->sctp_ep.initial_rto; 3981 if (srto->srto_max) 3982 new_max = srto->srto_max; 3983 else 3984 new_max = inp->sctp_ep.sctp_maxrto; 3985 if (srto->srto_min) 3986 new_min = srto->srto_min; 3987 else 3988 new_min = inp->sctp_ep.sctp_minrto; 3989 if ((new_min <= new_init) && (new_init <= new_max)) { 3990 inp->sctp_ep.initial_rto = new_init; 3991 inp->sctp_ep.sctp_maxrto = new_max; 3992 inp->sctp_ep.sctp_minrto = new_min; 3993 } else { 3994 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3995 error = EINVAL; 3996 } 3997 SCTP_INP_WUNLOCK(inp); 3998 } 3999 } 4000 break; 4001 case SCTP_ASSOCINFO: 4002 { 4003 struct sctp_assocparams *sasoc; 4004 4005 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 4006 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 4007 if 
(sasoc->sasoc_cookie_life) { 4008 /* boundary check the cookie life */ 4009 if (sasoc->sasoc_cookie_life < 1000) 4010 sasoc->sasoc_cookie_life = 1000; 4011 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 4012 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; 4013 } 4014 } 4015 if (stcb) { 4016 if (sasoc->sasoc_asocmaxrxt) 4017 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 4018 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 4019 sasoc->sasoc_peer_rwnd = 0; 4020 sasoc->sasoc_local_rwnd = 0; 4021 if (sasoc->sasoc_cookie_life) { 4022 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4023 } 4024 SCTP_TCB_UNLOCK(stcb); 4025 } else { 4026 SCTP_INP_WLOCK(inp); 4027 if (sasoc->sasoc_asocmaxrxt) 4028 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 4029 sasoc->sasoc_number_peer_destinations = 0; 4030 sasoc->sasoc_peer_rwnd = 0; 4031 sasoc->sasoc_local_rwnd = 0; 4032 if (sasoc->sasoc_cookie_life) { 4033 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4034 } 4035 SCTP_INP_WUNLOCK(inp); 4036 } 4037 } 4038 break; 4039 case SCTP_INITMSG: 4040 { 4041 struct sctp_initmsg *sinit; 4042 4043 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 4044 SCTP_INP_WLOCK(inp); 4045 if (sinit->sinit_num_ostreams) 4046 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 4047 4048 if (sinit->sinit_max_instreams) 4049 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 4050 4051 if (sinit->sinit_max_attempts) 4052 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 4053 4054 if (sinit->sinit_max_init_timeo) 4055 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 4056 SCTP_INP_WUNLOCK(inp); 4057 } 4058 break; 4059 case SCTP_PRIMARY_ADDR: 4060 { 4061 struct sctp_setprim *spa; 4062 struct sctp_nets *net, *lnet; 4063 4064 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 4065 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 4066 4067 net = NULL; 4068 if (stcb) { 4069 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 4070 } else { 4071 /* 4072 * We increment here since 4073 * sctp_findassociation_ep_addr() wil do a 4074 * decrement if it finds the stcb as long as 4075 * the locked tcb (last argument) is NOT a 4076 * TCB.. aka NULL. 4077 */ 4078 SCTP_INP_INCR_REF(inp); 4079 stcb = sctp_findassociation_ep_addr(&inp, 4080 (struct sockaddr *)&spa->ssp_addr, 4081 &net, NULL, NULL); 4082 if (stcb == NULL) { 4083 SCTP_INP_DECR_REF(inp); 4084 } 4085 } 4086 4087 if ((stcb) && (net)) { 4088 if ((net != stcb->asoc.primary_destination) && 4089 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 4090 /* Ok we need to set it */ 4091 lnet = stcb->asoc.primary_destination; 4092 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 4093 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 4094 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 4095 } 4096 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 4097 } 4098 } 4099 } else { 4100 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4101 error = EINVAL; 4102 } 4103 if (stcb) { 4104 SCTP_TCB_UNLOCK(stcb); 4105 } 4106 } 4107 break; 4108 case SCTP_SET_DYNAMIC_PRIMARY: 4109 { 4110 union sctp_sockstore *ss; 4111 4112 error = priv_check(curthread, 4113 PRIV_NETINET_RESERVEDPORT); 4114 if (error) 4115 break; 4116 4117 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 4118 /* SUPER USER CHECK? 
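 * Presumably answered by the priv_check() call above, which already
 * limits this option to suitably privileged threads; nothing further
 * is checked before handing off to sctp_dynamic_set_primary().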
*/ 4119 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 4120 } 4121 break; 4122 case SCTP_SET_PEER_PRIMARY_ADDR: 4123 { 4124 struct sctp_setpeerprim *sspp; 4125 4126 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 4127 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 4128 if (stcb != NULL) { 4129 struct sctp_ifa *ifa; 4130 4131 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 4132 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 4133 if (ifa == NULL) { 4134 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4135 error = EINVAL; 4136 goto out_of_it; 4137 } 4138 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4139 /* 4140 * Must validate the ifa found is in 4141 * our ep 4142 */ 4143 struct sctp_laddr *laddr; 4144 int found = 0; 4145 4146 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4147 if (laddr->ifa == NULL) { 4148 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 4149 __FUNCTION__); 4150 continue; 4151 } 4152 if (laddr->ifa == ifa) { 4153 found = 1; 4154 break; 4155 } 4156 } 4157 if (!found) { 4158 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4159 error = EINVAL; 4160 goto out_of_it; 4161 } 4162 } 4163 if (sctp_set_primary_ip_address_sa(stcb, 4164 (struct sockaddr *)&sspp->sspp_addr) != 0) { 4165 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4166 error = EINVAL; 4167 } 4168 out_of_it: 4169 SCTP_TCB_UNLOCK(stcb); 4170 } else { 4171 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4172 error = EINVAL; 4173 } 4174 4175 } 4176 break; 4177 case SCTP_BINDX_ADD_ADDR: 4178 { 4179 struct sctp_getaddresses *addrs; 4180 size_t sz; 4181 struct thread *td; 4182 4183 td = (struct thread *)p; 4184 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 4185 optsize); 4186 if (addrs->addr->sa_family == AF_INET) { 4187 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4188 if (optsize < sz) { 4189 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4190 error = EINVAL; 4191 break; 4192 } 4193 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4194 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4195 break; 4196 } 4197 #ifdef INET6 4198 } else if (addrs->addr->sa_family == AF_INET6) { 4199 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4200 if (optsize < sz) { 4201 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4202 error = EINVAL; 4203 break; 4204 } 4205 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4206 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4207 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4208 break; 4209 } 4210 #endif 4211 } else { 4212 error = EAFNOSUPPORT; 4213 break; 4214 } 4215 sctp_bindx_add_address(so, inp, addrs->addr, 4216 addrs->sget_assoc_id, vrf_id, 4217 &error, p); 4218 } 4219 break; 4220 case SCTP_BINDX_REM_ADDR: 4221 { 4222 struct sctp_getaddresses *addrs; 4223 size_t sz; 4224 struct thread *td; 4225 4226 td = (struct thread *)p; 4227 4228 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 4229 if (addrs->addr->sa_family == AF_INET) { 4230 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4231 if (optsize < sz) { 4232 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 
4233 error = EINVAL; 4234 break; 4235 } 4236 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4237 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4238 break; 4239 } 4240 #ifdef INET6 4241 } else if (addrs->addr->sa_family == AF_INET6) { 4242 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4243 if (optsize < sz) { 4244 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4245 error = EINVAL; 4246 break; 4247 } 4248 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4249 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4250 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4251 break; 4252 } 4253 #endif 4254 } else { 4255 error = EAFNOSUPPORT; 4256 break; 4257 } 4258 sctp_bindx_delete_address(so, inp, addrs->addr, 4259 addrs->sget_assoc_id, vrf_id, 4260 &error); 4261 } 4262 break; 4263 default: 4264 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 4265 error = ENOPROTOOPT; 4266 break; 4267 } /* end switch (opt) */ 4268 return (error); 4269 } 4270 4271 int 4272 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 4273 { 4274 void *optval = NULL; 4275 size_t optsize = 0; 4276 struct sctp_inpcb *inp; 4277 void *p; 4278 int error = 0; 4279 4280 inp = (struct sctp_inpcb *)so->so_pcb; 4281 if (inp == 0) { 4282 /* I made the same as TCP since we are not setup? */ 4283 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4284 return (ECONNRESET); 4285 } 4286 if (sopt->sopt_level != IPPROTO_SCTP) { 4287 /* wrong proto level... send back up to IP */ 4288 #ifdef INET6 4289 if (INP_CHECK_SOCKAF(so, AF_INET6)) 4290 error = ip6_ctloutput(so, sopt); 4291 else 4292 #endif /* INET6 */ 4293 error = ip_ctloutput(so, sopt); 4294 return (error); 4295 } 4296 optsize = sopt->sopt_valsize; 4297 if (optsize) { 4298 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); 4299 if (optval == NULL) { 4300 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); 4301 return (ENOBUFS); 4302 } 4303 error = sooptcopyin(sopt, optval, optsize, optsize); 4304 if (error) { 4305 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4306 goto out; 4307 } 4308 } 4309 p = (void *)sopt->sopt_td; 4310 if (sopt->sopt_dir == SOPT_SET) { 4311 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 4312 } else if (sopt->sopt_dir == SOPT_GET) { 4313 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 4314 } else { 4315 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4316 error = EINVAL; 4317 } 4318 if ((error == 0) && (optval != NULL)) { 4319 error = sooptcopyout(sopt, optval, optsize); 4320 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4321 } else if (optval != NULL) { 4322 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4323 } 4324 out: 4325 return (error); 4326 } 4327 4328 4329 static int 4330 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 4331 { 4332 int error = 0; 4333 int create_lock_on = 0; 4334 uint32_t vrf_id; 4335 struct sctp_inpcb *inp; 4336 struct sctp_tcb *stcb = NULL; 4337 4338 inp = (struct sctp_inpcb *)so->so_pcb; 4339 if (inp == 0) { 4340 /* I made the same as TCP since we are not setup? 
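 * Note that EINVAL is what gets logged just below while ECONNRESET
 * is what is actually returned to the caller.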
*/ 4341 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4342 return (ECONNRESET); 4343 } 4344 if (addr == NULL) { 4345 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4346 return EINVAL; 4347 } 4348 #ifdef INET6 4349 if (addr->sa_family == AF_INET6) { 4350 struct sockaddr_in6 *sin6p; 4351 4352 if (addr->sa_len != sizeof(struct sockaddr_in6)) { 4353 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4354 return (EINVAL); 4355 } 4356 sin6p = (struct sockaddr_in6 *)addr; 4357 if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) { 4358 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 4359 return (error); 4360 } 4361 } else 4362 #endif 4363 if (addr->sa_family == AF_INET) { 4364 struct sockaddr_in *sinp; 4365 4366 if (addr->sa_len != sizeof(struct sockaddr_in)) { 4367 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4368 return (EINVAL); 4369 } 4370 sinp = (struct sockaddr_in *)addr; 4371 if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) { 4372 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 4373 return (error); 4374 } 4375 } else { 4376 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); 4377 return (EAFNOSUPPORT); 4378 } 4379 SCTP_INP_INCR_REF(inp); 4380 SCTP_ASOC_CREATE_LOCK(inp); 4381 create_lock_on = 1; 4382 4383 4384 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4385 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4386 /* Should I really unlock ? */ 4387 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 4388 error = EFAULT; 4389 goto out_now; 4390 } 4391 #ifdef INET6 4392 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 4393 (addr->sa_family == AF_INET6)) { 4394 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4395 error = EINVAL; 4396 goto out_now; 4397 } 4398 #endif /* INET6 */ 4399 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 4400 SCTP_PCB_FLAGS_UNBOUND) { 4401 /* Bind a ephemeral port */ 4402 error = sctp_inpcb_bind(so, NULL, NULL, p); 4403 if (error) { 4404 goto out_now; 4405 } 4406 } 4407 /* Now do we connect? */ 4408 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 4409 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { 4410 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4411 error = EINVAL; 4412 goto out_now; 4413 } 4414 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4415 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4416 /* We are already connected AND the TCP model */ 4417 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4418 error = EADDRINUSE; 4419 goto out_now; 4420 } 4421 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4422 SCTP_INP_RLOCK(inp); 4423 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4424 SCTP_INP_RUNLOCK(inp); 4425 } else { 4426 /* 4427 * We increment here since sctp_findassociation_ep_addr() 4428 * will do a decrement if it finds the stcb as long as the 4429 * locked tcb (last argument) is NOT a TCB.. aka NULL. 
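 * In short, the pattern is: take a reference on the inp, do the
 * lookup, and drop that reference ourselves only if no stcb came
 * back, since a successful lookup already consumed it.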
4430 */ 4431 SCTP_INP_INCR_REF(inp); 4432 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); 4433 if (stcb == NULL) { 4434 SCTP_INP_DECR_REF(inp); 4435 } else { 4436 SCTP_TCB_UNLOCK(stcb); 4437 } 4438 } 4439 if (stcb != NULL) { 4440 /* Already have or am bring up an association */ 4441 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 4442 error = EALREADY; 4443 goto out_now; 4444 } 4445 vrf_id = inp->def_vrf_id; 4446 /* We are GOOD to go */ 4447 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p); 4448 if (stcb == NULL) { 4449 /* Gak! no memory */ 4450 goto out_now; 4451 } 4452 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 4453 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 4454 /* Set the connected flag so we can queue data */ 4455 soisconnecting(so); 4456 } 4457 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 4458 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 4459 4460 /* initialize authentication parameters for the assoc */ 4461 sctp_initialize_auth_params(inp, stcb); 4462 4463 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 4464 SCTP_TCB_UNLOCK(stcb); 4465 out_now: 4466 if (create_lock_on) { 4467 SCTP_ASOC_CREATE_UNLOCK(inp); 4468 } 4469 SCTP_INP_DECR_REF(inp); 4470 return error; 4471 } 4472 4473 int 4474 sctp_listen(struct socket *so, int backlog, struct thread *p) 4475 { 4476 /* 4477 * Note this module depends on the protocol processing being called 4478 * AFTER any socket level flags and backlog are applied to the 4479 * socket. The traditional way that the socket flags are applied is 4480 * AFTER protocol processing. We have made a change to the 4481 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 4482 * place if the socket API for SCTP is to work properly. 4483 */ 4484 4485 int error = 0; 4486 struct sctp_inpcb *inp; 4487 4488 inp = (struct sctp_inpcb *)so->so_pcb; 4489 if (inp == 0) { 4490 /* I made the same as TCP since we are not setup? */ 4491 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4492 return (ECONNRESET); 4493 } 4494 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) { 4495 /* See if we have a listener */ 4496 struct sctp_inpcb *tinp; 4497 union sctp_sockstore store, *sp; 4498 4499 sp = &store; 4500 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4501 /* not bound all */ 4502 struct sctp_laddr *laddr; 4503 4504 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4505 memcpy(&store, &laddr->ifa->address, sizeof(store)); 4506 sp->sin.sin_port = inp->sctp_lport; 4507 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4508 if (tinp && (tinp != inp) && 4509 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4510 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4511 (tinp->sctp_socket->so_qlimit)) { 4512 /* 4513 * we have a listener already and 4514 * its not this inp. 
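 * Drop the reference that sctp_pcb_findep() took on tinp before
 * failing the listen with EADDRINUSE.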
4515 */ 4516 SCTP_INP_DECR_REF(tinp); 4517 return (EADDRINUSE); 4518 } else if (tinp) { 4519 SCTP_INP_DECR_REF(tinp); 4520 } 4521 } 4522 } else { 4523 /* Setup a local addr bound all */ 4524 memset(&store, 0, sizeof(store)); 4525 store.sin.sin_port = inp->sctp_lport; 4526 #ifdef INET6 4527 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4528 store.sa.sa_family = AF_INET6; 4529 store.sa.sa_len = sizeof(struct sockaddr_in6); 4530 } 4531 #endif 4532 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 4533 store.sa.sa_family = AF_INET; 4534 store.sa.sa_len = sizeof(struct sockaddr_in); 4535 } 4536 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4537 if (tinp && (tinp != inp) && 4538 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4539 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4540 (tinp->sctp_socket->so_qlimit)) { 4541 /* 4542 * we have a listener already and its not 4543 * this inp. 4544 */ 4545 SCTP_INP_DECR_REF(tinp); 4546 return (EADDRINUSE); 4547 } else if (tinp) { 4548 SCTP_INP_DECR_REF(inp); 4549 } 4550 } 4551 } 4552 SCTP_INP_RLOCK(inp); 4553 #ifdef SCTP_LOCK_LOGGING 4554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { 4555 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4556 } 4557 #endif 4558 SOCK_LOCK(so); 4559 error = solisten_proto_check(so); 4560 if (error) { 4561 SOCK_UNLOCK(so); 4562 SCTP_INP_RUNLOCK(inp); 4563 return (error); 4564 } 4565 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 4566 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4567 /* 4568 * The unlucky case - We are in the tcp pool with this guy. 4569 * - Someone else is in the main inp slot. - We must move 4570 * this guy (the listener) to the main slot - We must then 4571 * move the guy that was listener to the TCP Pool. 4572 */ 4573 if (sctp_swap_inpcb_for_listen(inp)) { 4574 goto in_use; 4575 } 4576 } 4577 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4578 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4579 /* We are already connected AND the TCP model */ 4580 in_use: 4581 SCTP_INP_RUNLOCK(inp); 4582 SOCK_UNLOCK(so); 4583 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4584 return (EADDRINUSE); 4585 } 4586 SCTP_INP_RUNLOCK(inp); 4587 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 4588 /* We must do a bind. */ 4589 SOCK_UNLOCK(so); 4590 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { 4591 /* bind error, probably perm */ 4592 return (error); 4593 } 4594 SOCK_LOCK(so); 4595 } 4596 /* It appears for 7.0 and on, we must always call this. 
*/ 4597 solisten_proto(so, backlog); 4598 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4599 /* remove the ACCEPTCONN flag for one-to-many sockets */ 4600 so->so_options &= ~SO_ACCEPTCONN; 4601 } 4602 if (backlog == 0) { 4603 /* turning off listen */ 4604 so->so_options &= ~SO_ACCEPTCONN; 4605 } 4606 SOCK_UNLOCK(so); 4607 return (error); 4608 } 4609 4610 static int sctp_defered_wakeup_cnt = 0; 4611 4612 int 4613 sctp_accept(struct socket *so, struct sockaddr **addr) 4614 { 4615 struct sctp_tcb *stcb; 4616 struct sctp_inpcb *inp; 4617 union sctp_sockstore store; 4618 4619 #ifdef INET6 4620 int error; 4621 4622 #endif 4623 inp = (struct sctp_inpcb *)so->so_pcb; 4624 4625 if (inp == 0) { 4626 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4627 return (ECONNRESET); 4628 } 4629 SCTP_INP_RLOCK(inp); 4630 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4631 SCTP_INP_RUNLOCK(inp); 4632 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 4633 return (EOPNOTSUPP); 4634 } 4635 if (so->so_state & SS_ISDISCONNECTED) { 4636 SCTP_INP_RUNLOCK(inp); 4637 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); 4638 return (ECONNABORTED); 4639 } 4640 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4641 if (stcb == NULL) { 4642 SCTP_INP_RUNLOCK(inp); 4643 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4644 return (ECONNRESET); 4645 } 4646 SCTP_TCB_LOCK(stcb); 4647 SCTP_INP_RUNLOCK(inp); 4648 store = stcb->asoc.primary_destination->ro._l_addr; 4649 SCTP_TCB_UNLOCK(stcb); 4650 switch (store.sa.sa_family) { 4651 case AF_INET: 4652 { 4653 struct sockaddr_in *sin; 4654 4655 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4656 sin->sin_family = AF_INET; 4657 sin->sin_len = sizeof(*sin); 4658 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 4659 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 4660 *addr = (struct sockaddr *)sin; 4661 break; 4662 } 4663 #ifdef INET6 4664 case AF_INET6: 4665 { 4666 struct sockaddr_in6 *sin6; 4667 4668 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 4669 sin6->sin6_family = AF_INET6; 4670 sin6->sin6_len = sizeof(*sin6); 4671 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 4672 4673 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 4674 if ((error = sa6_recoverscope(sin6)) != 0) { 4675 SCTP_FREE_SONAME(sin6); 4676 return (error); 4677 } 4678 *addr = (struct sockaddr *)sin6; 4679 break; 4680 } 4681 #endif 4682 default: 4683 /* TSNH */ 4684 break; 4685 } 4686 /* Wake any delayed sleep action */ 4687 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 4688 SCTP_INP_WLOCK(inp); 4689 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 4690 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 4691 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 4692 SCTP_INP_WUNLOCK(inp); 4693 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 4694 if (sowriteable(inp->sctp_socket)) { 4695 sowwakeup_locked(inp->sctp_socket); 4696 } else { 4697 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 4698 } 4699 SCTP_INP_WLOCK(inp); 4700 } 4701 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 4702 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 4703 SCTP_INP_WUNLOCK(inp); 4704 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 4705 if (soreadable(inp->sctp_socket)) { 4706 sctp_defered_wakeup_cnt++; 4707 sorwakeup_locked(inp->sctp_socket); 4708 } else { 4709 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 4710 } 4711 SCTP_INP_WLOCK(inp); 4712 } 4713 SCTP_INP_WUNLOCK(inp); 4714 } 4715 return (0); 4716 } 4717 4718 
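/*
 * Illustrative note, not from the original file: sctp_ingetaddr()
 * below backs getsockname(2) (via the pru_sockaddr entry in
 * sctp_usrreqs at the bottom of this file).  A caller with an AF_INET
 * SCTP socket sd and the usual userland headers might use it roughly
 * like this:
 *
 *	struct sockaddr_in sin;
 *	socklen_t len = sizeof(sin);
 *
 *	if (getsockname(sd, (struct sockaddr *)&sin, &len) == 0)
 *		printf("bound to port %u\n", ntohs(sin.sin_port));
 */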
int 4719 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 4720 { 4721 struct sockaddr_in *sin; 4722 uint32_t vrf_id; 4723 struct sctp_inpcb *inp; 4724 struct sctp_ifa *sctp_ifa; 4725 4726 /* 4727 * Do the malloc first in case it blocks. 4728 */ 4729 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4730 sin->sin_family = AF_INET; 4731 sin->sin_len = sizeof(*sin); 4732 inp = (struct sctp_inpcb *)so->so_pcb; 4733 if (!inp) { 4734 SCTP_FREE_SONAME(sin); 4735 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4736 return ECONNRESET; 4737 } 4738 SCTP_INP_RLOCK(inp); 4739 sin->sin_port = inp->sctp_lport; 4740 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 4741 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4742 struct sctp_tcb *stcb; 4743 struct sockaddr_in *sin_a; 4744 struct sctp_nets *net; 4745 int fnd; 4746 4747 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4748 if (stcb == NULL) { 4749 goto notConn; 4750 } 4751 fnd = 0; 4752 sin_a = NULL; 4753 SCTP_TCB_LOCK(stcb); 4754 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4755 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4756 if (sin_a == NULL) 4757 /* this will make coverity happy */ 4758 continue; 4759 4760 if (sin_a->sin_family == AF_INET) { 4761 fnd = 1; 4762 break; 4763 } 4764 } 4765 if ((!fnd) || (sin_a == NULL)) { 4766 /* punt */ 4767 SCTP_TCB_UNLOCK(stcb); 4768 goto notConn; 4769 } 4770 vrf_id = inp->def_vrf_id; 4771 sctp_ifa = sctp_source_address_selection(inp, 4772 stcb, 4773 (sctp_route_t *) & net->ro, 4774 net, 0, vrf_id); 4775 if (sctp_ifa) { 4776 sin->sin_addr = sctp_ifa->address.sin.sin_addr; 4777 sctp_free_ifa(sctp_ifa); 4778 } 4779 SCTP_TCB_UNLOCK(stcb); 4780 } else { 4781 /* For the bound all case you get back 0 */ 4782 notConn: 4783 sin->sin_addr.s_addr = 0; 4784 } 4785 4786 } else { 4787 /* Take the first IPv4 address in the list */ 4788 struct sctp_laddr *laddr; 4789 int fnd = 0; 4790 4791 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4792 if (laddr->ifa->address.sa.sa_family == AF_INET) { 4793 struct sockaddr_in *sin_a; 4794 4795 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa; 4796 sin->sin_addr = sin_a->sin_addr; 4797 fnd = 1; 4798 break; 4799 } 4800 } 4801 if (!fnd) { 4802 SCTP_FREE_SONAME(sin); 4803 SCTP_INP_RUNLOCK(inp); 4804 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 4805 return ENOENT; 4806 } 4807 } 4808 SCTP_INP_RUNLOCK(inp); 4809 (*addr) = (struct sockaddr *)sin; 4810 return (0); 4811 } 4812 4813 int 4814 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 4815 { 4816 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 4817 int fnd; 4818 struct sockaddr_in *sin_a; 4819 struct sctp_inpcb *inp; 4820 struct sctp_tcb *stcb; 4821 struct sctp_nets *net; 4822 4823 /* Do the malloc first in case it blocks. 
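 * (In this routine the PCB and connected-state checks actually run
 * before the allocation.)  Illustrative note, not from the original
 * file: this function backs getpeername(2) via the pru_peeraddr entry
 * below, so a caller with a connected one-to-one SCTP socket sd might
 * do, with the usual userland headers:
 *
 *	struct sockaddr_in peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getpeername(sd, (struct sockaddr *)&peer, &len) == 0)
 *		printf("peer port %u\n", ntohs(peer.sin_port));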
*/ 4824 inp = (struct sctp_inpcb *)so->so_pcb; 4825 if ((inp == NULL) || 4826 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 4827 /* UDP type and listeners will drop out here */ 4828 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 4829 return (ENOTCONN); 4830 } 4831 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4832 sin->sin_family = AF_INET; 4833 sin->sin_len = sizeof(*sin); 4834 4835 /* We must recapture incase we blocked */ 4836 inp = (struct sctp_inpcb *)so->so_pcb; 4837 if (!inp) { 4838 SCTP_FREE_SONAME(sin); 4839 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4840 return ECONNRESET; 4841 } 4842 SCTP_INP_RLOCK(inp); 4843 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4844 if (stcb) { 4845 SCTP_TCB_LOCK(stcb); 4846 } 4847 SCTP_INP_RUNLOCK(inp); 4848 if (stcb == NULL) { 4849 SCTP_FREE_SONAME(sin); 4850 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4851 return ECONNRESET; 4852 } 4853 fnd = 0; 4854 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4855 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4856 if (sin_a->sin_family == AF_INET) { 4857 fnd = 1; 4858 sin->sin_port = stcb->rport; 4859 sin->sin_addr = sin_a->sin_addr; 4860 break; 4861 } 4862 } 4863 SCTP_TCB_UNLOCK(stcb); 4864 if (!fnd) { 4865 /* No IPv4 address */ 4866 SCTP_FREE_SONAME(sin); 4867 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 4868 return ENOENT; 4869 } 4870 (*addr) = (struct sockaddr *)sin; 4871 return (0); 4872 } 4873 4874 struct pr_usrreqs sctp_usrreqs = { 4875 .pru_abort = sctp_abort, 4876 .pru_accept = sctp_accept, 4877 .pru_attach = sctp_attach, 4878 .pru_bind = sctp_bind, 4879 .pru_connect = sctp_connect, 4880 .pru_control = in_control, 4881 .pru_close = sctp_close, 4882 .pru_detach = sctp_close, 4883 .pru_sopoll = sopoll_generic, 4884 .pru_flush = sctp_flush, 4885 .pru_disconnect = sctp_disconnect, 4886 .pru_listen = sctp_listen, 4887 .pru_peeraddr = sctp_peeraddr, 4888 .pru_send = sctp_sendm, 4889 .pru_shutdown = sctp_shutdown, 4890 .pru_sockaddr = sctp_ingetaddr, 4891 .pru_sosend = sctp_sosend, 4892 .pru_soreceive = sctp_soreceive 4893 }; 4894
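/*
 * Illustrative note, not from the original file: sctp_usrreqs above is
 * the dispatch table the socket layer uses to reach this code, e.g.
 * connect(2) -> sctp_connect(), listen(2) -> sctp_listen(),
 * accept(2) -> sctp_accept(), getsockname(2) -> sctp_ingetaddr() and
 * getpeername(2) -> sctp_peeraddr().  A minimal one-to-one client
 * exercising a few of these entry points (the port and address are
 * placeholders, and the usual <sys/socket.h>, <netinet/in.h>,
 * <arpa/inet.h> and <string.h> userland headers are assumed) might
 * look like:
 *
 *	int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in srv;
 *
 *	memset(&srv, 0, sizeof(srv));
 *	srv.sin_family = AF_INET;
 *	srv.sin_len = sizeof(srv);
 *	srv.sin_port = htons(5001);
 *	srv.sin_addr.s_addr = inet_addr("192.0.2.1");
 *	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) == 0)
 *		(void)send(sd, "hello", 5, 0);
 */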