/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_var.h>
#if defined(INET6)
#endif
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_cc_functions.h>
#include <netinet/udp.h>



void
sctp_init(void)
{
	u_long sb_max_adj;

	bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));

	/* Initialize and modify the sysctled variables */
	sctp_init_sysctls();
	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
		SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
	/*
	 * Allow a user to take no more than 1/2 the number of clusters or
	 * the SB_MAX, whichever is smaller, for the send window.
	 */
	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
	SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
	/*
	 * Now for the recv window, should we take the same amount? or
	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
	 * now I will just copy.
	 */
	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);

	SCTP_BASE_VAR(first_time) = 0;
	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
	sctp_pcb_init();
#if defined(SCTP_PACKET_LOGGING)
	SCTP_BASE_VAR(packet_log_writers) = 0;
	SCTP_BASE_VAR(packet_log_end) = 0;
	bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
#endif

}

void
sctp_finish(void)
{
	sctp_pcb_finish();
}


void
sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint16_t nxtsz)
{
	struct sctp_tmit_chunk *chk;
	uint16_t overhead;

	/* Adjust that too */
	stcb->asoc.smallest_mtu = nxtsz;
	/* now off to subtract IP_DF flag if needed */
#ifdef SCTP_PRINT_FOR_B_AND_M
	SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
	    inp, stcb, net, nxtsz);
#endif
	overhead = IP_HDR_SIZE;
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
	}
	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
		if ((chk->send_size + overhead) > nxtsz) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if ((chk->send_size + overhead) > nxtsz) {
			/*
			 * For this guy we also mark for immediate resend
			 * since we sent too big a chunk
			 */
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
			}
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->rec.data.doing_fast_retransmit = 0;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
				    chk->whoTo->flight_size,
				    chk->book_size,
				    (uintptr_t) chk->whoTo,
				    chk->rec.data.TSN_seq);
			}
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
		}
	}
}

static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
		}
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not an unreachable message due to frag. */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	totsz = ip->ip_len;

	nxtsz = ntohs(icmph->icmp_nextmtu);
	if (nxtsz == 0) {
		/*
		 * old type router that does not tell us what the next size
		 * MTU is.
		 * Rats, we will have to guess (in an educated fashion,
		 * of course).
		 */
		nxtsz = find_next_best_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
		if (net->port) {
			net->mtu -= sizeof(struct udphdr);
		}
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
#ifdef SCTP_PRINT_FOR_B_AND_M
		SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
		    nxtsz);
#endif
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}


void
sctp_notify(struct sctp_inpcb *inp,
    struct ip *ip,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	/* protection */
	int reason;
	struct icmp *icmph;

	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {

		/*
		 * Hmm, reachability problems we must examine closely. If it
		 * is not reachable, we may have lost a network. Or if there
		 * is NO protocol at the other end named SCTP, we consider
		 * it an OOTB abort.
		 */
		if (net->dest_state & SCTP_ADDR_REACHABLE) {
			/* Ok that destination is NOT reachable */
			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
			    net->error_count,
			    net->failure_threshold,
			    net);

			net->dest_state &= ~SCTP_ADDR_REACHABLE;
			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
			/*
			 * JRS 5/14/07 - If a destination is unreachable,
			 * the PF bit is turned off. This allows an
			 * unambiguous use of the PF bit for destinations
			 * that are reachable but potentially failed. If the
			 * destination is set to the unreachable state, also
			 * set the destination to the PF state.
			 */
			/*
			 * Add debug message here if destination is not in
			 * PF state.
			 */
			/* Stop any running T3 timers here?
			 */
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
				    net);
			}
			net->error_count = net->failure_threshold + 1;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
			    stcb, SCTP_FAILED_THRESHOLD,
			    (void *)net, SCTP_SO_NOT_LOCKED);
		}
		SCTP_TCB_UNLOCK(stcb);
	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
		/*
		 * Here the peer is either playing tricks on us, including
		 * an address that belongs to someone who does not support
		 * SCTP, OR was a userland implementation that shut down and
		 * now is dead. In either case treat it like an OOTB abort
		 * with no TCB.
		 */
		reason = SCTP_PEER_FAULTY;
		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
#endif
		/* no need to unlock here, since the TCB is gone */
	} else {
		SCTP_TCB_UNLOCK(stcb);
	}
}

void
sctp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct sctphdr *sh;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;
	if (sa->sa_family != AF_INET ||
	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
		return;
	}
	if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}
	if (ip) {
		struct sctp_inpcb *inp = NULL;
		struct sctp_tcb *stcb = NULL;
		struct sctp_nets *net = NULL;
		struct sockaddr_in to, from;

		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		bzero(&to, sizeof(to));
		bzero(&from, sizeof(from));
		from.sin_family = to.sin_family = AF_INET;
		from.sin_len = to.sin_len = sizeof(to);
		from.sin_port = sh->src_port;
		from.sin_addr = ip->ip_src;
		to.sin_port = sh->dest_port;
		to.sin_addr = ip->ip_dst;

		/*
		 * 'to' holds the dest of the packet that failed to be sent.
		 * 'from' holds our local endpoint address. Thus we reverse
		 * the to and the from in the lookup.
		 */
		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
		    (struct sockaddr *)&to,
		    &inp, &net, 1, vrf_id);
		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
			if (cmd != PRC_MSGSIZE) {
				sctp_notify(inp, ip, sh,
				    (struct sockaddr *)&to, stcb,
				    net);
			} else {
				/* handle possible ICMP size messages */
				sctp_notify_mbuf(inp, stcb, net, ip, sh);
			}
		} else {
			if ((stcb == NULL) && (inp != NULL)) {
				/* reduce ref-count */
				SCTP_INP_WLOCK(inp);
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
			}
			if (stcb) {
				SCTP_TCB_UNLOCK(stcb);
			}
		}
	}
	return;
}

static int
sctp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct sctp_inpcb *inp;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;
	int error;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);

	if (error)
		return (error);

	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);

	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
	    sintosa(&addrs[1]),
	    &inp, &net, 1, vrf_id);
	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
		if ((inp != NULL) && (stcb == NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			goto cred_can_cont;
		}
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		error = ENOENT;
		goto out;
	}
	SCTP_TCB_UNLOCK(stcb);
	/*
	 * We use the write lock here, only since in the error leg we need
	 * it. If we used RLOCK, then we would have to
	 * wlock/decr/unlock/rlock. Which in theory could create a hole.
	 * Better to use higher wlock.
	 */
	SCTP_INP_WLOCK(inp);
cred_can_cont:
	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
	if (error) {
		SCTP_INP_WUNLOCK(inp);
		goto out;
	}
	cru2x(inp->sctp_socket->so_cred, &xuc);
	SCTP_INP_WUNLOCK(inp);
	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");


static void
sctp_abort(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		return;
	}
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
		sctp_log_closing(inp, NULL, 16);
#endif
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}

static int
sctp_attach(struct socket *so, int proto, struct thread *p)
{
	struct sctp_inpcb *inp;
	struct inpcb *ip_inp;
	int error;
	uint32_t vrf_id = SCTP_DEFAULT_VRFID;

#ifdef IPSEC
	uint32_t flags;

#endif

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp != 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
		if (error) {
			return error;
		}
	}
	error = sctp_inpcb_alloc(so, vrf_id);
	if (error) {
		return error;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	SCTP_INP_WLOCK(inp);
	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
	ip_inp = &inp->ip_inp.inp;
	ip_inp->inp_vflag |= INP_IPV4;
	ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
#ifdef IPSEC
	error = ipsec_init_policy(so, &ip_inp->inp_sp);
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (error != 0) {
try_again:
		flags = inp->sctp_flags;
		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 15);
#endif
			SCTP_INP_WUNLOCK(inp);
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		} else {
			flags = inp->sctp_flags;
			if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
				goto try_again;
			} else {
				SCTP_INP_WUNLOCK(inp);
			}
		}
		return error;
	}
#endif				/* IPSEC */
	SCTP_INP_WUNLOCK(inp);
	return 0;
}

static int
sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	struct sctp_inpcb *inp = NULL;
	int error;

#ifdef INET6
	if (addr && addr->sa_family != AF_INET) {
		/* must be a v4 address! */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
#endif				/* INET6 */
	if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	error = sctp_inpcb_bind(so, addr, NULL, p);
	return error;
}

void
sctp_close(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return;

	/*
	 * Inform all the lower layer assoc that we are done.
	 */
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
		    (so->so_rcv.sb_cc > 0)) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 13);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		} else {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 14);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		}
		/*
		 * The socket is now detached, no matter what the state of
		 * the SCTP association.
		 */
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/* Now null out the reference, we are completely detached. */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}


int
sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *p);


int
sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *p)
{
	struct sctp_inpcb *inp;
	int error;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		sctp_m_freem(m);
		return EINVAL;
	}
	/* Got to have a to address if we are NOT a connected socket */
	if ((addr == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
	    ) {
		goto connected_type;
	} else if (addr == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
		error = EDESTADDRREQ;
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		return (error);
	}
#ifdef INET6
	if (addr->sa_family != AF_INET) {
		/* must be a v4 address! */
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		error = EDESTADDRREQ;
		return EDESTADDRREQ;
	}
#endif				/* INET6 */
connected_type:
	/* now what about control */
	if (control) {
		if (inp->control) {
			SCTP_PRINTF("huh? control set?\n");
			sctp_m_freem(inp->control);
			inp->control = NULL;
		}
		inp->control = control;
	}
	/* Place the data */
	if (inp->pkt) {
		SCTP_BUF_NEXT(inp->pkt_last) = m;
		inp->pkt_last = m;
	} else {
		inp->pkt_last = inp->pkt = m;
	}
	if (
	/* FreeBSD uses a flag passed */
	    ((flags & PRUS_MORETOCOME) == 0)
	    ) {
		/*
		 * note with the current version this code will only be used
		 * by OpenBSD -- NetBSD, FreeBSD, and MacOS have methods for
		 * re-defining sosend to use the sctp_sosend. One can
		 * optionally switch back to this code (by changing back the
		 * definitions) but this is not advisable.
		 * This code is used by FreeBSD when sending a file with
		 * sendfile() though.
		 */
		int ret;

		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
		inp->pkt = NULL;
		inp->control = NULL;
		return (ret);
	} else {
		return (0);
	}
}

int
sctp_disconnect(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
		return (ENOTCONN);
	}
	SCTP_INP_RLOCK(inp);
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* No connection */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		} else {
			struct sctp_association *asoc;
			struct sctp_tcb *stcb;

			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				SCTP_INP_RUNLOCK(inp);
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				return (EINVAL);
			}
			SCTP_TCB_LOCK(stcb);
			asoc = &stcb->asoc;
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				/* We are about to be freed, out of here */
				SCTP_TCB_UNLOCK(stcb);
				SCTP_INP_RUNLOCK(inp);
				return (0);
			}
			if (((so->so_options & SO_LINGER) &&
			    (so->so_linger == 0)) ||
			    (so->so_rcv.sb_cc > 0)) {
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_COOKIE_WAIT) {
					/* Left with Data unread */
					struct mbuf *err;

					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
					if (err) {
						/*
						 * Fill in the user
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;

						ph = mtod(err, struct sctp_paramhdr *);
						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length = htons(SCTP_BUF_LEN(err));
					}
#if defined(SCTP_PANIC_ON_ABORT)
					panic("disconnect does an abort");
#endif
					sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
				}
				SCTP_INP_RUNLOCK(inp);
				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
				/* No unlock tcb assoc is gone */
				return (0);
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->stream_queue_cnt == 0)) {
				/* there is nothing queued to send, so done */
				if (asoc->locked_on_sending) {
					goto abort_anyway;
				}
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/* only send SHUTDOWN 1st time thru */
					sctp_stop_timers_for_shutdown(stcb);
					sctp_send_shutdown(stcb,
					    stcb->asoc.primary_destination);
					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			} else {
				/*
				 * we still got (or just got) data to send,
				 * so set SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that SCTP_EOF
				 * should be sent with no data. currently,
				 * we will allow user data to be sent first
				 * and move to SHUTDOWN-PENDING
				 */
				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				if (asoc->locked_on_sending) {
					/* Locked to send out the data */
					struct sctp_stream_queue_pending *sp;

					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
					if (sp == NULL) {
						SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
						    asoc->locked_on_sending->stream_no);
					} else {
						if ((sp->length == 0) && (sp->msg_is_complete == 0))
							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
				if (TAILQ_EMPTY(&asoc->send_queue) &&
				    TAILQ_EMPTY(&asoc->sent_queue) &&
				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
					struct mbuf *op_err;

			abort_anyway:
					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (op_err) {
						/*
						 * Fill in the user
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(op_err) =
						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
						ph = mtod(op_err,
						    struct sctp_paramhdr *);
						ph->param_type = htons(
						    SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length = htons(SCTP_BUF_LEN(op_err));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
					}
#if defined(SCTP_PANIC_ON_ABORT)
					panic("disconnect does an abort");
#endif

					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
					sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_INP_RUNLOCK(inp);
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
					return (0);
				} else {
					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
				}
			}
			soisdisconnecting(so);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		/* not reached */
	} else {
		/* UDP model does not support this */
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
		return EOPNOTSUPP;
	}
}

int
sctp_flush(struct socket *so, int how)
{
	/*
	 * We will just clear out the values and let subsequent close clear
	 * out the data, if any. Note if the user did a shutdown(SHUT_RD),
	 * they will not be able to read the data; the socket will block
	 * that from happening.
	 */
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	SCTP_INP_RLOCK(inp);
	/* For the 1 to many model this does nothing */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		SCTP_INP_RUNLOCK(inp);
		return (0);
	}
	SCTP_INP_RUNLOCK(inp);
	if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
		/*
		 * First make sure the sb will be happy, we don't use these
		 * except maybe the count
		 */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_READ_LOCK(inp);
		inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
		SCTP_INP_READ_UNLOCK(inp);
		SCTP_INP_WUNLOCK(inp);
		so->so_rcv.sb_cc = 0;
		so->so_rcv.sb_mbcnt = 0;
		so->so_rcv.sb_mb = NULL;
	}
	if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
		/*
		 * First make sure the sb will be happy, we don't use these
		 * except maybe the count
		 */
		so->so_snd.sb_cc = 0;
		so->so_snd.sb_mbcnt = 0;
		so->so_snd.sb_mb = NULL;

	}
	return (0);
}

int
sctp_shutdown(struct socket *so)
{
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	SCTP_INP_RLOCK(inp);
	/* For the UDP model this is an invalid call */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		/* Restore the flags that the soshutdown took away. */
		SOCKBUF_LOCK(&so->so_rcv);
		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
		SOCKBUF_UNLOCK(&so->so_rcv);
		/* This proc will wakeup for read and do nothing (I hope) */
		SCTP_INP_RUNLOCK(inp);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	/*
	 * Ok, if we reach here it's the TCP model and it is either a
	 * SHUT_WR or SHUT_RDWR. This means we put the shutdown flag
	 * against it.
	 */
	{
		struct sctp_tcb *stcb;
		struct sctp_association *asoc;

		if ((so->so_state &
		    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
			SCTP_INP_RUNLOCK(inp);
			return (ENOTCONN);
		}
		socantsendmore(so);

		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			/*
			 * Ok, we hit the case that the shutdown call was
			 * made after an abort or something. Nothing to do
			 * now.
			 */
			SCTP_INP_RUNLOCK(inp);
			return (0);
		}
		SCTP_TCB_LOCK(stcb);
		asoc = &stcb->asoc;
		if (TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->sent_queue) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->locked_on_sending) {
				goto abort_anyway;
			}
			/* there is nothing queued to send, so I'm done... */
			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
				/* only send SHUTDOWN the first time through */
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb,
				    asoc->primary_destination);
			}
		} else {
			/*
			 * we still got (or just got) data to send, so set
			 * SHUTDOWN_PENDING
			 */
			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
			    asoc->primary_destination);

			if (asoc->locked_on_sending) {
				/* Locked to send out the data */
				struct sctp_stream_queue_pending *sp;

				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
				if (sp == NULL) {
					SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
					    asoc->locked_on_sending->stream_no);
				} else {
					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				struct mbuf *op_err;

		abort_anyway:
				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (op_err) {
					/* Fill in the user initiated abort */
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(op_err) =
					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
					ph = mtod(op_err,
					    struct sctp_paramhdr *);
					ph->param_type = htons(
					    SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(op_err));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
				}
#if defined(SCTP_PANIC_ON_ABORT)
				panic("shutdown does an abort");
#endif
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_RESPONSE_TO_USER_REQ,
				    op_err, SCTP_SO_LOCKED);
				goto skip_unlock;
			} else {
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
			}
		}
		SCTP_TCB_UNLOCK(stcb);
	}
skip_unlock:
	SCTP_INP_RUNLOCK(inp);
	return 0;
}

/*
 * copies a "user" presentable address and removes embedded scope, etc.
 * returns 0 on success, 1 on error
 */
static uint32_t
sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
{
#ifdef INET6
	struct sockaddr_in6 lsa6;

	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
	    &lsa6);
#endif
	memcpy(ss, sa, sa->sa_len);
	return (0);
}



/*
 * NOTE: assumes addr lock is held
 */
static size_t
sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    size_t limit,
    struct sockaddr_storage *sas,
    uint32_t vrf_id)
{
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	size_t actual;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;

	actual = 0;
	if (limit <= 0)
		return (actual);

	if (stcb) {
		/* Turn on all the appropriate scope */
		loopback_scope = stcb->asoc.loopback_scope;
		ipv4_local_scope = stcb->asoc.ipv4_local_scope;
		local_scope = stcb->asoc.local_scope;
		site_scope = stcb->asoc.site_scope;
	} else {
		/* Turn on ALL scope, since we look at the EP */
		loopback_scope = ipv4_local_scope = local_scope =
		    site_scope = 1;
	}
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		return (0);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* Skip loopback if loopback_scope not set */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (stcb) {
					/*
					 * For the BOUND-ALL case, the list
					 * associated with a TCB is Always
					 * considered a reverse list.. i.e.
					 * it lists addresses that are NOT
					 * part of the association. If this
					 * is one of those we must skip it.
					 */
					if (sctp_is_addr_restricted(stcb,
					    sctp_ifa)) {
						continue;
					}
				}
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * we skip
							 * unspecified
							 * addresses
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
#ifdef INET6
						if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
							in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
							((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
							actual += sizeof(struct sockaddr_in6);
						} else {
#endif
							memcpy(sas, sin, sizeof(*sin));
							((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
							actual += sizeof(*sin);
#ifdef INET6
						}
#endif
						if (actual >= limit) {
							return (actual);
						}
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							/*
							 * we skip
							 * unspecified
							 * addresses
							 */
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						memcpy(sas, sin6, sizeof(*sin6));
						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
						actual += sizeof(*sin6);
						if (actual >= limit) {
							return (actual);
						}
					} else {
						continue;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (stcb) {
				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
					continue;
				}
			}
			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
				continue;

			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
			sas = (struct sockaddr_storage *)((caddr_t)sas +
			    laddr->ifa->address.sa.sa_len);
			actual += laddr->ifa->address.sa.sa_len;
			if (actual >= limit) {
				return (actual);
			}
		}
	}
	return (actual);
}

static size_t
sctp_fill_up_addresses(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    size_t limit,
    struct sockaddr_storage *sas)
{
	size_t size = 0;

	SCTP_IPI_ADDR_RLOCK();
	/* fill up addresses for the endpoint's default vrf */
	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
	    inp->def_vrf_id);
	SCTP_IPI_ADDR_RUNLOCK();
	return (size);
}

/*
 * NOTE: assumes addr lock is held
 */
static int
sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
{
	int cnt = 0;
	struct sctp_vrf *vrf = NULL;

	/*
	 * In both the sub-set bound and bound_all cases we return the
	 * MAXIMUM number of addresses that you COULD get. In reality the
	 * sub-set bound may have an exclusion list for a given TCB, OR in
	 * the bound-all case a TCB may NOT include the loopback or other
	 * addresses as well.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		return (0);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifn *sctp_ifn;
		struct sctp_ifa *sctp_ifa;

		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* Count them if they are the right type */
				if (sctp_ifa->address.sa.sa_family == AF_INET) {
					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
						cnt += sizeof(struct sockaddr_in6);
					else
						cnt += sizeof(struct sockaddr_in);

				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
					cnt += sizeof(struct sockaddr_in6);
			}
		}
	} else {
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa->address.sa.sa_family == AF_INET) {
				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
					cnt += sizeof(struct sockaddr_in6);
				else
					cnt += sizeof(struct sockaddr_in);

			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
				cnt += sizeof(struct sockaddr_in6);
		}
	}
	return (cnt);
}

static int
sctp_count_max_addresses(struct sctp_inpcb *inp)
{
	int cnt = 0;

	SCTP_IPI_ADDR_RLOCK();
	/* count addresses for the endpoint's default VRF */
	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
	SCTP_IPI_ADDR_RUNLOCK();
	return (cnt);
}

static int
sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
    size_t optsize, void *p, int delay)
{
	int error = 0;
	int creat_lock_on = 0;
	struct sctp_tcb *stcb = NULL;
	struct sockaddr *sa;
	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
	int added = 0;
	uint32_t vrf_id;
	int bad_addresses = 0;
	sctp_assoc_t *a_id;

	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
		return (EADDRINUSE);
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	}
	if (stcb) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
		return (EALREADY);
	}
	SCTP_INP_INCR_REF(inp);
	SCTP_ASOC_CREATE_LOCK(inp);
	creat_lock_on = 1;
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
		error = EFAULT;
		goto out_now;
	}
	totaddrp = (int *)optval;
	totaddr = *totaddrp;
	sa = (struct sockaddr *)(totaddrp + 1);
	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4,
	    &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
	if ((stcb != NULL) || bad_addresses) {
		/* Already have or are bringing up an association */
		SCTP_ASOC_CREATE_UNLOCK(inp);
		creat_lock_on = 0;
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		if (bad_addresses == 0) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
			error = EALREADY;
		}
		goto out_now;
	}
#ifdef INET6
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
	    (num_v6 > 0)) {
		error = EINVAL;
		goto out_now;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
	    (num_v4 > 0)) {
		struct in6pcb *inp6;

		inp6 = (struct in6pcb *)inp;
		if (SCTP_IPV6_V6ONLY(inp6)) {
			/*
			 * if IPV6_V6ONLY flag, ignore connections destined
			 * to a v4 addr or v4-mapped addr
			 */
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			error = EINVAL;
			goto out_now;
		}
	}
#endif				/* INET6 */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind an ephemeral port */
		error = sctp_inpcb_bind(so, NULL, NULL, p);
		if (error) {
			goto out_now;
		}
	}
	/* FIX ME: do we want to pass in a vrf on the connect call? */
	vrf_id = inp->def_vrf_id;


	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
	    (struct thread *)p
	    );
	if (stcb == NULL) {
		/* Gak! no memory */
		goto out_now;
	}
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
	/* move to second address */
	if (sa->sa_family == AF_INET)
		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
	else
		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));

	error = 0;
	added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
	/* Fill in the return id */
	if (error) {
		(void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
		goto out_now;
	}
	a_id = (sctp_assoc_t *) optval;
	*a_id = sctp_get_associd(stcb);

	/* initialize authentication parameters for the assoc */
	sctp_initialize_auth_params(inp, stcb);

	if (delay) {
		/* doing delayed connection */
		stcb->asoc.delayed_connection = 1;
		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
	}
	SCTP_TCB_UNLOCK(stcb);
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		/* Set the connected flag so we can queue data */
		soisconnecting(so);
	}
out_now:
	if (creat_lock_on) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
	}
	SCTP_INP_DECR_REF(inp);
	return error;
}

#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
		SCTP_INP_RLOCK(inp); \
		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
		if (stcb) { \
			SCTP_TCB_LOCK(stcb); \
		} \
		SCTP_INP_RUNLOCK(inp); \
	} else if (assoc_id != 0) { \
		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
		if (stcb == NULL) { \
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
			error = ENOENT; \
			break; \
		} \
	} else { \
		stcb = NULL; \
	} \
}


#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
	if (size < sizeof(type)) { \
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
		error = EINVAL; \
		break; \
	} else { \
		destp = (type *)srcp; \
	} \
}

static int
sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
    void *p)
{
	struct sctp_inpcb *inp = NULL;
	int error, val = 0;
	struct sctp_tcb *stcb = NULL;

	if (optval == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return EINVAL;
	}
	error = 0;

	switch (optname) {
	case SCTP_NODELAY:
	case SCTP_AUTOCLOSE:
	case SCTP_EXPLICIT_EOR:
	case SCTP_AUTO_ASCONF:
	case SCTP_DISABLE_FRAGMENTS:
	case SCTP_I_WANT_MAPPED_V4_ADDR:
	case SCTP_USE_EXT_RCVINFO:
		SCTP_INP_RLOCK(inp);
		switch (optname) {
		case SCTP_DISABLE_FRAGMENTS:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
			break;
		case SCTP_I_WANT_MAPPED_V4_ADDR:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
			break;
		case SCTP_AUTO_ASCONF:
			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
				/* only valid for bound all sockets */
				val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				error = EINVAL;
				goto flags_out;
			}
			break;
		case SCTP_EXPLICIT_EOR:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
			break;
		case SCTP_NODELAY:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
			break;
		case SCTP_USE_EXT_RCVINFO:
			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
			break;
		case SCTP_AUTOCLOSE:
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
			else
				val = 0;
			break;

		default:
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
			error = ENOPROTOOPT;
		}		/* end switch (sopt->sopt_name) */
		if (optname != SCTP_AUTOCLOSE) {
			/* make it an "on/off" value */
			val = (val != 0);
		}
		if (*optsize < sizeof(val)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			error = EINVAL;
		}
flags_out:
		SCTP_INP_RUNLOCK(inp);
		if (error == 0) {
			/* return the option value */
			*(int *)optval = val;
			*optsize = sizeof(val);
		}
		break;
	case SCTP_GET_PACKET_LOG:
		{
#ifdef SCTP_PACKET_LOGGING
			uint8_t *target;
			int ret;

			SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
			ret = sctp_copy_out_packet_log(target, (int)*optsize);
			*optsize = ret;
#else
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
			error = EOPNOTSUPP;
#endif
			break;
		}
	case SCTP_REUSE_PORT:
		{
			uint32_t *value;

			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
				/* Can't do this for a 1-m socket */
				error = EINVAL;
				break;
			}
			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			*value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_PARTIAL_DELIVERY_POINT:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			*value = inp->partial_delivery_point;
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_FRAGMENT_INTERLEAVE:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
					*value = SCTP_FRAG_LEVEL_2;
				} else {
					*value = SCTP_FRAG_LEVEL_1;
				}
			} else {
				*value = SCTP_FRAG_LEVEL_0;
			}
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_CMT_ON_OFF:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
				if (stcb) {
					av->assoc_value = stcb->asoc.sctp_cmt_on_off;
					SCTP_TCB_UNLOCK(stcb);

				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
					error = ENOTCONN;
				}
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
				error = ENOPROTOOPT;
			}
			*optsize = sizeof(*av);
		}
		break;
		/* EY - set socket option for nr_sacks */
	case SCTP_NR_SACK_ON_OFF:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
				if (stcb) {
					av->assoc_value = stcb->asoc.sctp_nr_sack_on_off;
					SCTP_TCB_UNLOCK(stcb);

				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
					error = ENOTCONN;
				}
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
				error = ENOPROTOOPT;
			}
			*optsize = sizeof(*av);
		}
		break;
		/* JRS - Get socket option for pluggable congestion control */
	case SCTP_PLUGGABLE_CC:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
			if (stcb) {
				av->assoc_value = stcb->asoc.congestion_control_module;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
			}
			*optsize = sizeof(*av);
		}
		break;
	case SCTP_GET_ADDR_LEN:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			error = EINVAL;
#ifdef INET
			if (av->assoc_value == AF_INET) {
				av->assoc_value = sizeof(struct sockaddr_in);
				error = 0;
			}
#endif
#ifdef INET6
			if (av->assoc_value == AF_INET6) {
				av->assoc_value = sizeof(struct sockaddr_in6);
				error = 0;
			}
#endif
			if (error) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
			}
			*optsize = sizeof(*av);
		}
		break;
	case SCTP_GET_ASSOC_NUMBER:
		{
			uint32_t *value, cnt;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			cnt = 0;
			SCTP_INP_RLOCK(inp);
			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
				cnt++;
			}
			SCTP_INP_RUNLOCK(inp);
			*value = cnt;
			*optsize = sizeof(uint32_t);
		}
		break;

	case SCTP_GET_ASSOC_ID_LIST:
		{
			struct sctp_assoc_ids *ids;
			unsigned int at, limit;

			SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
			at = 0;
			limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
			SCTP_INP_RLOCK(inp);
			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
				if (at < limit) {
					ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
				} else {
					error = EINVAL;
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
			}
			SCTP_INP_RUNLOCK(inp);
			ids->gaids_number_of_ids = at;
			*optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
		}
		break;
	case SCTP_CONTEXT:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
			SCTP_FIND_STCB(inp, stcb, av->assoc_id);

			if (stcb) {
				av->assoc_value = stcb->asoc.context;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_INP_RLOCK(inp);
				av->assoc_value = inp->sctp_context;
				SCTP_INP_RUNLOCK(inp);
			}
			*optsize = sizeof(*av);
		}
		break;
	case SCTP_VRF_ID:
		{
			uint32_t *default_vrfid;

			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
			*default_vrfid = inp->def_vrf_id;
			break;
		}
	case SCTP_GET_ASOC_VRF:
		{
			struct sctp_assoc_value *id;

			SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
			SCTP_FIND_STCB(inp, stcb, id->assoc_id);
			if (stcb == NULL) {
				error = EINVAL;
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
				break;
			}
			id->assoc_value = stcb->asoc.vrf_id;
			break;
		}
	case SCTP_GET_VRF_IDS:
		{
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
			error = EOPNOTSUPP;
			break;
		}
	case SCTP_GET_NONCE_VALUES:
		{
			struct sctp_get_nonce_values *gnv;

			SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
			SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);

			if (stcb) {
				gnv->gn_peers_tag = stcb->asoc.peer_vtag;
				gnv->gn_local_tag = stcb->asoc.my_vtag;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
				error = ENOTCONN;
			}
			*optsize = sizeof(*gnv);
		}
		break;
	case SCTP_DELAYED_SACK:
		{
			struct sctp_sack_info *sack;

			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
			if (stcb) {
				sack->sack_delay = stcb->asoc.delayed_ack;
				sack->sack_freq = stcb->asoc.sack_freq;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_INP_RLOCK(inp);
				sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
				sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
				SCTP_INP_RUNLOCK(inp);
			}
			*optsize = sizeof(*sack);
		}
		break;

	case SCTP_GET_SNDBUF_USE:
		{
			struct sctp_sockstat *ss;

			SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
			SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);

			if (stcb) {
				ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
				ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
				    stcb->asoc.size_on_all_streams);
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
				error = ENOTCONN;
			}
			*optsize = sizeof(struct sctp_sockstat);
		}
break; 1961 case SCTP_MAX_BURST: 1962 { 1963 uint8_t *value; 1964 1965 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); 1966 1967 SCTP_INP_RLOCK(inp); 1968 *value = inp->sctp_ep.max_burst; 1969 SCTP_INP_RUNLOCK(inp); 1970 *optsize = sizeof(uint8_t); 1971 } 1972 break; 1973 case SCTP_MAXSEG: 1974 { 1975 struct sctp_assoc_value *av; 1976 int ovh; 1977 1978 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1979 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1980 1981 if (stcb) { 1982 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 1983 SCTP_TCB_UNLOCK(stcb); 1984 } else { 1985 SCTP_INP_RLOCK(inp); 1986 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1987 ovh = SCTP_MED_OVERHEAD; 1988 } else { 1989 ovh = SCTP_MED_V4_OVERHEAD; 1990 } 1991 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) 1992 av->assoc_value = 0; 1993 else 1994 av->assoc_value = inp->sctp_frag_point - ovh; 1995 SCTP_INP_RUNLOCK(inp); 1996 } 1997 *optsize = sizeof(struct sctp_assoc_value); 1998 } 1999 break; 2000 case SCTP_GET_STAT_LOG: 2001 error = sctp_fill_stat_log(optval, optsize); 2002 break; 2003 case SCTP_EVENTS: 2004 { 2005 struct sctp_event_subscribe *events; 2006 2007 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 2008 memset(events, 0, sizeof(*events)); 2009 SCTP_INP_RLOCK(inp); 2010 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 2011 events->sctp_data_io_event = 1; 2012 2013 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 2014 events->sctp_association_event = 1; 2015 2016 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2017 events->sctp_address_event = 1; 2018 2019 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2020 events->sctp_send_failure_event = 1; 2021 2022 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 2023 events->sctp_peer_error_event = 1; 2024 2025 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 2026 events->sctp_shutdown_event = 1; 2027 2028 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 2029 events->sctp_partial_delivery_event = 1; 2030 2031 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2032 events->sctp_adaptation_layer_event = 1; 2033 2034 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 2035 events->sctp_authentication_event = 1; 2036 2037 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT)) 2038 events->sctp_sender_dry_event = 1; 2039 2040 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2041 events->sctp_stream_reset_event = 1; 2042 SCTP_INP_RUNLOCK(inp); 2043 *optsize = sizeof(struct sctp_event_subscribe); 2044 } 2045 break; 2046 2047 case SCTP_ADAPTATION_LAYER: 2048 { 2049 uint32_t *value; 2050 2051 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2052 2053 SCTP_INP_RLOCK(inp); 2054 *value = inp->sctp_ep.adaptation_layer_indicator; 2055 SCTP_INP_RUNLOCK(inp); 2056 *optsize = sizeof(uint32_t); 2057 } 2058 break; 2059 case SCTP_SET_INITIAL_DBG_SEQ: 2060 { 2061 uint32_t *value; 2062 2063 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2064 SCTP_INP_RLOCK(inp); 2065 *value = inp->sctp_ep.initial_sequence_debug; 2066 SCTP_INP_RUNLOCK(inp); 2067 *optsize = sizeof(uint32_t); 2068 } 2069 break; 2070 case SCTP_GET_LOCAL_ADDR_SIZE: 2071 { 2072 uint32_t *value; 2073 2074 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2075 SCTP_INP_RLOCK(inp); 2076 *value = sctp_count_max_addresses(inp); 2077 SCTP_INP_RUNLOCK(inp); 2078 *optsize = sizeof(uint32_t); 2079 } 2080 break; 2081 case SCTP_GET_REMOTE_ADDR_SIZE: 2082 { 2083 
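/*
 * This option returns the number of bytes needed to hold every remote
 * address of the association, so a caller can size the buffer for a
 * following SCTP_GET_PEER_ADDRESSES query.  Rough userland sketch (an
 * assumption for illustration, not part of the original source); "fd" and
 * "assoc_id" are placeholders, and note the association id is passed in
 * through the same uint32_t that returns the size:
 *
 *	uint32_t sz = assoc_id;
 *	socklen_t len = sizeof(sz);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_REMOTE_ADDR_SIZE,
 *	    &sz, &len) == 0) {
 *		/- allocate sizeof(struct sctp_getaddresses) + sz bytes and
 *		 - issue SCTP_GET_PEER_ADDRESSES next. -/
 *	}
 */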
uint32_t *value; 2084 size_t size; 2085 struct sctp_nets *net; 2086 2087 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2088 /* FIXME MT: change to sctp_assoc_value? */ 2089 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 2090 2091 if (stcb) { 2092 size = 0; 2093 /* Count the sizes */ 2094 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2095 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2096 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2097 size += sizeof(struct sockaddr_in6); 2098 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2099 size += sizeof(struct sockaddr_in); 2100 } else { 2101 /* huh */ 2102 break; 2103 } 2104 } 2105 SCTP_TCB_UNLOCK(stcb); 2106 *value = (uint32_t) size; 2107 } else { 2108 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2109 error = ENOTCONN; 2110 } 2111 *optsize = sizeof(uint32_t); 2112 } 2113 break; 2114 case SCTP_GET_PEER_ADDRESSES: 2115 /* 2116 * Get the address information, an array is passed in to 2117 * fill up we pack it. 2118 */ 2119 { 2120 size_t cpsz, left; 2121 struct sockaddr_storage *sas; 2122 struct sctp_nets *net; 2123 struct sctp_getaddresses *saddr; 2124 2125 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2126 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2127 2128 if (stcb) { 2129 left = (*optsize) - sizeof(struct sctp_getaddresses); 2130 *optsize = sizeof(struct sctp_getaddresses); 2131 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2132 2133 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2134 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) || 2135 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2136 cpsz = sizeof(struct sockaddr_in6); 2137 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2138 cpsz = sizeof(struct sockaddr_in); 2139 } else { 2140 /* huh */ 2141 break; 2142 } 2143 if (left < cpsz) { 2144 /* not enough room. 
*/ 2145 break; 2146 } 2147 #ifdef INET6 2148 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 2149 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 2150 /* Must map the address */ 2151 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 2152 (struct sockaddr_in6 *)sas); 2153 } else { 2154 #endif 2155 memcpy(sas, &net->ro._l_addr, cpsz); 2156 #ifdef INET6 2157 } 2158 #endif 2159 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 2160 2161 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 2162 left -= cpsz; 2163 *optsize += cpsz; 2164 } 2165 SCTP_TCB_UNLOCK(stcb); 2166 } else { 2167 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2168 error = ENOENT; 2169 } 2170 } 2171 break; 2172 case SCTP_GET_LOCAL_ADDRESSES: 2173 { 2174 size_t limit, actual; 2175 struct sockaddr_storage *sas; 2176 struct sctp_getaddresses *saddr; 2177 2178 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2179 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2180 2181 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2182 limit = *optsize - sizeof(sctp_assoc_t); 2183 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2184 if (stcb) { 2185 SCTP_TCB_UNLOCK(stcb); 2186 } 2187 *optsize = sizeof(struct sockaddr_storage) + actual; 2188 } 2189 break; 2190 case SCTP_PEER_ADDR_PARAMS: 2191 { 2192 struct sctp_paddrparams *paddrp; 2193 struct sctp_nets *net; 2194 2195 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2196 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2197 2198 net = NULL; 2199 if (stcb) { 2200 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2201 } else { 2202 /* 2203 * We increment here since 2204 * sctp_findassociation_ep_addr() wil do a 2205 * decrement if it finds the stcb as long as 2206 * the locked tcb (last argument) is NOT a 2207 * TCB.. aka NULL. 
2208 */ 2209 SCTP_INP_INCR_REF(inp); 2210 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2211 if (stcb == NULL) { 2212 SCTP_INP_DECR_REF(inp); 2213 } 2214 } 2215 if (stcb && (net == NULL)) { 2216 struct sockaddr *sa; 2217 2218 sa = (struct sockaddr *)&paddrp->spp_address; 2219 if (sa->sa_family == AF_INET) { 2220 struct sockaddr_in *sin; 2221 2222 sin = (struct sockaddr_in *)sa; 2223 if (sin->sin_addr.s_addr) { 2224 error = EINVAL; 2225 SCTP_TCB_UNLOCK(stcb); 2226 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2227 break; 2228 } 2229 } else if (sa->sa_family == AF_INET6) { 2230 struct sockaddr_in6 *sin6; 2231 2232 sin6 = (struct sockaddr_in6 *)sa; 2233 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2234 error = EINVAL; 2235 SCTP_TCB_UNLOCK(stcb); 2236 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2237 break; 2238 } 2239 } else { 2240 error = EAFNOSUPPORT; 2241 SCTP_TCB_UNLOCK(stcb); 2242 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2243 break; 2244 } 2245 } 2246 if (stcb) { 2247 /* Applys to the specific association */ 2248 paddrp->spp_flags = 0; 2249 if (net) { 2250 int ovh; 2251 2252 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2253 ovh = SCTP_MED_OVERHEAD; 2254 } else { 2255 ovh = SCTP_MED_V4_OVERHEAD; 2256 } 2257 2258 2259 paddrp->spp_pathmaxrxt = net->failure_threshold; 2260 paddrp->spp_pathmtu = net->mtu - ovh; 2261 /* get flags for HB */ 2262 if (net->dest_state & SCTP_ADDR_NOHB) 2263 paddrp->spp_flags |= SPP_HB_DISABLE; 2264 else 2265 paddrp->spp_flags |= SPP_HB_ENABLE; 2266 /* get flags for PMTU */ 2267 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2268 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2269 } else { 2270 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2271 } 2272 #ifdef INET 2273 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2274 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2275 paddrp->spp_flags |= SPP_IPV4_TOS; 2276 } 2277 #endif 2278 #ifdef INET6 2279 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2280 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2281 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2282 } 2283 #endif 2284 } else { 2285 /* 2286 * No destination so return default 2287 * value 2288 */ 2289 int cnt = 0; 2290 2291 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2292 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2293 #ifdef INET 2294 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2295 paddrp->spp_flags |= SPP_IPV4_TOS; 2296 #endif 2297 #ifdef INET6 2298 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2299 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2300 #endif 2301 /* default settings should be these */ 2302 if (stcb->asoc.hb_is_disabled == 0) { 2303 paddrp->spp_flags |= SPP_HB_ENABLE; 2304 } else { 2305 paddrp->spp_flags |= SPP_HB_DISABLE; 2306 } 2307 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2308 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2309 cnt++; 2310 } 2311 } 2312 if (cnt) { 2313 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2314 } 2315 } 2316 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2317 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2318 SCTP_TCB_UNLOCK(stcb); 2319 } else { 2320 /* Use endpoint defaults */ 2321 SCTP_INP_RLOCK(inp); 2322 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2323 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2324 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2325 /* get 
inp's default */ 2326 #ifdef INET 2327 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2328 paddrp->spp_flags |= SPP_IPV4_TOS; 2329 #endif 2330 #ifdef INET6 2331 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2332 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2333 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2334 } 2335 #endif 2336 /* can't return this */ 2337 paddrp->spp_pathmtu = 0; 2338 2339 /* default behavior, no stcb */ 2340 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2341 2342 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2343 paddrp->spp_flags |= SPP_HB_ENABLE; 2344 } else { 2345 paddrp->spp_flags |= SPP_HB_DISABLE; 2346 } 2347 SCTP_INP_RUNLOCK(inp); 2348 } 2349 *optsize = sizeof(struct sctp_paddrparams); 2350 } 2351 break; 2352 case SCTP_GET_PEER_ADDR_INFO: 2353 { 2354 struct sctp_paddrinfo *paddri; 2355 struct sctp_nets *net; 2356 2357 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2358 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2359 2360 net = NULL; 2361 if (stcb) { 2362 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2363 } else { 2364 /* 2365 * We increment here since 2366 * sctp_findassociation_ep_addr() wil do a 2367 * decrement if it finds the stcb as long as 2368 * the locked tcb (last argument) is NOT a 2369 * TCB.. aka NULL. 2370 */ 2371 SCTP_INP_INCR_REF(inp); 2372 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2373 if (stcb == NULL) { 2374 SCTP_INP_DECR_REF(inp); 2375 } 2376 } 2377 2378 if ((stcb) && (net)) { 2379 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2380 paddri->spinfo_cwnd = net->cwnd; 2381 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2382 paddri->spinfo_rto = net->RTO; 2383 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2384 SCTP_TCB_UNLOCK(stcb); 2385 } else { 2386 if (stcb) { 2387 SCTP_TCB_UNLOCK(stcb); 2388 } 2389 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2390 error = ENOENT; 2391 } 2392 *optsize = sizeof(struct sctp_paddrinfo); 2393 } 2394 break; 2395 case SCTP_PCB_STATUS: 2396 { 2397 struct sctp_pcbinfo *spcb; 2398 2399 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2400 sctp_fill_pcbinfo(spcb); 2401 *optsize = sizeof(struct sctp_pcbinfo); 2402 } 2403 break; 2404 2405 case SCTP_STATUS: 2406 { 2407 struct sctp_nets *net; 2408 struct sctp_status *sstat; 2409 2410 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2411 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2412 2413 if (stcb == NULL) { 2414 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2415 error = EINVAL; 2416 break; 2417 } 2418 /* 2419 * I think passing the state is fine since 2420 * sctp_constants.h will be available to the user 2421 * land. 2422 */ 2423 sstat->sstat_state = stcb->asoc.state; 2424 sstat->sstat_assoc_id = sctp_get_associd(stcb); 2425 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2426 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2427 /* 2428 * We can't include chunks that have been passed to 2429 * the socket layer. Only things in queue. 
2430 */ 2431 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2432 stcb->asoc.cnt_on_all_streams); 2433 2434 2435 sstat->sstat_instrms = stcb->asoc.streamincnt; 2436 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2437 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2438 memcpy(&sstat->sstat_primary.spinfo_address, 2439 &stcb->asoc.primary_destination->ro._l_addr, 2440 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2441 net = stcb->asoc.primary_destination; 2442 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2443 /* 2444 * Again the user can get info from sctp_constants.h 2445 * for what the state of the network is. 2446 */ 2447 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2448 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2449 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2450 sstat->sstat_primary.spinfo_rto = net->RTO; 2451 sstat->sstat_primary.spinfo_mtu = net->mtu; 2452 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2453 SCTP_TCB_UNLOCK(stcb); 2454 *optsize = sizeof(*sstat); 2455 } 2456 break; 2457 case SCTP_RTOINFO: 2458 { 2459 struct sctp_rtoinfo *srto; 2460 2461 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2462 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2463 2464 if (stcb) { 2465 srto->srto_initial = stcb->asoc.initial_rto; 2466 srto->srto_max = stcb->asoc.maxrto; 2467 srto->srto_min = stcb->asoc.minrto; 2468 SCTP_TCB_UNLOCK(stcb); 2469 } else { 2470 SCTP_INP_RLOCK(inp); 2471 srto->srto_initial = inp->sctp_ep.initial_rto; 2472 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2473 srto->srto_min = inp->sctp_ep.sctp_minrto; 2474 SCTP_INP_RUNLOCK(inp); 2475 } 2476 *optsize = sizeof(*srto); 2477 } 2478 break; 2479 case SCTP_ASSOCINFO: 2480 { 2481 struct sctp_assocparams *sasoc; 2482 uint32_t oldval; 2483 2484 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2485 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2486 2487 if (stcb) { 2488 oldval = sasoc->sasoc_cookie_life; 2489 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); 2490 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2491 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2492 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2493 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2494 SCTP_TCB_UNLOCK(stcb); 2495 } else { 2496 SCTP_INP_RLOCK(inp); 2497 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2498 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2499 sasoc->sasoc_number_peer_destinations = 0; 2500 sasoc->sasoc_peer_rwnd = 0; 2501 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2502 SCTP_INP_RUNLOCK(inp); 2503 } 2504 *optsize = sizeof(*sasoc); 2505 } 2506 break; 2507 case SCTP_DEFAULT_SEND_PARAM: 2508 { 2509 struct sctp_sndrcvinfo *s_info; 2510 2511 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2512 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2513 2514 if (stcb) { 2515 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2516 SCTP_TCB_UNLOCK(stcb); 2517 } else { 2518 SCTP_INP_RLOCK(inp); 2519 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2520 SCTP_INP_RUNLOCK(inp); 2521 } 2522 *optsize = sizeof(*s_info); 2523 } 2524 break; 2525 case SCTP_INITMSG: 2526 { 2527 struct sctp_initmsg *sinit; 2528 2529 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2530 SCTP_INP_RLOCK(inp); 2531 
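/*
 * Report the endpoint defaults that seed the INIT of any new association:
 * the requested number of outbound streams, the inbound stream limit we
 * advertise, the maximum number of INIT retransmissions, and the ceiling
 * on the INIT retransmission timeout.
 */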
sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2532 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2533 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2534 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2535 SCTP_INP_RUNLOCK(inp); 2536 *optsize = sizeof(*sinit); 2537 } 2538 break; 2539 case SCTP_PRIMARY_ADDR: 2540 /* we allow a "get" operation on this */ 2541 { 2542 struct sctp_setprim *ssp; 2543 2544 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2545 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2546 2547 if (stcb) { 2548 /* simply copy out the sockaddr_storage... */ 2549 int len; 2550 2551 len = *optsize; 2552 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2553 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2554 2555 memcpy(&ssp->ssp_addr, 2556 &stcb->asoc.primary_destination->ro._l_addr, 2557 len); 2558 SCTP_TCB_UNLOCK(stcb); 2559 } else { 2560 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2561 error = EINVAL; 2562 } 2563 *optsize = sizeof(*ssp); 2564 } 2565 break; 2566 2567 case SCTP_HMAC_IDENT: 2568 { 2569 struct sctp_hmacalgo *shmac; 2570 sctp_hmaclist_t *hmaclist; 2571 uint32_t size; 2572 int i; 2573 2574 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2575 2576 SCTP_INP_RLOCK(inp); 2577 hmaclist = inp->sctp_ep.local_hmacs; 2578 if (hmaclist == NULL) { 2579 /* no HMACs to return */ 2580 *optsize = sizeof(*shmac); 2581 SCTP_INP_RUNLOCK(inp); 2582 break; 2583 } 2584 /* is there room for all of the hmac ids? */ 2585 size = sizeof(*shmac) + (hmaclist->num_algo * 2586 sizeof(shmac->shmac_idents[0])); 2587 if ((size_t)(*optsize) < size) { 2588 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2589 error = EINVAL; 2590 SCTP_INP_RUNLOCK(inp); 2591 break; 2592 } 2593 /* copy in the list */ 2594 shmac->shmac_number_of_idents = hmaclist->num_algo; 2595 for (i = 0; i < hmaclist->num_algo; i++) { 2596 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2597 } 2598 SCTP_INP_RUNLOCK(inp); 2599 *optsize = size; 2600 break; 2601 } 2602 case SCTP_AUTH_ACTIVE_KEY: 2603 { 2604 struct sctp_authkeyid *scact; 2605 2606 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2607 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2608 2609 if (stcb) { 2610 /* get the active key on the assoc */ 2611 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; 2612 SCTP_TCB_UNLOCK(stcb); 2613 } else { 2614 /* get the endpoint active key */ 2615 SCTP_INP_RLOCK(inp); 2616 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2617 SCTP_INP_RUNLOCK(inp); 2618 } 2619 *optsize = sizeof(*scact); 2620 break; 2621 } 2622 case SCTP_LOCAL_AUTH_CHUNKS: 2623 { 2624 struct sctp_authchunks *sac; 2625 sctp_auth_chklist_t *chklist = NULL; 2626 size_t size = 0; 2627 2628 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2629 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2630 2631 if (stcb) { 2632 /* get off the assoc */ 2633 chklist = stcb->asoc.local_auth_chunks; 2634 /* is there enough space? 
*/ 2635 size = sctp_auth_get_chklist_size(chklist); 2636 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2637 error = EINVAL; 2638 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2639 } else { 2640 /* copy in the chunks */ 2641 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2642 } 2643 SCTP_TCB_UNLOCK(stcb); 2644 } else { 2645 /* get off the endpoint */ 2646 SCTP_INP_RLOCK(inp); 2647 chklist = inp->sctp_ep.local_auth_chunks; 2648 /* is there enough space? */ 2649 size = sctp_auth_get_chklist_size(chklist); 2650 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2651 error = EINVAL; 2652 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2653 } else { 2654 /* copy in the chunks */ 2655 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2656 } 2657 SCTP_INP_RUNLOCK(inp); 2658 } 2659 *optsize = sizeof(struct sctp_authchunks) + size; 2660 break; 2661 } 2662 case SCTP_PEER_AUTH_CHUNKS: 2663 { 2664 struct sctp_authchunks *sac; 2665 sctp_auth_chklist_t *chklist = NULL; 2666 size_t size = 0; 2667 2668 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2669 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2670 2671 if (stcb) { 2672 /* get off the assoc */ 2673 chklist = stcb->asoc.peer_auth_chunks; 2674 /* is there enough space? */ 2675 size = sctp_auth_get_chklist_size(chklist); 2676 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2677 error = EINVAL; 2678 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2679 } else { 2680 /* copy in the chunks */ 2681 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2682 } 2683 SCTP_TCB_UNLOCK(stcb); 2684 } else { 2685 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2686 error = ENOENT; 2687 } 2688 *optsize = sizeof(struct sctp_authchunks) + size; 2689 break; 2690 } 2691 2692 2693 default: 2694 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2695 error = ENOPROTOOPT; 2696 *optsize = 0; 2697 break; 2698 } /* end switch (sopt->sopt_name) */ 2699 return (error); 2700 } 2701 2702 static int 2703 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2704 void *p) 2705 { 2706 int error, set_opt; 2707 uint32_t *mopt; 2708 struct sctp_tcb *stcb = NULL; 2709 struct sctp_inpcb *inp = NULL; 2710 uint32_t vrf_id; 2711 2712 if (optval == NULL) { 2713 SCTP_PRINTF("optval is NULL\n"); 2714 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2715 return (EINVAL); 2716 } 2717 inp = (struct sctp_inpcb *)so->so_pcb; 2718 if (inp == 0) { 2719 SCTP_PRINTF("inp is NULL?\n"); 2720 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2721 return EINVAL; 2722 } 2723 vrf_id = inp->def_vrf_id; 2724 2725 error = 0; 2726 switch (optname) { 2727 case SCTP_NODELAY: 2728 case SCTP_AUTOCLOSE: 2729 case SCTP_AUTO_ASCONF: 2730 case SCTP_EXPLICIT_EOR: 2731 case SCTP_DISABLE_FRAGMENTS: 2732 case SCTP_USE_EXT_RCVINFO: 2733 case SCTP_I_WANT_MAPPED_V4_ADDR: 2734 /* copy in the option value */ 2735 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2736 set_opt = 0; 2737 if (error) 2738 break; 2739 switch (optname) { 2740 case SCTP_DISABLE_FRAGMENTS: 2741 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2742 break; 2743 case SCTP_AUTO_ASCONF: 2744 /* 2745 * NOTE: we don't really support this flag 2746 */ 2747 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2748 /* only valid for bound all sockets */ 2749 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2750 } else { 2751 
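/*
 * Automatic ASCONF is only meaningful when the endpoint is bound to all
 * addresses; for a socket bound to specific addresses the request is
 * rejected below.
 */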
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2752 return (EINVAL); 2753 } 2754 break; 2755 case SCTP_EXPLICIT_EOR: 2756 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2757 break; 2758 case SCTP_USE_EXT_RCVINFO: 2759 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2760 break; 2761 case SCTP_I_WANT_MAPPED_V4_ADDR: 2762 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2763 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2764 } else { 2765 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2766 return (EINVAL); 2767 } 2768 break; 2769 case SCTP_NODELAY: 2770 set_opt = SCTP_PCB_FLAGS_NODELAY; 2771 break; 2772 case SCTP_AUTOCLOSE: 2773 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2774 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2775 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2776 return (EINVAL); 2777 } 2778 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2779 /* 2780 * The value is in ticks. Note this does not effect 2781 * old associations, only new ones. 2782 */ 2783 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2784 break; 2785 } 2786 SCTP_INP_WLOCK(inp); 2787 if (*mopt != 0) { 2788 sctp_feature_on(inp, set_opt); 2789 } else { 2790 sctp_feature_off(inp, set_opt); 2791 } 2792 SCTP_INP_WUNLOCK(inp); 2793 break; 2794 case SCTP_REUSE_PORT: 2795 { 2796 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2797 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 2798 /* Can't set it after we are bound */ 2799 error = EINVAL; 2800 break; 2801 } 2802 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2803 /* Can't do this for a 1-m socket */ 2804 error = EINVAL; 2805 break; 2806 } 2807 if (optval) 2808 sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 2809 else 2810 sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE); 2811 } 2812 break; 2813 case SCTP_PARTIAL_DELIVERY_POINT: 2814 { 2815 uint32_t *value; 2816 2817 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2818 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2820 error = EINVAL; 2821 break; 2822 } 2823 inp->partial_delivery_point = *value; 2824 } 2825 break; 2826 case SCTP_FRAGMENT_INTERLEAVE: 2827 /* not yet until we re-write sctp_recvmsg() */ 2828 { 2829 uint32_t *level; 2830 2831 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); 2832 if (*level == SCTP_FRAG_LEVEL_2) { 2833 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2834 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2835 } else if (*level == SCTP_FRAG_LEVEL_1) { 2836 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2837 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2838 } else if (*level == SCTP_FRAG_LEVEL_0) { 2839 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2840 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2841 2842 } else { 2843 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2844 error = EINVAL; 2845 } 2846 } 2847 break; 2848 case SCTP_CMT_ON_OFF: 2849 { 2850 struct sctp_assoc_value *av; 2851 2852 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2853 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 2854 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2855 if (stcb) { 2856 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; 2857 SCTP_TCB_UNLOCK(stcb); 2858 } else { 2859 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2860 error = ENOTCONN; 2861 } 2862 } else { 2863 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2864 error = ENOPROTOOPT; 
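/*
 * CMT can only be toggled per association while the global
 * sctp_cmt_on_off sysctl has it enabled; otherwise the option is
 * reported as unsupported for this endpoint.
 */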
2865 } 2866 } 2867 break; 2868 /* EY nr_sack_on_off socket option */ 2869 case SCTP_NR_SACK_ON_OFF: 2870 { 2871 struct sctp_assoc_value *av; 2872 2873 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2874 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) { 2875 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2876 if (stcb) { 2877 stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value; 2878 SCTP_TCB_UNLOCK(stcb); 2879 } else { 2880 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2881 error = ENOTCONN; 2882 } 2883 } else { 2884 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2885 error = ENOPROTOOPT; 2886 } 2887 } 2888 break; 2889 /* JRS - Set socket option for pluggable congestion control */ 2890 case SCTP_PLUGGABLE_CC: 2891 { 2892 struct sctp_assoc_value *av; 2893 2894 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2895 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2896 if (stcb) { 2897 switch (av->assoc_value) { 2898 /* 2899 * JRS - Standard TCP congestion 2900 * control 2901 */ 2902 case SCTP_CC_RFC2581: 2903 { 2904 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 2905 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2906 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 2907 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 2908 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2909 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2910 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2911 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2912 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2913 SCTP_TCB_UNLOCK(stcb); 2914 break; 2915 } 2916 /* 2917 * JRS - High Speed TCP congestion 2918 * control (Floyd) 2919 */ 2920 case SCTP_CC_HSTCP: 2921 { 2922 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP; 2923 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2924 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack; 2925 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr; 2926 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2927 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2928 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2929 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2930 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2931 SCTP_TCB_UNLOCK(stcb); 2932 break; 2933 } 2934 /* JRS - HTCP congestion control */ 2935 case SCTP_CC_HTCP: 2936 { 2937 stcb->asoc.congestion_control_module = SCTP_CC_HTCP; 2938 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param; 2939 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack; 2940 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr; 2941 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout; 2942 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo; 2943 
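/*
 * HTCP supplies its own slow-start/FR/timeout/ECN handlers above; the
 * packet-dropped and output bookkeeping assigned next fall back to the
 * default congestion-control routines.
 */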
stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2944 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2945 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer; 2946 SCTP_TCB_UNLOCK(stcb); 2947 break; 2948 } 2949 /* 2950 * JRS - All other values are 2951 * invalid 2952 */ 2953 default: 2954 { 2955 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2956 error = EINVAL; 2957 SCTP_TCB_UNLOCK(stcb); 2958 break; 2959 } 2960 } 2961 } else { 2962 switch (av->assoc_value) { 2963 case SCTP_CC_RFC2581: 2964 case SCTP_CC_HSTCP: 2965 case SCTP_CC_HTCP: 2966 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 2967 break; 2968 default: 2969 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2970 error = EINVAL; 2971 break; 2972 }; 2973 } 2974 } 2975 break; 2976 case SCTP_CLR_STAT_LOG: 2977 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2978 error = EOPNOTSUPP; 2979 break; 2980 case SCTP_CONTEXT: 2981 { 2982 struct sctp_assoc_value *av; 2983 2984 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2985 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2986 2987 if (stcb) { 2988 stcb->asoc.context = av->assoc_value; 2989 SCTP_TCB_UNLOCK(stcb); 2990 } else { 2991 SCTP_INP_WLOCK(inp); 2992 inp->sctp_context = av->assoc_value; 2993 SCTP_INP_WUNLOCK(inp); 2994 } 2995 } 2996 break; 2997 case SCTP_VRF_ID: 2998 { 2999 uint32_t *default_vrfid; 3000 3001 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 3002 if (*default_vrfid > SCTP_MAX_VRF_ID) { 3003 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3004 error = EINVAL; 3005 break; 3006 } 3007 inp->def_vrf_id = *default_vrfid; 3008 break; 3009 } 3010 case SCTP_DEL_VRF_ID: 3011 { 3012 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3013 error = EOPNOTSUPP; 3014 break; 3015 } 3016 case SCTP_ADD_VRF_ID: 3017 { 3018 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3019 error = EOPNOTSUPP; 3020 break; 3021 } 3022 case SCTP_DELAYED_SACK: 3023 { 3024 struct sctp_sack_info *sack; 3025 3026 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 3027 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 3028 if (sack->sack_delay) { 3029 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 3030 sack->sack_delay = SCTP_MAX_SACK_DELAY; 3031 } 3032 if (stcb) { 3033 if (sack->sack_delay) { 3034 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3035 sack->sack_delay = TICKS_TO_MSEC(1); 3036 } 3037 stcb->asoc.delayed_ack = sack->sack_delay; 3038 } 3039 if (sack->sack_freq) { 3040 stcb->asoc.sack_freq = sack->sack_freq; 3041 } 3042 SCTP_TCB_UNLOCK(stcb); 3043 } else { 3044 SCTP_INP_WLOCK(inp); 3045 if (sack->sack_delay) { 3046 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3047 sack->sack_delay = TICKS_TO_MSEC(1); 3048 } 3049 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 3050 } 3051 if (sack->sack_freq) { 3052 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 3053 } 3054 SCTP_INP_WUNLOCK(inp); 3055 } 3056 break; 3057 } 3058 case SCTP_AUTH_CHUNK: 3059 { 3060 struct sctp_authchunk *sauth; 3061 3062 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 3063 3064 SCTP_INP_WLOCK(inp); 3065 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 3066 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3067 error = EINVAL; 3068 } 
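/*
 * sctp_auth_add_chunk() fails for chunk types that are not allowed to be
 * authenticated; anything else is added to the endpoint's local_auth_chunks
 * list.  Hedged userland sketch (illustration only, "fd" is a placeholder
 * for an SCTP socket) requesting that DATA chunks be authenticated:
 *
 *	struct sctp_authchunk ac;
 *
 *	ac.sauth_chunk = SCTP_DATA;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));
 */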
3069 SCTP_INP_WUNLOCK(inp); 3070 break; 3071 } 3072 case SCTP_AUTH_KEY: 3073 { 3074 struct sctp_authkey *sca; 3075 struct sctp_keyhead *shared_keys; 3076 sctp_sharedkey_t *shared_key; 3077 sctp_key_t *key = NULL; 3078 size_t size; 3079 3080 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 3081 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 3082 size = optsize - sizeof(*sca); 3083 3084 if (stcb) { 3085 /* set it on the assoc */ 3086 shared_keys = &stcb->asoc.shared_keys; 3087 /* clear the cached keys for this key id */ 3088 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 3089 /* 3090 * create the new shared key and 3091 * insert/replace it 3092 */ 3093 if (size > 0) { 3094 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3095 if (key == NULL) { 3096 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3097 error = ENOMEM; 3098 SCTP_TCB_UNLOCK(stcb); 3099 break; 3100 } 3101 } 3102 shared_key = sctp_alloc_sharedkey(); 3103 if (shared_key == NULL) { 3104 sctp_free_key(key); 3105 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3106 error = ENOMEM; 3107 SCTP_TCB_UNLOCK(stcb); 3108 break; 3109 } 3110 shared_key->key = key; 3111 shared_key->keyid = sca->sca_keynumber; 3112 error = sctp_insert_sharedkey(shared_keys, shared_key); 3113 SCTP_TCB_UNLOCK(stcb); 3114 } else { 3115 /* set it on the endpoint */ 3116 SCTP_INP_WLOCK(inp); 3117 shared_keys = &inp->sctp_ep.shared_keys; 3118 /* 3119 * clear the cached keys on all assocs for 3120 * this key id 3121 */ 3122 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 3123 /* 3124 * create the new shared key and 3125 * insert/replace it 3126 */ 3127 if (size > 0) { 3128 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3129 if (key == NULL) { 3130 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3131 error = ENOMEM; 3132 SCTP_INP_WUNLOCK(inp); 3133 break; 3134 } 3135 } 3136 shared_key = sctp_alloc_sharedkey(); 3137 if (shared_key == NULL) { 3138 sctp_free_key(key); 3139 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3140 error = ENOMEM; 3141 SCTP_INP_WUNLOCK(inp); 3142 break; 3143 } 3144 shared_key->key = key; 3145 shared_key->keyid = sca->sca_keynumber; 3146 error = sctp_insert_sharedkey(shared_keys, shared_key); 3147 SCTP_INP_WUNLOCK(inp); 3148 } 3149 break; 3150 } 3151 case SCTP_HMAC_IDENT: 3152 { 3153 struct sctp_hmacalgo *shmac; 3154 sctp_hmaclist_t *hmaclist; 3155 uint16_t hmacid; 3156 uint32_t i; 3157 3158 size_t found; 3159 3160 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3161 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) { 3162 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3163 error = EINVAL; 3164 break; 3165 } 3166 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents); 3167 if (hmaclist == NULL) { 3168 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3169 error = ENOMEM; 3170 break; 3171 } 3172 for (i = 0; i < shmac->shmac_number_of_idents; i++) { 3173 hmacid = shmac->shmac_idents[i]; 3174 if (sctp_auth_add_hmacid(hmaclist, hmacid)) { 3175 /* invalid HMACs were found */ ; 3176 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3177 error = EINVAL; 3178 sctp_free_hmaclist(hmaclist); 3179 goto sctp_set_hmac_done; 3180 } 3181 } 3182 found = 0; 3183 for (i = 0; i < hmaclist->num_algo; i++) { 3184 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 3185 /* already in list */ 3186 found = 1; 3187 } 3188 } 3189 if 
(!found) { 3190 sctp_free_hmaclist(hmaclist); 3191 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3192 error = EINVAL; 3193 break; 3194 } 3195 /* set it on the endpoint */ 3196 SCTP_INP_WLOCK(inp); 3197 if (inp->sctp_ep.local_hmacs) 3198 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3199 inp->sctp_ep.local_hmacs = hmaclist; 3200 SCTP_INP_WUNLOCK(inp); 3201 sctp_set_hmac_done: 3202 break; 3203 } 3204 case SCTP_AUTH_ACTIVE_KEY: 3205 { 3206 struct sctp_authkeyid *scact; 3207 3208 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, 3209 optsize); 3210 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3211 3212 /* set the active key on the right place */ 3213 if (stcb) { 3214 /* set the active key on the assoc */ 3215 if (sctp_auth_setactivekey(stcb, 3216 scact->scact_keynumber)) { 3217 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3218 SCTP_FROM_SCTP_USRREQ, 3219 EINVAL); 3220 error = EINVAL; 3221 } 3222 SCTP_TCB_UNLOCK(stcb); 3223 } else { 3224 /* set the active key on the endpoint */ 3225 SCTP_INP_WLOCK(inp); 3226 if (sctp_auth_setactivekey_ep(inp, 3227 scact->scact_keynumber)) { 3228 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3229 SCTP_FROM_SCTP_USRREQ, 3230 EINVAL); 3231 error = EINVAL; 3232 } 3233 SCTP_INP_WUNLOCK(inp); 3234 } 3235 break; 3236 } 3237 case SCTP_AUTH_DELETE_KEY: 3238 { 3239 struct sctp_authkeyid *scdel; 3240 3241 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, 3242 optsize); 3243 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3244 3245 /* delete the key from the right place */ 3246 if (stcb) { 3247 if (sctp_delete_sharedkey(stcb, 3248 scdel->scact_keynumber)) { 3249 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3250 SCTP_FROM_SCTP_USRREQ, 3251 EINVAL); 3252 error = EINVAL; 3253 } 3254 SCTP_TCB_UNLOCK(stcb); 3255 } else { 3256 SCTP_INP_WLOCK(inp); 3257 if (sctp_delete_sharedkey_ep(inp, 3258 scdel->scact_keynumber)) { 3259 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3260 SCTP_FROM_SCTP_USRREQ, 3261 EINVAL); 3262 error = EINVAL; 3263 } 3264 SCTP_INP_WUNLOCK(inp); 3265 } 3266 break; 3267 } 3268 case SCTP_AUTH_DEACTIVATE_KEY: 3269 { 3270 struct sctp_authkeyid *keyid; 3271 3272 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, 3273 optsize); 3274 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); 3275 3276 /* deactivate the key from the right place */ 3277 if (stcb) { 3278 if (sctp_deact_sharedkey(stcb, 3279 keyid->scact_keynumber)) { 3280 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3281 SCTP_FROM_SCTP_USRREQ, 3282 EINVAL); 3283 error = EINVAL; 3284 } 3285 SCTP_TCB_UNLOCK(stcb); 3286 } else { 3287 SCTP_INP_WLOCK(inp); 3288 if (sctp_deact_sharedkey_ep(inp, 3289 keyid->scact_keynumber)) { 3290 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3291 SCTP_FROM_SCTP_USRREQ, 3292 EINVAL); 3293 error = EINVAL; 3294 } 3295 SCTP_INP_WUNLOCK(inp); 3296 } 3297 break; 3298 } 3299 3300 case SCTP_RESET_STREAMS: 3301 { 3302 struct sctp_stream_reset *strrst; 3303 uint8_t send_in = 0, send_tsn = 0, send_out = 0, 3304 addstream = 0; 3305 uint16_t addstrmcnt = 0; 3306 int i; 3307 3308 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3309 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3310 3311 if (stcb == NULL) { 3312 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3313 error = ENOENT; 3314 break; 3315 } 3316 if (stcb->asoc.peer_supports_strreset == 0) { 3317 /* 3318 * Peer does not support it, we return 3319 * protocol not supported since this is true 3320 * for this feature and this peer, not the 3321 * socket request in general. 
3322 */ 3323 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3324 error = EPROTONOSUPPORT; 3325 SCTP_TCB_UNLOCK(stcb); 3326 break; 3327 } 3328 if (stcb->asoc.stream_reset_outstanding) { 3329 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3330 error = EALREADY; 3331 SCTP_TCB_UNLOCK(stcb); 3332 break; 3333 } 3334 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3335 send_in = 1; 3336 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3337 send_out = 1; 3338 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3339 send_in = 1; 3340 send_out = 1; 3341 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3342 send_tsn = 1; 3343 } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) { 3344 if (send_tsn || 3345 send_in || 3346 send_out) { 3347 /* We can't do that and add streams */ 3348 error = EINVAL; 3349 goto skip_stuff; 3350 } 3351 if (stcb->asoc.stream_reset_outstanding) { 3352 error = EBUSY; 3353 goto skip_stuff; 3354 } 3355 addstream = 1; 3356 /* We allocate here */ 3357 addstrmcnt = strrst->strrst_num_streams; 3358 if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) { 3359 /* You can't have more than 64k */ 3360 error = EINVAL; 3361 goto skip_stuff; 3362 } 3363 if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) { 3364 /* Need to allocate more */ 3365 struct sctp_stream_out *oldstream; 3366 struct sctp_stream_queue_pending *sp; 3367 int removed; 3368 3369 oldstream = stcb->asoc.strmout; 3370 /* get some more */ 3371 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, 3372 ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)), 3373 SCTP_M_STRMO); 3374 if (stcb->asoc.strmout == NULL) { 3375 stcb->asoc.strmout = oldstream; 3376 error = ENOMEM; 3377 goto skip_stuff; 3378 } 3379 /* 3380 * Ok now we proceed with copying 3381 * the old out stuff and 3382 * initializing the new stuff. 3383 */ 3384 SCTP_TCB_SEND_LOCK(stcb); 3385 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3386 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3387 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent; 3388 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; 3389 stcb->asoc.strmout[i].stream_no = i; 3390 if (oldstream[i].next_spoke.tqe_next) { 3391 sctp_remove_from_wheel(stcb, &stcb->asoc, &oldstream[i], 1); 3392 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3393 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3394 removed = 1; 3395 } else { 3396 /* not on out wheel */ 3397 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3398 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3399 removed = 0; 3400 } 3401 /* 3402 * now anything on those 3403 * queues? 3404 */ 3405 while (TAILQ_EMPTY(&oldstream[i].outqueue) == 0) { 3406 sp = TAILQ_FIRST(&oldstream[i].outqueue); 3407 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); 3408 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); 3409 } 3410 /* Did we disrupt the wheel? 
*/ 3411 if (removed) { 3412 sctp_insert_on_wheel(stcb, 3413 &stcb->asoc, 3414 &stcb->asoc.strmout[i], 3415 1); 3416 } 3417 /* 3418 * Now move assoc pointers 3419 * too 3420 */ 3421 if (stcb->asoc.last_out_stream == &oldstream[i]) { 3422 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i]; 3423 } 3424 if (stcb->asoc.locked_on_sending == &oldstream[i]) { 3425 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i]; 3426 } 3427 } 3428 /* now the new streams */ 3429 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) { 3430 stcb->asoc.strmout[i].next_sequence_sent = 0x0; 3431 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3432 stcb->asoc.strmout[i].stream_no = i; 3433 stcb->asoc.strmout[i].last_msg_incomplete = 0; 3434 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3435 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3436 } 3437 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt; 3438 SCTP_FREE(oldstream, SCTP_M_STRMO); 3439 } 3440 SCTP_TCB_SEND_UNLOCK(stcb); 3441 goto skip_stuff; 3442 } else { 3443 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3444 error = EINVAL; 3445 SCTP_TCB_UNLOCK(stcb); 3446 break; 3447 } 3448 for (i = 0; i < strrst->strrst_num_streams; i++) { 3449 if ((send_in) && 3450 3451 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3452 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3453 error = EINVAL; 3454 goto get_out; 3455 } 3456 if ((send_out) && 3457 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3458 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3459 error = EINVAL; 3460 goto get_out; 3461 } 3462 } 3463 skip_stuff: 3464 if (error) { 3465 get_out: 3466 SCTP_TCB_UNLOCK(stcb); 3467 break; 3468 } 3469 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3470 strrst->strrst_list, 3471 send_out, (stcb->asoc.str_reset_seq_in - 3), 3472 send_in, send_tsn, addstream, addstrmcnt); 3473 3474 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3475 SCTP_TCB_UNLOCK(stcb); 3476 } 3477 break; 3478 3479 case SCTP_CONNECT_X: 3480 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3481 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3482 error = EINVAL; 3483 break; 3484 } 3485 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3486 break; 3487 3488 case SCTP_CONNECT_X_DELAYED: 3489 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3490 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3491 error = EINVAL; 3492 break; 3493 } 3494 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3495 break; 3496 3497 case SCTP_CONNECT_X_COMPLETE: 3498 { 3499 struct sockaddr *sa; 3500 struct sctp_nets *net; 3501 3502 /* FIXME MT: check correct? */ 3503 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3504 3505 /* find tcb */ 3506 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3507 SCTP_INP_RLOCK(inp); 3508 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3509 if (stcb) { 3510 SCTP_TCB_LOCK(stcb); 3511 net = sctp_findnet(stcb, sa); 3512 } 3513 SCTP_INP_RUNLOCK(inp); 3514 } else { 3515 /* 3516 * We increment here since 3517 * sctp_findassociation_ep_addr() wil do a 3518 * decrement if it finds the stcb as long as 3519 * the locked tcb (last argument) is NOT a 3520 * TCB.. aka NULL. 
3521 */ 3522 SCTP_INP_INCR_REF(inp); 3523 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3524 if (stcb == NULL) { 3525 SCTP_INP_DECR_REF(inp); 3526 } 3527 } 3528 3529 if (stcb == NULL) { 3530 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3531 error = ENOENT; 3532 break; 3533 } 3534 if (stcb->asoc.delayed_connection == 1) { 3535 stcb->asoc.delayed_connection = 0; 3536 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3537 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3538 stcb->asoc.primary_destination, 3539 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3540 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3541 } else { 3542 /* 3543 * already expired or did not use delayed 3544 * connectx 3545 */ 3546 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3547 error = EALREADY; 3548 } 3549 SCTP_TCB_UNLOCK(stcb); 3550 } 3551 break; 3552 case SCTP_MAX_BURST: 3553 { 3554 uint8_t *burst; 3555 3556 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 3557 3558 SCTP_INP_WLOCK(inp); 3559 if (*burst) { 3560 inp->sctp_ep.max_burst = *burst; 3561 } 3562 SCTP_INP_WUNLOCK(inp); 3563 } 3564 break; 3565 case SCTP_MAXSEG: 3566 { 3567 struct sctp_assoc_value *av; 3568 int ovh; 3569 3570 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3571 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3572 3573 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3574 ovh = SCTP_MED_OVERHEAD; 3575 } else { 3576 ovh = SCTP_MED_V4_OVERHEAD; 3577 } 3578 if (stcb) { 3579 if (av->assoc_value) { 3580 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3581 } else { 3582 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3583 } 3584 SCTP_TCB_UNLOCK(stcb); 3585 } else { 3586 SCTP_INP_WLOCK(inp); 3587 /* 3588 * FIXME MT: I think this is not in tune 3589 * with the API ID 3590 */ 3591 if (av->assoc_value) { 3592 inp->sctp_frag_point = (av->assoc_value + ovh); 3593 } else { 3594 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3595 } 3596 SCTP_INP_WUNLOCK(inp); 3597 } 3598 } 3599 break; 3600 case SCTP_EVENTS: 3601 { 3602 struct sctp_event_subscribe *events; 3603 3604 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3605 3606 SCTP_INP_WLOCK(inp); 3607 if (events->sctp_data_io_event) { 3608 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3609 } else { 3610 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3611 } 3612 3613 if (events->sctp_association_event) { 3614 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3615 } else { 3616 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3617 } 3618 3619 if (events->sctp_address_event) { 3620 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3621 } else { 3622 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3623 } 3624 3625 if (events->sctp_send_failure_event) { 3626 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3627 } else { 3628 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3629 } 3630 3631 if (events->sctp_peer_error_event) { 3632 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3633 } else { 3634 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3635 } 3636 3637 if (events->sctp_shutdown_event) { 3638 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3639 } else { 3640 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3641 } 3642 3643 if (events->sctp_partial_delivery_event) { 3644 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3645 } else { 3646 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3647 } 3648 3649 if 
(events->sctp_adaptation_layer_event) { 3650 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3651 } else { 3652 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3653 } 3654 3655 if (events->sctp_authentication_event) { 3656 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3657 } else { 3658 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3659 } 3660 3661 if (events->sctp_sender_dry_event) { 3662 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); 3663 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3664 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3665 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3666 if (stcb) { 3667 SCTP_TCB_LOCK(stcb); 3668 } 3669 if (stcb && 3670 TAILQ_EMPTY(&stcb->asoc.send_queue) && 3671 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 3672 (stcb->asoc.stream_queue_cnt == 0)) { 3673 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); 3674 } 3675 if (stcb) { 3676 SCTP_TCB_UNLOCK(stcb); 3677 } 3678 } 3679 } else { 3680 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); 3681 } 3682 3683 if (events->sctp_stream_reset_event) { 3684 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3685 } else { 3686 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3687 } 3688 SCTP_INP_WUNLOCK(inp); 3689 } 3690 break; 3691 3692 case SCTP_ADAPTATION_LAYER: 3693 { 3694 struct sctp_setadaptation *adap_bits; 3695 3696 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3697 SCTP_INP_WLOCK(inp); 3698 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3699 SCTP_INP_WUNLOCK(inp); 3700 } 3701 break; 3702 #ifdef SCTP_DEBUG 3703 case SCTP_SET_INITIAL_DBG_SEQ: 3704 { 3705 uint32_t *vvv; 3706 3707 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3708 SCTP_INP_WLOCK(inp); 3709 inp->sctp_ep.initial_sequence_debug = *vvv; 3710 SCTP_INP_WUNLOCK(inp); 3711 } 3712 break; 3713 #endif 3714 case SCTP_DEFAULT_SEND_PARAM: 3715 { 3716 struct sctp_sndrcvinfo *s_info; 3717 3718 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3719 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3720 3721 if (stcb) { 3722 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3723 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3724 } else { 3725 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3726 error = EINVAL; 3727 } 3728 SCTP_TCB_UNLOCK(stcb); 3729 } else { 3730 SCTP_INP_WLOCK(inp); 3731 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3732 SCTP_INP_WUNLOCK(inp); 3733 } 3734 } 3735 break; 3736 case SCTP_PEER_ADDR_PARAMS: 3737 /* Applys to the specific association */ 3738 { 3739 struct sctp_paddrparams *paddrp; 3740 struct sctp_nets *net; 3741 3742 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3743 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3744 net = NULL; 3745 if (stcb) { 3746 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3747 } else { 3748 /* 3749 * We increment here since 3750 * sctp_findassociation_ep_addr() wil do a 3751 * decrement if it finds the stcb as long as 3752 * the locked tcb (last argument) is NOT a 3753 * TCB.. aka NULL. 
3754 */ 3755 SCTP_INP_INCR_REF(inp); 3756 stcb = sctp_findassociation_ep_addr(&inp, 3757 (struct sockaddr *)&paddrp->spp_address, 3758 &net, NULL, NULL); 3759 if (stcb == NULL) { 3760 SCTP_INP_DECR_REF(inp); 3761 } 3762 } 3763 if (stcb && (net == NULL)) { 3764 struct sockaddr *sa; 3765 3766 sa = (struct sockaddr *)&paddrp->spp_address; 3767 if (sa->sa_family == AF_INET) { 3768 struct sockaddr_in *sin; 3769 3770 sin = (struct sockaddr_in *)sa; 3771 if (sin->sin_addr.s_addr) { 3772 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3773 SCTP_TCB_UNLOCK(stcb); 3774 error = EINVAL; 3775 break; 3776 } 3777 } else if (sa->sa_family == AF_INET6) { 3778 struct sockaddr_in6 *sin6; 3779 3780 sin6 = (struct sockaddr_in6 *)sa; 3781 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3782 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3783 SCTP_TCB_UNLOCK(stcb); 3784 error = EINVAL; 3785 break; 3786 } 3787 } else { 3788 error = EAFNOSUPPORT; 3789 SCTP_TCB_UNLOCK(stcb); 3790 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3791 break; 3792 } 3793 } 3794 /* sanity checks */ 3795 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3796 if (stcb) 3797 SCTP_TCB_UNLOCK(stcb); 3798 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3799 return (EINVAL); 3800 } 3801 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3802 if (stcb) 3803 SCTP_TCB_UNLOCK(stcb); 3804 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3805 return (EINVAL); 3806 } 3807 if (stcb) { 3808 /************************TCB SPECIFIC SET ******************/ 3809 /* 3810 * do we change the timer for HB, we run 3811 * only one? 3812 */ 3813 int ovh = 0; 3814 3815 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3816 ovh = SCTP_MED_OVERHEAD; 3817 } else { 3818 ovh = SCTP_MED_V4_OVERHEAD; 3819 } 3820 3821 if (paddrp->spp_hbinterval) 3822 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3823 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3824 stcb->asoc.heart_beat_delay = 0; 3825 3826 /* network sets ? 
*/ 3827 if (net) { 3828 /************************NET SPECIFIC SET ******************/ 3829 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3830 /* on demand HB */ 3831 if (sctp_send_hb(stcb, 1, net) < 0) { 3832 /* asoc destroyed */ 3833 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3834 error = EINVAL; 3835 break; 3836 } 3837 } 3838 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3839 net->dest_state |= SCTP_ADDR_NOHB; 3840 } 3841 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3842 net->dest_state &= ~SCTP_ADDR_NOHB; 3843 } 3844 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3845 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3846 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3847 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3848 } 3849 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3850 net->mtu = paddrp->spp_pathmtu + ovh; 3851 if (net->mtu < stcb->asoc.smallest_mtu) { 3852 #ifdef SCTP_PRINT_FOR_B_AND_M 3853 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3854 net->mtu); 3855 #endif 3856 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3857 } 3858 } 3859 } 3860 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3861 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3862 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3863 } 3864 } 3865 if (paddrp->spp_pathmaxrxt) 3866 net->failure_threshold = paddrp->spp_pathmaxrxt; 3867 #ifdef INET 3868 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3869 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3870 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3871 } 3872 } 3873 #endif 3874 #ifdef INET6 3875 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3876 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3877 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3878 } 3879 } 3880 #endif 3881 } else { 3882 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3883 if (paddrp->spp_pathmaxrxt) 3884 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3885 3886 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3887 /* Turn back on the timer */ 3888 stcb->asoc.hb_is_disabled = 0; 3889 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3890 } 3891 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3892 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3893 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3894 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3895 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3896 } 3897 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3898 net->mtu = paddrp->spp_pathmtu + ovh; 3899 if (net->mtu < stcb->asoc.smallest_mtu) { 3900 #ifdef SCTP_PRINT_FOR_B_AND_M 3901 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3902 net->mtu); 3903 #endif 3904 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3905 } 3906 } 3907 } 3908 } 3909 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3910 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3911 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3912 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3913 } 3914 } 3915 } 3916 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3917 int cnt_of_unconf = 0; 3918 struct sctp_nets *lnet; 3919 3920 stcb->asoc.hb_is_disabled = 1; 3921 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3922 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3923 cnt_of_unconf++; 3924 } 3925 } 3926 /* 3927 * stop the timer ONLY if we 3928 * have no unconfirmed 3929 * 
addresses 3930 */ 3931 if (cnt_of_unconf == 0) { 3932 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3933 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 3934 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3935 } 3936 } 3937 } 3938 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3939 /* start up the timer. */ 3940 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3941 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3942 } 3943 } 3944 #ifdef INET 3945 if (paddrp->spp_flags & SPP_IPV4_TOS) 3946 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3947 #endif 3948 #ifdef INET6 3949 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3950 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3951 #endif 3952 3953 } 3954 SCTP_TCB_UNLOCK(stcb); 3955 } else { 3956 /************************NO TCB, SET TO default stuff ******************/ 3957 SCTP_INP_WLOCK(inp); 3958 /* 3959 * For the TOS/FLOWLABEL stuff you set it 3960 * with the options on the socket 3961 */ 3962 if (paddrp->spp_pathmaxrxt) { 3963 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3964 } 3965 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3966 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 3967 else if (paddrp->spp_hbinterval) { 3968 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 3969 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 3970 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3971 } 3972 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3973 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3974 3975 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3976 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3977 } 3978 SCTP_INP_WUNLOCK(inp); 3979 } 3980 } 3981 break; 3982 case SCTP_RTOINFO: 3983 { 3984 struct sctp_rtoinfo *srto; 3985 uint32_t new_init, new_min, new_max; 3986 3987 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3988 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3989 3990 if (stcb) { 3991 if (srto->srto_initial) 3992 new_init = srto->srto_initial; 3993 else 3994 new_init = stcb->asoc.initial_rto; 3995 if (srto->srto_max) 3996 new_max = srto->srto_max; 3997 else 3998 new_max = stcb->asoc.maxrto; 3999 if (srto->srto_min) 4000 new_min = srto->srto_min; 4001 else 4002 new_min = stcb->asoc.minrto; 4003 if ((new_min <= new_init) && (new_init <= new_max)) { 4004 stcb->asoc.initial_rto = new_init; 4005 stcb->asoc.maxrto = new_max; 4006 stcb->asoc.minrto = new_min; 4007 } else { 4008 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4009 error = EINVAL; 4010 } 4011 SCTP_TCB_UNLOCK(stcb); 4012 } else { 4013 SCTP_INP_WLOCK(inp); 4014 if (srto->srto_initial) 4015 new_init = srto->srto_initial; 4016 else 4017 new_init = inp->sctp_ep.initial_rto; 4018 if (srto->srto_max) 4019 new_max = srto->srto_max; 4020 else 4021 new_max = inp->sctp_ep.sctp_maxrto; 4022 if (srto->srto_min) 4023 new_min = srto->srto_min; 4024 else 4025 new_min = inp->sctp_ep.sctp_minrto; 4026 if ((new_min <= new_init) && (new_init <= new_max)) { 4027 inp->sctp_ep.initial_rto = new_init; 4028 inp->sctp_ep.sctp_maxrto = new_max; 4029 inp->sctp_ep.sctp_minrto = new_min; 4030 } else { 4031 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4032 error = EINVAL; 4033 } 4034 SCTP_INP_WUNLOCK(inp); 4035 } 4036 } 4037 break; 4038 case SCTP_ASSOCINFO: 4039 { 4040 struct sctp_assocparams *sasoc; 4041 4042 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 4043 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 4044 if 
(sasoc->sasoc_cookie_life) { 4045 /* boundary check the cookie life */ 4046 if (sasoc->sasoc_cookie_life < 1000) 4047 sasoc->sasoc_cookie_life = 1000; 4048 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 4049 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; 4050 } 4051 } 4052 if (stcb) { 4053 if (sasoc->sasoc_asocmaxrxt) 4054 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 4055 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 4056 sasoc->sasoc_peer_rwnd = 0; 4057 sasoc->sasoc_local_rwnd = 0; 4058 if (sasoc->sasoc_cookie_life) { 4059 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4060 } 4061 SCTP_TCB_UNLOCK(stcb); 4062 } else { 4063 SCTP_INP_WLOCK(inp); 4064 if (sasoc->sasoc_asocmaxrxt) 4065 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 4066 sasoc->sasoc_number_peer_destinations = 0; 4067 sasoc->sasoc_peer_rwnd = 0; 4068 sasoc->sasoc_local_rwnd = 0; 4069 if (sasoc->sasoc_cookie_life) { 4070 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4071 } 4072 SCTP_INP_WUNLOCK(inp); 4073 } 4074 } 4075 break; 4076 case SCTP_INITMSG: 4077 { 4078 struct sctp_initmsg *sinit; 4079 4080 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 4081 SCTP_INP_WLOCK(inp); 4082 if (sinit->sinit_num_ostreams) 4083 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 4084 4085 if (sinit->sinit_max_instreams) 4086 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 4087 4088 if (sinit->sinit_max_attempts) 4089 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 4090 4091 if (sinit->sinit_max_init_timeo) 4092 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 4093 SCTP_INP_WUNLOCK(inp); 4094 } 4095 break; 4096 case SCTP_PRIMARY_ADDR: 4097 { 4098 struct sctp_setprim *spa; 4099 struct sctp_nets *net, *lnet; 4100 4101 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 4102 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 4103 4104 net = NULL; 4105 if (stcb) { 4106 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 4107 } else { 4108 /* 4109 * We increment here since 4110 * sctp_findassociation_ep_addr() wil do a 4111 * decrement if it finds the stcb as long as 4112 * the locked tcb (last argument) is NOT a 4113 * TCB.. aka NULL. 4114 */ 4115 SCTP_INP_INCR_REF(inp); 4116 stcb = sctp_findassociation_ep_addr(&inp, 4117 (struct sockaddr *)&spa->ssp_addr, 4118 &net, NULL, NULL); 4119 if (stcb == NULL) { 4120 SCTP_INP_DECR_REF(inp); 4121 } 4122 } 4123 4124 if ((stcb) && (net)) { 4125 if ((net != stcb->asoc.primary_destination) && 4126 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 4127 /* Ok we need to set it */ 4128 lnet = stcb->asoc.primary_destination; 4129 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 4130 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 4131 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 4132 } 4133 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 4134 } 4135 } 4136 } else { 4137 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4138 error = EINVAL; 4139 } 4140 if (stcb) { 4141 SCTP_TCB_UNLOCK(stcb); 4142 } 4143 } 4144 break; 4145 case SCTP_SET_DYNAMIC_PRIMARY: 4146 { 4147 union sctp_sockstore *ss; 4148 4149 error = priv_check(curthread, 4150 PRIV_NETINET_RESERVEDPORT); 4151 if (error) 4152 break; 4153 4154 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 4155 /* SUPER USER CHECK? 
*/ 4156 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 4157 } 4158 break; 4159 case SCTP_SET_PEER_PRIMARY_ADDR: 4160 { 4161 struct sctp_setpeerprim *sspp; 4162 4163 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 4164 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 4165 if (stcb != NULL) { 4166 struct sctp_ifa *ifa; 4167 4168 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 4169 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 4170 if (ifa == NULL) { 4171 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4172 error = EINVAL; 4173 goto out_of_it; 4174 } 4175 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4176 /* 4177 * Must validate the ifa found is in 4178 * our ep 4179 */ 4180 struct sctp_laddr *laddr; 4181 int found = 0; 4182 4183 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4184 if (laddr->ifa == NULL) { 4185 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 4186 __FUNCTION__); 4187 continue; 4188 } 4189 if (laddr->ifa == ifa) { 4190 found = 1; 4191 break; 4192 } 4193 } 4194 if (!found) { 4195 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4196 error = EINVAL; 4197 goto out_of_it; 4198 } 4199 } 4200 if (sctp_set_primary_ip_address_sa(stcb, 4201 (struct sockaddr *)&sspp->sspp_addr) != 0) { 4202 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4203 error = EINVAL; 4204 } 4205 out_of_it: 4206 SCTP_TCB_UNLOCK(stcb); 4207 } else { 4208 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4209 error = EINVAL; 4210 } 4211 4212 } 4213 break; 4214 case SCTP_BINDX_ADD_ADDR: 4215 { 4216 struct sctp_getaddresses *addrs; 4217 size_t sz; 4218 struct thread *td; 4219 4220 td = (struct thread *)p; 4221 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 4222 optsize); 4223 if (addrs->addr->sa_family == AF_INET) { 4224 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4225 if (optsize < sz) { 4226 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4227 error = EINVAL; 4228 break; 4229 } 4230 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4231 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4232 break; 4233 } 4234 #ifdef INET6 4235 } else if (addrs->addr->sa_family == AF_INET6) { 4236 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4237 if (optsize < sz) { 4238 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4239 error = EINVAL; 4240 break; 4241 } 4242 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4243 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4244 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4245 break; 4246 } 4247 #endif 4248 } else { 4249 error = EAFNOSUPPORT; 4250 break; 4251 } 4252 sctp_bindx_add_address(so, inp, addrs->addr, 4253 addrs->sget_assoc_id, vrf_id, 4254 &error, p); 4255 } 4256 break; 4257 case SCTP_BINDX_REM_ADDR: 4258 { 4259 struct sctp_getaddresses *addrs; 4260 size_t sz; 4261 struct thread *td; 4262 4263 td = (struct thread *)p; 4264 4265 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 4266 if (addrs->addr->sa_family == AF_INET) { 4267 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4268 if (optsize < sz) { 4269 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 
4270 error = EINVAL; 4271 break; 4272 } 4273 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4274 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4275 break; 4276 } 4277 #ifdef INET6 4278 } else if (addrs->addr->sa_family == AF_INET6) { 4279 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4280 if (optsize < sz) { 4281 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4282 error = EINVAL; 4283 break; 4284 } 4285 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4286 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4287 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4288 break; 4289 } 4290 #endif 4291 } else { 4292 error = EAFNOSUPPORT; 4293 break; 4294 } 4295 sctp_bindx_delete_address(so, inp, addrs->addr, 4296 addrs->sget_assoc_id, vrf_id, 4297 &error); 4298 } 4299 break; 4300 default: 4301 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 4302 error = ENOPROTOOPT; 4303 break; 4304 } /* end switch (opt) */ 4305 return (error); 4306 } 4307 4308 int 4309 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 4310 { 4311 void *optval = NULL; 4312 size_t optsize = 0; 4313 struct sctp_inpcb *inp; 4314 void *p; 4315 int error = 0; 4316 4317 inp = (struct sctp_inpcb *)so->so_pcb; 4318 if (inp == 0) { 4319 /* I made the same as TCP since we are not setup? */ 4320 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4321 return (ECONNRESET); 4322 } 4323 if (sopt->sopt_level != IPPROTO_SCTP) { 4324 /* wrong proto level... send back up to IP */ 4325 #ifdef INET6 4326 if (INP_CHECK_SOCKAF(so, AF_INET6)) 4327 error = ip6_ctloutput(so, sopt); 4328 else 4329 #endif /* INET6 */ 4330 error = ip_ctloutput(so, sopt); 4331 return (error); 4332 } 4333 optsize = sopt->sopt_valsize; 4334 if (optsize) { 4335 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); 4336 if (optval == NULL) { 4337 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); 4338 return (ENOBUFS); 4339 } 4340 error = sooptcopyin(sopt, optval, optsize, optsize); 4341 if (error) { 4342 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4343 goto out; 4344 } 4345 } 4346 p = (void *)sopt->sopt_td; 4347 if (sopt->sopt_dir == SOPT_SET) { 4348 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 4349 } else if (sopt->sopt_dir == SOPT_GET) { 4350 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 4351 } else { 4352 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4353 error = EINVAL; 4354 } 4355 if ((error == 0) && (optval != NULL)) { 4356 error = sooptcopyout(sopt, optval, optsize); 4357 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4358 } else if (optval != NULL) { 4359 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4360 } 4361 out: 4362 return (error); 4363 } 4364 4365 4366 static int 4367 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 4368 { 4369 int error = 0; 4370 int create_lock_on = 0; 4371 uint32_t vrf_id; 4372 struct sctp_inpcb *inp; 4373 struct sctp_tcb *stcb = NULL; 4374 4375 inp = (struct sctp_inpcb *)so->so_pcb; 4376 if (inp == 0) { 4377 /* I made the same as TCP since we are not setup? 
		 */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	if (addr == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (EINVAL);
	}
#ifdef INET6
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6p;

		if (addr->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			return (EINVAL);
		}
		sin6p = (struct sockaddr_in6 *)addr;
		if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
			return (error);
		}
	} else
#endif
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *sinp;

		if (addr->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
			return (EINVAL);
		}
		sinp = (struct sockaddr_in *)addr;
		if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
			return (error);
		}
	} else {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
		return (EAFNOSUPPORT);
	}
	SCTP_INP_INCR_REF(inp);
	SCTP_ASOC_CREATE_LOCK(inp);
	create_lock_on = 1;

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Should I really unlock? */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
		error = EFAULT;
		goto out_now;
	}
#ifdef INET6
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
	    (addr->sa_family == AF_INET6)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		error = EINVAL;
		goto out_now;
	}
#endif				/* INET6 */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind an ephemeral port */
		error = sctp_inpcb_bind(so, NULL, NULL, p);
		if (error) {
			goto out_now;
		}
	}
	/* Now do we connect? */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
		error = EADDRINUSE;
		goto out_now;
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		SCTP_INP_RUNLOCK(inp);
	} else {
		/*
		 * We increment here since sctp_findassociation_ep_addr()
		 * will do a decrement if it finds the stcb as long as the
		 * locked tcb (last argument) is NOT a TCB.. aka NULL.
4467 */ 4468 SCTP_INP_INCR_REF(inp); 4469 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); 4470 if (stcb == NULL) { 4471 SCTP_INP_DECR_REF(inp); 4472 } else { 4473 SCTP_TCB_UNLOCK(stcb); 4474 } 4475 } 4476 if (stcb != NULL) { 4477 /* Already have or am bring up an association */ 4478 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 4479 error = EALREADY; 4480 goto out_now; 4481 } 4482 vrf_id = inp->def_vrf_id; 4483 /* We are GOOD to go */ 4484 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p); 4485 if (stcb == NULL) { 4486 /* Gak! no memory */ 4487 goto out_now; 4488 } 4489 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 4490 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 4491 /* Set the connected flag so we can queue data */ 4492 SOCKBUF_LOCK(&so->so_rcv); 4493 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 4494 SOCKBUF_UNLOCK(&so->so_rcv); 4495 SOCKBUF_LOCK(&so->so_snd); 4496 so->so_snd.sb_state &= ~SBS_CANTSENDMORE; 4497 SOCKBUF_UNLOCK(&so->so_snd); 4498 SOCK_LOCK(so); 4499 so->so_state &= ~SS_ISDISCONNECTING; 4500 SOCK_UNLOCK(so); 4501 soisconnecting(so); 4502 } 4503 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 4504 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 4505 4506 /* initialize authentication parameters for the assoc */ 4507 sctp_initialize_auth_params(inp, stcb); 4508 4509 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 4510 SCTP_TCB_UNLOCK(stcb); 4511 out_now: 4512 if (create_lock_on) { 4513 SCTP_ASOC_CREATE_UNLOCK(inp); 4514 } 4515 SCTP_INP_DECR_REF(inp); 4516 return error; 4517 } 4518 4519 int 4520 sctp_listen(struct socket *so, int backlog, struct thread *p) 4521 { 4522 /* 4523 * Note this module depends on the protocol processing being called 4524 * AFTER any socket level flags and backlog are applied to the 4525 * socket. The traditional way that the socket flags are applied is 4526 * AFTER protocol processing. We have made a change to the 4527 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 4528 * place if the socket API for SCTP is to work properly. 4529 */ 4530 4531 int error = 0; 4532 struct sctp_inpcb *inp; 4533 4534 inp = (struct sctp_inpcb *)so->so_pcb; 4535 if (inp == 0) { 4536 /* I made the same as TCP since we are not setup? */ 4537 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4538 return (ECONNRESET); 4539 } 4540 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) { 4541 /* See if we have a listener */ 4542 struct sctp_inpcb *tinp; 4543 union sctp_sockstore store, *sp; 4544 4545 sp = &store; 4546 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4547 /* not bound all */ 4548 struct sctp_laddr *laddr; 4549 4550 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4551 memcpy(&store, &laddr->ifa->address, sizeof(store)); 4552 sp->sin.sin_port = inp->sctp_lport; 4553 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4554 if (tinp && (tinp != inp) && 4555 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4556 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4557 (tinp->sctp_socket->so_qlimit)) { 4558 /* 4559 * we have a listener already and 4560 * its not this inp. 
4561 */ 4562 SCTP_INP_DECR_REF(tinp); 4563 return (EADDRINUSE); 4564 } else if (tinp) { 4565 SCTP_INP_DECR_REF(tinp); 4566 } 4567 } 4568 } else { 4569 /* Setup a local addr bound all */ 4570 memset(&store, 0, sizeof(store)); 4571 store.sin.sin_port = inp->sctp_lport; 4572 #ifdef INET6 4573 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4574 store.sa.sa_family = AF_INET6; 4575 store.sa.sa_len = sizeof(struct sockaddr_in6); 4576 } 4577 #endif 4578 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 4579 store.sa.sa_family = AF_INET; 4580 store.sa.sa_len = sizeof(struct sockaddr_in); 4581 } 4582 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4583 if (tinp && (tinp != inp) && 4584 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4585 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4586 (tinp->sctp_socket->so_qlimit)) { 4587 /* 4588 * we have a listener already and its not 4589 * this inp. 4590 */ 4591 SCTP_INP_DECR_REF(tinp); 4592 return (EADDRINUSE); 4593 } else if (tinp) { 4594 SCTP_INP_DECR_REF(inp); 4595 } 4596 } 4597 } 4598 SCTP_INP_RLOCK(inp); 4599 #ifdef SCTP_LOCK_LOGGING 4600 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { 4601 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4602 } 4603 #endif 4604 SOCK_LOCK(so); 4605 error = solisten_proto_check(so); 4606 if (error) { 4607 SOCK_UNLOCK(so); 4608 SCTP_INP_RUNLOCK(inp); 4609 return (error); 4610 } 4611 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 4612 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4613 /* 4614 * The unlucky case - We are in the tcp pool with this guy. 4615 * - Someone else is in the main inp slot. - We must move 4616 * this guy (the listener) to the main slot - We must then 4617 * move the guy that was listener to the TCP Pool. 4618 */ 4619 if (sctp_swap_inpcb_for_listen(inp)) { 4620 goto in_use; 4621 } 4622 } 4623 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4624 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4625 /* We are already connected AND the TCP model */ 4626 in_use: 4627 SCTP_INP_RUNLOCK(inp); 4628 SOCK_UNLOCK(so); 4629 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4630 return (EADDRINUSE); 4631 } 4632 SCTP_INP_RUNLOCK(inp); 4633 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 4634 /* We must do a bind. */ 4635 SOCK_UNLOCK(so); 4636 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { 4637 /* bind error, probably perm */ 4638 return (error); 4639 } 4640 SOCK_LOCK(so); 4641 } 4642 /* It appears for 7.0 and on, we must always call this. 
*/ 4643 solisten_proto(so, backlog); 4644 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4645 /* remove the ACCEPTCONN flag for one-to-many sockets */ 4646 so->so_options &= ~SO_ACCEPTCONN; 4647 } 4648 if (backlog == 0) { 4649 /* turning off listen */ 4650 so->so_options &= ~SO_ACCEPTCONN; 4651 } 4652 SOCK_UNLOCK(so); 4653 return (error); 4654 } 4655 4656 static int sctp_defered_wakeup_cnt = 0; 4657 4658 int 4659 sctp_accept(struct socket *so, struct sockaddr **addr) 4660 { 4661 struct sctp_tcb *stcb; 4662 struct sctp_inpcb *inp; 4663 union sctp_sockstore store; 4664 4665 #ifdef INET6 4666 int error; 4667 4668 #endif 4669 inp = (struct sctp_inpcb *)so->so_pcb; 4670 4671 if (inp == 0) { 4672 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4673 return (ECONNRESET); 4674 } 4675 SCTP_INP_RLOCK(inp); 4676 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4677 SCTP_INP_RUNLOCK(inp); 4678 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 4679 return (EOPNOTSUPP); 4680 } 4681 if (so->so_state & SS_ISDISCONNECTED) { 4682 SCTP_INP_RUNLOCK(inp); 4683 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); 4684 return (ECONNABORTED); 4685 } 4686 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4687 if (stcb == NULL) { 4688 SCTP_INP_RUNLOCK(inp); 4689 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4690 return (ECONNRESET); 4691 } 4692 SCTP_TCB_LOCK(stcb); 4693 SCTP_INP_RUNLOCK(inp); 4694 store = stcb->asoc.primary_destination->ro._l_addr; 4695 stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE; 4696 SCTP_TCB_UNLOCK(stcb); 4697 switch (store.sa.sa_family) { 4698 case AF_INET: 4699 { 4700 struct sockaddr_in *sin; 4701 4702 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4703 if (sin == NULL) 4704 return (ENOMEM); 4705 sin->sin_family = AF_INET; 4706 sin->sin_len = sizeof(*sin); 4707 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 4708 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 4709 *addr = (struct sockaddr *)sin; 4710 break; 4711 } 4712 #ifdef INET6 4713 case AF_INET6: 4714 { 4715 struct sockaddr_in6 *sin6; 4716 4717 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 4718 if (sin6 == NULL) 4719 return (ENOMEM); 4720 sin6->sin6_family = AF_INET6; 4721 sin6->sin6_len = sizeof(*sin6); 4722 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 4723 4724 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 4725 if ((error = sa6_recoverscope(sin6)) != 0) { 4726 SCTP_FREE_SONAME(sin6); 4727 return (error); 4728 } 4729 *addr = (struct sockaddr *)sin6; 4730 break; 4731 } 4732 #endif 4733 default: 4734 /* TSNH */ 4735 break; 4736 } 4737 /* Wake any delayed sleep action */ 4738 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 4739 SCTP_INP_WLOCK(inp); 4740 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 4741 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 4742 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 4743 SCTP_INP_WUNLOCK(inp); 4744 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 4745 if (sowriteable(inp->sctp_socket)) { 4746 sowwakeup_locked(inp->sctp_socket); 4747 } else { 4748 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 4749 } 4750 SCTP_INP_WLOCK(inp); 4751 } 4752 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 4753 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 4754 SCTP_INP_WUNLOCK(inp); 4755 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 4756 if (soreadable(inp->sctp_socket)) { 4757 sctp_defered_wakeup_cnt++; 4758 sorwakeup_locked(inp->sctp_socket); 4759 } else { 4760 
SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 4761 } 4762 SCTP_INP_WLOCK(inp); 4763 } 4764 SCTP_INP_WUNLOCK(inp); 4765 } 4766 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4767 SCTP_TCB_LOCK(stcb); 4768 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 4769 } 4770 return (0); 4771 } 4772 4773 int 4774 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 4775 { 4776 struct sockaddr_in *sin; 4777 uint32_t vrf_id; 4778 struct sctp_inpcb *inp; 4779 struct sctp_ifa *sctp_ifa; 4780 4781 /* 4782 * Do the malloc first in case it blocks. 4783 */ 4784 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4785 if (sin == NULL) 4786 return (ENOMEM); 4787 sin->sin_family = AF_INET; 4788 sin->sin_len = sizeof(*sin); 4789 inp = (struct sctp_inpcb *)so->so_pcb; 4790 if (!inp) { 4791 SCTP_FREE_SONAME(sin); 4792 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4793 return ECONNRESET; 4794 } 4795 SCTP_INP_RLOCK(inp); 4796 sin->sin_port = inp->sctp_lport; 4797 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 4798 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4799 struct sctp_tcb *stcb; 4800 struct sockaddr_in *sin_a; 4801 struct sctp_nets *net; 4802 int fnd; 4803 4804 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4805 if (stcb == NULL) { 4806 goto notConn; 4807 } 4808 fnd = 0; 4809 sin_a = NULL; 4810 SCTP_TCB_LOCK(stcb); 4811 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4812 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4813 if (sin_a == NULL) 4814 /* this will make coverity happy */ 4815 continue; 4816 4817 if (sin_a->sin_family == AF_INET) { 4818 fnd = 1; 4819 break; 4820 } 4821 } 4822 if ((!fnd) || (sin_a == NULL)) { 4823 /* punt */ 4824 SCTP_TCB_UNLOCK(stcb); 4825 goto notConn; 4826 } 4827 vrf_id = inp->def_vrf_id; 4828 sctp_ifa = sctp_source_address_selection(inp, 4829 stcb, 4830 (sctp_route_t *) & net->ro, 4831 net, 0, vrf_id); 4832 if (sctp_ifa) { 4833 sin->sin_addr = sctp_ifa->address.sin.sin_addr; 4834 sctp_free_ifa(sctp_ifa); 4835 } 4836 SCTP_TCB_UNLOCK(stcb); 4837 } else { 4838 /* For the bound all case you get back 0 */ 4839 notConn: 4840 sin->sin_addr.s_addr = 0; 4841 } 4842 4843 } else { 4844 /* Take the first IPv4 address in the list */ 4845 struct sctp_laddr *laddr; 4846 int fnd = 0; 4847 4848 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4849 if (laddr->ifa->address.sa.sa_family == AF_INET) { 4850 struct sockaddr_in *sin_a; 4851 4852 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa; 4853 sin->sin_addr = sin_a->sin_addr; 4854 fnd = 1; 4855 break; 4856 } 4857 } 4858 if (!fnd) { 4859 SCTP_FREE_SONAME(sin); 4860 SCTP_INP_RUNLOCK(inp); 4861 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 4862 return ENOENT; 4863 } 4864 } 4865 SCTP_INP_RUNLOCK(inp); 4866 (*addr) = (struct sockaddr *)sin; 4867 return (0); 4868 } 4869 4870 int 4871 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 4872 { 4873 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 4874 int fnd; 4875 struct sockaddr_in *sin_a; 4876 struct sctp_inpcb *inp; 4877 struct sctp_tcb *stcb; 4878 struct sctp_nets *net; 4879 4880 /* Do the malloc first in case it blocks. 
	 */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if ((inp == NULL) ||
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
		/* UDP type and listeners will drop out here */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
		return (ENOTCONN);
	}
	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
	if (sin == NULL)
		return (ENOMEM);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);

	/* We must recapture in case we blocked */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (!inp) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	SCTP_INP_RLOCK(inp);
	stcb = LIST_FIRST(&inp->sctp_asoc_list);
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
	}
	SCTP_INP_RUNLOCK(inp);
	if (stcb == NULL) {
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
		return (ECONNRESET);
	}
	fnd = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
		if (sin_a->sin_family == AF_INET) {
			fnd = 1;
			sin->sin_port = stcb->rport;
			sin->sin_addr = sin_a->sin_addr;
			break;
		}
	}
	SCTP_TCB_UNLOCK(stcb);
	if (!fnd) {
		/* No IPv4 address */
		SCTP_FREE_SONAME(sin);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		return (ENOENT);
	}
	(*addr) = (struct sockaddr *)sin;
	return (0);
}

struct pr_usrreqs sctp_usrreqs = {
	.pru_abort = sctp_abort,
	.pru_accept = sctp_accept,
	.pru_attach = sctp_attach,
	.pru_bind = sctp_bind,
	.pru_connect = sctp_connect,
	.pru_control = in_control,
	.pru_close = sctp_close,
	.pru_detach = sctp_close,
	.pru_sopoll = sopoll_generic,
	.pru_flush = sctp_flush,
	.pru_disconnect = sctp_disconnect,
	.pru_listen = sctp_listen,
	.pru_peeraddr = sctp_peeraddr,
	.pru_send = sctp_sendm,
	.pru_shutdown = sctp_shutdown,
	.pru_sockaddr = sctp_ingetaddr,
	.pru_sosend = sctp_sosend,
	.pru_soreceive = sctp_soreceive
};
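
/*
 * The option cases handled by sctp_setopt() above are driven from userland
 * through setsockopt(2) at level IPPROTO_SCTP.  The sketch below is kept
 * under "#if 0" because it is illustrative userland code and not part of
 * this module: the helper name example_tune_sctp() and the numeric values
 * are invented for the example, while the structures, option names and the
 * SPP_* and srto_* fields are the ones consumed by the handlers above.
 * Note that SCTP_RTOINFO requires srto_min <= srto_initial <= srto_max and
 * that SPP_HB_ENABLE combined with SPP_HB_DISABLE is rejected.
 */
#if 0	/* illustrative userland sketch -- not compiled with the kernel */
#include <sys/types.h>
#include <sys/socket.h>

#include <netinet/in.h>
#include <netinet/sctp.h>

#include <string.h>

static int
example_tune_sctp(int fd)
{
	struct sctp_rtoinfo rto;
	struct sctp_paddrparams pp;

	/* RTO bounds, in milliseconds; the handler insists on min <= initial <= max. */
	memset(&rto, 0, sizeof(rto));
	rto.srto_assoc_id = 0;		/* no TCB: sets the endpoint defaults */
	rto.srto_initial = 2000;
	rto.srto_min = 500;
	rto.srto_max = 10000;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto)) < 0)
		return (-1);

	/* Per-path defaults: heartbeats on, path MTU discovery on. */
	memset(&pp, 0, sizeof(pp));
	pp.spp_assoc_id = 0;		/* wildcard address: endpoint defaults */
	pp.spp_hbinterval = 5000;	/* heartbeat every 5 seconds */
	pp.spp_pathmaxrxt = 4;		/* retransmissions before path failure */
	pp.spp_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
	    &pp, sizeof(pp)) < 0)
		return (-1);
	return (0);
}
#endif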
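
/*
 * sctp_connect(), sctp_listen() and sctp_accept() above back the usual
 * socket calls for the one-to-one (SOCK_STREAM) style.  The sketch below is
 * a hypothetical userland pairing of those calls, again kept under "#if 0"
 * since it is not part of the kernel build: the port number, the address
 * 192.0.2.1 and the function names are made up for illustration.
 * sctp_listen() binds an ephemeral port itself if the endpoint is still
 * unbound, and sctp_accept() hands back the association's primary
 * destination address.
 */
#if 0	/* illustrative userland sketch -- not compiled with the kernel */
#include <sys/types.h>
#include <sys/socket.h>

#include <netinet/in.h>
#include <netinet/sctp.h>

#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static int
example_server(void)
{
	struct sockaddr_in sin;
	int lfd, cfd;

	if ((lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP)) < 0)
		return (-1);
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(5001);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(lfd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(lfd, 5) < 0) {
		close(lfd);
		return (-1);
	}
	/* Blocks until an association completes its handshake. */
	cfd = accept(lfd, NULL, NULL);
	if (cfd >= 0)
		close(cfd);
	close(lfd);
	return (0);
}

static int
example_client(void)
{
	struct sockaddr_in sin;
	int fd;

	if ((fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP)) < 0)
		return (-1);
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(5001);
	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
	/* Drives sctp_connect(): sends INIT and waits for establishment. */
	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		close(fd);
		return (-1);
	}
	close(fd);
	return (0);
}
#endif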