1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 #include <netinet/sctp_os.h> 38 #include <sys/proc.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctp_var.h> 42 #if defined(INET6) 43 #endif 44 #include <netinet/sctp_sysctl.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_asconf.h> 48 #include <netinet/sctputil.h> 49 #include <netinet/sctp_indata.h> 50 #include <netinet/sctp_timer.h> 51 #include <netinet/sctp_auth.h> 52 #include <netinet/sctp_bsd_addr.h> 53 #include <netinet/udp.h> 54 55 56 57 extern struct sctp_cc_functions sctp_cc_functions[]; 58 extern struct sctp_ss_functions sctp_ss_functions[]; 59 60 void 61 sctp_init(void) 62 { 63 u_long sb_max_adj; 64 65 /* Initialize and modify the sysctled variables */ 66 sctp_init_sysctls(); 67 if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) 68 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8); 69 /* 70 * Allow a user to take no more than 1/2 the number of clusters or 71 * the SB_MAX whichever is smaller for the send window. 72 */ 73 sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); 74 SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj, 75 (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT)); 76 /* 77 * Now for the recv window, should we take the same amount? or 78 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For 79 * now I will just copy. 
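 * That is, sctp_recvspace below simply inherits the clamped
 * sctp_sendspace value computed above.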
80 */ 81 SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace); 82 83 SCTP_BASE_VAR(first_time) = 0; 84 SCTP_BASE_VAR(sctp_pcb_initialized) = 0; 85 sctp_pcb_init(); 86 #if defined(SCTP_PACKET_LOGGING) 87 SCTP_BASE_VAR(packet_log_writers) = 0; 88 SCTP_BASE_VAR(packet_log_end) = 0; 89 bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE); 90 #endif 91 92 93 } 94 95 void 96 sctp_finish(void) 97 { 98 sctp_pcb_finish(); 99 } 100 101 102 103 void 104 sctp_pathmtu_adjustment(struct sctp_inpcb *inp, 105 struct sctp_tcb *stcb, 106 struct sctp_nets *net, 107 uint16_t nxtsz) 108 { 109 struct sctp_tmit_chunk *chk; 110 uint16_t overhead; 111 112 /* Adjust that too */ 113 stcb->asoc.smallest_mtu = nxtsz; 114 /* now off to subtract IP_DF flag if needed */ 115 overhead = IP_HDR_SIZE; 116 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { 117 overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 118 } 119 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) { 120 if ((chk->send_size + overhead) > nxtsz) { 121 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 122 } 123 } 124 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 125 if ((chk->send_size + overhead) > nxtsz) { 126 /* 127 * For this guy we also mark for immediate resend 128 * since we sent to big of chunk 129 */ 130 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 131 if (chk->sent < SCTP_DATAGRAM_RESEND) { 132 sctp_flight_size_decrease(chk); 133 sctp_total_flight_decrease(stcb, chk); 134 } 135 if (chk->sent != SCTP_DATAGRAM_RESEND) { 136 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 137 } 138 chk->sent = SCTP_DATAGRAM_RESEND; 139 chk->rec.data.doing_fast_retransmit = 0; 140 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 141 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU, 142 chk->whoTo->flight_size, 143 chk->book_size, 144 (uintptr_t) chk->whoTo, 145 chk->rec.data.TSN_seq); 146 } 147 /* Clear any time so NO RTT is being done */ 148 chk->do_rtt = 0; 149 } 150 } 151 } 152 153 #ifdef INET 154 static void 155 sctp_notify_mbuf(struct sctp_inpcb *inp, 156 struct sctp_tcb *stcb, 157 struct sctp_nets *net, 158 struct ip *ip, 159 struct sctphdr *sh) 160 { 161 struct icmp *icmph; 162 int totsz, tmr_stopped = 0; 163 uint16_t nxtsz; 164 165 /* protection */ 166 if ((inp == NULL) || (stcb == NULL) || (net == NULL) || 167 (ip == NULL) || (sh == NULL)) { 168 if (stcb != NULL) { 169 SCTP_TCB_UNLOCK(stcb); 170 } 171 return; 172 } 173 /* First job is to verify the vtag matches what I would send */ 174 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) { 175 SCTP_TCB_UNLOCK(stcb); 176 return; 177 } 178 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) - 179 sizeof(struct ip))); 180 if (icmph->icmp_type != ICMP_UNREACH) { 181 /* We only care about unreachable */ 182 SCTP_TCB_UNLOCK(stcb); 183 return; 184 } 185 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) { 186 /* not a unreachable message due to frag. */ 187 SCTP_TCB_UNLOCK(stcb); 188 return; 189 } 190 totsz = ip->ip_len; 191 192 nxtsz = ntohs(icmph->icmp_nextmtu); 193 if (nxtsz == 0) { 194 /* 195 * old type router that does not tell us what the next size 196 * mtu is. 
		 * Rats, we will have to guess (in an educated fashion,
		 * of course).
		 */
		nxtsz = sctp_get_prev_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
		if (net->port) {
			net->mtu -= sizeof(struct udphdr);
		}
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}

#endif

void
sctp_notify(struct sctp_inpcb *inp,
    struct ip *ip,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	/* protection */
	int reason;
	struct icmp *icmph;

	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
		/*
		 * Hmm, reachability problems we must examine closely. If it
		 * is not reachable, we may have lost a network. Or if there
		 * is NO protocol at the other end named SCTP, we consider
		 * it an OOTB abort.
		 */
		if (net->dest_state & SCTP_ADDR_REACHABLE) {
			/* Ok, that destination is NOT reachable */
			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
			    net->error_count,
			    net->failure_threshold,
			    net);

			net->dest_state &= ~SCTP_ADDR_REACHABLE;
			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
			/*
			 * JRS 5/14/07 - If a destination is unreachable,
			 * the PF bit is turned off. This allows an
			 * unambiguous use of the PF bit for destinations
			 * that are reachable but potentially failed. If the
			 * destination is set to the unreachable state, also
			 * set the destination to the PF state.
			 */
			/*
			 * Add debug message here if destination is not in
			 * PF state.
			 */
			/* Stop any running T3 timers here?
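			 * (Nothing is stopped at this point; the code below
			 * only clears the PF flag, bumps the error count past
			 * the threshold, and notifies the ULP.)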
*/ 298 if ((stcb->asoc.sctp_cmt_on_off > 0) && 299 (stcb->asoc.sctp_cmt_pf > 0)) { 300 net->dest_state &= ~SCTP_ADDR_PF; 301 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 302 net); 303 } 304 net->error_count = net->failure_threshold + 1; 305 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 306 stcb, SCTP_FAILED_THRESHOLD, 307 (void *)net, SCTP_SO_NOT_LOCKED); 308 } 309 SCTP_TCB_UNLOCK(stcb); 310 } else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) || 311 (icmph->icmp_code == ICMP_UNREACH_PORT)) { 312 /* 313 * Here the peer is either playing tricks on us, including 314 * an address that belongs to someone who does not support 315 * SCTP OR was a userland implementation that shutdown and 316 * now is dead. In either case treat it like a OOTB abort 317 * with no TCB 318 */ 319 reason = SCTP_PEER_FAULTY; 320 sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED); 321 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 322 so = SCTP_INP_SO(inp); 323 atomic_add_int(&stcb->asoc.refcnt, 1); 324 SCTP_TCB_UNLOCK(stcb); 325 SCTP_SOCKET_LOCK(so, 1); 326 SCTP_TCB_LOCK(stcb); 327 atomic_subtract_int(&stcb->asoc.refcnt, 1); 328 #endif 329 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); 330 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 331 SCTP_SOCKET_UNLOCK(so, 1); 332 /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */ 333 #endif 334 /* no need to unlock here, since the TCB is gone */ 335 } else { 336 SCTP_TCB_UNLOCK(stcb); 337 } 338 } 339 340 #ifdef INET 341 void 342 sctp_ctlinput(cmd, sa, vip) 343 int cmd; 344 struct sockaddr *sa; 345 void *vip; 346 { 347 struct ip *ip = vip; 348 struct sctphdr *sh; 349 uint32_t vrf_id; 350 351 /* FIX, for non-bsd is this right? */ 352 vrf_id = SCTP_DEFAULT_VRFID; 353 if (sa->sa_family != AF_INET || 354 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) { 355 return; 356 } 357 if (PRC_IS_REDIRECT(cmd)) { 358 ip = 0; 359 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { 360 return; 361 } 362 if (ip) { 363 struct sctp_inpcb *inp = NULL; 364 struct sctp_tcb *stcb = NULL; 365 struct sctp_nets *net = NULL; 366 struct sockaddr_in to, from; 367 368 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 369 bzero(&to, sizeof(to)); 370 bzero(&from, sizeof(from)); 371 from.sin_family = to.sin_family = AF_INET; 372 from.sin_len = to.sin_len = sizeof(to); 373 from.sin_port = sh->src_port; 374 from.sin_addr = ip->ip_src; 375 to.sin_port = sh->dest_port; 376 to.sin_addr = ip->ip_dst; 377 378 /* 379 * 'to' holds the dest of the packet that failed to be sent. 380 * 'from' holds our local endpoint address. Thus we reverse 381 * the to and the from in the lookup. 
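		 * (The ICMP payload echoes our own packet back, so ip_src
		 * is our local address and ip_dst is the peer's.)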
382 */ 383 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from, 384 (struct sockaddr *)&to, 385 &inp, &net, 1, vrf_id); 386 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) { 387 if (cmd != PRC_MSGSIZE) { 388 sctp_notify(inp, ip, sh, 389 (struct sockaddr *)&to, stcb, 390 net); 391 } else { 392 /* handle possible ICMP size messages */ 393 sctp_notify_mbuf(inp, stcb, net, ip, sh); 394 } 395 } else { 396 if ((stcb == NULL) && (inp != NULL)) { 397 /* reduce ref-count */ 398 SCTP_INP_WLOCK(inp); 399 SCTP_INP_DECR_REF(inp); 400 SCTP_INP_WUNLOCK(inp); 401 } 402 if (stcb) { 403 SCTP_TCB_UNLOCK(stcb); 404 } 405 } 406 } 407 return; 408 } 409 410 #endif 411 412 static int 413 sctp_getcred(SYSCTL_HANDLER_ARGS) 414 { 415 struct xucred xuc; 416 struct sockaddr_in addrs[2]; 417 struct sctp_inpcb *inp; 418 struct sctp_nets *net; 419 struct sctp_tcb *stcb; 420 int error; 421 uint32_t vrf_id; 422 423 /* FIX, for non-bsd is this right? */ 424 vrf_id = SCTP_DEFAULT_VRFID; 425 426 error = priv_check(req->td, PRIV_NETINET_GETCRED); 427 428 if (error) 429 return (error); 430 431 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 432 if (error) 433 return (error); 434 435 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]), 436 sintosa(&addrs[1]), 437 &inp, &net, 1, vrf_id); 438 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { 439 if ((inp != NULL) && (stcb == NULL)) { 440 /* reduce ref-count */ 441 SCTP_INP_WLOCK(inp); 442 SCTP_INP_DECR_REF(inp); 443 goto cred_can_cont; 444 } 445 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 446 error = ENOENT; 447 goto out; 448 } 449 SCTP_TCB_UNLOCK(stcb); 450 /* 451 * We use the write lock here, only since in the error leg we need 452 * it. If we used RLOCK, then we would have to 453 * wlock/decr/unlock/rlock. Which in theory could create a hole. 454 * Better to use higher wlock. 455 */ 456 SCTP_INP_WLOCK(inp); 457 cred_can_cont: 458 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); 459 if (error) { 460 SCTP_INP_WUNLOCK(inp); 461 goto out; 462 } 463 cru2x(inp->sctp_socket->so_cred, &xuc); 464 SCTP_INP_WUNLOCK(inp); 465 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 466 out: 467 return (error); 468 } 469 470 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 471 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); 472 473 474 #ifdef INET 475 static void 476 sctp_abort(struct socket *so) 477 { 478 struct sctp_inpcb *inp; 479 uint32_t flags; 480 481 inp = (struct sctp_inpcb *)so->so_pcb; 482 if (inp == 0) { 483 return; 484 } 485 sctp_must_try_again: 486 flags = inp->sctp_flags; 487 #ifdef SCTP_LOG_CLOSING 488 sctp_log_closing(inp, NULL, 17); 489 #endif 490 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 491 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 492 #ifdef SCTP_LOG_CLOSING 493 sctp_log_closing(inp, NULL, 16); 494 #endif 495 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 496 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 497 SOCK_LOCK(so); 498 SCTP_SB_CLEAR(so->so_snd); 499 /* 500 * same for the rcv ones, they are only here for the 501 * accounting/select. 502 */ 503 SCTP_SB_CLEAR(so->so_rcv); 504 505 /* Now null out the reference, we are completely detached. 
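		 * (Once so_pcb is cleared, the socket layer can no longer
		 * reach this inpcb.)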
*/ 506 so->so_pcb = NULL; 507 SOCK_UNLOCK(so); 508 } else { 509 flags = inp->sctp_flags; 510 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 511 goto sctp_must_try_again; 512 } 513 } 514 return; 515 } 516 517 static int 518 sctp_attach(struct socket *so, int proto, struct thread *p) 519 { 520 struct sctp_inpcb *inp; 521 struct inpcb *ip_inp; 522 int error; 523 uint32_t vrf_id = SCTP_DEFAULT_VRFID; 524 525 #ifdef IPSEC 526 uint32_t flags; 527 528 #endif 529 530 inp = (struct sctp_inpcb *)so->so_pcb; 531 if (inp != 0) { 532 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 533 return EINVAL; 534 } 535 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { 536 error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); 537 if (error) { 538 return error; 539 } 540 } 541 error = sctp_inpcb_alloc(so, vrf_id); 542 if (error) { 543 return error; 544 } 545 inp = (struct sctp_inpcb *)so->so_pcb; 546 SCTP_INP_WLOCK(inp); 547 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ 548 ip_inp = &inp->ip_inp.inp; 549 ip_inp->inp_vflag |= INP_IPV4; 550 ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl); 551 #ifdef IPSEC 552 error = ipsec_init_policy(so, &ip_inp->inp_sp); 553 #ifdef SCTP_LOG_CLOSING 554 sctp_log_closing(inp, NULL, 17); 555 #endif 556 if (error != 0) { 557 try_again: 558 flags = inp->sctp_flags; 559 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 560 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 561 #ifdef SCTP_LOG_CLOSING 562 sctp_log_closing(inp, NULL, 15); 563 #endif 564 SCTP_INP_WUNLOCK(inp); 565 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 566 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 567 } else { 568 flags = inp->sctp_flags; 569 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 570 goto try_again; 571 } else { 572 SCTP_INP_WUNLOCK(inp); 573 } 574 } 575 return error; 576 } 577 #endif /* IPSEC */ 578 SCTP_INP_WUNLOCK(inp); 579 return 0; 580 } 581 582 static int 583 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 584 { 585 struct sctp_inpcb *inp = NULL; 586 int error; 587 588 #ifdef INET6 589 if (addr && addr->sa_family != AF_INET) { 590 /* must be a v4 address! */ 591 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 592 return EINVAL; 593 } 594 #endif /* INET6 */ 595 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) { 596 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 597 return EINVAL; 598 } 599 inp = (struct sctp_inpcb *)so->so_pcb; 600 if (inp == 0) { 601 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 602 return EINVAL; 603 } 604 error = sctp_inpcb_bind(so, addr, NULL, p); 605 return error; 606 } 607 608 #endif 609 void 610 sctp_close(struct socket *so) 611 { 612 struct sctp_inpcb *inp; 613 uint32_t flags; 614 615 inp = (struct sctp_inpcb *)so->so_pcb; 616 if (inp == 0) 617 return; 618 619 /* 620 * Inform all the lower layer assoc that we are done. 
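	 * An abortive or graceful teardown is chosen below based on
	 * SO_LINGER and whether unread data remains in the receive buffer.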
621 */ 622 sctp_must_try_again: 623 flags = inp->sctp_flags; 624 #ifdef SCTP_LOG_CLOSING 625 sctp_log_closing(inp, NULL, 17); 626 #endif 627 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 628 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 629 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || 630 (so->so_rcv.sb_cc > 0)) { 631 #ifdef SCTP_LOG_CLOSING 632 sctp_log_closing(inp, NULL, 13); 633 #endif 634 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 635 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 636 } else { 637 #ifdef SCTP_LOG_CLOSING 638 sctp_log_closing(inp, NULL, 14); 639 #endif 640 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, 641 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 642 } 643 /* 644 * The socket is now detached, no matter what the state of 645 * the SCTP association. 646 */ 647 SOCK_LOCK(so); 648 SCTP_SB_CLEAR(so->so_snd); 649 /* 650 * same for the rcv ones, they are only here for the 651 * accounting/select. 652 */ 653 SCTP_SB_CLEAR(so->so_rcv); 654 655 /* Now null out the reference, we are completely detached. */ 656 so->so_pcb = NULL; 657 SOCK_UNLOCK(so); 658 } else { 659 flags = inp->sctp_flags; 660 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 661 goto sctp_must_try_again; 662 } 663 } 664 return; 665 } 666 667 668 int 669 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 670 struct mbuf *control, struct thread *p); 671 672 673 int 674 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 675 struct mbuf *control, struct thread *p) 676 { 677 struct sctp_inpcb *inp; 678 int error; 679 680 inp = (struct sctp_inpcb *)so->so_pcb; 681 if (inp == 0) { 682 if (control) { 683 sctp_m_freem(control); 684 control = NULL; 685 } 686 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 687 sctp_m_freem(m); 688 return EINVAL; 689 } 690 /* Got to have an to address if we are NOT a connected socket */ 691 if ((addr == NULL) && 692 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 693 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) 694 ) { 695 goto connected_type; 696 } else if (addr == NULL) { 697 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 698 error = EDESTADDRREQ; 699 sctp_m_freem(m); 700 if (control) { 701 sctp_m_freem(control); 702 control = NULL; 703 } 704 return (error); 705 } 706 #ifdef INET6 707 if (addr->sa_family != AF_INET) { 708 /* must be a v4 address! */ 709 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 710 sctp_m_freem(m); 711 if (control) { 712 sctp_m_freem(control); 713 control = NULL; 714 } 715 error = EDESTADDRREQ; 716 return EDESTADDRREQ; 717 } 718 #endif /* INET6 */ 719 connected_type: 720 /* now what about control */ 721 if (control) { 722 if (inp->control) { 723 SCTP_PRINTF("huh? control set?\n"); 724 sctp_m_freem(inp->control); 725 inp->control = NULL; 726 } 727 inp->control = control; 728 } 729 /* Place the data */ 730 if (inp->pkt) { 731 SCTP_BUF_NEXT(inp->pkt_last) = m; 732 inp->pkt_last = m; 733 } else { 734 inp->pkt_last = inp->pkt = m; 735 } 736 if ( 737 /* FreeBSD uses a flag passed */ 738 ((flags & PRUS_MORETOCOME) == 0) 739 ) { 740 /* 741 * note with the current version this code will only be used 742 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for 743 * re-defining sosend to use the sctp_sosend. One can 744 * optionally switch back to this code (by changing back the 745 * definitions) but this is not advisable. 
This code is used 746 * by FreeBSD when sending a file with sendfile() though. 747 */ 748 int ret; 749 750 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); 751 inp->pkt = NULL; 752 inp->control = NULL; 753 return (ret); 754 } else { 755 return (0); 756 } 757 } 758 759 int 760 sctp_disconnect(struct socket *so) 761 { 762 struct sctp_inpcb *inp; 763 764 inp = (struct sctp_inpcb *)so->so_pcb; 765 if (inp == NULL) { 766 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 767 return (ENOTCONN); 768 } 769 SCTP_INP_RLOCK(inp); 770 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 771 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 772 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 773 /* No connection */ 774 SCTP_INP_RUNLOCK(inp); 775 return (0); 776 } else { 777 struct sctp_association *asoc; 778 struct sctp_tcb *stcb; 779 780 stcb = LIST_FIRST(&inp->sctp_asoc_list); 781 if (stcb == NULL) { 782 SCTP_INP_RUNLOCK(inp); 783 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 784 return (EINVAL); 785 } 786 SCTP_TCB_LOCK(stcb); 787 asoc = &stcb->asoc; 788 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 789 /* We are about to be freed, out of here */ 790 SCTP_TCB_UNLOCK(stcb); 791 SCTP_INP_RUNLOCK(inp); 792 return (0); 793 } 794 if (((so->so_options & SO_LINGER) && 795 (so->so_linger == 0)) || 796 (so->so_rcv.sb_cc > 0)) { 797 if (SCTP_GET_STATE(asoc) != 798 SCTP_STATE_COOKIE_WAIT) { 799 /* Left with Data unread */ 800 struct mbuf *err; 801 802 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 803 if (err) { 804 /* 805 * Fill in the user 806 * initiated abort 807 */ 808 struct sctp_paramhdr *ph; 809 810 ph = mtod(err, struct sctp_paramhdr *); 811 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr); 812 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 813 ph->param_length = htons(SCTP_BUF_LEN(err)); 814 } 815 #if defined(SCTP_PANIC_ON_ABORT) 816 panic("disconnect does an abort"); 817 #endif 818 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED); 819 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 820 } 821 SCTP_INP_RUNLOCK(inp); 822 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 823 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 824 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 825 } 826 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); 827 /* No unlock tcb assoc is gone */ 828 return (0); 829 } 830 if (TAILQ_EMPTY(&asoc->send_queue) && 831 TAILQ_EMPTY(&asoc->sent_queue) && 832 (asoc->stream_queue_cnt == 0)) { 833 /* there is nothing queued to send, so done */ 834 if (asoc->locked_on_sending) { 835 goto abort_anyway; 836 } 837 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 838 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 839 /* only send SHUTDOWN 1st time thru */ 840 sctp_stop_timers_for_shutdown(stcb); 841 sctp_send_shutdown(stcb, 842 stcb->asoc.primary_destination); 843 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 844 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 845 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 846 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 847 } 848 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 849 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 850 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 851 stcb->sctp_ep, stcb, 852 asoc->primary_destination); 853 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 854 stcb->sctp_ep, stcb, 855 asoc->primary_destination); 856 } 857 } 
else { 858 /* 859 * we still got (or just got) data to send, 860 * so set SHUTDOWN_PENDING 861 */ 862 /* 863 * XXX sockets draft says that SCTP_EOF 864 * should be sent with no data. currently, 865 * we will allow user data to be sent first 866 * and move to SHUTDOWN-PENDING 867 */ 868 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 869 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 870 asoc->primary_destination); 871 if (asoc->locked_on_sending) { 872 /* Locked to send out the data */ 873 struct sctp_stream_queue_pending *sp; 874 875 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 876 if (sp == NULL) { 877 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 878 asoc->locked_on_sending->stream_no); 879 } else { 880 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 881 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 882 } 883 } 884 if (TAILQ_EMPTY(&asoc->send_queue) && 885 TAILQ_EMPTY(&asoc->sent_queue) && 886 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 887 struct mbuf *op_err; 888 889 abort_anyway: 890 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 891 0, M_DONTWAIT, 1, MT_DATA); 892 if (op_err) { 893 /* 894 * Fill in the user 895 * initiated abort 896 */ 897 struct sctp_paramhdr *ph; 898 uint32_t *ippp; 899 900 SCTP_BUF_LEN(op_err) = 901 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); 902 ph = mtod(op_err, 903 struct sctp_paramhdr *); 904 ph->param_type = htons( 905 SCTP_CAUSE_USER_INITIATED_ABT); 906 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 907 ippp = (uint32_t *) (ph + 1); 908 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4); 909 } 910 #if defined(SCTP_PANIC_ON_ABORT) 911 panic("disconnect does an abort"); 912 #endif 913 914 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; 915 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 916 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 917 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 918 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 919 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 920 } 921 SCTP_INP_RUNLOCK(inp); 922 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); 923 return (0); 924 } else { 925 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 926 } 927 } 928 soisdisconnecting(so); 929 SCTP_TCB_UNLOCK(stcb); 930 SCTP_INP_RUNLOCK(inp); 931 return (0); 932 } 933 /* not reached */ 934 } else { 935 /* UDP model does not support this */ 936 SCTP_INP_RUNLOCK(inp); 937 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 938 return EOPNOTSUPP; 939 } 940 } 941 942 int 943 sctp_flush(struct socket *so, int how) 944 { 945 /* 946 * We will just clear out the values and let subsequent close clear 947 * out the data, if any. Note if the user did a shutdown(SHUT_RD) 948 * they will not be able to read the data, the socket will block 949 * that from happening. 
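	 * (PRU_FLUSH_RD additionally sets SCTP_PCB_FLAGS_SOCKET_CANT_READ
	 * below, so the stack stops queueing new data for the receive
	 * buffer.)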
950 */ 951 struct sctp_inpcb *inp; 952 953 inp = (struct sctp_inpcb *)so->so_pcb; 954 if (inp == NULL) { 955 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 956 return EINVAL; 957 } 958 SCTP_INP_RLOCK(inp); 959 /* For the 1 to many model this does nothing */ 960 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 961 SCTP_INP_RUNLOCK(inp); 962 return (0); 963 } 964 SCTP_INP_RUNLOCK(inp); 965 if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) { 966 /* 967 * First make sure the sb will be happy, we don't use these 968 * except maybe the count 969 */ 970 SCTP_INP_WLOCK(inp); 971 SCTP_INP_READ_LOCK(inp); 972 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ; 973 SCTP_INP_READ_UNLOCK(inp); 974 SCTP_INP_WUNLOCK(inp); 975 so->so_rcv.sb_cc = 0; 976 so->so_rcv.sb_mbcnt = 0; 977 so->so_rcv.sb_mb = NULL; 978 } 979 if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) { 980 /* 981 * First make sure the sb will be happy, we don't use these 982 * except maybe the count 983 */ 984 so->so_snd.sb_cc = 0; 985 so->so_snd.sb_mbcnt = 0; 986 so->so_snd.sb_mb = NULL; 987 988 } 989 return (0); 990 } 991 992 int 993 sctp_shutdown(struct socket *so) 994 { 995 struct sctp_inpcb *inp; 996 997 inp = (struct sctp_inpcb *)so->so_pcb; 998 if (inp == 0) { 999 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1000 return EINVAL; 1001 } 1002 SCTP_INP_RLOCK(inp); 1003 /* For UDP model this is a invalid call */ 1004 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 1005 /* Restore the flags that the soshutdown took away. */ 1006 SOCKBUF_LOCK(&so->so_rcv); 1007 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 1008 SOCKBUF_UNLOCK(&so->so_rcv); 1009 /* This proc will wakeup for read and do nothing (I hope) */ 1010 SCTP_INP_RUNLOCK(inp); 1011 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1012 return (EOPNOTSUPP); 1013 } 1014 /* 1015 * Ok if we reach here its the TCP model and it is either a SHUT_WR 1016 * or SHUT_RDWR. This means we put the shutdown flag against it. 1017 */ 1018 { 1019 struct sctp_tcb *stcb; 1020 struct sctp_association *asoc; 1021 1022 if ((so->so_state & 1023 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { 1024 SCTP_INP_RUNLOCK(inp); 1025 return (ENOTCONN); 1026 } 1027 socantsendmore(so); 1028 1029 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1030 if (stcb == NULL) { 1031 /* 1032 * Ok we hit the case that the shutdown call was 1033 * made after an abort or something. Nothing to do 1034 * now. 1035 */ 1036 SCTP_INP_RUNLOCK(inp); 1037 return (0); 1038 } 1039 SCTP_TCB_LOCK(stcb); 1040 asoc = &stcb->asoc; 1041 if (TAILQ_EMPTY(&asoc->send_queue) && 1042 TAILQ_EMPTY(&asoc->sent_queue) && 1043 (asoc->stream_queue_cnt == 0)) { 1044 if (asoc->locked_on_sending) { 1045 goto abort_anyway; 1046 } 1047 /* there is nothing queued to send, so I'm done... 
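		 * (a SHUTDOWN is sent below unless one is already
		 * outstanding)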
*/ 1048 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1049 /* only send SHUTDOWN the first time through */ 1050 sctp_stop_timers_for_shutdown(stcb); 1051 sctp_send_shutdown(stcb, 1052 stcb->asoc.primary_destination); 1053 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 1054 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1055 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1056 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1057 } 1058 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1059 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1060 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1061 stcb->sctp_ep, stcb, 1062 asoc->primary_destination); 1063 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1064 stcb->sctp_ep, stcb, 1065 asoc->primary_destination); 1066 } 1067 } else { 1068 /* 1069 * we still got (or just got) data to send, so set 1070 * SHUTDOWN_PENDING 1071 */ 1072 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1073 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1074 asoc->primary_destination); 1075 1076 if (asoc->locked_on_sending) { 1077 /* Locked to send out the data */ 1078 struct sctp_stream_queue_pending *sp; 1079 1080 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1081 if (sp == NULL) { 1082 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 1083 asoc->locked_on_sending->stream_no); 1084 } else { 1085 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 1086 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1087 } 1088 } 1089 } 1090 if (TAILQ_EMPTY(&asoc->send_queue) && 1091 TAILQ_EMPTY(&asoc->sent_queue) && 1092 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1093 struct mbuf *op_err; 1094 1095 abort_anyway: 1096 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1097 0, M_DONTWAIT, 1, MT_DATA); 1098 if (op_err) { 1099 /* Fill in the user initiated abort */ 1100 struct sctp_paramhdr *ph; 1101 uint32_t *ippp; 1102 1103 SCTP_BUF_LEN(op_err) = 1104 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 1105 ph = mtod(op_err, 1106 struct sctp_paramhdr *); 1107 ph->param_type = htons( 1108 SCTP_CAUSE_USER_INITIATED_ABT); 1109 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1110 ippp = (uint32_t *) (ph + 1); 1111 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1112 } 1113 #if defined(SCTP_PANIC_ON_ABORT) 1114 panic("shutdown does an abort"); 1115 #endif 1116 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; 1117 sctp_abort_an_association(stcb->sctp_ep, stcb, 1118 SCTP_RESPONSE_TO_USER_REQ, 1119 op_err, SCTP_SO_LOCKED); 1120 goto skip_unlock; 1121 } else { 1122 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 1123 } 1124 } 1125 SCTP_TCB_UNLOCK(stcb); 1126 } 1127 skip_unlock: 1128 SCTP_INP_RUNLOCK(inp); 1129 return 0; 1130 } 1131 1132 /* 1133 * copies a "user" presentable address and removes embedded scope, etc. 
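 * (the scope-recovery step is only compiled in when INET6 is defined)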
1134 * returns 0 on success, 1 on error 1135 */ 1136 static uint32_t 1137 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 1138 { 1139 #ifdef INET6 1140 struct sockaddr_in6 lsa6; 1141 1142 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 1143 &lsa6); 1144 #endif 1145 memcpy(ss, sa, sa->sa_len); 1146 return (0); 1147 } 1148 1149 1150 1151 /* 1152 * NOTE: assumes addr lock is held 1153 */ 1154 static size_t 1155 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, 1156 struct sctp_tcb *stcb, 1157 size_t limit, 1158 struct sockaddr_storage *sas, 1159 uint32_t vrf_id) 1160 { 1161 struct sctp_ifn *sctp_ifn; 1162 struct sctp_ifa *sctp_ifa; 1163 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1164 size_t actual; 1165 int ipv4_addr_legal, ipv6_addr_legal; 1166 struct sctp_vrf *vrf; 1167 1168 actual = 0; 1169 if (limit <= 0) 1170 return (actual); 1171 1172 if (stcb) { 1173 /* Turn on all the appropriate scope */ 1174 loopback_scope = stcb->asoc.loopback_scope; 1175 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1176 local_scope = stcb->asoc.local_scope; 1177 site_scope = stcb->asoc.site_scope; 1178 } else { 1179 /* Turn on ALL scope, since we look at the EP */ 1180 loopback_scope = ipv4_local_scope = local_scope = 1181 site_scope = 1; 1182 } 1183 ipv4_addr_legal = ipv6_addr_legal = 0; 1184 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1185 ipv6_addr_legal = 1; 1186 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1187 ipv4_addr_legal = 1; 1188 } 1189 } else { 1190 ipv4_addr_legal = 1; 1191 } 1192 vrf = sctp_find_vrf(vrf_id); 1193 if (vrf == NULL) { 1194 return (0); 1195 } 1196 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1197 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1198 if ((loopback_scope == 0) && 1199 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1200 /* Skip loopback if loopback_scope not set */ 1201 continue; 1202 } 1203 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1204 if (stcb) { 1205 /* 1206 * For the BOUND-ALL case, the list 1207 * associated with a TCB is Always 1208 * considered a reverse list.. i.e. 1209 * it lists addresses that are NOT 1210 * part of the association. If this 1211 * is one of those we must skip it. 
1212 */ 1213 if (sctp_is_addr_restricted(stcb, 1214 sctp_ifa)) { 1215 continue; 1216 } 1217 } 1218 switch (sctp_ifa->address.sa.sa_family) { 1219 #ifdef INET 1220 case AF_INET: 1221 if (ipv4_addr_legal) { 1222 struct sockaddr_in *sin; 1223 1224 sin = (struct sockaddr_in *)&sctp_ifa->address.sa; 1225 if (sin->sin_addr.s_addr == 0) { 1226 /* 1227 * we skip 1228 * unspecifed 1229 * addresses 1230 */ 1231 continue; 1232 } 1233 if ((ipv4_local_scope == 0) && 1234 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1235 continue; 1236 } 1237 #ifdef INET6 1238 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 1239 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); 1240 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1241 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); 1242 actual += sizeof(struct sockaddr_in6); 1243 } else { 1244 #endif 1245 memcpy(sas, sin, sizeof(*sin)); 1246 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; 1247 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); 1248 actual += sizeof(*sin); 1249 #ifdef INET6 1250 } 1251 #endif 1252 if (actual >= limit) { 1253 return (actual); 1254 } 1255 } else { 1256 continue; 1257 } 1258 break; 1259 #endif 1260 #ifdef INET6 1261 case AF_INET6: 1262 if (ipv6_addr_legal) { 1263 struct sockaddr_in6 *sin6; 1264 1265 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 1266 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1267 /* 1268 * we skip 1269 * unspecifed 1270 * addresses 1271 */ 1272 continue; 1273 } 1274 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1275 if (local_scope == 0) 1276 continue; 1277 if (sin6->sin6_scope_id == 0) { 1278 if (sa6_recoverscope(sin6) != 0) 1279 /* 1280 * 1281 * bad 1282 * 1283 * li 1284 * nk 1285 * 1286 * loc 1287 * al 1288 * 1289 * add 1290 * re 1291 * ss 1292 * */ 1293 continue; 1294 } 1295 } 1296 if ((site_scope == 0) && 1297 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1298 continue; 1299 } 1300 memcpy(sas, sin6, sizeof(*sin6)); 1301 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1302 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); 1303 actual += sizeof(*sin6); 1304 if (actual >= limit) { 1305 return (actual); 1306 } 1307 } else { 1308 continue; 1309 } 1310 break; 1311 #endif 1312 default: 1313 /* TSNH */ 1314 break; 1315 } 1316 } 1317 } 1318 } else { 1319 struct sctp_laddr *laddr; 1320 1321 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1322 if (stcb) { 1323 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 1324 continue; 1325 } 1326 } 1327 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) 1328 continue; 1329 1330 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1331 sas = (struct sockaddr_storage *)((caddr_t)sas + 1332 laddr->ifa->address.sa.sa_len); 1333 actual += laddr->ifa->address.sa.sa_len; 1334 if (actual >= limit) { 1335 return (actual); 1336 } 1337 } 1338 } 1339 return (actual); 1340 } 1341 1342 static size_t 1343 sctp_fill_up_addresses(struct sctp_inpcb *inp, 1344 struct sctp_tcb *stcb, 1345 size_t limit, 1346 struct sockaddr_storage *sas) 1347 { 1348 size_t size = 0; 1349 1350 SCTP_IPI_ADDR_RLOCK(); 1351 /* fill up addresses for the endpoint's default vrf */ 1352 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, 1353 inp->def_vrf_id); 1354 SCTP_IPI_ADDR_RUNLOCK(); 1355 return (size); 1356 } 1357 1358 /* 1359 * NOTE: assumes addr lock is held 1360 */ 1361 static int 1362 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) 1363 { 1364 int cnt = 0; 
1365 struct sctp_vrf *vrf = NULL; 1366 1367 /* 1368 * In both sub-set bound an bound_all cases we return the MAXIMUM 1369 * number of addresses that you COULD get. In reality the sub-set 1370 * bound may have an exclusion list for a given TCB OR in the 1371 * bound-all case a TCB may NOT include the loopback or other 1372 * addresses as well. 1373 */ 1374 vrf = sctp_find_vrf(vrf_id); 1375 if (vrf == NULL) { 1376 return (0); 1377 } 1378 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1379 struct sctp_ifn *sctp_ifn; 1380 struct sctp_ifa *sctp_ifa; 1381 1382 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1383 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1384 /* Count them if they are the right type */ 1385 switch (sctp_ifa->address.sa.sa_family) { 1386 #ifdef INET 1387 case AF_INET: 1388 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1389 cnt += sizeof(struct sockaddr_in6); 1390 else 1391 cnt += sizeof(struct sockaddr_in); 1392 break; 1393 #endif 1394 #ifdef INET6 1395 case AF_INET6: 1396 cnt += sizeof(struct sockaddr_in6); 1397 break; 1398 #endif 1399 default: 1400 break; 1401 } 1402 } 1403 } 1404 } else { 1405 struct sctp_laddr *laddr; 1406 1407 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1408 switch (laddr->ifa->address.sa.sa_family) { 1409 #ifdef INET 1410 case AF_INET: 1411 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1412 cnt += sizeof(struct sockaddr_in6); 1413 else 1414 cnt += sizeof(struct sockaddr_in); 1415 break; 1416 #endif 1417 #ifdef INET6 1418 case AF_INET6: 1419 cnt += sizeof(struct sockaddr_in6); 1420 break; 1421 #endif 1422 default: 1423 break; 1424 } 1425 } 1426 } 1427 return (cnt); 1428 } 1429 1430 static int 1431 sctp_count_max_addresses(struct sctp_inpcb *inp) 1432 { 1433 int cnt = 0; 1434 1435 SCTP_IPI_ADDR_RLOCK(); 1436 /* count addresses for the endpoint's default VRF */ 1437 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id); 1438 SCTP_IPI_ADDR_RUNLOCK(); 1439 return (cnt); 1440 } 1441 1442 static int 1443 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, 1444 size_t optsize, void *p, int delay) 1445 { 1446 int error = 0; 1447 int creat_lock_on = 0; 1448 struct sctp_tcb *stcb = NULL; 1449 struct sockaddr *sa; 1450 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; 1451 int added = 0; 1452 uint32_t vrf_id; 1453 int bad_addresses = 0; 1454 sctp_assoc_t *a_id; 1455 1456 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n"); 1457 1458 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1459 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 1460 /* We are already connected AND the TCP model */ 1461 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 1462 return (EADDRINUSE); 1463 } 1464 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 1465 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { 1466 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1467 return (EINVAL); 1468 } 1469 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 1470 SCTP_INP_RLOCK(inp); 1471 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1472 SCTP_INP_RUNLOCK(inp); 1473 } 1474 if (stcb) { 1475 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1476 return (EALREADY); 1477 } 1478 SCTP_INP_INCR_REF(inp); 1479 SCTP_ASOC_CREATE_LOCK(inp); 1480 creat_lock_on = 1; 1481 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1482 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1483 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 1484 error = EFAULT; 1485 
goto out_now; 1486 } 1487 totaddrp = (int *)optval; 1488 totaddr = *totaddrp; 1489 sa = (struct sockaddr *)(totaddrp + 1); 1490 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses); 1491 if ((stcb != NULL) || bad_addresses) { 1492 /* Already have or am bring up an association */ 1493 SCTP_ASOC_CREATE_UNLOCK(inp); 1494 creat_lock_on = 0; 1495 if (stcb) 1496 SCTP_TCB_UNLOCK(stcb); 1497 if (bad_addresses == 0) { 1498 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1499 error = EALREADY; 1500 } 1501 goto out_now; 1502 } 1503 #ifdef INET6 1504 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1505 (num_v6 > 0)) { 1506 error = EINVAL; 1507 goto out_now; 1508 } 1509 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1510 (num_v4 > 0)) { 1511 struct in6pcb *inp6; 1512 1513 inp6 = (struct in6pcb *)inp; 1514 if (SCTP_IPV6_V6ONLY(inp6)) { 1515 /* 1516 * if IPV6_V6ONLY flag, ignore connections destined 1517 * to a v4 addr or v4-mapped addr 1518 */ 1519 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1520 error = EINVAL; 1521 goto out_now; 1522 } 1523 } 1524 #endif /* INET6 */ 1525 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 1526 SCTP_PCB_FLAGS_UNBOUND) { 1527 /* Bind a ephemeral port */ 1528 error = sctp_inpcb_bind(so, NULL, NULL, p); 1529 if (error) { 1530 goto out_now; 1531 } 1532 } 1533 /* FIX ME: do we want to pass in a vrf on the connect call? */ 1534 vrf_id = inp->def_vrf_id; 1535 1536 1537 /* We are GOOD to go */ 1538 stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id, 1539 (struct thread *)p 1540 ); 1541 if (stcb == NULL) { 1542 /* Gak! no memory */ 1543 goto out_now; 1544 } 1545 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 1546 /* move to second address */ 1547 switch (sa->sa_family) { 1548 #ifdef INET 1549 case AF_INET: 1550 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); 1551 break; 1552 #endif 1553 #ifdef INET6 1554 case AF_INET6: 1555 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); 1556 break; 1557 #endif 1558 default: 1559 break; 1560 } 1561 1562 error = 0; 1563 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error); 1564 /* Fill in the return id */ 1565 if (error) { 1566 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1567 goto out_now; 1568 } 1569 a_id = (sctp_assoc_t *) optval; 1570 *a_id = sctp_get_associd(stcb); 1571 1572 /* initialize authentication parameters for the assoc */ 1573 sctp_initialize_auth_params(inp, stcb); 1574 1575 if (delay) { 1576 /* doing delayed connection */ 1577 stcb->asoc.delayed_connection = 1; 1578 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); 1579 } else { 1580 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1581 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 1582 } 1583 SCTP_TCB_UNLOCK(stcb); 1584 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1585 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1586 /* Set the connected flag so we can queue data */ 1587 soisconnecting(so); 1588 } 1589 out_now: 1590 if (creat_lock_on) { 1591 SCTP_ASOC_CREATE_UNLOCK(inp); 1592 } 1593 SCTP_INP_DECR_REF(inp); 1594 return error; 1595 } 1596 1597 #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \ 1598 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\ 1599 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \ 1600 SCTP_INP_RLOCK(inp); \ 1601 stcb = LIST_FIRST(&inp->sctp_asoc_list); \ 1602 if (stcb) { \ 
1603 SCTP_TCB_LOCK(stcb); \ 1604 } \ 1605 SCTP_INP_RUNLOCK(inp); \ 1606 } else if (assoc_id != 0) { \ 1607 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ 1608 if (stcb == NULL) { \ 1609 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \ 1610 error = ENOENT; \ 1611 break; \ 1612 } \ 1613 } else { \ 1614 stcb = NULL; \ 1615 } \ 1616 } 1617 1618 1619 #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ 1620 if (size < sizeof(type)) { \ 1621 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \ 1622 error = EINVAL; \ 1623 break; \ 1624 } else { \ 1625 destp = (type *)srcp; \ 1626 } \ 1627 } 1628 1629 static int 1630 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, 1631 void *p) 1632 { 1633 struct sctp_inpcb *inp = NULL; 1634 int error, val = 0; 1635 struct sctp_tcb *stcb = NULL; 1636 1637 if (optval == NULL) { 1638 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1639 return (EINVAL); 1640 } 1641 inp = (struct sctp_inpcb *)so->so_pcb; 1642 if (inp == 0) { 1643 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1644 return EINVAL; 1645 } 1646 error = 0; 1647 1648 switch (optname) { 1649 case SCTP_NODELAY: 1650 case SCTP_AUTOCLOSE: 1651 case SCTP_EXPLICIT_EOR: 1652 case SCTP_AUTO_ASCONF: 1653 case SCTP_DISABLE_FRAGMENTS: 1654 case SCTP_I_WANT_MAPPED_V4_ADDR: 1655 case SCTP_USE_EXT_RCVINFO: 1656 SCTP_INP_RLOCK(inp); 1657 switch (optname) { 1658 case SCTP_DISABLE_FRAGMENTS: 1659 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1660 break; 1661 case SCTP_I_WANT_MAPPED_V4_ADDR: 1662 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1663 break; 1664 case SCTP_AUTO_ASCONF: 1665 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1666 /* only valid for bound all sockets */ 1667 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1668 } else { 1669 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1670 error = EINVAL; 1671 goto flags_out; 1672 } 1673 break; 1674 case SCTP_EXPLICIT_EOR: 1675 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1676 break; 1677 case SCTP_NODELAY: 1678 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1679 break; 1680 case SCTP_USE_EXT_RCVINFO: 1681 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1682 break; 1683 case SCTP_AUTOCLOSE: 1684 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1685 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1686 else 1687 val = 0; 1688 break; 1689 1690 default: 1691 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1692 error = ENOPROTOOPT; 1693 } /* end switch (sopt->sopt_name) */ 1694 if (optname != SCTP_AUTOCLOSE) { 1695 /* make it an "on/off" value */ 1696 val = (val != 0); 1697 } 1698 if (*optsize < sizeof(val)) { 1699 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1700 error = EINVAL; 1701 } 1702 flags_out: 1703 SCTP_INP_RUNLOCK(inp); 1704 if (error == 0) { 1705 /* return the option value */ 1706 *(int *)optval = val; 1707 *optsize = sizeof(val); 1708 } 1709 break; 1710 case SCTP_GET_PACKET_LOG: 1711 { 1712 #ifdef SCTP_PACKET_LOGGING 1713 uint8_t *target; 1714 int ret; 1715 1716 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); 1717 ret = sctp_copy_out_packet_log(target, (int)*optsize); 1718 *optsize = ret; 1719 #else 1720 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1721 error = EOPNOTSUPP; 1722 #endif 1723 break; 1724 } 1725 case SCTP_REUSE_PORT: 1726 { 1727 
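			/*
			 * SCTP_REUSE_PORT is only meaningful for one-to-one
			 * (TCP-model) sockets; a one-to-many socket gets
			 * EINVAL below.
			 *
			 * Hypothetical userland sketch (not part of this
			 * file; "fd" is assumed to be a one-to-one SCTP
			 * socket):
			 *
			 *	uint32_t on;
			 *	socklen_t len = sizeof(on);
			 *	(void)getsockopt(fd, IPPROTO_SCTP,
			 *	    SCTP_REUSE_PORT, &on, &len);
			 */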
uint32_t *value; 1728 1729 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 1730 /* Can't do this for a 1-m socket */ 1731 error = EINVAL; 1732 break; 1733 } 1734 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1735 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 1736 *optsize = sizeof(uint32_t); 1737 } 1738 break; 1739 case SCTP_PARTIAL_DELIVERY_POINT: 1740 { 1741 uint32_t *value; 1742 1743 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1744 *value = inp->partial_delivery_point; 1745 *optsize = sizeof(uint32_t); 1746 } 1747 break; 1748 case SCTP_FRAGMENT_INTERLEAVE: 1749 { 1750 uint32_t *value; 1751 1752 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1753 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { 1754 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { 1755 *value = SCTP_FRAG_LEVEL_2; 1756 } else { 1757 *value = SCTP_FRAG_LEVEL_1; 1758 } 1759 } else { 1760 *value = SCTP_FRAG_LEVEL_0; 1761 } 1762 *optsize = sizeof(uint32_t); 1763 } 1764 break; 1765 case SCTP_CMT_ON_OFF: 1766 { 1767 struct sctp_assoc_value *av; 1768 1769 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1770 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1771 if (stcb) { 1772 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 1773 SCTP_TCB_UNLOCK(stcb); 1774 } else { 1775 SCTP_INP_RLOCK(inp); 1776 av->assoc_value = inp->sctp_cmt_on_off; 1777 SCTP_INP_RUNLOCK(inp); 1778 } 1779 *optsize = sizeof(*av); 1780 } 1781 break; 1782 /* JRS - Get socket option for pluggable congestion control */ 1783 case SCTP_PLUGGABLE_CC: 1784 { 1785 struct sctp_assoc_value *av; 1786 1787 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1788 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1789 if (stcb) { 1790 av->assoc_value = stcb->asoc.congestion_control_module; 1791 SCTP_TCB_UNLOCK(stcb); 1792 } else { 1793 av->assoc_value = inp->sctp_ep.sctp_default_cc_module; 1794 } 1795 *optsize = sizeof(*av); 1796 } 1797 break; 1798 case SCTP_CC_OPTION: 1799 { 1800 struct sctp_cc_option *cc_opt; 1801 1802 SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, *optsize); 1803 SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id); 1804 if (stcb == NULL) { 1805 error = EINVAL; 1806 } else { 1807 if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) { 1808 error = ENOTSUP; 1809 } else { 1810 error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 0, 1811 cc_opt); 1812 *optsize = sizeof(*cc_opt); 1813 } 1814 SCTP_TCB_UNLOCK(stcb); 1815 } 1816 } 1817 break; 1818 /* RS - Get socket option for pluggable stream scheduling */ 1819 case SCTP_PLUGGABLE_SS: 1820 { 1821 struct sctp_assoc_value *av; 1822 1823 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1824 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1825 if (stcb) { 1826 av->assoc_value = stcb->asoc.stream_scheduling_module; 1827 SCTP_TCB_UNLOCK(stcb); 1828 } else { 1829 av->assoc_value = inp->sctp_ep.sctp_default_ss_module; 1830 } 1831 *optsize = sizeof(*av); 1832 } 1833 break; 1834 case SCTP_SS_VALUE: 1835 { 1836 struct sctp_stream_value *av; 1837 1838 SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, *optsize); 1839 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1840 if (stcb) { 1841 if (stcb->asoc.ss_functions.sctp_ss_get_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], 1842 &av->stream_value) < 0) { 1843 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1844 error = EINVAL; 1845 } else { 1846 *optsize = sizeof(*av); 1847 } 1848 SCTP_TCB_UNLOCK(stcb); 
1849 } else { 1850 /* 1851 * Can't get stream value without 1852 * association 1853 */ 1854 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1855 error = EINVAL; 1856 } 1857 } 1858 break; 1859 case SCTP_GET_ADDR_LEN: 1860 { 1861 struct sctp_assoc_value *av; 1862 1863 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1864 error = EINVAL; 1865 #ifdef INET 1866 if (av->assoc_value == AF_INET) { 1867 av->assoc_value = sizeof(struct sockaddr_in); 1868 error = 0; 1869 } 1870 #endif 1871 #ifdef INET6 1872 if (av->assoc_value == AF_INET6) { 1873 av->assoc_value = sizeof(struct sockaddr_in6); 1874 error = 0; 1875 } 1876 #endif 1877 if (error) { 1878 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1879 } 1880 *optsize = sizeof(*av); 1881 } 1882 break; 1883 case SCTP_GET_ASSOC_NUMBER: 1884 { 1885 uint32_t *value, cnt; 1886 1887 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1888 cnt = 0; 1889 SCTP_INP_RLOCK(inp); 1890 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1891 cnt++; 1892 } 1893 SCTP_INP_RUNLOCK(inp); 1894 *value = cnt; 1895 *optsize = sizeof(uint32_t); 1896 } 1897 break; 1898 1899 case SCTP_GET_ASSOC_ID_LIST: 1900 { 1901 struct sctp_assoc_ids *ids; 1902 unsigned int at, limit; 1903 1904 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 1905 at = 0; 1906 limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t); 1907 SCTP_INP_RLOCK(inp); 1908 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1909 if (at < limit) { 1910 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); 1911 } else { 1912 error = EINVAL; 1913 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1914 break; 1915 } 1916 } 1917 SCTP_INP_RUNLOCK(inp); 1918 ids->gaids_number_of_ids = at; 1919 *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t)); 1920 } 1921 break; 1922 case SCTP_CONTEXT: 1923 { 1924 struct sctp_assoc_value *av; 1925 1926 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1927 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1928 1929 if (stcb) { 1930 av->assoc_value = stcb->asoc.context; 1931 SCTP_TCB_UNLOCK(stcb); 1932 } else { 1933 SCTP_INP_RLOCK(inp); 1934 av->assoc_value = inp->sctp_context; 1935 SCTP_INP_RUNLOCK(inp); 1936 } 1937 *optsize = sizeof(*av); 1938 } 1939 break; 1940 case SCTP_VRF_ID: 1941 { 1942 uint32_t *default_vrfid; 1943 1944 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize); 1945 *default_vrfid = inp->def_vrf_id; 1946 break; 1947 } 1948 case SCTP_GET_ASOC_VRF: 1949 { 1950 struct sctp_assoc_value *id; 1951 1952 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); 1953 SCTP_FIND_STCB(inp, stcb, id->assoc_id); 1954 if (stcb == NULL) { 1955 error = EINVAL; 1956 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1957 break; 1958 } 1959 id->assoc_value = stcb->asoc.vrf_id; 1960 break; 1961 } 1962 case SCTP_GET_VRF_IDS: 1963 { 1964 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1965 error = EOPNOTSUPP; 1966 break; 1967 } 1968 case SCTP_GET_NONCE_VALUES: 1969 { 1970 struct sctp_get_nonce_values *gnv; 1971 1972 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 1973 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 1974 1975 if (stcb) { 1976 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 1977 gnv->gn_local_tag = stcb->asoc.my_vtag; 1978 SCTP_TCB_UNLOCK(stcb); 1979 } else { 1980 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1981 error = ENOTCONN; 1982 } 1983 
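			/*
			 * Note: the size below is reported even when no
			 * association was found and error is ENOTCONN.
			 */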
*optsize = sizeof(*gnv); 1984 } 1985 break; 1986 case SCTP_DELAYED_SACK: 1987 { 1988 struct sctp_sack_info *sack; 1989 1990 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); 1991 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 1992 if (stcb) { 1993 sack->sack_delay = stcb->asoc.delayed_ack; 1994 sack->sack_freq = stcb->asoc.sack_freq; 1995 SCTP_TCB_UNLOCK(stcb); 1996 } else { 1997 SCTP_INP_RLOCK(inp); 1998 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1999 sack->sack_freq = inp->sctp_ep.sctp_sack_freq; 2000 SCTP_INP_RUNLOCK(inp); 2001 } 2002 *optsize = sizeof(*sack); 2003 } 2004 break; 2005 2006 case SCTP_GET_SNDBUF_USE: 2007 { 2008 struct sctp_sockstat *ss; 2009 2010 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 2011 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 2012 2013 if (stcb) { 2014 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 2015 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 2016 stcb->asoc.size_on_all_streams); 2017 SCTP_TCB_UNLOCK(stcb); 2018 } else { 2019 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2020 error = ENOTCONN; 2021 } 2022 *optsize = sizeof(struct sctp_sockstat); 2023 } 2024 break; 2025 case SCTP_MAX_BURST: 2026 { 2027 struct sctp_assoc_value *av; 2028 2029 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 2030 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2031 2032 if (stcb) { 2033 av->assoc_value = stcb->asoc.max_burst; 2034 SCTP_TCB_UNLOCK(stcb); 2035 } else { 2036 SCTP_INP_RLOCK(inp); 2037 av->assoc_value = inp->sctp_ep.max_burst; 2038 SCTP_INP_RUNLOCK(inp); 2039 } 2040 *optsize = sizeof(struct sctp_assoc_value); 2041 2042 } 2043 break; 2044 case SCTP_MAXSEG: 2045 { 2046 struct sctp_assoc_value *av; 2047 int ovh; 2048 2049 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 2050 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2051 2052 if (stcb) { 2053 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 2054 SCTP_TCB_UNLOCK(stcb); 2055 } else { 2056 SCTP_INP_RLOCK(inp); 2057 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2058 ovh = SCTP_MED_OVERHEAD; 2059 } else { 2060 ovh = SCTP_MED_V4_OVERHEAD; 2061 } 2062 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) 2063 av->assoc_value = 0; 2064 else 2065 av->assoc_value = inp->sctp_frag_point - ovh; 2066 SCTP_INP_RUNLOCK(inp); 2067 } 2068 *optsize = sizeof(struct sctp_assoc_value); 2069 } 2070 break; 2071 case SCTP_GET_STAT_LOG: 2072 error = sctp_fill_stat_log(optval, optsize); 2073 break; 2074 case SCTP_EVENTS: 2075 { 2076 struct sctp_event_subscribe *events; 2077 2078 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 2079 memset(events, 0, sizeof(*events)); 2080 SCTP_INP_RLOCK(inp); 2081 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 2082 events->sctp_data_io_event = 1; 2083 2084 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 2085 events->sctp_association_event = 1; 2086 2087 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2088 events->sctp_address_event = 1; 2089 2090 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2091 events->sctp_send_failure_event = 1; 2092 2093 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 2094 events->sctp_peer_error_event = 1; 2095 2096 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 2097 events->sctp_shutdown_event = 1; 2098 2099 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 2100 events->sctp_partial_delivery_event = 1; 2101 2102 
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2103 events->sctp_adaptation_layer_event = 1; 2104 2105 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 2106 events->sctp_authentication_event = 1; 2107 2108 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT)) 2109 events->sctp_sender_dry_event = 1; 2110 2111 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2112 events->sctp_stream_reset_event = 1; 2113 SCTP_INP_RUNLOCK(inp); 2114 *optsize = sizeof(struct sctp_event_subscribe); 2115 } 2116 break; 2117 2118 case SCTP_ADAPTATION_LAYER: 2119 { 2120 uint32_t *value; 2121 2122 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2123 2124 SCTP_INP_RLOCK(inp); 2125 *value = inp->sctp_ep.adaptation_layer_indicator; 2126 SCTP_INP_RUNLOCK(inp); 2127 *optsize = sizeof(uint32_t); 2128 } 2129 break; 2130 case SCTP_SET_INITIAL_DBG_SEQ: 2131 { 2132 uint32_t *value; 2133 2134 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2135 SCTP_INP_RLOCK(inp); 2136 *value = inp->sctp_ep.initial_sequence_debug; 2137 SCTP_INP_RUNLOCK(inp); 2138 *optsize = sizeof(uint32_t); 2139 } 2140 break; 2141 case SCTP_GET_LOCAL_ADDR_SIZE: 2142 { 2143 uint32_t *value; 2144 2145 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2146 SCTP_INP_RLOCK(inp); 2147 *value = sctp_count_max_addresses(inp); 2148 SCTP_INP_RUNLOCK(inp); 2149 *optsize = sizeof(uint32_t); 2150 } 2151 break; 2152 case SCTP_GET_REMOTE_ADDR_SIZE: 2153 { 2154 uint32_t *value; 2155 size_t size; 2156 struct sctp_nets *net; 2157 2158 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2159 /* FIXME MT: change to sctp_assoc_value? */ 2160 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 2161 2162 if (stcb) { 2163 size = 0; 2164 /* Count the sizes */ 2165 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2166 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 2167 size += sizeof(struct sockaddr_in6); 2168 } else { 2169 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 2170 #ifdef INET 2171 case AF_INET: 2172 size += sizeof(struct sockaddr_in); 2173 break; 2174 #endif 2175 #ifdef INET6 2176 case AF_INET6: 2177 size += sizeof(struct sockaddr_in6); 2178 break; 2179 #endif 2180 default: 2181 break; 2182 } 2183 } 2184 } 2185 SCTP_TCB_UNLOCK(stcb); 2186 *value = (uint32_t) size; 2187 } else { 2188 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2189 error = ENOTCONN; 2190 } 2191 *optsize = sizeof(uint32_t); 2192 } 2193 break; 2194 case SCTP_GET_PEER_ADDRESSES: 2195 /* 2196 * Get the address information, an array is passed in to 2197 * fill up we pack it. 
2198 */ 2199 { 2200 size_t cpsz, left; 2201 struct sockaddr_storage *sas; 2202 struct sctp_nets *net; 2203 struct sctp_getaddresses *saddr; 2204 2205 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2206 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2207 2208 if (stcb) { 2209 left = (*optsize) - sizeof(struct sctp_getaddresses); 2210 *optsize = sizeof(struct sctp_getaddresses); 2211 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2212 2213 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2214 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 2215 cpsz = sizeof(struct sockaddr_in6); 2216 } else { 2217 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 2218 #ifdef INET 2219 case AF_INET: 2220 cpsz = sizeof(struct sockaddr_in); 2221 break; 2222 #endif 2223 #ifdef INET6 2224 case AF_INET6: 2225 cpsz = sizeof(struct sockaddr_in6); 2226 break; 2227 #endif 2228 default: 2229 cpsz = 0; 2230 break; 2231 } 2232 } 2233 if (cpsz == 0) { 2234 break; 2235 } 2236 if (left < cpsz) { 2237 /* not enough room. */ 2238 break; 2239 } 2240 #if defined(INET) && defined(INET6) 2241 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 2242 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 2243 /* Must map the address */ 2244 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 2245 (struct sockaddr_in6 *)sas); 2246 } else { 2247 #endif 2248 memcpy(sas, &net->ro._l_addr, cpsz); 2249 #if defined(INET) && defined(INET6) 2250 } 2251 #endif 2252 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 2253 2254 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 2255 left -= cpsz; 2256 *optsize += cpsz; 2257 } 2258 SCTP_TCB_UNLOCK(stcb); 2259 } else { 2260 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2261 error = ENOENT; 2262 } 2263 } 2264 break; 2265 case SCTP_GET_LOCAL_ADDRESSES: 2266 { 2267 size_t limit, actual; 2268 struct sockaddr_storage *sas; 2269 struct sctp_getaddresses *saddr; 2270 2271 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2272 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2273 2274 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2275 limit = *optsize - sizeof(sctp_assoc_t); 2276 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2277 if (stcb) { 2278 SCTP_TCB_UNLOCK(stcb); 2279 } 2280 *optsize = sizeof(struct sockaddr_storage) + actual; 2281 } 2282 break; 2283 case SCTP_PEER_ADDR_PARAMS: 2284 { 2285 struct sctp_paddrparams *paddrp; 2286 struct sctp_nets *net; 2287 2288 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2289 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2290 2291 net = NULL; 2292 if (stcb) { 2293 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2294 } else { 2295 /* 2296 * We increment here since 2297 * sctp_findassociation_ep_addr() wil do a 2298 * decrement if it finds the stcb as long as 2299 * the locked tcb (last argument) is NOT a 2300 * TCB.. aka NULL. 
2301 */ 2302 SCTP_INP_INCR_REF(inp); 2303 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2304 if (stcb == NULL) { 2305 SCTP_INP_DECR_REF(inp); 2306 } 2307 } 2308 if (stcb && (net == NULL)) { 2309 struct sockaddr *sa; 2310 2311 sa = (struct sockaddr *)&paddrp->spp_address; 2312 #ifdef INET 2313 if (sa->sa_family == AF_INET) { 2314 struct sockaddr_in *sin; 2315 2316 sin = (struct sockaddr_in *)sa; 2317 if (sin->sin_addr.s_addr) { 2318 error = EINVAL; 2319 SCTP_TCB_UNLOCK(stcb); 2320 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2321 break; 2322 } 2323 } else 2324 #endif 2325 #ifdef INET6 2326 if (sa->sa_family == AF_INET6) { 2327 struct sockaddr_in6 *sin6; 2328 2329 sin6 = (struct sockaddr_in6 *)sa; 2330 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2331 error = EINVAL; 2332 SCTP_TCB_UNLOCK(stcb); 2333 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2334 break; 2335 } 2336 } else 2337 #endif 2338 { 2339 error = EAFNOSUPPORT; 2340 SCTP_TCB_UNLOCK(stcb); 2341 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2342 break; 2343 } 2344 } 2345 if (stcb) { 2346 /* Applys to the specific association */ 2347 paddrp->spp_flags = 0; 2348 if (net) { 2349 int ovh; 2350 2351 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2352 ovh = SCTP_MED_OVERHEAD; 2353 } else { 2354 ovh = SCTP_MED_V4_OVERHEAD; 2355 } 2356 2357 2358 paddrp->spp_pathmaxrxt = net->failure_threshold; 2359 paddrp->spp_pathmtu = net->mtu - ovh; 2360 /* get flags for HB */ 2361 if (net->dest_state & SCTP_ADDR_NOHB) 2362 paddrp->spp_flags |= SPP_HB_DISABLE; 2363 else 2364 paddrp->spp_flags |= SPP_HB_ENABLE; 2365 /* get flags for PMTU */ 2366 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2367 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2368 } else { 2369 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2370 } 2371 #ifdef INET 2372 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2373 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2374 paddrp->spp_flags |= SPP_IPV4_TOS; 2375 } 2376 #endif 2377 #ifdef INET6 2378 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2379 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2380 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2381 } 2382 #endif 2383 } else { 2384 /* 2385 * No destination so return default 2386 * value 2387 */ 2388 int cnt = 0; 2389 2390 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2391 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2392 #ifdef INET 2393 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2394 paddrp->spp_flags |= SPP_IPV4_TOS; 2395 #endif 2396 #ifdef INET6 2397 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2398 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2399 #endif 2400 /* default settings should be these */ 2401 if (stcb->asoc.hb_is_disabled == 0) { 2402 paddrp->spp_flags |= SPP_HB_ENABLE; 2403 } else { 2404 paddrp->spp_flags |= SPP_HB_DISABLE; 2405 } 2406 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2407 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2408 cnt++; 2409 } 2410 } 2411 if (cnt) { 2412 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2413 } 2414 } 2415 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2416 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2417 SCTP_TCB_UNLOCK(stcb); 2418 } else { 2419 /* Use endpoint defaults */ 2420 SCTP_INP_RLOCK(inp); 2421 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2422 paddrp->spp_hbinterval = 
TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2423 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2424 /* get inp's default */ 2425 #ifdef INET 2426 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2427 paddrp->spp_flags |= SPP_IPV4_TOS; 2428 #endif 2429 #ifdef INET6 2430 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2431 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2432 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2433 } 2434 #endif 2435 /* can't return this */ 2436 paddrp->spp_pathmtu = 0; 2437 2438 /* default behavior, no stcb */ 2439 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2440 2441 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2442 paddrp->spp_flags |= SPP_HB_ENABLE; 2443 } else { 2444 paddrp->spp_flags |= SPP_HB_DISABLE; 2445 } 2446 SCTP_INP_RUNLOCK(inp); 2447 } 2448 *optsize = sizeof(struct sctp_paddrparams); 2449 } 2450 break; 2451 case SCTP_GET_PEER_ADDR_INFO: 2452 { 2453 struct sctp_paddrinfo *paddri; 2454 struct sctp_nets *net; 2455 2456 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2457 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2458 2459 net = NULL; 2460 if (stcb) { 2461 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2462 } else { 2463 /* 2464 * We increment here since 2465 * sctp_findassociation_ep_addr() wil do a 2466 * decrement if it finds the stcb as long as 2467 * the locked tcb (last argument) is NOT a 2468 * TCB.. aka NULL. 2469 */ 2470 SCTP_INP_INCR_REF(inp); 2471 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2472 if (stcb == NULL) { 2473 SCTP_INP_DECR_REF(inp); 2474 } 2475 } 2476 2477 if ((stcb) && (net)) { 2478 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 2479 /* It's unconfirmed */ 2480 paddri->spinfo_state = SCTP_UNCONFIRMED; 2481 } else if (net->dest_state & SCTP_ADDR_REACHABLE) { 2482 /* It's active */ 2483 paddri->spinfo_state = SCTP_ACTIVE; 2484 } else { 2485 /* It's inactive */ 2486 paddri->spinfo_state = SCTP_INACTIVE; 2487 } 2488 paddri->spinfo_cwnd = net->cwnd; 2489 paddri->spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT; 2490 paddri->spinfo_rto = net->RTO; 2491 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2492 paddri->spinfo_mtu = net->mtu; 2493 SCTP_TCB_UNLOCK(stcb); 2494 } else { 2495 if (stcb) { 2496 SCTP_TCB_UNLOCK(stcb); 2497 } 2498 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2499 error = ENOENT; 2500 } 2501 *optsize = sizeof(struct sctp_paddrinfo); 2502 } 2503 break; 2504 case SCTP_PCB_STATUS: 2505 { 2506 struct sctp_pcbinfo *spcb; 2507 2508 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2509 sctp_fill_pcbinfo(spcb); 2510 *optsize = sizeof(struct sctp_pcbinfo); 2511 } 2512 break; 2513 2514 case SCTP_STATUS: 2515 { 2516 struct sctp_nets *net; 2517 struct sctp_status *sstat; 2518 2519 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2520 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2521 2522 if (stcb == NULL) { 2523 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2524 error = EINVAL; 2525 break; 2526 } 2527 /* 2528 * I think passing the state is fine since 2529 * sctp_constants.h will be available to the user 2530 * land. 
2531 */ 2532 sstat->sstat_state = stcb->asoc.state; 2533 sstat->sstat_assoc_id = sctp_get_associd(stcb); 2534 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2535 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2536 /* 2537 * We can't include chunks that have been passed to 2538 * the socket layer. Only things in queue. 2539 */ 2540 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2541 stcb->asoc.cnt_on_all_streams); 2542 2543 2544 sstat->sstat_instrms = stcb->asoc.streamincnt; 2545 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2546 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2547 memcpy(&sstat->sstat_primary.spinfo_address, 2548 &stcb->asoc.primary_destination->ro._l_addr, 2549 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2550 net = stcb->asoc.primary_destination; 2551 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2552 /* 2553 * Again the user can get info from sctp_constants.h 2554 * for what the state of the network is. 2555 */ 2556 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 2557 /* It's unconfirmed */ 2558 sstat->sstat_primary.spinfo_state = SCTP_UNCONFIRMED; 2559 } else if (net->dest_state & SCTP_ADDR_REACHABLE) { 2560 /* It's active */ 2561 sstat->sstat_primary.spinfo_state = SCTP_ACTIVE; 2562 } else { 2563 /* It's inactive */ 2564 sstat->sstat_primary.spinfo_state = SCTP_INACTIVE; 2565 } 2566 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2567 sstat->sstat_primary.spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT; 2568 sstat->sstat_primary.spinfo_rto = net->RTO; 2569 sstat->sstat_primary.spinfo_mtu = net->mtu; 2570 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2571 SCTP_TCB_UNLOCK(stcb); 2572 *optsize = sizeof(*sstat); 2573 } 2574 break; 2575 case SCTP_RTOINFO: 2576 { 2577 struct sctp_rtoinfo *srto; 2578 2579 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2580 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2581 2582 if (stcb) { 2583 srto->srto_initial = stcb->asoc.initial_rto; 2584 srto->srto_max = stcb->asoc.maxrto; 2585 srto->srto_min = stcb->asoc.minrto; 2586 SCTP_TCB_UNLOCK(stcb); 2587 } else { 2588 SCTP_INP_RLOCK(inp); 2589 srto->srto_initial = inp->sctp_ep.initial_rto; 2590 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2591 srto->srto_min = inp->sctp_ep.sctp_minrto; 2592 SCTP_INP_RUNLOCK(inp); 2593 } 2594 *optsize = sizeof(*srto); 2595 } 2596 break; 2597 case SCTP_TIMEOUTS: 2598 { 2599 struct sctp_timeouts *stimo; 2600 2601 SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize); 2602 SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id); 2603 2604 if (stcb) { 2605 stimo->stimo_init = stcb->asoc.timoinit; 2606 stimo->stimo_data = stcb->asoc.timodata; 2607 stimo->stimo_sack = stcb->asoc.timosack; 2608 stimo->stimo_shutdown = stcb->asoc.timoshutdown; 2609 stimo->stimo_heartbeat = stcb->asoc.timoheartbeat; 2610 stimo->stimo_cookie = stcb->asoc.timocookie; 2611 stimo->stimo_shutdownack = stcb->asoc.timoshutdownack; 2612 SCTP_TCB_UNLOCK(stcb); 2613 } else { 2614 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2615 error = EINVAL; 2616 } 2617 *optsize = sizeof(*stimo); 2618 } 2619 break; 2620 case SCTP_ASSOCINFO: 2621 { 2622 struct sctp_assocparams *sasoc; 2623 uint32_t oldval; 2624 2625 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2626 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2627 2628 if (stcb) { 2629 oldval = sasoc->sasoc_cookie_life; 2630 sasoc->sasoc_cookie_life = 
TICKS_TO_MSEC(stcb->asoc.cookie_life); 2631 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2632 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2633 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2634 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2635 SCTP_TCB_UNLOCK(stcb); 2636 } else { 2637 SCTP_INP_RLOCK(inp); 2638 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2639 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2640 sasoc->sasoc_number_peer_destinations = 0; 2641 sasoc->sasoc_peer_rwnd = 0; 2642 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2643 SCTP_INP_RUNLOCK(inp); 2644 } 2645 *optsize = sizeof(*sasoc); 2646 } 2647 break; 2648 case SCTP_DEFAULT_SEND_PARAM: 2649 { 2650 struct sctp_sndrcvinfo *s_info; 2651 2652 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2653 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2654 2655 if (stcb) { 2656 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2657 SCTP_TCB_UNLOCK(stcb); 2658 } else { 2659 SCTP_INP_RLOCK(inp); 2660 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2661 SCTP_INP_RUNLOCK(inp); 2662 } 2663 *optsize = sizeof(*s_info); 2664 } 2665 break; 2666 case SCTP_INITMSG: 2667 { 2668 struct sctp_initmsg *sinit; 2669 2670 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2671 SCTP_INP_RLOCK(inp); 2672 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2673 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2674 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2675 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2676 SCTP_INP_RUNLOCK(inp); 2677 *optsize = sizeof(*sinit); 2678 } 2679 break; 2680 case SCTP_PRIMARY_ADDR: 2681 /* we allow a "get" operation on this */ 2682 { 2683 struct sctp_setprim *ssp; 2684 2685 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2686 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2687 2688 if (stcb) { 2689 /* simply copy out the sockaddr_storage... */ 2690 int len; 2691 2692 len = *optsize; 2693 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2694 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2695 2696 memcpy(&ssp->ssp_addr, 2697 &stcb->asoc.primary_destination->ro._l_addr, 2698 len); 2699 SCTP_TCB_UNLOCK(stcb); 2700 } else { 2701 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2702 error = EINVAL; 2703 } 2704 *optsize = sizeof(*ssp); 2705 } 2706 break; 2707 2708 case SCTP_HMAC_IDENT: 2709 { 2710 struct sctp_hmacalgo *shmac; 2711 sctp_hmaclist_t *hmaclist; 2712 uint32_t size; 2713 int i; 2714 2715 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2716 2717 SCTP_INP_RLOCK(inp); 2718 hmaclist = inp->sctp_ep.local_hmacs; 2719 if (hmaclist == NULL) { 2720 /* no HMACs to return */ 2721 *optsize = sizeof(*shmac); 2722 SCTP_INP_RUNLOCK(inp); 2723 break; 2724 } 2725 /* is there room for all of the hmac ids? 
*/ 2726 size = sizeof(*shmac) + (hmaclist->num_algo * 2727 sizeof(shmac->shmac_idents[0])); 2728 if ((size_t)(*optsize) < size) { 2729 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2730 error = EINVAL; 2731 SCTP_INP_RUNLOCK(inp); 2732 break; 2733 } 2734 /* copy in the list */ 2735 shmac->shmac_number_of_idents = hmaclist->num_algo; 2736 for (i = 0; i < hmaclist->num_algo; i++) { 2737 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2738 } 2739 SCTP_INP_RUNLOCK(inp); 2740 *optsize = size; 2741 break; 2742 } 2743 case SCTP_AUTH_ACTIVE_KEY: 2744 { 2745 struct sctp_authkeyid *scact; 2746 2747 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2748 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2749 2750 if (stcb) { 2751 /* get the active key on the assoc */ 2752 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; 2753 SCTP_TCB_UNLOCK(stcb); 2754 } else { 2755 /* get the endpoint active key */ 2756 SCTP_INP_RLOCK(inp); 2757 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2758 SCTP_INP_RUNLOCK(inp); 2759 } 2760 *optsize = sizeof(*scact); 2761 break; 2762 } 2763 case SCTP_LOCAL_AUTH_CHUNKS: 2764 { 2765 struct sctp_authchunks *sac; 2766 sctp_auth_chklist_t *chklist = NULL; 2767 size_t size = 0; 2768 2769 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2770 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2771 2772 if (stcb) { 2773 /* get off the assoc */ 2774 chklist = stcb->asoc.local_auth_chunks; 2775 /* is there enough space? */ 2776 size = sctp_auth_get_chklist_size(chklist); 2777 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2778 error = EINVAL; 2779 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2780 } else { 2781 /* copy in the chunks */ 2782 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2783 } 2784 SCTP_TCB_UNLOCK(stcb); 2785 } else { 2786 /* get off the endpoint */ 2787 SCTP_INP_RLOCK(inp); 2788 chklist = inp->sctp_ep.local_auth_chunks; 2789 /* is there enough space? */ 2790 size = sctp_auth_get_chklist_size(chklist); 2791 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2792 error = EINVAL; 2793 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2794 } else { 2795 /* copy in the chunks */ 2796 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2797 } 2798 SCTP_INP_RUNLOCK(inp); 2799 } 2800 *optsize = sizeof(struct sctp_authchunks) + size; 2801 break; 2802 } 2803 case SCTP_PEER_AUTH_CHUNKS: 2804 { 2805 struct sctp_authchunks *sac; 2806 sctp_auth_chklist_t *chklist = NULL; 2807 size_t size = 0; 2808 2809 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2810 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2811 2812 if (stcb) { 2813 /* get off the assoc */ 2814 chklist = stcb->asoc.peer_auth_chunks; 2815 /* is there enough space? 
*/ 2816 size = sctp_auth_get_chklist_size(chklist); 2817 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2818 error = EINVAL; 2819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2820 } else { 2821 /* copy in the chunks */ 2822 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2823 } 2824 SCTP_TCB_UNLOCK(stcb); 2825 } else { 2826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2827 error = ENOENT; 2828 } 2829 *optsize = sizeof(struct sctp_authchunks) + size; 2830 break; 2831 } 2832 2833 2834 default: 2835 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2836 error = ENOPROTOOPT; 2837 *optsize = 0; 2838 break; 2839 } /* end switch (sopt->sopt_name) */ 2840 return (error); 2841 } 2842 2843 static int 2844 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2845 void *p) 2846 { 2847 int error, set_opt; 2848 uint32_t *mopt; 2849 struct sctp_tcb *stcb = NULL; 2850 struct sctp_inpcb *inp = NULL; 2851 uint32_t vrf_id; 2852 2853 if (optval == NULL) { 2854 SCTP_PRINTF("optval is NULL\n"); 2855 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2856 return (EINVAL); 2857 } 2858 inp = (struct sctp_inpcb *)so->so_pcb; 2859 if (inp == 0) { 2860 SCTP_PRINTF("inp is NULL?\n"); 2861 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2862 return EINVAL; 2863 } 2864 vrf_id = inp->def_vrf_id; 2865 2866 error = 0; 2867 switch (optname) { 2868 case SCTP_NODELAY: 2869 case SCTP_AUTOCLOSE: 2870 case SCTP_AUTO_ASCONF: 2871 case SCTP_EXPLICIT_EOR: 2872 case SCTP_DISABLE_FRAGMENTS: 2873 case SCTP_USE_EXT_RCVINFO: 2874 case SCTP_I_WANT_MAPPED_V4_ADDR: 2875 /* copy in the option value */ 2876 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2877 set_opt = 0; 2878 if (error) 2879 break; 2880 switch (optname) { 2881 case SCTP_DISABLE_FRAGMENTS: 2882 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2883 break; 2884 case SCTP_AUTO_ASCONF: 2885 /* 2886 * NOTE: we don't really support this flag 2887 */ 2888 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2889 /* only valid for bound all sockets */ 2890 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2891 } else { 2892 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2893 return (EINVAL); 2894 } 2895 break; 2896 case SCTP_EXPLICIT_EOR: 2897 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2898 break; 2899 case SCTP_USE_EXT_RCVINFO: 2900 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2901 break; 2902 case SCTP_I_WANT_MAPPED_V4_ADDR: 2903 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2904 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2905 } else { 2906 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2907 return (EINVAL); 2908 } 2909 break; 2910 case SCTP_NODELAY: 2911 set_opt = SCTP_PCB_FLAGS_NODELAY; 2912 break; 2913 case SCTP_AUTOCLOSE: 2914 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2915 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2916 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2917 return (EINVAL); 2918 } 2919 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2920 /* 2921 * The value is in ticks. Note this does not effect 2922 * old associations, only new ones. 
2923 */ 2924 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2925 break; 2926 } 2927 SCTP_INP_WLOCK(inp); 2928 if (*mopt != 0) { 2929 sctp_feature_on(inp, set_opt); 2930 } else { 2931 sctp_feature_off(inp, set_opt); 2932 } 2933 SCTP_INP_WUNLOCK(inp); 2934 break; 2935 case SCTP_REUSE_PORT: 2936 { 2937 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2938 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 2939 /* Can't set it after we are bound */ 2940 error = EINVAL; 2941 break; 2942 } 2943 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2944 /* Can't do this for a 1-m socket */ 2945 error = EINVAL; 2946 break; 2947 } 2948 if (optval) 2949 sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 2950 else 2951 sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE); 2952 } 2953 break; 2954 case SCTP_PARTIAL_DELIVERY_POINT: 2955 { 2956 uint32_t *value; 2957 2958 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2959 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2960 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2961 error = EINVAL; 2962 break; 2963 } 2964 inp->partial_delivery_point = *value; 2965 } 2966 break; 2967 case SCTP_FRAGMENT_INTERLEAVE: 2968 /* not yet until we re-write sctp_recvmsg() */ 2969 { 2970 uint32_t *level; 2971 2972 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); 2973 if (*level == SCTP_FRAG_LEVEL_2) { 2974 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2975 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2976 } else if (*level == SCTP_FRAG_LEVEL_1) { 2977 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2978 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2979 } else if (*level == SCTP_FRAG_LEVEL_0) { 2980 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2981 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2982 2983 } else { 2984 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2985 error = EINVAL; 2986 } 2987 } 2988 break; 2989 case SCTP_CMT_ON_OFF: 2990 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 2991 struct sctp_assoc_value *av; 2992 2993 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2994 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2995 if (stcb) { 2996 if (av->assoc_value > SCTP_CMT_MAX) { 2997 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2998 error = EINVAL; 2999 } else { 3000 stcb->asoc.sctp_cmt_on_off = av->assoc_value; 3001 } 3002 SCTP_TCB_UNLOCK(stcb); 3003 } else { 3004 if (av->assoc_value > SCTP_CMT_MAX) { 3005 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3006 error = EINVAL; 3007 } else { 3008 SCTP_INP_WLOCK(inp); 3009 inp->sctp_cmt_on_off = av->assoc_value; 3010 SCTP_INP_WUNLOCK(inp); 3011 } 3012 } 3013 } else { 3014 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 3015 error = ENOPROTOOPT; 3016 } 3017 break; 3018 /* JRS - Set socket option for pluggable congestion control */ 3019 case SCTP_PLUGGABLE_CC: 3020 { 3021 struct sctp_assoc_value *av; 3022 struct sctp_nets *net; 3023 3024 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3025 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3026 if (stcb) { 3027 switch (av->assoc_value) { 3028 case SCTP_CC_RFC2581: 3029 case SCTP_CC_HSTCP: 3030 case SCTP_CC_HTCP: 3031 case SCTP_CC_RTCC: 3032 stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value]; 3033 stcb->asoc.congestion_control_module = av->assoc_value; 3034 if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) { 3035 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 
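/*
 * Apply the newly selected module's initial-parameter hook to every
 * existing destination, so per-path congestion state is set up by
 * the new module instead of being left over from the previous one.
 */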
3036 stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); 3037 } 3038 } 3039 break; 3040 default: 3041 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3042 error = EINVAL; 3043 break; 3044 } 3045 SCTP_TCB_UNLOCK(stcb); 3046 } else { 3047 switch (av->assoc_value) { 3048 case SCTP_CC_RFC2581: 3049 case SCTP_CC_HSTCP: 3050 case SCTP_CC_HTCP: 3051 case SCTP_CC_RTCC: 3052 SCTP_INP_WLOCK(inp); 3053 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 3054 SCTP_INP_WUNLOCK(inp); 3055 break; 3056 default: 3057 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3058 error = EINVAL; 3059 break; 3060 } 3061 } 3062 } 3063 break; 3064 case SCTP_CC_OPTION: 3065 { 3066 struct sctp_cc_option *cc_opt; 3067 3068 SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, optsize); 3069 SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id); 3070 if (stcb == NULL) { 3071 error = EINVAL; 3072 } else { 3073 if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) { 3074 error = ENOTSUP; 3075 } else { 3076 error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 1, 3077 cc_opt); 3078 } 3079 SCTP_TCB_UNLOCK(stcb); 3080 } 3081 } 3082 break; 3083 /* RS - Set socket option for pluggable stream scheduling */ 3084 case SCTP_PLUGGABLE_SS: 3085 { 3086 struct sctp_assoc_value *av; 3087 3088 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3089 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3090 if (stcb) { 3091 switch (av->assoc_value) { 3092 case SCTP_SS_DEFAULT: 3093 case SCTP_SS_ROUND_ROBIN: 3094 case SCTP_SS_ROUND_ROBIN_PACKET: 3095 case SCTP_SS_PRIORITY: 3096 case SCTP_SS_FAIR_BANDWITH: 3097 case SCTP_SS_FIRST_COME: 3098 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1); 3099 stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value]; 3100 stcb->asoc.stream_scheduling_module = av->assoc_value; 3101 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); 3102 break; 3103 default: 3104 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3105 error = EINVAL; 3106 break; 3107 } 3108 SCTP_TCB_UNLOCK(stcb); 3109 } else { 3110 switch (av->assoc_value) { 3111 case SCTP_SS_DEFAULT: 3112 case SCTP_SS_ROUND_ROBIN: 3113 case SCTP_SS_ROUND_ROBIN_PACKET: 3114 case SCTP_SS_PRIORITY: 3115 case SCTP_SS_FAIR_BANDWITH: 3116 case SCTP_SS_FIRST_COME: 3117 SCTP_INP_WLOCK(inp); 3118 inp->sctp_ep.sctp_default_ss_module = av->assoc_value; 3119 SCTP_INP_WUNLOCK(inp); 3120 break; 3121 default: 3122 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3123 error = EINVAL; 3124 break; 3125 } 3126 } 3127 } 3128 break; 3129 case SCTP_SS_VALUE: 3130 { 3131 struct sctp_stream_value *av; 3132 3133 SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, optsize); 3134 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3135 if (stcb) { 3136 if (stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], 3137 av->stream_value) < 0) { 3138 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3139 error = EINVAL; 3140 } 3141 SCTP_TCB_UNLOCK(stcb); 3142 } else { 3143 /* 3144 * Can't set stream value without 3145 * association 3146 */ 3147 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3148 error = EINVAL; 3149 } 3150 } 3151 break; 3152 case SCTP_CLR_STAT_LOG: 3153 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3154 error = EOPNOTSUPP; 3155 break; 3156 case SCTP_CONTEXT: 3157 { 3158 struct sctp_assoc_value *av; 3159 3160 
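/*
 * Same pattern as the other assoc_value options: validate/cast the
 * user buffer, then either store on the association or, when no
 * assoc id is given, on the endpoint default under the INP write
 * lock.  A minimal userland sketch of tagging an association with an
 * opaque context value (hypothetical fd and assoc_id variables):
 *
 *	struct sctp_assoc_value av;
 *
 *	av.assoc_id = assoc_id;
 *	av.assoc_value = 0x12345678;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_CONTEXT, &av, sizeof(av));
 */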
SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3161 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3162 3163 if (stcb) { 3164 stcb->asoc.context = av->assoc_value; 3165 SCTP_TCB_UNLOCK(stcb); 3166 } else { 3167 SCTP_INP_WLOCK(inp); 3168 inp->sctp_context = av->assoc_value; 3169 SCTP_INP_WUNLOCK(inp); 3170 } 3171 } 3172 break; 3173 case SCTP_VRF_ID: 3174 { 3175 uint32_t *default_vrfid; 3176 3177 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 3178 if (*default_vrfid > SCTP_MAX_VRF_ID) { 3179 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3180 error = EINVAL; 3181 break; 3182 } 3183 inp->def_vrf_id = *default_vrfid; 3184 break; 3185 } 3186 case SCTP_DEL_VRF_ID: 3187 { 3188 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3189 error = EOPNOTSUPP; 3190 break; 3191 } 3192 case SCTP_ADD_VRF_ID: 3193 { 3194 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 3195 error = EOPNOTSUPP; 3196 break; 3197 } 3198 case SCTP_DELAYED_SACK: 3199 { 3200 struct sctp_sack_info *sack; 3201 3202 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 3203 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 3204 if (sack->sack_delay) { 3205 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 3206 sack->sack_delay = SCTP_MAX_SACK_DELAY; 3207 } 3208 if (stcb) { 3209 if (sack->sack_delay) { 3210 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3211 sack->sack_delay = TICKS_TO_MSEC(1); 3212 } 3213 stcb->asoc.delayed_ack = sack->sack_delay; 3214 } 3215 if (sack->sack_freq) { 3216 stcb->asoc.sack_freq = sack->sack_freq; 3217 } 3218 SCTP_TCB_UNLOCK(stcb); 3219 } else { 3220 SCTP_INP_WLOCK(inp); 3221 if (sack->sack_delay) { 3222 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3223 sack->sack_delay = TICKS_TO_MSEC(1); 3224 } 3225 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 3226 } 3227 if (sack->sack_freq) { 3228 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 3229 } 3230 SCTP_INP_WUNLOCK(inp); 3231 } 3232 break; 3233 } 3234 case SCTP_AUTH_CHUNK: 3235 { 3236 struct sctp_authchunk *sauth; 3237 3238 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 3239 3240 SCTP_INP_WLOCK(inp); 3241 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 3242 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3243 error = EINVAL; 3244 } 3245 SCTP_INP_WUNLOCK(inp); 3246 break; 3247 } 3248 case SCTP_AUTH_KEY: 3249 { 3250 struct sctp_authkey *sca; 3251 struct sctp_keyhead *shared_keys; 3252 sctp_sharedkey_t *shared_key; 3253 sctp_key_t *key = NULL; 3254 size_t size; 3255 3256 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 3257 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 3258 size = optsize - sizeof(*sca); 3259 3260 if (stcb) { 3261 /* set it on the assoc */ 3262 shared_keys = &stcb->asoc.shared_keys; 3263 /* clear the cached keys for this key id */ 3264 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 3265 /* 3266 * create the new shared key and 3267 * insert/replace it 3268 */ 3269 if (size > 0) { 3270 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3271 if (key == NULL) { 3272 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3273 error = ENOMEM; 3274 SCTP_TCB_UNLOCK(stcb); 3275 break; 3276 } 3277 } 3278 shared_key = sctp_alloc_sharedkey(); 3279 if (shared_key == NULL) { 3280 sctp_free_key(key); 3281 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3282 error = ENOMEM; 3283 SCTP_TCB_UNLOCK(stcb); 3284 
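/*
 * The TCB lock taken by SCTP_FIND_STCB must be dropped on every
 * error exit before breaking out of the switch; returning to the
 * caller with the association still locked would leak the lock.
 */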
break; 3285 } 3286 shared_key->key = key; 3287 shared_key->keyid = sca->sca_keynumber; 3288 error = sctp_insert_sharedkey(shared_keys, shared_key); 3289 SCTP_TCB_UNLOCK(stcb); 3290 } else { 3291 /* set it on the endpoint */ 3292 SCTP_INP_WLOCK(inp); 3293 shared_keys = &inp->sctp_ep.shared_keys; 3294 /* 3295 * clear the cached keys on all assocs for 3296 * this key id 3297 */ 3298 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 3299 /* 3300 * create the new shared key and 3301 * insert/replace it 3302 */ 3303 if (size > 0) { 3304 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3305 if (key == NULL) { 3306 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3307 error = ENOMEM; 3308 SCTP_INP_WUNLOCK(inp); 3309 break; 3310 } 3311 } 3312 shared_key = sctp_alloc_sharedkey(); 3313 if (shared_key == NULL) { 3314 sctp_free_key(key); 3315 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3316 error = ENOMEM; 3317 SCTP_INP_WUNLOCK(inp); 3318 break; 3319 } 3320 shared_key->key = key; 3321 shared_key->keyid = sca->sca_keynumber; 3322 error = sctp_insert_sharedkey(shared_keys, shared_key); 3323 SCTP_INP_WUNLOCK(inp); 3324 } 3325 break; 3326 } 3327 case SCTP_HMAC_IDENT: 3328 { 3329 struct sctp_hmacalgo *shmac; 3330 sctp_hmaclist_t *hmaclist; 3331 uint16_t hmacid; 3332 uint32_t i; 3333 3334 size_t found; 3335 3336 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3337 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) { 3338 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3339 error = EINVAL; 3340 break; 3341 } 3342 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents); 3343 if (hmaclist == NULL) { 3344 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3345 error = ENOMEM; 3346 break; 3347 } 3348 for (i = 0; i < shmac->shmac_number_of_idents; i++) { 3349 hmacid = shmac->shmac_idents[i]; 3350 if (sctp_auth_add_hmacid(hmaclist, hmacid)) { 3351 /* invalid HMACs were found */ ; 3352 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3353 error = EINVAL; 3354 sctp_free_hmaclist(hmaclist); 3355 goto sctp_set_hmac_done; 3356 } 3357 } 3358 found = 0; 3359 for (i = 0; i < hmaclist->num_algo; i++) { 3360 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 3361 /* already in list */ 3362 found = 1; 3363 } 3364 } 3365 if (!found) { 3366 sctp_free_hmaclist(hmaclist); 3367 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3368 error = EINVAL; 3369 break; 3370 } 3371 /* set it on the endpoint */ 3372 SCTP_INP_WLOCK(inp); 3373 if (inp->sctp_ep.local_hmacs) 3374 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3375 inp->sctp_ep.local_hmacs = hmaclist; 3376 SCTP_INP_WUNLOCK(inp); 3377 sctp_set_hmac_done: 3378 break; 3379 } 3380 case SCTP_AUTH_ACTIVE_KEY: 3381 { 3382 struct sctp_authkeyid *scact; 3383 3384 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, 3385 optsize); 3386 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3387 3388 /* set the active key on the right place */ 3389 if (stcb) { 3390 /* set the active key on the assoc */ 3391 if (sctp_auth_setactivekey(stcb, 3392 scact->scact_keynumber)) { 3393 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3394 SCTP_FROM_SCTP_USRREQ, 3395 EINVAL); 3396 error = EINVAL; 3397 } 3398 SCTP_TCB_UNLOCK(stcb); 3399 } else { 3400 /* set the active key on the endpoint */ 3401 SCTP_INP_WLOCK(inp); 3402 if (sctp_auth_setactivekey_ep(inp, 3403 scact->scact_keynumber)) { 3404 SCTP_LTRACE_ERR_RET(inp, NULL, 
NULL, 3405 SCTP_FROM_SCTP_USRREQ, 3406 EINVAL); 3407 error = EINVAL; 3408 } 3409 SCTP_INP_WUNLOCK(inp); 3410 } 3411 break; 3412 } 3413 case SCTP_AUTH_DELETE_KEY: 3414 { 3415 struct sctp_authkeyid *scdel; 3416 3417 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, 3418 optsize); 3419 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3420 3421 /* delete the key from the right place */ 3422 if (stcb) { 3423 if (sctp_delete_sharedkey(stcb, 3424 scdel->scact_keynumber)) { 3425 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3426 SCTP_FROM_SCTP_USRREQ, 3427 EINVAL); 3428 error = EINVAL; 3429 } 3430 SCTP_TCB_UNLOCK(stcb); 3431 } else { 3432 SCTP_INP_WLOCK(inp); 3433 if (sctp_delete_sharedkey_ep(inp, 3434 scdel->scact_keynumber)) { 3435 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3436 SCTP_FROM_SCTP_USRREQ, 3437 EINVAL); 3438 error = EINVAL; 3439 } 3440 SCTP_INP_WUNLOCK(inp); 3441 } 3442 break; 3443 } 3444 case SCTP_AUTH_DEACTIVATE_KEY: 3445 { 3446 struct sctp_authkeyid *keyid; 3447 3448 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, 3449 optsize); 3450 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); 3451 3452 /* deactivate the key from the right place */ 3453 if (stcb) { 3454 if (sctp_deact_sharedkey(stcb, 3455 keyid->scact_keynumber)) { 3456 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3457 SCTP_FROM_SCTP_USRREQ, 3458 EINVAL); 3459 error = EINVAL; 3460 } 3461 SCTP_TCB_UNLOCK(stcb); 3462 } else { 3463 SCTP_INP_WLOCK(inp); 3464 if (sctp_deact_sharedkey_ep(inp, 3465 keyid->scact_keynumber)) { 3466 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3467 SCTP_FROM_SCTP_USRREQ, 3468 EINVAL); 3469 error = EINVAL; 3470 } 3471 SCTP_INP_WUNLOCK(inp); 3472 } 3473 break; 3474 } 3475 3476 case SCTP_RESET_STREAMS: 3477 { 3478 struct sctp_stream_reset *strrst; 3479 uint8_t send_in = 0, send_tsn = 0, send_out = 0, 3480 addstream = 0; 3481 uint16_t addstrmcnt = 0; 3482 int i; 3483 3484 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3485 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3486 3487 if (stcb == NULL) { 3488 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3489 error = ENOENT; 3490 break; 3491 } 3492 if (stcb->asoc.peer_supports_strreset == 0) { 3493 /* 3494 * Peer does not support it, we return 3495 * protocol not supported since this is true 3496 * for this feature and this peer, not the 3497 * socket request in general. 
3498 */ 3499 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3500 error = EPROTONOSUPPORT; 3501 SCTP_TCB_UNLOCK(stcb); 3502 break; 3503 } 3504 if (stcb->asoc.stream_reset_outstanding) { 3505 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3506 error = EALREADY; 3507 SCTP_TCB_UNLOCK(stcb); 3508 break; 3509 } 3510 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3511 send_in = 1; 3512 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3513 send_out = 1; 3514 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3515 send_in = 1; 3516 send_out = 1; 3517 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3518 send_tsn = 1; 3519 } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) { 3520 if (send_tsn || 3521 send_in || 3522 send_out) { 3523 /* We can't do that and add streams */ 3524 error = EINVAL; 3525 goto skip_stuff; 3526 } 3527 if (stcb->asoc.stream_reset_outstanding) { 3528 error = EBUSY; 3529 goto skip_stuff; 3530 } 3531 addstream = 1; 3532 /* We allocate here */ 3533 addstrmcnt = strrst->strrst_num_streams; 3534 if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) { 3535 /* You can't have more than 64k */ 3536 error = EINVAL; 3537 goto skip_stuff; 3538 } 3539 if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) { 3540 /* Need to allocate more */ 3541 struct sctp_stream_out *oldstream; 3542 struct sctp_stream_queue_pending *sp, 3543 *nsp; 3544 3545 oldstream = stcb->asoc.strmout; 3546 /* get some more */ 3547 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, 3548 ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)), 3549 SCTP_M_STRMO); 3550 if (stcb->asoc.strmout == NULL) { 3551 stcb->asoc.strmout = oldstream; 3552 error = ENOMEM; 3553 goto skip_stuff; 3554 } 3555 /* 3556 * Ok now we proceed with copying 3557 * the old out stuff and 3558 * initializing the new stuff. 3559 */ 3560 SCTP_TCB_SEND_LOCK(stcb); 3561 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1); 3562 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3563 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3564 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent; 3565 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; 3566 stcb->asoc.strmout[i].stream_no = i; 3567 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]); 3568 /* 3569 * now anything on those 3570 * queues? 
3571 */ 3572 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) { 3573 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); 3574 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); 3575 } 3576 /* 3577 * Now move assoc pointers 3578 * too 3579 */ 3580 if (stcb->asoc.last_out_stream == &oldstream[i]) { 3581 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i]; 3582 } 3583 if (stcb->asoc.locked_on_sending == &oldstream[i]) { 3584 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i]; 3585 } 3586 } 3587 /* now the new streams */ 3588 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); 3589 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) { 3590 stcb->asoc.strmout[i].next_sequence_sent = 0x0; 3591 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3592 stcb->asoc.strmout[i].stream_no = i; 3593 stcb->asoc.strmout[i].last_msg_incomplete = 0; 3594 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL); 3595 } 3596 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt; 3597 SCTP_FREE(oldstream, SCTP_M_STRMO); 3598 } 3599 SCTP_TCB_SEND_UNLOCK(stcb); 3600 goto skip_stuff; 3601 } else { 3602 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3603 error = EINVAL; 3604 SCTP_TCB_UNLOCK(stcb); 3605 break; 3606 } 3607 for (i = 0; i < strrst->strrst_num_streams; i++) { 3608 if ((send_in) && 3609 3610 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3611 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3612 error = EINVAL; 3613 goto get_out; 3614 } 3615 if ((send_out) && 3616 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3617 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3618 error = EINVAL; 3619 goto get_out; 3620 } 3621 } 3622 skip_stuff: 3623 if (error) { 3624 get_out: 3625 SCTP_TCB_UNLOCK(stcb); 3626 break; 3627 } 3628 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3629 strrst->strrst_list, 3630 send_out, (stcb->asoc.str_reset_seq_in - 3), 3631 send_in, send_tsn, addstream, addstrmcnt); 3632 3633 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3634 SCTP_TCB_UNLOCK(stcb); 3635 } 3636 break; 3637 3638 case SCTP_CONNECT_X: 3639 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3640 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3641 error = EINVAL; 3642 break; 3643 } 3644 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3645 break; 3646 3647 case SCTP_CONNECT_X_DELAYED: 3648 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3649 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3650 error = EINVAL; 3651 break; 3652 } 3653 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3654 break; 3655 3656 case SCTP_CONNECT_X_COMPLETE: 3657 { 3658 struct sockaddr *sa; 3659 struct sctp_nets *net; 3660 3661 /* FIXME MT: check correct? */ 3662 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3663 3664 /* find tcb */ 3665 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3666 SCTP_INP_RLOCK(inp); 3667 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3668 if (stcb) { 3669 SCTP_TCB_LOCK(stcb); 3670 net = sctp_findnet(stcb, sa); 3671 } 3672 SCTP_INP_RUNLOCK(inp); 3673 } else { 3674 /* 3675 * We increment here since 3676 * sctp_findassociation_ep_addr() wil do a 3677 * decrement if it finds the stcb as long as 3678 * the locked tcb (last argument) is NOT a 3679 * TCB.. aka NULL. 
3680 */ 3681 SCTP_INP_INCR_REF(inp); 3682 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3683 if (stcb == NULL) { 3684 SCTP_INP_DECR_REF(inp); 3685 } 3686 } 3687 3688 if (stcb == NULL) { 3689 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3690 error = ENOENT; 3691 break; 3692 } 3693 if (stcb->asoc.delayed_connection == 1) { 3694 stcb->asoc.delayed_connection = 0; 3695 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3696 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3697 stcb->asoc.primary_destination, 3698 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3699 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3700 } else { 3701 /* 3702 * already expired or did not use delayed 3703 * connectx 3704 */ 3705 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3706 error = EALREADY; 3707 } 3708 SCTP_TCB_UNLOCK(stcb); 3709 } 3710 break; 3711 case SCTP_MAX_BURST: 3712 { 3713 struct sctp_assoc_value *av; 3714 3715 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3716 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3717 3718 if (stcb) { 3719 stcb->asoc.max_burst = av->assoc_value; 3720 SCTP_TCB_UNLOCK(stcb); 3721 } else { 3722 SCTP_INP_WLOCK(inp); 3723 inp->sctp_ep.max_burst = av->assoc_value; 3724 SCTP_INP_WUNLOCK(inp); 3725 } 3726 } 3727 break; 3728 case SCTP_MAXSEG: 3729 { 3730 struct sctp_assoc_value *av; 3731 int ovh; 3732 3733 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3734 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3735 3736 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3737 ovh = SCTP_MED_OVERHEAD; 3738 } else { 3739 ovh = SCTP_MED_V4_OVERHEAD; 3740 } 3741 if (stcb) { 3742 if (av->assoc_value) { 3743 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3744 } else { 3745 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3746 } 3747 SCTP_TCB_UNLOCK(stcb); 3748 } else { 3749 SCTP_INP_WLOCK(inp); 3750 /* 3751 * FIXME MT: I think this is not in tune 3752 * with the API ID 3753 */ 3754 if (av->assoc_value) { 3755 inp->sctp_frag_point = (av->assoc_value + ovh); 3756 } else { 3757 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3758 } 3759 SCTP_INP_WUNLOCK(inp); 3760 } 3761 } 3762 break; 3763 case SCTP_EVENTS: 3764 { 3765 struct sctp_event_subscribe *events; 3766 3767 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3768 3769 SCTP_INP_WLOCK(inp); 3770 if (events->sctp_data_io_event) { 3771 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3772 } else { 3773 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3774 } 3775 3776 if (events->sctp_association_event) { 3777 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3778 } else { 3779 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3780 } 3781 3782 if (events->sctp_address_event) { 3783 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3784 } else { 3785 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3786 } 3787 3788 if (events->sctp_send_failure_event) { 3789 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3790 } else { 3791 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3792 } 3793 3794 if (events->sctp_peer_error_event) { 3795 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3796 } else { 3797 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3798 } 3799 3800 if (events->sctp_shutdown_event) { 3801 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3802 } else { 3803 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3804 } 3805 3806 if (events->sctp_partial_delivery_event) { 3807 
sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3808 } else { 3809 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3810 } 3811 3812 if (events->sctp_adaptation_layer_event) { 3813 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3814 } else { 3815 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3816 } 3817 3818 if (events->sctp_authentication_event) { 3819 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3820 } else { 3821 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3822 } 3823 3824 if (events->sctp_sender_dry_event) { 3825 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); 3826 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3827 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3828 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3829 if (stcb) { 3830 SCTP_TCB_LOCK(stcb); 3831 } 3832 if (stcb && 3833 TAILQ_EMPTY(&stcb->asoc.send_queue) && 3834 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 3835 (stcb->asoc.stream_queue_cnt == 0)) { 3836 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); 3837 } 3838 if (stcb) { 3839 SCTP_TCB_UNLOCK(stcb); 3840 } 3841 } 3842 } else { 3843 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); 3844 } 3845 3846 if (events->sctp_stream_reset_event) { 3847 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3848 } else { 3849 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3850 } 3851 SCTP_INP_WUNLOCK(inp); 3852 } 3853 break; 3854 3855 case SCTP_ADAPTATION_LAYER: 3856 { 3857 struct sctp_setadaptation *adap_bits; 3858 3859 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3860 SCTP_INP_WLOCK(inp); 3861 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3862 SCTP_INP_WUNLOCK(inp); 3863 } 3864 break; 3865 #ifdef SCTP_DEBUG 3866 case SCTP_SET_INITIAL_DBG_SEQ: 3867 { 3868 uint32_t *vvv; 3869 3870 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3871 SCTP_INP_WLOCK(inp); 3872 inp->sctp_ep.initial_sequence_debug = *vvv; 3873 SCTP_INP_WUNLOCK(inp); 3874 } 3875 break; 3876 #endif 3877 case SCTP_DEFAULT_SEND_PARAM: 3878 { 3879 struct sctp_sndrcvinfo *s_info; 3880 3881 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3882 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3883 3884 if (stcb) { 3885 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3886 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3887 } else { 3888 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3889 error = EINVAL; 3890 } 3891 SCTP_TCB_UNLOCK(stcb); 3892 } else { 3893 SCTP_INP_WLOCK(inp); 3894 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3895 SCTP_INP_WUNLOCK(inp); 3896 } 3897 } 3898 break; 3899 case SCTP_PEER_ADDR_PARAMS: 3900 /* Applys to the specific association */ 3901 { 3902 struct sctp_paddrparams *paddrp; 3903 struct sctp_nets *net; 3904 3905 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3906 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3907 net = NULL; 3908 if (stcb) { 3909 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3910 } else { 3911 /* 3912 * We increment here since 3913 * sctp_findassociation_ep_addr() wil do a 3914 * decrement if it finds the stcb as long as 3915 * the locked tcb (last argument) is NOT a 3916 * TCB.. aka NULL. 
3917 */ 3918 SCTP_INP_INCR_REF(inp); 3919 stcb = sctp_findassociation_ep_addr(&inp, 3920 (struct sockaddr *)&paddrp->spp_address, 3921 &net, NULL, NULL); 3922 if (stcb == NULL) { 3923 SCTP_INP_DECR_REF(inp); 3924 } 3925 } 3926 if (stcb && (net == NULL)) { 3927 struct sockaddr *sa; 3928 3929 sa = (struct sockaddr *)&paddrp->spp_address; 3930 #ifdef INET 3931 if (sa->sa_family == AF_INET) { 3932 3933 struct sockaddr_in *sin; 3934 3935 sin = (struct sockaddr_in *)sa; 3936 if (sin->sin_addr.s_addr) { 3937 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3938 SCTP_TCB_UNLOCK(stcb); 3939 error = EINVAL; 3940 break; 3941 } 3942 } else 3943 #endif 3944 #ifdef INET6 3945 if (sa->sa_family == AF_INET6) { 3946 struct sockaddr_in6 *sin6; 3947 3948 sin6 = (struct sockaddr_in6 *)sa; 3949 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3950 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3951 SCTP_TCB_UNLOCK(stcb); 3952 error = EINVAL; 3953 break; 3954 } 3955 } else 3956 #endif 3957 { 3958 error = EAFNOSUPPORT; 3959 SCTP_TCB_UNLOCK(stcb); 3960 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3961 break; 3962 } 3963 } 3964 /* sanity checks */ 3965 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3966 if (stcb) 3967 SCTP_TCB_UNLOCK(stcb); 3968 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3969 return (EINVAL); 3970 } 3971 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3972 if (stcb) 3973 SCTP_TCB_UNLOCK(stcb); 3974 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3975 return (EINVAL); 3976 } 3977 if (stcb) { 3978 /************************TCB SPECIFIC SET ******************/ 3979 /* 3980 * do we change the timer for HB, we run 3981 * only one? 3982 */ 3983 int ovh = 0; 3984 3985 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3986 ovh = SCTP_MED_OVERHEAD; 3987 } else { 3988 ovh = SCTP_MED_V4_OVERHEAD; 3989 } 3990 3991 if (paddrp->spp_hbinterval) 3992 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3993 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3994 stcb->asoc.heart_beat_delay = 0; 3995 3996 /* network sets ? 
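If a destination address was supplied (net != NULL) the per-path settings below are applied to that net only; otherwise the association-wide branch further down walks every net.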
*/ 3997 if (net) { 3998 /************************NET SPECIFIC SET ******************/ 3999 if (paddrp->spp_flags & SPP_HB_DEMAND) { 4000 /* on demand HB */ 4001 if (sctp_send_hb(stcb, 1, net, SCTP_SO_LOCKED) < 0) { 4002 /* asoc destroyed */ 4003 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4004 error = EINVAL; 4005 break; 4006 } 4007 } 4008 if (paddrp->spp_flags & SPP_HB_DISABLE) { 4009 net->dest_state |= SCTP_ADDR_NOHB; 4010 } 4011 if (paddrp->spp_flags & SPP_HB_ENABLE) { 4012 net->dest_state &= ~SCTP_ADDR_NOHB; 4013 } 4014 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 4015 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 4016 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 4017 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 4018 } 4019 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 4020 net->mtu = paddrp->spp_pathmtu + ovh; 4021 if (net->mtu < stcb->asoc.smallest_mtu) { 4022 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 4023 } 4024 } 4025 } 4026 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 4027 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 4028 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 4029 } 4030 } 4031 if (paddrp->spp_pathmaxrxt) 4032 net->failure_threshold = paddrp->spp_pathmaxrxt; 4033 #ifdef INET 4034 if (paddrp->spp_flags & SPP_IPV4_TOS) { 4035 if (net->ro._l_addr.sin.sin_family == AF_INET) { 4036 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 4037 } 4038 } 4039 #endif 4040 #ifdef INET6 4041 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 4042 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 4043 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 4044 } 4045 } 4046 #endif 4047 } else { 4048 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 4049 if (paddrp->spp_pathmaxrxt) 4050 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 4051 4052 if (paddrp->spp_flags & SPP_HB_ENABLE) { 4053 /* Turn back on the timer */ 4054 stcb->asoc.hb_is_disabled = 0; 4055 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 4056 } 4057 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 4058 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4059 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 4060 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 4061 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 4062 } 4063 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 4064 net->mtu = paddrp->spp_pathmtu + ovh; 4065 if (net->mtu < stcb->asoc.smallest_mtu) { 4066 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 4067 } 4068 } 4069 } 4070 } 4071 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 4072 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4073 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 4074 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 4075 } 4076 } 4077 } 4078 if (paddrp->spp_flags & SPP_HB_DISABLE) { 4079 int cnt_of_unconf = 0; 4080 struct sctp_nets *lnet; 4081 4082 stcb->asoc.hb_is_disabled = 1; 4083 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 4084 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 4085 cnt_of_unconf++; 4086 } 4087 } 4088 /* 4089 * stop the timer ONLY if we 4090 * have no unconfirmed 4091 * addresses 4092 */ 4093 if (cnt_of_unconf == 0) { 4094 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4095 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 4096 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 4097 } 4098 } 4099 } 4100 if (paddrp->spp_flags 
& SPP_HB_ENABLE) { 4101 /* start up the timer. */ 4102 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4103 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 4104 } 4105 } 4106 #ifdef INET 4107 if (paddrp->spp_flags & SPP_IPV4_TOS) 4108 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 4109 #endif 4110 #ifdef INET6 4111 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 4112 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 4113 #endif 4114 4115 } 4116 SCTP_TCB_UNLOCK(stcb); 4117 } else { 4118 /************************NO TCB, SET TO default stuff ******************/ 4119 SCTP_INP_WLOCK(inp); 4120 /* 4121 * For the TOS/FLOWLABEL stuff you set it 4122 * with the options on the socket 4123 */ 4124 if (paddrp->spp_pathmaxrxt) { 4125 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 4126 } 4127 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 4128 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 4129 else if (paddrp->spp_hbinterval) { 4130 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 4131 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 4132 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 4133 } 4134 if (paddrp->spp_flags & SPP_HB_ENABLE) { 4135 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 4136 4137 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 4138 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 4139 } 4140 SCTP_INP_WUNLOCK(inp); 4141 } 4142 } 4143 break; 4144 case SCTP_RTOINFO: 4145 { 4146 struct sctp_rtoinfo *srto; 4147 uint32_t new_init, new_min, new_max; 4148 4149 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 4150 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 4151 4152 if (stcb) { 4153 if (srto->srto_initial) 4154 new_init = srto->srto_initial; 4155 else 4156 new_init = stcb->asoc.initial_rto; 4157 if (srto->srto_max) 4158 new_max = srto->srto_max; 4159 else 4160 new_max = stcb->asoc.maxrto; 4161 if (srto->srto_min) 4162 new_min = srto->srto_min; 4163 else 4164 new_min = stcb->asoc.minrto; 4165 if ((new_min <= new_init) && (new_init <= new_max)) { 4166 stcb->asoc.initial_rto = new_init; 4167 stcb->asoc.maxrto = new_max; 4168 stcb->asoc.minrto = new_min; 4169 } else { 4170 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4171 error = EINVAL; 4172 } 4173 SCTP_TCB_UNLOCK(stcb); 4174 } else { 4175 SCTP_INP_WLOCK(inp); 4176 if (srto->srto_initial) 4177 new_init = srto->srto_initial; 4178 else 4179 new_init = inp->sctp_ep.initial_rto; 4180 if (srto->srto_max) 4181 new_max = srto->srto_max; 4182 else 4183 new_max = inp->sctp_ep.sctp_maxrto; 4184 if (srto->srto_min) 4185 new_min = srto->srto_min; 4186 else 4187 new_min = inp->sctp_ep.sctp_minrto; 4188 if ((new_min <= new_init) && (new_init <= new_max)) { 4189 inp->sctp_ep.initial_rto = new_init; 4190 inp->sctp_ep.sctp_maxrto = new_max; 4191 inp->sctp_ep.sctp_minrto = new_min; 4192 } else { 4193 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4194 error = EINVAL; 4195 } 4196 SCTP_INP_WUNLOCK(inp); 4197 } 4198 } 4199 break; 4200 case SCTP_ASSOCINFO: 4201 { 4202 struct sctp_assocparams *sasoc; 4203 4204 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 4205 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 4206 if (sasoc->sasoc_cookie_life) { 4207 /* boundary check the cookie life */ 4208 if (sasoc->sasoc_cookie_life < 1000) 4209 sasoc->sasoc_cookie_life = 1000; 4210 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 4211 sasoc->sasoc_cookie_life = 
SCTP_MAX_COOKIE_LIFE; 4212 } 4213 } 4214 if (stcb) { 4215 if (sasoc->sasoc_asocmaxrxt) 4216 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 4217 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 4218 sasoc->sasoc_peer_rwnd = 0; 4219 sasoc->sasoc_local_rwnd = 0; 4220 if (sasoc->sasoc_cookie_life) { 4221 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4222 } 4223 SCTP_TCB_UNLOCK(stcb); 4224 } else { 4225 SCTP_INP_WLOCK(inp); 4226 if (sasoc->sasoc_asocmaxrxt) 4227 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 4228 sasoc->sasoc_number_peer_destinations = 0; 4229 sasoc->sasoc_peer_rwnd = 0; 4230 sasoc->sasoc_local_rwnd = 0; 4231 if (sasoc->sasoc_cookie_life) { 4232 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4233 } 4234 SCTP_INP_WUNLOCK(inp); 4235 } 4236 } 4237 break; 4238 case SCTP_INITMSG: 4239 { 4240 struct sctp_initmsg *sinit; 4241 4242 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 4243 SCTP_INP_WLOCK(inp); 4244 if (sinit->sinit_num_ostreams) 4245 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 4246 4247 if (sinit->sinit_max_instreams) 4248 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 4249 4250 if (sinit->sinit_max_attempts) 4251 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 4252 4253 if (sinit->sinit_max_init_timeo) 4254 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 4255 SCTP_INP_WUNLOCK(inp); 4256 } 4257 break; 4258 case SCTP_PRIMARY_ADDR: 4259 { 4260 struct sctp_setprim *spa; 4261 struct sctp_nets *net, *lnet; 4262 4263 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 4264 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 4265 4266 net = NULL; 4267 if (stcb) { 4268 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 4269 } else { 4270 /* 4271 * We increment here since 4272 * sctp_findassociation_ep_addr() wil do a 4273 * decrement if it finds the stcb as long as 4274 * the locked tcb (last argument) is NOT a 4275 * TCB.. aka NULL. 4276 */ 4277 SCTP_INP_INCR_REF(inp); 4278 stcb = sctp_findassociation_ep_addr(&inp, 4279 (struct sockaddr *)&spa->ssp_addr, 4280 &net, NULL, NULL); 4281 if (stcb == NULL) { 4282 SCTP_INP_DECR_REF(inp); 4283 } 4284 } 4285 4286 if ((stcb) && (net)) { 4287 if ((net != stcb->asoc.primary_destination) && 4288 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 4289 /* Ok we need to set it */ 4290 lnet = stcb->asoc.primary_destination; 4291 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 4292 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 4293 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 4294 } 4295 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 4296 } 4297 } 4298 } else { 4299 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4300 error = EINVAL; 4301 } 4302 if (stcb) { 4303 SCTP_TCB_UNLOCK(stcb); 4304 } 4305 } 4306 break; 4307 case SCTP_SET_DYNAMIC_PRIMARY: 4308 { 4309 union sctp_sockstore *ss; 4310 4311 error = priv_check(curthread, 4312 PRIV_NETINET_RESERVEDPORT); 4313 if (error) 4314 break; 4315 4316 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 4317 /* SUPER USER CHECK? 
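(the priv_check() call above already requires PRIV_NETINET_RESERVEDPORT, so no additional check is made here)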
*/ 4318 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 4319 } 4320 break; 4321 case SCTP_SET_PEER_PRIMARY_ADDR: 4322 { 4323 struct sctp_setpeerprim *sspp; 4324 4325 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 4326 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 4327 if (stcb != NULL) { 4328 struct sctp_ifa *ifa; 4329 4330 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 4331 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 4332 if (ifa == NULL) { 4333 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4334 error = EINVAL; 4335 goto out_of_it; 4336 } 4337 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4338 /* 4339 * Must validate the ifa found is in 4340 * our ep 4341 */ 4342 struct sctp_laddr *laddr; 4343 int found = 0; 4344 4345 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4346 if (laddr->ifa == NULL) { 4347 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 4348 __FUNCTION__); 4349 continue; 4350 } 4351 if (laddr->ifa == ifa) { 4352 found = 1; 4353 break; 4354 } 4355 } 4356 if (!found) { 4357 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4358 error = EINVAL; 4359 goto out_of_it; 4360 } 4361 } 4362 if (sctp_set_primary_ip_address_sa(stcb, 4363 (struct sockaddr *)&sspp->sspp_addr) != 0) { 4364 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4365 error = EINVAL; 4366 } 4367 out_of_it: 4368 SCTP_TCB_UNLOCK(stcb); 4369 } else { 4370 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4371 error = EINVAL; 4372 } 4373 4374 } 4375 break; 4376 case SCTP_BINDX_ADD_ADDR: 4377 { 4378 struct sctp_getaddresses *addrs; 4379 size_t sz; 4380 struct thread *td; 4381 4382 td = (struct thread *)p; 4383 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 4384 optsize); 4385 #ifdef INET 4386 if (addrs->addr->sa_family == AF_INET) { 4387 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4388 if (optsize < sz) { 4389 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4390 error = EINVAL; 4391 break; 4392 } 4393 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4394 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4395 break; 4396 } 4397 } else 4398 #endif 4399 #ifdef INET6 4400 if (addrs->addr->sa_family == AF_INET6) { 4401 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4402 if (optsize < sz) { 4403 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4404 error = EINVAL; 4405 break; 4406 } 4407 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4408 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4409 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4410 break; 4411 } 4412 } else 4413 #endif 4414 { 4415 error = EAFNOSUPPORT; 4416 break; 4417 } 4418 sctp_bindx_add_address(so, inp, addrs->addr, 4419 addrs->sget_assoc_id, vrf_id, 4420 &error, p); 4421 } 4422 break; 4423 case SCTP_BINDX_REM_ADDR: 4424 { 4425 struct sctp_getaddresses *addrs; 4426 size_t sz; 4427 struct thread *td; 4428 4429 td = (struct thread *)p; 4430 4431 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 4432 #ifdef INET 4433 if (addrs->addr->sa_family == AF_INET) { 4434 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4435 if (optsize < sz) { 4436 
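/* option buffer too small to carry a sockaddr_in */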
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4437 error = EINVAL; 4438 break; 4439 } 4440 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4441 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4442 break; 4443 } 4444 } else 4445 #endif 4446 #ifdef INET6 4447 if (addrs->addr->sa_family == AF_INET6) { 4448 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4449 if (optsize < sz) { 4450 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4451 error = EINVAL; 4452 break; 4453 } 4454 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4455 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4456 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4457 break; 4458 } 4459 } else 4460 #endif 4461 { 4462 error = EAFNOSUPPORT; 4463 break; 4464 } 4465 sctp_bindx_delete_address(so, inp, addrs->addr, 4466 addrs->sget_assoc_id, vrf_id, 4467 &error); 4468 } 4469 break; 4470 default: 4471 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 4472 error = ENOPROTOOPT; 4473 break; 4474 } /* end switch (opt) */ 4475 return (error); 4476 } 4477 4478 int 4479 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 4480 { 4481 void *optval = NULL; 4482 size_t optsize = 0; 4483 struct sctp_inpcb *inp; 4484 void *p; 4485 int error = 0; 4486 4487 inp = (struct sctp_inpcb *)so->so_pcb; 4488 if (inp == 0) { 4489 /* I made the same as TCP since we are not setup? */ 4490 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4491 return (ECONNRESET); 4492 } 4493 if (sopt->sopt_level != IPPROTO_SCTP) { 4494 /* wrong proto level... send back up to IP */ 4495 #ifdef INET6 4496 if (INP_CHECK_SOCKAF(so, AF_INET6)) 4497 error = ip6_ctloutput(so, sopt); 4498 #endif /* INET6 */ 4499 #if defined(INET) && defined (INET6) 4500 else 4501 #endif 4502 #ifdef INET 4503 error = ip_ctloutput(so, sopt); 4504 #endif 4505 return (error); 4506 } 4507 optsize = sopt->sopt_valsize; 4508 if (optsize) { 4509 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); 4510 if (optval == NULL) { 4511 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); 4512 return (ENOBUFS); 4513 } 4514 error = sooptcopyin(sopt, optval, optsize, optsize); 4515 if (error) { 4516 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4517 goto out; 4518 } 4519 } 4520 p = (void *)sopt->sopt_td; 4521 if (sopt->sopt_dir == SOPT_SET) { 4522 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 4523 } else if (sopt->sopt_dir == SOPT_GET) { 4524 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 4525 } else { 4526 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4527 error = EINVAL; 4528 } 4529 if ((error == 0) && (optval != NULL)) { 4530 error = sooptcopyout(sopt, optval, optsize); 4531 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4532 } else if (optval != NULL) { 4533 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4534 } 4535 out: 4536 return (error); 4537 } 4538 4539 #ifdef INET 4540 static int 4541 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 4542 { 4543 int error = 0; 4544 int create_lock_on = 0; 4545 uint32_t vrf_id; 4546 struct sctp_inpcb *inp; 4547 struct sctp_tcb *stcb = NULL; 4548 4549 inp = (struct sctp_inpcb *)so->so_pcb; 4550 if (inp == 0) { 4551 /* I made the same as TCP since we are not setup? 
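Note that EINVAL is only what gets logged; the caller actually sees ECONNRESET.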
*/ 4552 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4553 return (ECONNRESET); 4554 } 4555 if (addr == NULL) { 4556 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4557 return EINVAL; 4558 } 4559 switch (addr->sa_family) { 4560 #ifdef INET6 4561 case AF_INET6: 4562 { 4563 struct sockaddr_in6 *sin6p; 4564 4565 if (addr->sa_len != sizeof(struct sockaddr_in6)) { 4566 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4567 return (EINVAL); 4568 } 4569 sin6p = (struct sockaddr_in6 *)addr; 4570 if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) { 4571 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 4572 return (error); 4573 } 4574 break; 4575 } 4576 #endif 4577 #ifdef INET 4578 case AF_INET: 4579 { 4580 struct sockaddr_in *sinp; 4581 4582 if (addr->sa_len != sizeof(struct sockaddr_in)) { 4583 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4584 return (EINVAL); 4585 } 4586 sinp = (struct sockaddr_in *)addr; 4587 if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) { 4588 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 4589 return (error); 4590 } 4591 break; 4592 } 4593 #endif 4594 default: 4595 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); 4596 return (EAFNOSUPPORT); 4597 } 4598 SCTP_INP_INCR_REF(inp); 4599 SCTP_ASOC_CREATE_LOCK(inp); 4600 create_lock_on = 1; 4601 4602 4603 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4604 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4605 /* Should I really unlock ? */ 4606 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 4607 error = EFAULT; 4608 goto out_now; 4609 } 4610 #ifdef INET6 4611 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 4612 (addr->sa_family == AF_INET6)) { 4613 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4614 error = EINVAL; 4615 goto out_now; 4616 } 4617 #endif /* INET6 */ 4618 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 4619 SCTP_PCB_FLAGS_UNBOUND) { 4620 /* Bind a ephemeral port */ 4621 error = sctp_inpcb_bind(so, NULL, NULL, p); 4622 if (error) { 4623 goto out_now; 4624 } 4625 } 4626 /* Now do we connect? */ 4627 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 4628 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { 4629 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4630 error = EINVAL; 4631 goto out_now; 4632 } 4633 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4634 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4635 /* We are already connected AND the TCP model */ 4636 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4637 error = EADDRINUSE; 4638 goto out_now; 4639 } 4640 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4641 SCTP_INP_RLOCK(inp); 4642 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4643 SCTP_INP_RUNLOCK(inp); 4644 } else { 4645 /* 4646 * We increment here since sctp_findassociation_ep_addr() 4647 * will do a decrement if it finds the stcb as long as the 4648 * locked tcb (last argument) is NOT a TCB.. aka NULL. 
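* The stcb, if any, is unlocked again right below; its mere existence is enough to fail this connect with EALREADY.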
4649 */ 4650 SCTP_INP_INCR_REF(inp); 4651 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); 4652 if (stcb == NULL) { 4653 SCTP_INP_DECR_REF(inp); 4654 } else { 4655 SCTP_TCB_UNLOCK(stcb); 4656 } 4657 } 4658 if (stcb != NULL) { 4659 /* Already have or am bring up an association */ 4660 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 4661 error = EALREADY; 4662 goto out_now; 4663 } 4664 vrf_id = inp->def_vrf_id; 4665 /* We are GOOD to go */ 4666 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p); 4667 if (stcb == NULL) { 4668 /* Gak! no memory */ 4669 goto out_now; 4670 } 4671 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 4672 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 4673 /* Set the connected flag so we can queue data */ 4674 SOCKBUF_LOCK(&so->so_rcv); 4675 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 4676 SOCKBUF_UNLOCK(&so->so_rcv); 4677 SOCKBUF_LOCK(&so->so_snd); 4678 so->so_snd.sb_state &= ~SBS_CANTSENDMORE; 4679 SOCKBUF_UNLOCK(&so->so_snd); 4680 SOCK_LOCK(so); 4681 so->so_state &= ~SS_ISDISCONNECTING; 4682 SOCK_UNLOCK(so); 4683 soisconnecting(so); 4684 } 4685 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 4686 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 4687 4688 /* initialize authentication parameters for the assoc */ 4689 sctp_initialize_auth_params(inp, stcb); 4690 4691 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 4692 SCTP_TCB_UNLOCK(stcb); 4693 out_now: 4694 if (create_lock_on) { 4695 SCTP_ASOC_CREATE_UNLOCK(inp); 4696 } 4697 SCTP_INP_DECR_REF(inp); 4698 return error; 4699 } 4700 4701 #endif 4702 4703 int 4704 sctp_listen(struct socket *so, int backlog, struct thread *p) 4705 { 4706 /* 4707 * Note this module depends on the protocol processing being called 4708 * AFTER any socket level flags and backlog are applied to the 4709 * socket. The traditional way that the socket flags are applied is 4710 * AFTER protocol processing. We have made a change to the 4711 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 4712 * place if the socket API for SCTP is to work properly. 4713 */ 4714 4715 int error = 0; 4716 struct sctp_inpcb *inp; 4717 4718 inp = (struct sctp_inpcb *)so->so_pcb; 4719 if (inp == 0) { 4720 /* I made the same as TCP since we are not setup? */ 4721 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4722 return (ECONNRESET); 4723 } 4724 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) { 4725 /* See if we have a listener */ 4726 struct sctp_inpcb *tinp; 4727 union sctp_sockstore store, *sp; 4728 4729 sp = &store; 4730 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4731 /* not bound all */ 4732 struct sctp_laddr *laddr; 4733 4734 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4735 memcpy(&store, &laddr->ifa->address, sizeof(store)); 4736 switch (sp->sa.sa_family) { 4737 #ifdef INET 4738 case AF_INET: 4739 sp->sin.sin_port = inp->sctp_lport; 4740 break; 4741 #endif 4742 #ifdef INET6 4743 case AF_INET6: 4744 sp->sin6.sin6_port = inp->sctp_lport; 4745 break; 4746 #endif 4747 default: 4748 break; 4749 } 4750 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4751 if (tinp && (tinp != inp) && 4752 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4753 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4754 (tinp->sctp_socket->so_qlimit)) { 4755 /* 4756 * we have a listener already and 4757 * its not this inp. 
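* Drop the reference sctp_pcb_findep() handed back and refuse the listen with EADDRINUSE.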
*/ 4759 SCTP_INP_DECR_REF(tinp); 4760 return (EADDRINUSE); 4761 } else if (tinp) { 4762 SCTP_INP_DECR_REF(tinp); 4763 } 4764 } 4765 } else { 4766 /* Setup a local addr bound all */ 4767 memset(&store, 0, sizeof(store)); 4768 switch (sp->sa.sa_family) { 4769 #ifdef INET 4770 case AF_INET: 4771 store.sin.sin_port = inp->sctp_lport; 4772 break; 4773 #endif 4774 #ifdef INET6 4775 case AF_INET6: 4776 sp->sin6.sin6_port = inp->sctp_lport; 4777 break; 4778 #endif 4779 default: 4780 break; 4781 } 4782 #ifdef INET6 4783 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4784 store.sa.sa_family = AF_INET6; 4785 store.sa.sa_len = sizeof(struct sockaddr_in6); 4786 } 4787 #endif 4788 #ifdef INET 4789 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 4790 store.sa.sa_family = AF_INET; 4791 store.sa.sa_len = sizeof(struct sockaddr_in); 4792 } 4793 #endif 4794 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4795 if (tinp && (tinp != inp) && 4796 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4797 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4798 (tinp->sctp_socket->so_qlimit)) { 4799 /* 4800 * we have a listener already and it's not 4801 * this inp. 4802 */ 4803 SCTP_INP_DECR_REF(tinp); 4804 return (EADDRINUSE); 4805 } else if (tinp) { 4806 SCTP_INP_DECR_REF(tinp); 4807 } 4808 } 4809 } 4810 SCTP_INP_RLOCK(inp); 4811 #ifdef SCTP_LOCK_LOGGING 4812 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { 4813 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4814 } 4815 #endif 4816 SOCK_LOCK(so); 4817 error = solisten_proto_check(so); 4818 if (error) { 4819 SOCK_UNLOCK(so); 4820 SCTP_INP_RUNLOCK(inp); 4821 return (error); 4822 } 4823 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 4824 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4825 /* 4826 * The unlucky case: we are in the TCP pool with this inp 4827 * while someone else holds the main inp slot. We must move 4828 * this inp (the listener) to the main slot and move the 4829 * one found there into the TCP pool. 4830 */ 4831 if (sctp_swap_inpcb_for_listen(inp)) { 4832 goto in_use; 4833 } 4834 } 4835 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4836 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4837 /* We are already connected AND the TCP model */ 4838 in_use: 4839 SCTP_INP_RUNLOCK(inp); 4840 SOCK_UNLOCK(so); 4841 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4842 return (EADDRINUSE); 4843 } 4844 SCTP_INP_RUNLOCK(inp); 4845 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 4846 /* We must do a bind. */ 4847 SOCK_UNLOCK(so); 4848 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { 4849 /* bind error, probably perm */ 4850 return (error); 4851 } 4852 SOCK_LOCK(so); 4853 } 4854 /* It appears for 7.0 and on, we must always call this.
*/ 4855 solisten_proto(so, backlog); 4856 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4857 /* remove the ACCEPTCONN flag for one-to-many sockets */ 4858 so->so_options &= ~SO_ACCEPTCONN; 4859 } 4860 if (backlog == 0) { 4861 /* turning off listen */ 4862 so->so_options &= ~SO_ACCEPTCONN; 4863 } 4864 SOCK_UNLOCK(so); 4865 return (error); 4866 } 4867 4868 static int sctp_defered_wakeup_cnt = 0; 4869 4870 int 4871 sctp_accept(struct socket *so, struct sockaddr **addr) 4872 { 4873 struct sctp_tcb *stcb; 4874 struct sctp_inpcb *inp; 4875 union sctp_sockstore store; 4876 4877 #ifdef INET6 4878 int error; 4879 4880 #endif 4881 inp = (struct sctp_inpcb *)so->so_pcb; 4882 4883 if (inp == 0) { 4884 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4885 return (ECONNRESET); 4886 } 4887 SCTP_INP_RLOCK(inp); 4888 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4889 SCTP_INP_RUNLOCK(inp); 4890 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 4891 return (EOPNOTSUPP); 4892 } 4893 if (so->so_state & SS_ISDISCONNECTED) { 4894 SCTP_INP_RUNLOCK(inp); 4895 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); 4896 return (ECONNABORTED); 4897 } 4898 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4899 if (stcb == NULL) { 4900 SCTP_INP_RUNLOCK(inp); 4901 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4902 return (ECONNRESET); 4903 } 4904 SCTP_TCB_LOCK(stcb); 4905 SCTP_INP_RUNLOCK(inp); 4906 store = stcb->asoc.primary_destination->ro._l_addr; 4907 stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE; 4908 SCTP_TCB_UNLOCK(stcb); 4909 switch (store.sa.sa_family) { 4910 #ifdef INET 4911 case AF_INET: 4912 { 4913 struct sockaddr_in *sin; 4914 4915 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4916 if (sin == NULL) 4917 return (ENOMEM); 4918 sin->sin_family = AF_INET; 4919 sin->sin_len = sizeof(*sin); 4920 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 4921 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 4922 *addr = (struct sockaddr *)sin; 4923 break; 4924 } 4925 #endif 4926 #ifdef INET6 4927 case AF_INET6: 4928 { 4929 struct sockaddr_in6 *sin6; 4930 4931 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 4932 if (sin6 == NULL) 4933 return (ENOMEM); 4934 sin6->sin6_family = AF_INET6; 4935 sin6->sin6_len = sizeof(*sin6); 4936 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 4937 4938 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 4939 if ((error = sa6_recoverscope(sin6)) != 0) { 4940 SCTP_FREE_SONAME(sin6); 4941 return (error); 4942 } 4943 *addr = (struct sockaddr *)sin6; 4944 break; 4945 } 4946 #endif 4947 default: 4948 /* TSNH */ 4949 break; 4950 } 4951 /* Wake any delayed sleep action */ 4952 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 4953 SCTP_INP_WLOCK(inp); 4954 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 4955 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 4956 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 4957 SCTP_INP_WUNLOCK(inp); 4958 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 4959 if (sowriteable(inp->sctp_socket)) { 4960 sowwakeup_locked(inp->sctp_socket); 4961 } else { 4962 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 4963 } 4964 SCTP_INP_WLOCK(inp); 4965 } 4966 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 4967 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 4968 SCTP_INP_WUNLOCK(inp); 4969 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 4970 if (soreadable(inp->sctp_socket)) { 4971 sctp_defered_wakeup_cnt++; 4972 sorwakeup_locked(inp->sctp_socket); 
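/* the _locked wakeup releases the sockbuf lock; otherwise we unlock below */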
4973 } else { 4974 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 4975 } 4976 SCTP_INP_WLOCK(inp); 4977 } 4978 SCTP_INP_WUNLOCK(inp); 4979 } 4980 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4981 SCTP_TCB_LOCK(stcb); 4982 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 4983 } 4984 return (0); 4985 } 4986 4987 #ifdef INET 4988 int 4989 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 4990 { 4991 struct sockaddr_in *sin; 4992 uint32_t vrf_id; 4993 struct sctp_inpcb *inp; 4994 struct sctp_ifa *sctp_ifa; 4995 4996 /* 4997 * Do the malloc first in case it blocks. 4998 */ 4999 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 5000 if (sin == NULL) 5001 return (ENOMEM); 5002 sin->sin_family = AF_INET; 5003 sin->sin_len = sizeof(*sin); 5004 inp = (struct sctp_inpcb *)so->so_pcb; 5005 if (!inp) { 5006 SCTP_FREE_SONAME(sin); 5007 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 5008 return ECONNRESET; 5009 } 5010 SCTP_INP_RLOCK(inp); 5011 sin->sin_port = inp->sctp_lport; 5012 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 5013 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 5014 struct sctp_tcb *stcb; 5015 struct sockaddr_in *sin_a; 5016 struct sctp_nets *net; 5017 int fnd; 5018 5019 stcb = LIST_FIRST(&inp->sctp_asoc_list); 5020 if (stcb == NULL) { 5021 goto notConn; 5022 } 5023 fnd = 0; 5024 sin_a = NULL; 5025 SCTP_TCB_LOCK(stcb); 5026 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 5027 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 5028 if (sin_a == NULL) 5029 /* this will make coverity happy */ 5030 continue; 5031 5032 if (sin_a->sin_family == AF_INET) { 5033 fnd = 1; 5034 break; 5035 } 5036 } 5037 if ((!fnd) || (sin_a == NULL)) { 5038 /* punt */ 5039 SCTP_TCB_UNLOCK(stcb); 5040 goto notConn; 5041 } 5042 vrf_id = inp->def_vrf_id; 5043 sctp_ifa = sctp_source_address_selection(inp, 5044 stcb, 5045 (sctp_route_t *) & net->ro, 5046 net, 0, vrf_id); 5047 if (sctp_ifa) { 5048 sin->sin_addr = sctp_ifa->address.sin.sin_addr; 5049 sctp_free_ifa(sctp_ifa); 5050 } 5051 SCTP_TCB_UNLOCK(stcb); 5052 } else { 5053 /* For the bound all case you get back 0 */ 5054 notConn: 5055 sin->sin_addr.s_addr = 0; 5056 } 5057 5058 } else { 5059 /* Take the first IPv4 address in the list */ 5060 struct sctp_laddr *laddr; 5061 int fnd = 0; 5062 5063 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5064 if (laddr->ifa->address.sa.sa_family == AF_INET) { 5065 struct sockaddr_in *sin_a; 5066 5067 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa; 5068 sin->sin_addr = sin_a->sin_addr; 5069 fnd = 1; 5070 break; 5071 } 5072 } 5073 if (!fnd) { 5074 SCTP_FREE_SONAME(sin); 5075 SCTP_INP_RUNLOCK(inp); 5076 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 5077 return ENOENT; 5078 } 5079 } 5080 SCTP_INP_RUNLOCK(inp); 5081 (*addr) = (struct sockaddr *)sin; 5082 return (0); 5083 } 5084 5085 int 5086 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 5087 { 5088 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 5089 int fnd; 5090 struct sockaddr_in *sin_a; 5091 struct sctp_inpcb *inp; 5092 struct sctp_tcb *stcb; 5093 struct sctp_nets *net; 5094 5095 /* Do the malloc first in case it blocks. 
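(Here the connected-state check actually comes first; the inp pointer is then re-read after the allocation in case we slept.)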
*/ 5096 inp = (struct sctp_inpcb *)so->so_pcb; 5097 if ((inp == NULL) || 5098 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 5099 /* UDP type and listeners will drop out here */ 5100 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 5101 return (ENOTCONN); 5102 } 5103 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 5104 if (sin == NULL) 5105 return (ENOMEM); 5106 sin->sin_family = AF_INET; 5107 sin->sin_len = sizeof(*sin); 5108 5109 /* We must recapture incase we blocked */ 5110 inp = (struct sctp_inpcb *)so->so_pcb; 5111 if (!inp) { 5112 SCTP_FREE_SONAME(sin); 5113 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 5114 return ECONNRESET; 5115 } 5116 SCTP_INP_RLOCK(inp); 5117 stcb = LIST_FIRST(&inp->sctp_asoc_list); 5118 if (stcb) { 5119 SCTP_TCB_LOCK(stcb); 5120 } 5121 SCTP_INP_RUNLOCK(inp); 5122 if (stcb == NULL) { 5123 SCTP_FREE_SONAME(sin); 5124 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 5125 return ECONNRESET; 5126 } 5127 fnd = 0; 5128 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 5129 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 5130 if (sin_a->sin_family == AF_INET) { 5131 fnd = 1; 5132 sin->sin_port = stcb->rport; 5133 sin->sin_addr = sin_a->sin_addr; 5134 break; 5135 } 5136 } 5137 SCTP_TCB_UNLOCK(stcb); 5138 if (!fnd) { 5139 /* No IPv4 address */ 5140 SCTP_FREE_SONAME(sin); 5141 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 5142 return ENOENT; 5143 } 5144 (*addr) = (struct sockaddr *)sin; 5145 return (0); 5146 } 5147 5148 #ifdef INET 5149 struct pr_usrreqs sctp_usrreqs = { 5150 .pru_abort = sctp_abort, 5151 .pru_accept = sctp_accept, 5152 .pru_attach = sctp_attach, 5153 .pru_bind = sctp_bind, 5154 .pru_connect = sctp_connect, 5155 .pru_control = in_control, 5156 .pru_close = sctp_close, 5157 .pru_detach = sctp_close, 5158 .pru_sopoll = sopoll_generic, 5159 .pru_flush = sctp_flush, 5160 .pru_disconnect = sctp_disconnect, 5161 .pru_listen = sctp_listen, 5162 .pru_peeraddr = sctp_peeraddr, 5163 .pru_send = sctp_sendm, 5164 .pru_shutdown = sctp_shutdown, 5165 .pru_sockaddr = sctp_ingetaddr, 5166 .pru_sosend = sctp_sosend, 5167 .pru_soreceive = sctp_soreceive 5168 }; 5169 5170 #endif 5171 #endif 5172
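/*
 * Userland usage sketch (illustrative only, not part of this file): the
 * SCTP_EVENTS and SCTP_RTOINFO cases handled by sctp_setopt() above are
 * reached through setsockopt(2) roughly as follows.  Error checking is
 * omitted and the values are examples, not recommendations.
 *
 *	int fd;
 *	struct sctp_event_subscribe events;
 *	struct sctp_rtoinfo rto;
 *
 *	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *
 *	memset(&events, 0, sizeof(events));
 *	events.sctp_data_io_event = 1;
 *	events.sctp_association_event = 1;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
 *	    &events, sizeof(events));
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = 0;		(endpoint defaults)
 *	rto.srto_initial = 3000;	(milliseconds)
 *	rto.srto_min = 1000;
 *	rto.srto_max = 60000;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO,
 *	    &rto, sizeof(rto));
 *
 * As the SCTP_RTOINFO handler above enforces, the values must satisfy
 * srto_min <= srto_initial <= srto_max; a field left at zero keeps the
 * current setting.
 */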