/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_var.h>
#if defined(INET6)
#endif
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_cc_functions.h>
#include <netinet/udp.h>



void
sctp_init(void)
{
	u_long sb_max_adj;

	bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));

	/* Initialize and modify the sysctled variables */
	sctp_init_sysctls();
	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
		SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
	/*
	 * Allow a user to take no more than 1/2 the number of clusters or
	 * the SB_MAX whichever is smaller for the send window.
	 */
	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
	SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
	/*
	 * Now for the recv window, should we take the same amount? or
	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
	 * now I will just copy.
	 */
	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);

	SCTP_BASE_VAR(first_time) = 0;
	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
	sctp_pcb_init();
#if defined(SCTP_PACKET_LOGGING)
	SCTP_BASE_VAR(packet_log_writers) = 0;
	SCTP_BASE_VAR(packet_log_end) = 0;
	bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
#endif


}
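/*
 * Worked example of the sizing above (illustrative, assumed constants
 * rather than values pulled from any particular kernel configuration):
 * with SB_MAX = 262144, MCLBYTES = 2048 and MSIZE = 256,
 *
 *	sb_max_adj = 262144 * 2048 / (256 + 2048) ~= 233016
 *
 * so the default send window is the smaller of that value and
 * (nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT, and the receive window is
 * simply copied from the send window.
 */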
void
sctp_finish(void)
{
	sctp_pcb_finish();
}



void
sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint16_t nxtsz)
{
	struct sctp_tmit_chunk *chk;
	uint16_t overhead;

	/* Adjust that too */
	stcb->asoc.smallest_mtu = nxtsz;
	/* now off to subtract IP_DF flag if needed */
#ifdef SCTP_PRINT_FOR_B_AND_M
	SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
	    inp, stcb, net, nxtsz);
#endif
	overhead = IP_HDR_SIZE;
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
	}
	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
		if ((chk->send_size + overhead) > nxtsz) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if ((chk->send_size + overhead) > nxtsz) {
			/*
			 * For this chunk we also mark it for immediate
			 * resend, since we sent too big a chunk.
			 */
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
			}
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->rec.data.doing_fast_retransmit = 0;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
				    chk->whoTo->flight_size,
				    chk->book_size,
				    (uintptr_t) chk->whoTo,
				    chk->rec.data.TSN_seq);
			}
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
		}
	}
}

static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
		}
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not an unreachable message due to fragmentation */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	totsz = ip->ip_len;

	nxtsz = ntohs(icmph->icmp_nextmtu);
	if (nxtsz == 0) {
		/*
		 * Old-style router that does not tell us what the next-hop
		 * MTU is. Rats, we will have to guess (in an educated
		 * fashion, of course).
		 */
		nxtsz = find_next_best_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
		if (net->port) {
			net->mtu -= sizeof(struct udphdr);
		}
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
#ifdef SCTP_PRINT_FOR_B_AND_M
		SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
		    nxtsz);
#endif
		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);

	SCTP_TCB_UNLOCK(stcb);
}
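/*
 * Illustrative example of the adjustment above (assumed numbers, not taken
 * from a trace): if an ICMP "fragmentation needed" message reports a
 * next-hop MTU of 1400 for a destination reached over UDP encapsulation
 * (net->port != 0), net->mtu becomes 1400 - sizeof(struct udphdr) = 1392.
 * If the association's smallest_mtu was larger than 1400,
 * sctp_pathmtu_adjustment() then flags every queued chunk whose send_size
 * plus the IP (and AUTH, if required) overhead exceeds 1400 as
 * CHUNK_FLAGS_FRAGMENT_OK, marking already-sent ones for retransmission.
 */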
void
sctp_notify(struct sctp_inpcb *inp,
    struct ip *ip,
    struct sctphdr *sh,
    struct sockaddr *to,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	/* protection */
	int reason;
	struct icmp *icmph;


	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {

		/*
		 * Hmm, reachability problems we must examine closely. If it
		 * is not reachable, we may have lost a network. Or if there
		 * is NO protocol named SCTP at the other end, we consider
		 * it an OOTB abort.
		 */
		if (net->dest_state & SCTP_ADDR_REACHABLE) {
			/* Ok that destination is NOT reachable */
			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
			    net->error_count,
			    net->failure_threshold,
			    net);

			net->dest_state &= ~SCTP_ADDR_REACHABLE;
			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
			/*
			 * JRS 5/14/07 - If a destination is unreachable,
			 * the PF bit is turned off. This allows an
			 * unambiguous use of the PF bit for destinations
			 * that are reachable but potentially failed. If the
			 * destination is set to the unreachable state, also
			 * set the destination to the PF state.
			 */
			/*
			 * Add debug message here if destination is not in
			 * PF state.
			 */
			/* Stop any running T3 timers here? */
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
				    net);
			}
			net->error_count = net->failure_threshold + 1;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
			    stcb, SCTP_FAILED_THRESHOLD,
			    (void *)net, SCTP_SO_NOT_LOCKED);
		}
		SCTP_TCB_UNLOCK(stcb);
	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
		/*
		 * Here the peer is either playing tricks on us, including
		 * an address that belongs to someone who does not support
		 * SCTP, OR it was a userland implementation that shut down
		 * and is now dead. In either case treat it like an OOTB
		 * abort with no TCB.
		 */
		reason = SCTP_PEER_FAULTY;
		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
#endif
		/* no need to unlock here, since the TCB is gone */
	} else {
		SCTP_TCB_UNLOCK(stcb);
	}
}
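/*
 * Summary of the ICMP handling above (derived from the code, for quick
 * reference): the network/host/isolated/prohibited "unreachable" codes mark
 * a previously reachable path down and raise SCTP_NOTIFY_INTERFACE_DOWN,
 * while ICMP_UNREACH_PROTOCOL and ICMP_UNREACH_PORT are treated as an
 * OOTB-style abort and free the association; all other unreachable codes
 * are ignored.
 */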
void
sctp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct sctphdr *sh;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;
	if (sa->sa_family != AF_INET ||
	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
		return;
	}
	if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}
	if (ip) {
		struct sctp_inpcb *inp = NULL;
		struct sctp_tcb *stcb = NULL;
		struct sctp_nets *net = NULL;
		struct sockaddr_in to, from;

		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		bzero(&to, sizeof(to));
		bzero(&from, sizeof(from));
		from.sin_family = to.sin_family = AF_INET;
		from.sin_len = to.sin_len = sizeof(to);
		from.sin_port = sh->src_port;
		from.sin_addr = ip->ip_src;
		to.sin_port = sh->dest_port;
		to.sin_addr = ip->ip_dst;

		/*
		 * 'to' holds the dest of the packet that failed to be sent.
		 * 'from' holds our local endpoint address. Thus we reverse
		 * the to and the from in the lookup.
		 */
		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
		    (struct sockaddr *)&to,
		    &inp, &net, 1, vrf_id);
		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
			if (cmd != PRC_MSGSIZE) {
				sctp_notify(inp, ip, sh,
				    (struct sockaddr *)&to, stcb,
				    net);
			} else {
				/* handle possible ICMP size messages */
				sctp_notify_mbuf(inp, stcb, net, ip, sh);
			}
		} else {
			if ((stcb == NULL) && (inp != NULL)) {
				/* reduce ref-count */
				SCTP_INP_WLOCK(inp);
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
			}
		}
	}
	return;
}

static int
sctp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct sctp_inpcb *inp;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;
	int error;
	uint32_t vrf_id;

	/* FIX, for non-bsd is this right? */
	vrf_id = SCTP_DEFAULT_VRFID;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);

	if (error)
		return (error);

	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);

	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
	    sintosa(&addrs[1]),
	    &inp, &net, 1, vrf_id);
	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
		if ((inp != NULL) && (stcb == NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			goto cred_can_cont;
		}
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
		error = ENOENT;
		goto out;
	}
	SCTP_TCB_UNLOCK(stcb);
	/*
	 * We use the write lock here only because the error leg needs it.
	 * If we used RLOCK, then we would have to
	 * wlock/decr/unlock/rlock, which in theory could create a hole.
	 * Better to use the stronger wlock.
	 */
	SCTP_INP_WLOCK(inp);
cred_can_cont:
	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
	if (error) {
		SCTP_INP_WUNLOCK(inp);
		goto out;
	}
	cru2x(inp->sctp_socket->so_cred, &xuc);
	SCTP_INP_WUNLOCK(inp);
	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");

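/*
 * Userland usage sketch for the sysctl above (illustrative only, compiled
 * out of the kernel build): get_sctp_peer_cred() is a hypothetical helper
 * name, the caller must have PRIV_NETINET_GETCRED (typically root), and the
 * ordering of the two AF_INET addresses follows the
 * sctp_findassociation_addr_sa() call in sctp_getcred() above.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <netinet/in.h>

static int
get_sctp_peer_cred(struct sockaddr_in addrs[2], struct xucred *xuc)
{
	size_t len = sizeof(*xuc);

	/* net.inet.sctp.getcred takes the two addresses as "new" data */
	return (sysctlbyname("net.inet.sctp.getcred", xuc, &len,
	    addrs, 2 * sizeof(struct sockaddr_in)));
}
#endif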

static void
sctp_abort(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		return;
	}
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
		sctp_log_closing(inp, NULL, 16);
#endif
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
		SOCK_LOCK(so);
		SCTP_SB_CLEAR(so->so_snd);
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		SCTP_SB_CLEAR(so->so_rcv);

		/*
		 * Now null out the reference, we are completely detached.
*/ 503 so->so_pcb = NULL; 504 SOCK_UNLOCK(so); 505 } else { 506 flags = inp->sctp_flags; 507 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 508 goto sctp_must_try_again; 509 } 510 } 511 return; 512 } 513 514 static int 515 sctp_attach(struct socket *so, int proto, struct thread *p) 516 { 517 struct sctp_inpcb *inp; 518 struct inpcb *ip_inp; 519 int error; 520 uint32_t vrf_id = SCTP_DEFAULT_VRFID; 521 522 #ifdef IPSEC 523 uint32_t flags; 524 525 #endif 526 527 inp = (struct sctp_inpcb *)so->so_pcb; 528 if (inp != 0) { 529 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 530 return EINVAL; 531 } 532 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { 533 error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); 534 if (error) { 535 return error; 536 } 537 } 538 error = sctp_inpcb_alloc(so, vrf_id); 539 if (error) { 540 return error; 541 } 542 inp = (struct sctp_inpcb *)so->so_pcb; 543 SCTP_INP_WLOCK(inp); 544 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ 545 ip_inp = &inp->ip_inp.inp; 546 ip_inp->inp_vflag |= INP_IPV4; 547 ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl); 548 #ifdef IPSEC 549 error = ipsec_init_policy(so, &ip_inp->inp_sp); 550 #ifdef SCTP_LOG_CLOSING 551 sctp_log_closing(inp, NULL, 17); 552 #endif 553 if (error != 0) { 554 try_again: 555 flags = inp->sctp_flags; 556 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 557 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 558 #ifdef SCTP_LOG_CLOSING 559 sctp_log_closing(inp, NULL, 15); 560 #endif 561 SCTP_INP_WUNLOCK(inp); 562 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 563 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 564 } else { 565 flags = inp->sctp_flags; 566 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 567 goto try_again; 568 } else { 569 SCTP_INP_WUNLOCK(inp); 570 } 571 } 572 return error; 573 } 574 #endif /* IPSEC */ 575 SCTP_INP_WUNLOCK(inp); 576 return 0; 577 } 578 579 static int 580 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 581 { 582 struct sctp_inpcb *inp = NULL; 583 int error; 584 585 #ifdef INET6 586 if (addr && addr->sa_family != AF_INET) { 587 /* must be a v4 address! */ 588 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 589 return EINVAL; 590 } 591 #endif /* INET6 */ 592 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) { 593 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 594 return EINVAL; 595 } 596 inp = (struct sctp_inpcb *)so->so_pcb; 597 if (inp == 0) { 598 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 599 return EINVAL; 600 } 601 error = sctp_inpcb_bind(so, addr, NULL, p); 602 return error; 603 } 604 605 void 606 sctp_close(struct socket *so) 607 { 608 struct sctp_inpcb *inp; 609 uint32_t flags; 610 611 inp = (struct sctp_inpcb *)so->so_pcb; 612 if (inp == 0) 613 return; 614 615 /* 616 * Inform all the lower layer assoc that we are done. 
617 */ 618 sctp_must_try_again: 619 flags = inp->sctp_flags; 620 #ifdef SCTP_LOG_CLOSING 621 sctp_log_closing(inp, NULL, 17); 622 #endif 623 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 624 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 625 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || 626 (so->so_rcv.sb_cc > 0)) { 627 #ifdef SCTP_LOG_CLOSING 628 sctp_log_closing(inp, NULL, 13); 629 #endif 630 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 631 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 632 } else { 633 #ifdef SCTP_LOG_CLOSING 634 sctp_log_closing(inp, NULL, 14); 635 #endif 636 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, 637 SCTP_CALLED_AFTER_CMPSET_OFCLOSE); 638 } 639 /* 640 * The socket is now detached, no matter what the state of 641 * the SCTP association. 642 */ 643 SOCK_LOCK(so); 644 SCTP_SB_CLEAR(so->so_snd); 645 /* 646 * same for the rcv ones, they are only here for the 647 * accounting/select. 648 */ 649 SCTP_SB_CLEAR(so->so_rcv); 650 651 /* Now null out the reference, we are completely detached. */ 652 so->so_pcb = NULL; 653 SOCK_UNLOCK(so); 654 } else { 655 flags = inp->sctp_flags; 656 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 657 goto sctp_must_try_again; 658 } 659 } 660 return; 661 } 662 663 664 int 665 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 666 struct mbuf *control, struct thread *p); 667 668 669 int 670 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 671 struct mbuf *control, struct thread *p) 672 { 673 struct sctp_inpcb *inp; 674 int error; 675 676 inp = (struct sctp_inpcb *)so->so_pcb; 677 if (inp == 0) { 678 if (control) { 679 sctp_m_freem(control); 680 control = NULL; 681 } 682 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 683 sctp_m_freem(m); 684 return EINVAL; 685 } 686 /* Got to have an to address if we are NOT a connected socket */ 687 if ((addr == NULL) && 688 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 689 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) 690 ) { 691 goto connected_type; 692 } else if (addr == NULL) { 693 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 694 error = EDESTADDRREQ; 695 sctp_m_freem(m); 696 if (control) { 697 sctp_m_freem(control); 698 control = NULL; 699 } 700 return (error); 701 } 702 #ifdef INET6 703 if (addr->sa_family != AF_INET) { 704 /* must be a v4 address! */ 705 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); 706 sctp_m_freem(m); 707 if (control) { 708 sctp_m_freem(control); 709 control = NULL; 710 } 711 error = EDESTADDRREQ; 712 return EDESTADDRREQ; 713 } 714 #endif /* INET6 */ 715 connected_type: 716 /* now what about control */ 717 if (control) { 718 if (inp->control) { 719 SCTP_PRINTF("huh? control set?\n"); 720 sctp_m_freem(inp->control); 721 inp->control = NULL; 722 } 723 inp->control = control; 724 } 725 /* Place the data */ 726 if (inp->pkt) { 727 SCTP_BUF_NEXT(inp->pkt_last) = m; 728 inp->pkt_last = m; 729 } else { 730 inp->pkt_last = inp->pkt = m; 731 } 732 if ( 733 /* FreeBSD uses a flag passed */ 734 ((flags & PRUS_MORETOCOME) == 0) 735 ) { 736 /* 737 * note with the current version this code will only be used 738 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for 739 * re-defining sosend to use the sctp_sosend. One can 740 * optionally switch back to this code (by changing back the 741 * definitions) but this is not advisable. 
This code is used 742 * by FreeBSD when sending a file with sendfile() though. 743 */ 744 int ret; 745 746 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); 747 inp->pkt = NULL; 748 inp->control = NULL; 749 return (ret); 750 } else { 751 return (0); 752 } 753 } 754 755 int 756 sctp_disconnect(struct socket *so) 757 { 758 struct sctp_inpcb *inp; 759 760 inp = (struct sctp_inpcb *)so->so_pcb; 761 if (inp == NULL) { 762 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 763 return (ENOTCONN); 764 } 765 SCTP_INP_RLOCK(inp); 766 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 767 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 768 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 769 /* No connection */ 770 SCTP_INP_RUNLOCK(inp); 771 return (0); 772 } else { 773 struct sctp_association *asoc; 774 struct sctp_tcb *stcb; 775 776 stcb = LIST_FIRST(&inp->sctp_asoc_list); 777 if (stcb == NULL) { 778 SCTP_INP_RUNLOCK(inp); 779 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 780 return (EINVAL); 781 } 782 SCTP_TCB_LOCK(stcb); 783 asoc = &stcb->asoc; 784 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 785 /* We are about to be freed, out of here */ 786 SCTP_TCB_UNLOCK(stcb); 787 SCTP_INP_RUNLOCK(inp); 788 return (0); 789 } 790 if (((so->so_options & SO_LINGER) && 791 (so->so_linger == 0)) || 792 (so->so_rcv.sb_cc > 0)) { 793 if (SCTP_GET_STATE(asoc) != 794 SCTP_STATE_COOKIE_WAIT) { 795 /* Left with Data unread */ 796 struct mbuf *err; 797 798 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 799 if (err) { 800 /* 801 * Fill in the user 802 * initiated abort 803 */ 804 struct sctp_paramhdr *ph; 805 806 ph = mtod(err, struct sctp_paramhdr *); 807 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr); 808 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 809 ph->param_length = htons(SCTP_BUF_LEN(err)); 810 } 811 #if defined(SCTP_PANIC_ON_ABORT) 812 panic("disconnect does an abort"); 813 #endif 814 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED); 815 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 816 } 817 SCTP_INP_RUNLOCK(inp); 818 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 819 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 820 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 821 } 822 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); 823 /* No unlock tcb assoc is gone */ 824 return (0); 825 } 826 if (TAILQ_EMPTY(&asoc->send_queue) && 827 TAILQ_EMPTY(&asoc->sent_queue) && 828 (asoc->stream_queue_cnt == 0)) { 829 /* there is nothing queued to send, so done */ 830 if (asoc->locked_on_sending) { 831 goto abort_anyway; 832 } 833 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 834 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 835 /* only send SHUTDOWN 1st time thru */ 836 sctp_stop_timers_for_shutdown(stcb); 837 sctp_send_shutdown(stcb, 838 stcb->asoc.primary_destination); 839 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 840 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 841 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 842 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 843 } 844 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 845 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 846 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 847 stcb->sctp_ep, stcb, 848 asoc->primary_destination); 849 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 850 stcb->sctp_ep, stcb, 851 asoc->primary_destination); 852 } 853 } 
else { 854 /* 855 * we still got (or just got) data to send, 856 * so set SHUTDOWN_PENDING 857 */ 858 /* 859 * XXX sockets draft says that SCTP_EOF 860 * should be sent with no data. currently, 861 * we will allow user data to be sent first 862 * and move to SHUTDOWN-PENDING 863 */ 864 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 865 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 866 asoc->primary_destination); 867 if (asoc->locked_on_sending) { 868 /* Locked to send out the data */ 869 struct sctp_stream_queue_pending *sp; 870 871 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 872 if (sp == NULL) { 873 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 874 asoc->locked_on_sending->stream_no); 875 } else { 876 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 877 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 878 } 879 } 880 if (TAILQ_EMPTY(&asoc->send_queue) && 881 TAILQ_EMPTY(&asoc->sent_queue) && 882 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 883 struct mbuf *op_err; 884 885 abort_anyway: 886 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 887 0, M_DONTWAIT, 1, MT_DATA); 888 if (op_err) { 889 /* 890 * Fill in the user 891 * initiated abort 892 */ 893 struct sctp_paramhdr *ph; 894 uint32_t *ippp; 895 896 SCTP_BUF_LEN(op_err) = 897 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); 898 ph = mtod(op_err, 899 struct sctp_paramhdr *); 900 ph->param_type = htons( 901 SCTP_CAUSE_USER_INITIATED_ABT); 902 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 903 ippp = (uint32_t *) (ph + 1); 904 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4); 905 } 906 #if defined(SCTP_PANIC_ON_ABORT) 907 panic("disconnect does an abort"); 908 #endif 909 910 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; 911 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 912 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 913 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 914 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 915 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 916 } 917 SCTP_INP_RUNLOCK(inp); 918 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); 919 return (0); 920 } else { 921 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 922 } 923 } 924 soisdisconnecting(so); 925 SCTP_TCB_UNLOCK(stcb); 926 SCTP_INP_RUNLOCK(inp); 927 return (0); 928 } 929 /* not reached */ 930 } else { 931 /* UDP model does not support this */ 932 SCTP_INP_RUNLOCK(inp); 933 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 934 return EOPNOTSUPP; 935 } 936 } 937 938 int 939 sctp_flush(struct socket *so, int how) 940 { 941 /* 942 * We will just clear out the values and let subsequent close clear 943 * out the data, if any. Note if the user did a shutdown(SHUT_RD) 944 * they will not be able to read the data, the socket will block 945 * that from happening. 
946 */ 947 if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) { 948 /* 949 * First make sure the sb will be happy, we don't use these 950 * except maybe the count 951 */ 952 so->so_rcv.sb_cc = 0; 953 so->so_rcv.sb_mbcnt = 0; 954 so->so_rcv.sb_mb = NULL; 955 } 956 if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) { 957 /* 958 * First make sure the sb will be happy, we don't use these 959 * except maybe the count 960 */ 961 so->so_snd.sb_cc = 0; 962 so->so_snd.sb_mbcnt = 0; 963 so->so_snd.sb_mb = NULL; 964 965 } 966 return (0); 967 } 968 969 int 970 sctp_shutdown(struct socket *so) 971 { 972 struct sctp_inpcb *inp; 973 974 inp = (struct sctp_inpcb *)so->so_pcb; 975 if (inp == 0) { 976 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 977 return EINVAL; 978 } 979 SCTP_INP_RLOCK(inp); 980 /* For UDP model this is a invalid call */ 981 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 982 /* Restore the flags that the soshutdown took away. */ 983 SOCKBUF_LOCK(&so->so_rcv); 984 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 985 SOCKBUF_UNLOCK(&so->so_rcv); 986 /* This proc will wakeup for read and do nothing (I hope) */ 987 SCTP_INP_RUNLOCK(inp); 988 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 989 return (EOPNOTSUPP); 990 } 991 /* 992 * Ok if we reach here its the TCP model and it is either a SHUT_WR 993 * or SHUT_RDWR. This means we put the shutdown flag against it. 994 */ 995 { 996 struct sctp_tcb *stcb; 997 struct sctp_association *asoc; 998 999 if ((so->so_state & 1000 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { 1001 SCTP_INP_RUNLOCK(inp); 1002 return (ENOTCONN); 1003 } 1004 socantsendmore(so); 1005 1006 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1007 if (stcb == NULL) { 1008 /* 1009 * Ok we hit the case that the shutdown call was 1010 * made after an abort or something. Nothing to do 1011 * now. 1012 */ 1013 SCTP_INP_RUNLOCK(inp); 1014 return (0); 1015 } 1016 SCTP_TCB_LOCK(stcb); 1017 asoc = &stcb->asoc; 1018 if (TAILQ_EMPTY(&asoc->send_queue) && 1019 TAILQ_EMPTY(&asoc->sent_queue) && 1020 (asoc->stream_queue_cnt == 0)) { 1021 if (asoc->locked_on_sending) { 1022 goto abort_anyway; 1023 } 1024 /* there is nothing queued to send, so I'm done... 
*/ 1025 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1026 /* only send SHUTDOWN the first time through */ 1027 sctp_stop_timers_for_shutdown(stcb); 1028 sctp_send_shutdown(stcb, 1029 stcb->asoc.primary_destination); 1030 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 1031 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1032 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1033 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1034 } 1035 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1036 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1037 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1038 stcb->sctp_ep, stcb, 1039 asoc->primary_destination); 1040 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1041 stcb->sctp_ep, stcb, 1042 asoc->primary_destination); 1043 } 1044 } else { 1045 /* 1046 * we still got (or just got) data to send, so set 1047 * SHUTDOWN_PENDING 1048 */ 1049 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1050 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1051 asoc->primary_destination); 1052 1053 if (asoc->locked_on_sending) { 1054 /* Locked to send out the data */ 1055 struct sctp_stream_queue_pending *sp; 1056 1057 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1058 if (sp == NULL) { 1059 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n", 1060 asoc->locked_on_sending->stream_no); 1061 } else { 1062 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 1063 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1064 } 1065 } 1066 } 1067 if (TAILQ_EMPTY(&asoc->send_queue) && 1068 TAILQ_EMPTY(&asoc->sent_queue) && 1069 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1070 struct mbuf *op_err; 1071 1072 abort_anyway: 1073 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1074 0, M_DONTWAIT, 1, MT_DATA); 1075 if (op_err) { 1076 /* Fill in the user initiated abort */ 1077 struct sctp_paramhdr *ph; 1078 uint32_t *ippp; 1079 1080 SCTP_BUF_LEN(op_err) = 1081 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 1082 ph = mtod(op_err, 1083 struct sctp_paramhdr *); 1084 ph->param_type = htons( 1085 SCTP_CAUSE_USER_INITIATED_ABT); 1086 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1087 ippp = (uint32_t *) (ph + 1); 1088 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1089 } 1090 #if defined(SCTP_PANIC_ON_ABORT) 1091 panic("shutdown does an abort"); 1092 #endif 1093 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; 1094 sctp_abort_an_association(stcb->sctp_ep, stcb, 1095 SCTP_RESPONSE_TO_USER_REQ, 1096 op_err, SCTP_SO_LOCKED); 1097 goto skip_unlock; 1098 } else { 1099 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 1100 } 1101 } 1102 SCTP_TCB_UNLOCK(stcb); 1103 } 1104 skip_unlock: 1105 SCTP_INP_RUNLOCK(inp); 1106 return 0; 1107 } 1108 1109 /* 1110 * copies a "user" presentable address and removes embedded scope, etc. 
1111 * returns 0 on success, 1 on error 1112 */ 1113 static uint32_t 1114 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 1115 { 1116 #ifdef INET6 1117 struct sockaddr_in6 lsa6; 1118 1119 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 1120 &lsa6); 1121 #endif 1122 memcpy(ss, sa, sa->sa_len); 1123 return (0); 1124 } 1125 1126 1127 1128 /* 1129 * NOTE: assumes addr lock is held 1130 */ 1131 static size_t 1132 sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, 1133 struct sctp_tcb *stcb, 1134 size_t limit, 1135 struct sockaddr_storage *sas, 1136 uint32_t vrf_id) 1137 { 1138 struct sctp_ifn *sctp_ifn; 1139 struct sctp_ifa *sctp_ifa; 1140 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1141 size_t actual; 1142 int ipv4_addr_legal, ipv6_addr_legal; 1143 struct sctp_vrf *vrf; 1144 1145 actual = 0; 1146 if (limit <= 0) 1147 return (actual); 1148 1149 if (stcb) { 1150 /* Turn on all the appropriate scope */ 1151 loopback_scope = stcb->asoc.loopback_scope; 1152 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1153 local_scope = stcb->asoc.local_scope; 1154 site_scope = stcb->asoc.site_scope; 1155 } else { 1156 /* Turn on ALL scope, since we look at the EP */ 1157 loopback_scope = ipv4_local_scope = local_scope = 1158 site_scope = 1; 1159 } 1160 ipv4_addr_legal = ipv6_addr_legal = 0; 1161 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1162 ipv6_addr_legal = 1; 1163 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1164 ipv4_addr_legal = 1; 1165 } 1166 } else { 1167 ipv4_addr_legal = 1; 1168 } 1169 vrf = sctp_find_vrf(vrf_id); 1170 if (vrf == NULL) { 1171 return (0); 1172 } 1173 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1174 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1175 if ((loopback_scope == 0) && 1176 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1177 /* Skip loopback if loopback_scope not set */ 1178 continue; 1179 } 1180 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1181 if (stcb) { 1182 /* 1183 * For the BOUND-ALL case, the list 1184 * associated with a TCB is Always 1185 * considered a reverse list.. i.e. 1186 * it lists addresses that are NOT 1187 * part of the association. If this 1188 * is one of those we must skip it. 
1189 */ 1190 if (sctp_is_addr_restricted(stcb, 1191 sctp_ifa)) { 1192 continue; 1193 } 1194 } 1195 switch (sctp_ifa->address.sa.sa_family) { 1196 case AF_INET: 1197 if (ipv4_addr_legal) { 1198 struct sockaddr_in *sin; 1199 1200 sin = (struct sockaddr_in *)&sctp_ifa->address.sa; 1201 if (sin->sin_addr.s_addr == 0) { 1202 /* 1203 * we skip 1204 * unspecifed 1205 * addresses 1206 */ 1207 continue; 1208 } 1209 if ((ipv4_local_scope == 0) && 1210 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1211 continue; 1212 } 1213 #ifdef INET6 1214 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 1215 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); 1216 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1217 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); 1218 actual += sizeof(struct sockaddr_in6); 1219 } else { 1220 #endif 1221 memcpy(sas, sin, sizeof(*sin)); 1222 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; 1223 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); 1224 actual += sizeof(*sin); 1225 #ifdef INET6 1226 } 1227 #endif 1228 if (actual >= limit) { 1229 return (actual); 1230 } 1231 } else { 1232 continue; 1233 } 1234 break; 1235 #ifdef INET6 1236 case AF_INET6: 1237 if (ipv6_addr_legal) { 1238 struct sockaddr_in6 *sin6; 1239 1240 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 1241 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1242 /* 1243 * we skip 1244 * unspecifed 1245 * addresses 1246 */ 1247 continue; 1248 } 1249 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1250 if (local_scope == 0) 1251 continue; 1252 if (sin6->sin6_scope_id == 0) { 1253 if (sa6_recoverscope(sin6) != 0) 1254 /* 1255 * 1256 * bad 1257 * 1258 * li 1259 * nk 1260 * 1261 * loc 1262 * al 1263 * 1264 * add 1265 * re 1266 * ss 1267 * */ 1268 continue; 1269 } 1270 } 1271 if ((site_scope == 0) && 1272 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1273 continue; 1274 } 1275 memcpy(sas, sin6, sizeof(*sin6)); 1276 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1277 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); 1278 actual += sizeof(*sin6); 1279 if (actual >= limit) { 1280 return (actual); 1281 } 1282 } else { 1283 continue; 1284 } 1285 break; 1286 #endif 1287 default: 1288 /* TSNH */ 1289 break; 1290 } 1291 } 1292 } 1293 } else { 1294 struct sctp_laddr *laddr; 1295 1296 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1297 if (stcb) { 1298 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 1299 continue; 1300 } 1301 } 1302 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) 1303 continue; 1304 1305 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1306 sas = (struct sockaddr_storage *)((caddr_t)sas + 1307 laddr->ifa->address.sa.sa_len); 1308 actual += laddr->ifa->address.sa.sa_len; 1309 if (actual >= limit) { 1310 return (actual); 1311 } 1312 } 1313 } 1314 return (actual); 1315 } 1316 1317 static size_t 1318 sctp_fill_up_addresses(struct sctp_inpcb *inp, 1319 struct sctp_tcb *stcb, 1320 size_t limit, 1321 struct sockaddr_storage *sas) 1322 { 1323 size_t size = 0; 1324 1325 SCTP_IPI_ADDR_RLOCK(); 1326 /* fill up addresses for the endpoint's default vrf */ 1327 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, 1328 inp->def_vrf_id); 1329 SCTP_IPI_ADDR_RUNLOCK(); 1330 return (size); 1331 } 1332 1333 /* 1334 * NOTE: assumes addr lock is held 1335 */ 1336 static int 1337 sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) 1338 { 1339 int cnt = 0; 1340 struct sctp_vrf *vrf = 
NULL; 1341 1342 /* 1343 * In both sub-set bound an bound_all cases we return the MAXIMUM 1344 * number of addresses that you COULD get. In reality the sub-set 1345 * bound may have an exclusion list for a given TCB OR in the 1346 * bound-all case a TCB may NOT include the loopback or other 1347 * addresses as well. 1348 */ 1349 vrf = sctp_find_vrf(vrf_id); 1350 if (vrf == NULL) { 1351 return (0); 1352 } 1353 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1354 struct sctp_ifn *sctp_ifn; 1355 struct sctp_ifa *sctp_ifa; 1356 1357 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1358 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1359 /* Count them if they are the right type */ 1360 if (sctp_ifa->address.sa.sa_family == AF_INET) { 1361 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1362 cnt += sizeof(struct sockaddr_in6); 1363 else 1364 cnt += sizeof(struct sockaddr_in); 1365 1366 } else if (sctp_ifa->address.sa.sa_family == AF_INET6) 1367 cnt += sizeof(struct sockaddr_in6); 1368 } 1369 } 1370 } else { 1371 struct sctp_laddr *laddr; 1372 1373 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1374 if (laddr->ifa->address.sa.sa_family == AF_INET) { 1375 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) 1376 cnt += sizeof(struct sockaddr_in6); 1377 else 1378 cnt += sizeof(struct sockaddr_in); 1379 1380 } else if (laddr->ifa->address.sa.sa_family == AF_INET6) 1381 cnt += sizeof(struct sockaddr_in6); 1382 } 1383 } 1384 return (cnt); 1385 } 1386 1387 static int 1388 sctp_count_max_addresses(struct sctp_inpcb *inp) 1389 { 1390 int cnt = 0; 1391 1392 SCTP_IPI_ADDR_RLOCK(); 1393 /* count addresses for the endpoint's default VRF */ 1394 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id); 1395 SCTP_IPI_ADDR_RUNLOCK(); 1396 return (cnt); 1397 } 1398 1399 static int 1400 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, 1401 size_t optsize, void *p, int delay) 1402 { 1403 int error = 0; 1404 int creat_lock_on = 0; 1405 struct sctp_tcb *stcb = NULL; 1406 struct sockaddr *sa; 1407 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; 1408 int added = 0; 1409 uint32_t vrf_id; 1410 int bad_addresses = 0; 1411 sctp_assoc_t *a_id; 1412 1413 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n"); 1414 1415 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1416 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 1417 /* We are already connected AND the TCP model */ 1418 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 1419 return (EADDRINUSE); 1420 } 1421 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 1422 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { 1423 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1424 return (EINVAL); 1425 } 1426 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 1427 SCTP_INP_RLOCK(inp); 1428 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1429 SCTP_INP_RUNLOCK(inp); 1430 } 1431 if (stcb) { 1432 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1433 return (EALREADY); 1434 } 1435 SCTP_INP_INCR_REF(inp); 1436 SCTP_ASOC_CREATE_LOCK(inp); 1437 creat_lock_on = 1; 1438 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1439 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1440 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 1441 error = EFAULT; 1442 goto out_now; 1443 } 1444 totaddrp = (int *)optval; 1445 totaddr = *totaddrp; 1446 sa = (struct sockaddr *)(totaddrp + 1); 1447 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, 
&num_v6, &error, (optsize - sizeof(int)), &bad_addresses); 1448 if ((stcb != NULL) || bad_addresses) { 1449 /* Already have or am bring up an association */ 1450 SCTP_ASOC_CREATE_UNLOCK(inp); 1451 creat_lock_on = 0; 1452 if (stcb) 1453 SCTP_TCB_UNLOCK(stcb); 1454 if (bad_addresses == 0) { 1455 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 1456 error = EALREADY; 1457 } 1458 goto out_now; 1459 } 1460 #ifdef INET6 1461 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1462 (num_v6 > 0)) { 1463 error = EINVAL; 1464 goto out_now; 1465 } 1466 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1467 (num_v4 > 0)) { 1468 struct in6pcb *inp6; 1469 1470 inp6 = (struct in6pcb *)inp; 1471 if (SCTP_IPV6_V6ONLY(inp6)) { 1472 /* 1473 * if IPV6_V6ONLY flag, ignore connections destined 1474 * to a v4 addr or v4-mapped addr 1475 */ 1476 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1477 error = EINVAL; 1478 goto out_now; 1479 } 1480 } 1481 #endif /* INET6 */ 1482 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 1483 SCTP_PCB_FLAGS_UNBOUND) { 1484 /* Bind a ephemeral port */ 1485 error = sctp_inpcb_bind(so, NULL, NULL, p); 1486 if (error) { 1487 goto out_now; 1488 } 1489 } 1490 /* FIX ME: do we want to pass in a vrf on the connect call? */ 1491 vrf_id = inp->def_vrf_id; 1492 1493 1494 /* We are GOOD to go */ 1495 stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id, 1496 (struct thread *)p 1497 ); 1498 if (stcb == NULL) { 1499 /* Gak! no memory */ 1500 goto out_now; 1501 } 1502 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 1503 /* move to second address */ 1504 if (sa->sa_family == AF_INET) 1505 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); 1506 else 1507 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); 1508 1509 error = 0; 1510 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error); 1511 /* Fill in the return id */ 1512 if (error) { 1513 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1514 goto out_now; 1515 } 1516 a_id = (sctp_assoc_t *) optval; 1517 *a_id = sctp_get_associd(stcb); 1518 1519 /* initialize authentication parameters for the assoc */ 1520 sctp_initialize_auth_params(inp, stcb); 1521 1522 if (delay) { 1523 /* doing delayed connection */ 1524 stcb->asoc.delayed_connection = 1; 1525 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); 1526 } else { 1527 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1528 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 1529 } 1530 SCTP_TCB_UNLOCK(stcb); 1531 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1532 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1533 /* Set the connected flag so we can queue data */ 1534 soisconnecting(so); 1535 } 1536 out_now: 1537 if (creat_lock_on) { 1538 SCTP_ASOC_CREATE_UNLOCK(inp); 1539 } 1540 SCTP_INP_DECR_REF(inp); 1541 return error; 1542 } 1543 1544 #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \ 1545 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\ 1546 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \ 1547 SCTP_INP_RLOCK(inp); \ 1548 stcb = LIST_FIRST(&inp->sctp_asoc_list); \ 1549 if (stcb) { \ 1550 SCTP_TCB_LOCK(stcb); \ 1551 } \ 1552 SCTP_INP_RUNLOCK(inp); \ 1553 } else if (assoc_id != 0) { \ 1554 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ 1555 if (stcb == NULL) { \ 1556 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \ 1557 error = ENOENT; \ 1558 break; \ 1559 } \ 1560 } else { 
\ 1561 stcb = NULL; \ 1562 } \ 1563 } 1564 1565 1566 #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ 1567 if (size < sizeof(type)) { \ 1568 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \ 1569 error = EINVAL; \ 1570 break; \ 1571 } else { \ 1572 destp = (type *)srcp; \ 1573 } \ 1574 } 1575 1576 static int 1577 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, 1578 void *p) 1579 { 1580 struct sctp_inpcb *inp = NULL; 1581 int error, val = 0; 1582 struct sctp_tcb *stcb = NULL; 1583 1584 if (optval == NULL) { 1585 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1586 return (EINVAL); 1587 } 1588 inp = (struct sctp_inpcb *)so->so_pcb; 1589 if (inp == 0) { 1590 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1591 return EINVAL; 1592 } 1593 error = 0; 1594 1595 switch (optname) { 1596 case SCTP_NODELAY: 1597 case SCTP_AUTOCLOSE: 1598 case SCTP_EXPLICIT_EOR: 1599 case SCTP_AUTO_ASCONF: 1600 case SCTP_DISABLE_FRAGMENTS: 1601 case SCTP_I_WANT_MAPPED_V4_ADDR: 1602 case SCTP_USE_EXT_RCVINFO: 1603 SCTP_INP_RLOCK(inp); 1604 switch (optname) { 1605 case SCTP_DISABLE_FRAGMENTS: 1606 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1607 break; 1608 case SCTP_I_WANT_MAPPED_V4_ADDR: 1609 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1610 break; 1611 case SCTP_AUTO_ASCONF: 1612 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1613 /* only valid for bound all sockets */ 1614 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1615 } else { 1616 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1617 error = EINVAL; 1618 goto flags_out; 1619 } 1620 break; 1621 case SCTP_EXPLICIT_EOR: 1622 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1623 break; 1624 case SCTP_NODELAY: 1625 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1626 break; 1627 case SCTP_USE_EXT_RCVINFO: 1628 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1629 break; 1630 case SCTP_AUTOCLOSE: 1631 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1632 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1633 else 1634 val = 0; 1635 break; 1636 1637 default: 1638 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1639 error = ENOPROTOOPT; 1640 } /* end switch (sopt->sopt_name) */ 1641 if (optname != SCTP_AUTOCLOSE) { 1642 /* make it an "on/off" value */ 1643 val = (val != 0); 1644 } 1645 if (*optsize < sizeof(val)) { 1646 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 1647 error = EINVAL; 1648 } 1649 flags_out: 1650 SCTP_INP_RUNLOCK(inp); 1651 if (error == 0) { 1652 /* return the option value */ 1653 *(int *)optval = val; 1654 *optsize = sizeof(val); 1655 } 1656 break; 1657 case SCTP_GET_PACKET_LOG: 1658 { 1659 #ifdef SCTP_PACKET_LOGGING 1660 uint8_t *target; 1661 int ret; 1662 1663 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); 1664 ret = sctp_copy_out_packet_log(target, (int)*optsize); 1665 *optsize = ret; 1666 #else 1667 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1668 error = EOPNOTSUPP; 1669 #endif 1670 break; 1671 } 1672 case SCTP_REUSE_PORT: 1673 { 1674 uint32_t *value; 1675 1676 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 1677 /* Can't do this for a 1-m socket */ 1678 error = EINVAL; 1679 break; 1680 } 1681 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1682 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 1683 *optsize = sizeof(uint32_t); 1684 } 1685 
break; 1686 case SCTP_PARTIAL_DELIVERY_POINT: 1687 { 1688 uint32_t *value; 1689 1690 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1691 *value = inp->partial_delivery_point; 1692 *optsize = sizeof(uint32_t); 1693 } 1694 break; 1695 case SCTP_FRAGMENT_INTERLEAVE: 1696 { 1697 uint32_t *value; 1698 1699 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1700 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { 1701 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { 1702 *value = SCTP_FRAG_LEVEL_2; 1703 } else { 1704 *value = SCTP_FRAG_LEVEL_1; 1705 } 1706 } else { 1707 *value = SCTP_FRAG_LEVEL_0; 1708 } 1709 *optsize = sizeof(uint32_t); 1710 } 1711 break; 1712 case SCTP_CMT_ON_OFF: 1713 { 1714 struct sctp_assoc_value *av; 1715 1716 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1717 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 1718 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1719 if (stcb) { 1720 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 1721 SCTP_TCB_UNLOCK(stcb); 1722 1723 } else { 1724 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1725 error = ENOTCONN; 1726 } 1727 } else { 1728 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1729 error = ENOPROTOOPT; 1730 } 1731 *optsize = sizeof(*av); 1732 } 1733 break; 1734 /* EY - set socket option for nr_sacks */ 1735 case SCTP_NR_SACK_ON_OFF: 1736 { 1737 struct sctp_assoc_value *av; 1738 1739 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1740 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) { 1741 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1742 if (stcb) { 1743 av->assoc_value = stcb->asoc.sctp_nr_sack_on_off; 1744 SCTP_TCB_UNLOCK(stcb); 1745 1746 } else { 1747 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1748 error = ENOTCONN; 1749 } 1750 } else { 1751 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 1752 error = ENOPROTOOPT; 1753 } 1754 *optsize = sizeof(*av); 1755 } 1756 break; 1757 /* JRS - Get socket option for pluggable congestion control */ 1758 case SCTP_PLUGGABLE_CC: 1759 { 1760 struct sctp_assoc_value *av; 1761 1762 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1763 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1764 if (stcb) { 1765 av->assoc_value = stcb->asoc.congestion_control_module; 1766 SCTP_TCB_UNLOCK(stcb); 1767 } else { 1768 av->assoc_value = inp->sctp_ep.sctp_default_cc_module; 1769 } 1770 *optsize = sizeof(*av); 1771 } 1772 break; 1773 case SCTP_GET_ADDR_LEN: 1774 { 1775 struct sctp_assoc_value *av; 1776 1777 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1778 error = EINVAL; 1779 #ifdef INET 1780 if (av->assoc_value == AF_INET) { 1781 av->assoc_value = sizeof(struct sockaddr_in); 1782 error = 0; 1783 } 1784 #endif 1785 #ifdef INET6 1786 if (av->assoc_value == AF_INET6) { 1787 av->assoc_value = sizeof(struct sockaddr_in6); 1788 error = 0; 1789 } 1790 #endif 1791 if (error) { 1792 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1793 } 1794 *optsize = sizeof(*av); 1795 } 1796 break; 1797 case SCTP_GET_ASSOC_NUMBER: 1798 { 1799 uint32_t *value, cnt; 1800 1801 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 1802 cnt = 0; 1803 SCTP_INP_RLOCK(inp); 1804 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1805 cnt++; 1806 } 1807 SCTP_INP_RUNLOCK(inp); 1808 *value = cnt; 1809 *optsize = sizeof(uint32_t); 1810 } 1811 break; 1812 1813 case SCTP_GET_ASSOC_ID_LIST: 1814 { 1815 struct sctp_assoc_ids 
*ids; 1816 unsigned int at, limit; 1817 1818 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 1819 at = 0; 1820 limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t); 1821 SCTP_INP_RLOCK(inp); 1822 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1823 if (at < limit) { 1824 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); 1825 } else { 1826 error = EINVAL; 1827 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1828 break; 1829 } 1830 } 1831 SCTP_INP_RUNLOCK(inp); 1832 ids->gaids_number_of_ids = at; 1833 *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t)); 1834 } 1835 break; 1836 case SCTP_CONTEXT: 1837 { 1838 struct sctp_assoc_value *av; 1839 1840 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1841 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1842 1843 if (stcb) { 1844 av->assoc_value = stcb->asoc.context; 1845 SCTP_TCB_UNLOCK(stcb); 1846 } else { 1847 SCTP_INP_RLOCK(inp); 1848 av->assoc_value = inp->sctp_context; 1849 SCTP_INP_RUNLOCK(inp); 1850 } 1851 *optsize = sizeof(*av); 1852 } 1853 break; 1854 case SCTP_VRF_ID: 1855 { 1856 uint32_t *default_vrfid; 1857 1858 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize); 1859 *default_vrfid = inp->def_vrf_id; 1860 break; 1861 } 1862 case SCTP_GET_ASOC_VRF: 1863 { 1864 struct sctp_assoc_value *id; 1865 1866 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); 1867 SCTP_FIND_STCB(inp, stcb, id->assoc_id); 1868 if (stcb == NULL) { 1869 error = EINVAL; 1870 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 1871 break; 1872 } 1873 id->assoc_value = stcb->asoc.vrf_id; 1874 break; 1875 } 1876 case SCTP_GET_VRF_IDS: 1877 { 1878 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 1879 error = EOPNOTSUPP; 1880 break; 1881 } 1882 case SCTP_GET_NONCE_VALUES: 1883 { 1884 struct sctp_get_nonce_values *gnv; 1885 1886 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 1887 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 1888 1889 if (stcb) { 1890 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 1891 gnv->gn_local_tag = stcb->asoc.my_vtag; 1892 SCTP_TCB_UNLOCK(stcb); 1893 } else { 1894 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1895 error = ENOTCONN; 1896 } 1897 *optsize = sizeof(*gnv); 1898 } 1899 break; 1900 case SCTP_DELAYED_SACK: 1901 { 1902 struct sctp_sack_info *sack; 1903 1904 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); 1905 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 1906 if (stcb) { 1907 sack->sack_delay = stcb->asoc.delayed_ack; 1908 sack->sack_freq = stcb->asoc.sack_freq; 1909 SCTP_TCB_UNLOCK(stcb); 1910 } else { 1911 SCTP_INP_RLOCK(inp); 1912 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1913 sack->sack_freq = inp->sctp_ep.sctp_sack_freq; 1914 SCTP_INP_RUNLOCK(inp); 1915 } 1916 *optsize = sizeof(*sack); 1917 } 1918 break; 1919 1920 case SCTP_GET_SNDBUF_USE: 1921 { 1922 struct sctp_sockstat *ss; 1923 1924 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 1925 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 1926 1927 if (stcb) { 1928 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 1929 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 1930 stcb->asoc.size_on_all_streams); 1931 SCTP_TCB_UNLOCK(stcb); 1932 } else { 1933 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 1934 error = ENOTCONN; 1935 } 1936 *optsize = sizeof(struct sctp_sockstat); 1937 } 1938 
break; 1939 case SCTP_MAX_BURST: 1940 { 1941 uint8_t *value; 1942 1943 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); 1944 1945 SCTP_INP_RLOCK(inp); 1946 *value = inp->sctp_ep.max_burst; 1947 SCTP_INP_RUNLOCK(inp); 1948 *optsize = sizeof(uint8_t); 1949 } 1950 break; 1951 case SCTP_MAXSEG: 1952 { 1953 struct sctp_assoc_value *av; 1954 int ovh; 1955 1956 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 1957 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 1958 1959 if (stcb) { 1960 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); 1961 SCTP_TCB_UNLOCK(stcb); 1962 } else { 1963 SCTP_INP_RLOCK(inp); 1964 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1965 ovh = SCTP_MED_OVERHEAD; 1966 } else { 1967 ovh = SCTP_MED_V4_OVERHEAD; 1968 } 1969 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) 1970 av->assoc_value = 0; 1971 else 1972 av->assoc_value = inp->sctp_frag_point - ovh; 1973 SCTP_INP_RUNLOCK(inp); 1974 } 1975 *optsize = sizeof(struct sctp_assoc_value); 1976 } 1977 break; 1978 case SCTP_GET_STAT_LOG: 1979 error = sctp_fill_stat_log(optval, optsize); 1980 break; 1981 case SCTP_EVENTS: 1982 { 1983 struct sctp_event_subscribe *events; 1984 1985 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 1986 memset(events, 0, sizeof(*events)); 1987 SCTP_INP_RLOCK(inp); 1988 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 1989 events->sctp_data_io_event = 1; 1990 1991 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 1992 events->sctp_association_event = 1; 1993 1994 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 1995 events->sctp_address_event = 1; 1996 1997 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 1998 events->sctp_send_failure_event = 1; 1999 2000 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 2001 events->sctp_peer_error_event = 1; 2002 2003 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 2004 events->sctp_shutdown_event = 1; 2005 2006 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 2007 events->sctp_partial_delivery_event = 1; 2008 2009 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2010 events->sctp_adaptation_layer_event = 1; 2011 2012 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 2013 events->sctp_authentication_event = 1; 2014 2015 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT)) 2016 events->sctp_sender_dry_event = 1; 2017 2018 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2019 events->sctp_stream_reset_event = 1; 2020 SCTP_INP_RUNLOCK(inp); 2021 *optsize = sizeof(struct sctp_event_subscribe); 2022 } 2023 break; 2024 2025 case SCTP_ADAPTATION_LAYER: 2026 { 2027 uint32_t *value; 2028 2029 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2030 2031 SCTP_INP_RLOCK(inp); 2032 *value = inp->sctp_ep.adaptation_layer_indicator; 2033 SCTP_INP_RUNLOCK(inp); 2034 *optsize = sizeof(uint32_t); 2035 } 2036 break; 2037 case SCTP_SET_INITIAL_DBG_SEQ: 2038 { 2039 uint32_t *value; 2040 2041 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2042 SCTP_INP_RLOCK(inp); 2043 *value = inp->sctp_ep.initial_sequence_debug; 2044 SCTP_INP_RUNLOCK(inp); 2045 *optsize = sizeof(uint32_t); 2046 } 2047 break; 2048 case SCTP_GET_LOCAL_ADDR_SIZE: 2049 { 2050 uint32_t *value; 2051 2052 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2053 SCTP_INP_RLOCK(inp); 2054 *value = sctp_count_max_addresses(inp); 2055 SCTP_INP_RUNLOCK(inp); 2056 *optsize = sizeof(uint32_t); 2057 } 2058 break; 2059 case SCTP_GET_REMOTE_ADDR_SIZE: 2060 { 2061 
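			/*
			 * The caller hands in a uint32_t holding an
			 * sctp_assoc_t and gets back the number of sockaddr
			 * bytes a following SCTP_GET_PEER_ADDRESSES call would
			 * need (see the FIXME below about the argument type).
			 * Hedged userland sketch; "sd", "assoc_id" and the
			 * IPPROTO_SCTP level are assumptions:
			 *
			 *	uint32_t sz = (uint32_t)assoc_id;
			 *	socklen_t len = sizeof(sz);
			 *
			 *	if (getsockopt(sd, IPPROTO_SCTP,
			 *	    SCTP_GET_REMOTE_ADDR_SIZE, &sz, &len) == 0)
			 *		buf = malloc(sizeof(struct sctp_getaddresses) + sz);
			 */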
			uint32_t *value;
			size_t size;
			struct sctp_nets *net;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
			/* FIXME MT: change to sctp_assoc_value? */
			SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);

			if (stcb) {
				size = 0;
				/* Count the sizes */
				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
						size += sizeof(struct sockaddr_in6);
					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
						size += sizeof(struct sockaddr_in);
					} else {
						/* huh */
						break;
					}
				}
				SCTP_TCB_UNLOCK(stcb);
				*value = (uint32_t) size;
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
				error = ENOTCONN;
			}
			*optsize = sizeof(uint32_t);
		}
		break;
	case SCTP_GET_PEER_ADDRESSES:
		/*
		 * Get the address information; an array is passed in and we
		 * pack it with the peer's addresses.
		 */
		{
			size_t cpsz, left;
			struct sockaddr_storage *sas;
			struct sctp_nets *net;
			struct sctp_getaddresses *saddr;

			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);

			if (stcb) {
				left = (*optsize) - sizeof(struct sctp_getaddresses);
				*optsize = sizeof(struct sctp_getaddresses);
				sas = (struct sockaddr_storage *)&saddr->addr[0];

				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
						cpsz = sizeof(struct sockaddr_in6);
					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
						cpsz = sizeof(struct sockaddr_in);
					} else {
						/* huh */
						break;
					}
					if (left < cpsz) {
						/* not enough room */
						break;
					}
#ifdef INET6
					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
						/* Must map the address */
						in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
						    (struct sockaddr_in6 *)sas);
					} else {
#endif
						memcpy(sas, &net->ro._l_addr, cpsz);
#ifdef INET6
					}
#endif
					((struct sockaddr_in *)sas)->sin_port = stcb->rport;

					sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
					left -= cpsz;
					*optsize += cpsz;
				}
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
				error = ENOENT;
			}
		}
		break;
	case SCTP_GET_LOCAL_ADDRESSES:
		{
			size_t limit, actual;
			struct sockaddr_storage *sas;
			struct sctp_getaddresses *saddr;

			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);

			sas = (struct sockaddr_storage *)&saddr->addr[0];
			limit = *optsize - sizeof(sctp_assoc_t);
			actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
			if (stcb) {
				SCTP_TCB_UNLOCK(stcb);
			}
			*optsize = sizeof(struct sockaddr_storage) + actual;
		}
		break;
	case SCTP_PEER_ADDR_PARAMS:
		{
			struct sctp_paddrparams *paddrp;
			struct sctp_nets *net;

			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);

			net = NULL;
			if (stcb) {
				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
			} else {
				/*
				 * We increment the refcount here since
				 * sctp_findassociation_ep_addr() will do a
				 * decrement if it finds the stcb, as long as
				 * the locked tcb (last argument) is NULL.
2186 */ 2187 SCTP_INP_INCR_REF(inp); 2188 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2189 if (stcb == NULL) { 2190 SCTP_INP_DECR_REF(inp); 2191 } 2192 } 2193 if (stcb && (net == NULL)) { 2194 struct sockaddr *sa; 2195 2196 sa = (struct sockaddr *)&paddrp->spp_address; 2197 if (sa->sa_family == AF_INET) { 2198 struct sockaddr_in *sin; 2199 2200 sin = (struct sockaddr_in *)sa; 2201 if (sin->sin_addr.s_addr) { 2202 error = EINVAL; 2203 SCTP_TCB_UNLOCK(stcb); 2204 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2205 break; 2206 } 2207 } else if (sa->sa_family == AF_INET6) { 2208 struct sockaddr_in6 *sin6; 2209 2210 sin6 = (struct sockaddr_in6 *)sa; 2211 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2212 error = EINVAL; 2213 SCTP_TCB_UNLOCK(stcb); 2214 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2215 break; 2216 } 2217 } else { 2218 error = EAFNOSUPPORT; 2219 SCTP_TCB_UNLOCK(stcb); 2220 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2221 break; 2222 } 2223 } 2224 if (stcb) { 2225 /* Applys to the specific association */ 2226 paddrp->spp_flags = 0; 2227 if (net) { 2228 int ovh; 2229 2230 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2231 ovh = SCTP_MED_OVERHEAD; 2232 } else { 2233 ovh = SCTP_MED_V4_OVERHEAD; 2234 } 2235 2236 2237 paddrp->spp_pathmaxrxt = net->failure_threshold; 2238 paddrp->spp_pathmtu = net->mtu - ovh; 2239 /* get flags for HB */ 2240 if (net->dest_state & SCTP_ADDR_NOHB) 2241 paddrp->spp_flags |= SPP_HB_DISABLE; 2242 else 2243 paddrp->spp_flags |= SPP_HB_ENABLE; 2244 /* get flags for PMTU */ 2245 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2246 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2247 } else { 2248 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2249 } 2250 #ifdef INET 2251 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2252 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2253 paddrp->spp_flags |= SPP_IPV4_TOS; 2254 } 2255 #endif 2256 #ifdef INET6 2257 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2258 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2259 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2260 } 2261 #endif 2262 } else { 2263 /* 2264 * No destination so return default 2265 * value 2266 */ 2267 int cnt = 0; 2268 2269 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2270 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2271 #ifdef INET 2272 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2273 paddrp->spp_flags |= SPP_IPV4_TOS; 2274 #endif 2275 #ifdef INET6 2276 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2277 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2278 #endif 2279 /* default settings should be these */ 2280 if (stcb->asoc.hb_is_disabled == 0) { 2281 paddrp->spp_flags |= SPP_HB_ENABLE; 2282 } else { 2283 paddrp->spp_flags |= SPP_HB_DISABLE; 2284 } 2285 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2286 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2287 cnt++; 2288 } 2289 } 2290 if (cnt) { 2291 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2292 } 2293 } 2294 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2295 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2296 SCTP_TCB_UNLOCK(stcb); 2297 } else { 2298 /* Use endpoint defaults */ 2299 SCTP_INP_RLOCK(inp); 2300 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2301 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2302 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2303 /* get 
inp's default */ 2304 #ifdef INET 2305 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2306 paddrp->spp_flags |= SPP_IPV4_TOS; 2307 #endif 2308 #ifdef INET6 2309 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2310 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2311 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2312 } 2313 #endif 2314 /* can't return this */ 2315 paddrp->spp_pathmtu = 0; 2316 2317 /* default behavior, no stcb */ 2318 paddrp->spp_flags = SPP_PMTUD_ENABLE; 2319 2320 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 2321 paddrp->spp_flags |= SPP_HB_ENABLE; 2322 } else { 2323 paddrp->spp_flags |= SPP_HB_DISABLE; 2324 } 2325 SCTP_INP_RUNLOCK(inp); 2326 } 2327 *optsize = sizeof(struct sctp_paddrparams); 2328 } 2329 break; 2330 case SCTP_GET_PEER_ADDR_INFO: 2331 { 2332 struct sctp_paddrinfo *paddri; 2333 struct sctp_nets *net; 2334 2335 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2336 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2337 2338 net = NULL; 2339 if (stcb) { 2340 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2341 } else { 2342 /* 2343 * We increment here since 2344 * sctp_findassociation_ep_addr() wil do a 2345 * decrement if it finds the stcb as long as 2346 * the locked tcb (last argument) is NOT a 2347 * TCB.. aka NULL. 2348 */ 2349 SCTP_INP_INCR_REF(inp); 2350 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2351 if (stcb == NULL) { 2352 SCTP_INP_DECR_REF(inp); 2353 } 2354 } 2355 2356 if ((stcb) && (net)) { 2357 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2358 paddri->spinfo_cwnd = net->cwnd; 2359 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2360 paddri->spinfo_rto = net->RTO; 2361 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2362 SCTP_TCB_UNLOCK(stcb); 2363 } else { 2364 if (stcb) { 2365 SCTP_TCB_UNLOCK(stcb); 2366 } 2367 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2368 error = ENOENT; 2369 } 2370 *optsize = sizeof(struct sctp_paddrinfo); 2371 } 2372 break; 2373 case SCTP_PCB_STATUS: 2374 { 2375 struct sctp_pcbinfo *spcb; 2376 2377 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2378 sctp_fill_pcbinfo(spcb); 2379 *optsize = sizeof(struct sctp_pcbinfo); 2380 } 2381 break; 2382 2383 case SCTP_STATUS: 2384 { 2385 struct sctp_nets *net; 2386 struct sctp_status *sstat; 2387 2388 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2389 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2390 2391 if (stcb == NULL) { 2392 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2393 error = EINVAL; 2394 break; 2395 } 2396 /* 2397 * I think passing the state is fine since 2398 * sctp_constants.h will be available to the user 2399 * land. 2400 */ 2401 sstat->sstat_state = stcb->asoc.state; 2402 sstat->sstat_assoc_id = sctp_get_associd(stcb); 2403 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2404 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2405 /* 2406 * We can't include chunks that have been passed to 2407 * the socket layer. Only things in queue. 
2408 */ 2409 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2410 stcb->asoc.cnt_on_all_streams); 2411 2412 2413 sstat->sstat_instrms = stcb->asoc.streamincnt; 2414 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2415 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2416 memcpy(&sstat->sstat_primary.spinfo_address, 2417 &stcb->asoc.primary_destination->ro._l_addr, 2418 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2419 net = stcb->asoc.primary_destination; 2420 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2421 /* 2422 * Again the user can get info from sctp_constants.h 2423 * for what the state of the network is. 2424 */ 2425 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2426 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2427 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2428 sstat->sstat_primary.spinfo_rto = net->RTO; 2429 sstat->sstat_primary.spinfo_mtu = net->mtu; 2430 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2431 SCTP_TCB_UNLOCK(stcb); 2432 *optsize = sizeof(*sstat); 2433 } 2434 break; 2435 case SCTP_RTOINFO: 2436 { 2437 struct sctp_rtoinfo *srto; 2438 2439 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2440 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2441 2442 if (stcb) { 2443 srto->srto_initial = stcb->asoc.initial_rto; 2444 srto->srto_max = stcb->asoc.maxrto; 2445 srto->srto_min = stcb->asoc.minrto; 2446 SCTP_TCB_UNLOCK(stcb); 2447 } else { 2448 SCTP_INP_RLOCK(inp); 2449 srto->srto_initial = inp->sctp_ep.initial_rto; 2450 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2451 srto->srto_min = inp->sctp_ep.sctp_minrto; 2452 SCTP_INP_RUNLOCK(inp); 2453 } 2454 *optsize = sizeof(*srto); 2455 } 2456 break; 2457 case SCTP_ASSOCINFO: 2458 { 2459 struct sctp_assocparams *sasoc; 2460 uint32_t oldval; 2461 2462 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2463 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2464 2465 if (stcb) { 2466 oldval = sasoc->sasoc_cookie_life; 2467 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); 2468 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2469 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2470 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2471 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2472 SCTP_TCB_UNLOCK(stcb); 2473 } else { 2474 SCTP_INP_RLOCK(inp); 2475 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); 2476 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2477 sasoc->sasoc_number_peer_destinations = 0; 2478 sasoc->sasoc_peer_rwnd = 0; 2479 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2480 SCTP_INP_RUNLOCK(inp); 2481 } 2482 *optsize = sizeof(*sasoc); 2483 } 2484 break; 2485 case SCTP_DEFAULT_SEND_PARAM: 2486 { 2487 struct sctp_sndrcvinfo *s_info; 2488 2489 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2490 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2491 2492 if (stcb) { 2493 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); 2494 SCTP_TCB_UNLOCK(stcb); 2495 } else { 2496 SCTP_INP_RLOCK(inp); 2497 memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); 2498 SCTP_INP_RUNLOCK(inp); 2499 } 2500 *optsize = sizeof(*s_info); 2501 } 2502 break; 2503 case SCTP_INITMSG: 2504 { 2505 struct sctp_initmsg *sinit; 2506 2507 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2508 SCTP_INP_RLOCK(inp); 2509 
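			/*
			 * Copy out the endpoint defaults that an INIT would
			 * advertise. A hedged userland sketch; the descriptor
			 * "sd" and the IPPROTO_SCTP level are assumptions:
			 *
			 *	struct sctp_initmsg init;
			 *	socklen_t len = sizeof(init);
			 *
			 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG,
			 *	    &init, &len) == 0)
			 *		printf("%u default ostreams\n",
			 *		    (unsigned)init.sinit_num_ostreams);
			 */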
sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2510 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2511 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2512 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2513 SCTP_INP_RUNLOCK(inp); 2514 *optsize = sizeof(*sinit); 2515 } 2516 break; 2517 case SCTP_PRIMARY_ADDR: 2518 /* we allow a "get" operation on this */ 2519 { 2520 struct sctp_setprim *ssp; 2521 2522 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2523 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2524 2525 if (stcb) { 2526 /* simply copy out the sockaddr_storage... */ 2527 int len; 2528 2529 len = *optsize; 2530 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len) 2531 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len; 2532 2533 memcpy(&ssp->ssp_addr, 2534 &stcb->asoc.primary_destination->ro._l_addr, 2535 len); 2536 SCTP_TCB_UNLOCK(stcb); 2537 } else { 2538 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2539 error = EINVAL; 2540 } 2541 *optsize = sizeof(*ssp); 2542 } 2543 break; 2544 2545 case SCTP_HMAC_IDENT: 2546 { 2547 struct sctp_hmacalgo *shmac; 2548 sctp_hmaclist_t *hmaclist; 2549 uint32_t size; 2550 int i; 2551 2552 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2553 2554 SCTP_INP_RLOCK(inp); 2555 hmaclist = inp->sctp_ep.local_hmacs; 2556 if (hmaclist == NULL) { 2557 /* no HMACs to return */ 2558 *optsize = sizeof(*shmac); 2559 SCTP_INP_RUNLOCK(inp); 2560 break; 2561 } 2562 /* is there room for all of the hmac ids? */ 2563 size = sizeof(*shmac) + (hmaclist->num_algo * 2564 sizeof(shmac->shmac_idents[0])); 2565 if ((size_t)(*optsize) < size) { 2566 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2567 error = EINVAL; 2568 SCTP_INP_RUNLOCK(inp); 2569 break; 2570 } 2571 /* copy in the list */ 2572 shmac->shmac_number_of_idents = hmaclist->num_algo; 2573 for (i = 0; i < hmaclist->num_algo; i++) { 2574 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2575 } 2576 SCTP_INP_RUNLOCK(inp); 2577 *optsize = size; 2578 break; 2579 } 2580 case SCTP_AUTH_ACTIVE_KEY: 2581 { 2582 struct sctp_authkeyid *scact; 2583 2584 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2585 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2586 2587 if (stcb) { 2588 /* get the active key on the assoc */ 2589 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; 2590 SCTP_TCB_UNLOCK(stcb); 2591 } else { 2592 /* get the endpoint active key */ 2593 SCTP_INP_RLOCK(inp); 2594 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2595 SCTP_INP_RUNLOCK(inp); 2596 } 2597 *optsize = sizeof(*scact); 2598 break; 2599 } 2600 case SCTP_LOCAL_AUTH_CHUNKS: 2601 { 2602 struct sctp_authchunks *sac; 2603 sctp_auth_chklist_t *chklist = NULL; 2604 size_t size = 0; 2605 2606 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2607 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2608 2609 if (stcb) { 2610 /* get off the assoc */ 2611 chklist = stcb->asoc.local_auth_chunks; 2612 /* is there enough space? 
*/ 2613 size = sctp_auth_get_chklist_size(chklist); 2614 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2615 error = EINVAL; 2616 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2617 } else { 2618 /* copy in the chunks */ 2619 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2620 } 2621 SCTP_TCB_UNLOCK(stcb); 2622 } else { 2623 /* get off the endpoint */ 2624 SCTP_INP_RLOCK(inp); 2625 chklist = inp->sctp_ep.local_auth_chunks; 2626 /* is there enough space? */ 2627 size = sctp_auth_get_chklist_size(chklist); 2628 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2629 error = EINVAL; 2630 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2631 } else { 2632 /* copy in the chunks */ 2633 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2634 } 2635 SCTP_INP_RUNLOCK(inp); 2636 } 2637 *optsize = sizeof(struct sctp_authchunks) + size; 2638 break; 2639 } 2640 case SCTP_PEER_AUTH_CHUNKS: 2641 { 2642 struct sctp_authchunks *sac; 2643 sctp_auth_chklist_t *chklist = NULL; 2644 size_t size = 0; 2645 2646 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2647 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2648 2649 if (stcb) { 2650 /* get off the assoc */ 2651 chklist = stcb->asoc.peer_auth_chunks; 2652 /* is there enough space? */ 2653 size = sctp_auth_get_chklist_size(chklist); 2654 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2655 error = EINVAL; 2656 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 2657 } else { 2658 /* copy in the chunks */ 2659 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2660 } 2661 SCTP_TCB_UNLOCK(stcb); 2662 } else { 2663 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 2664 error = ENOENT; 2665 } 2666 *optsize = sizeof(struct sctp_authchunks) + size; 2667 break; 2668 } 2669 2670 2671 default: 2672 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2673 error = ENOPROTOOPT; 2674 *optsize = 0; 2675 break; 2676 } /* end switch (sopt->sopt_name) */ 2677 return (error); 2678 } 2679 2680 static int 2681 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2682 void *p) 2683 { 2684 int error, set_opt; 2685 uint32_t *mopt; 2686 struct sctp_tcb *stcb = NULL; 2687 struct sctp_inpcb *inp = NULL; 2688 uint32_t vrf_id; 2689 2690 if (optval == NULL) { 2691 SCTP_PRINTF("optval is NULL\n"); 2692 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2693 return (EINVAL); 2694 } 2695 inp = (struct sctp_inpcb *)so->so_pcb; 2696 if (inp == 0) { 2697 SCTP_PRINTF("inp is NULL?\n"); 2698 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2699 return EINVAL; 2700 } 2701 vrf_id = inp->def_vrf_id; 2702 2703 error = 0; 2704 switch (optname) { 2705 case SCTP_NODELAY: 2706 case SCTP_AUTOCLOSE: 2707 case SCTP_AUTO_ASCONF: 2708 case SCTP_EXPLICIT_EOR: 2709 case SCTP_DISABLE_FRAGMENTS: 2710 case SCTP_USE_EXT_RCVINFO: 2711 case SCTP_I_WANT_MAPPED_V4_ADDR: 2712 /* copy in the option value */ 2713 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2714 set_opt = 0; 2715 if (error) 2716 break; 2717 switch (optname) { 2718 case SCTP_DISABLE_FRAGMENTS: 2719 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2720 break; 2721 case SCTP_AUTO_ASCONF: 2722 /* 2723 * NOTE: we don't really support this flag 2724 */ 2725 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 2726 /* only valid for bound all sockets */ 2727 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2728 } else { 2729 
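				/*
				 * Not a bound-all endpoint, so automatic
				 * ASCONF cannot apply; reject the request.
				 */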
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2730 return (EINVAL); 2731 } 2732 break; 2733 case SCTP_EXPLICIT_EOR: 2734 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2735 break; 2736 case SCTP_USE_EXT_RCVINFO: 2737 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2738 break; 2739 case SCTP_I_WANT_MAPPED_V4_ADDR: 2740 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2741 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2742 } else { 2743 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2744 return (EINVAL); 2745 } 2746 break; 2747 case SCTP_NODELAY: 2748 set_opt = SCTP_PCB_FLAGS_NODELAY; 2749 break; 2750 case SCTP_AUTOCLOSE: 2751 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2752 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2753 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2754 return (EINVAL); 2755 } 2756 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2757 /* 2758 * The value is in ticks. Note this does not effect 2759 * old associations, only new ones. 2760 */ 2761 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2762 break; 2763 } 2764 SCTP_INP_WLOCK(inp); 2765 if (*mopt != 0) { 2766 sctp_feature_on(inp, set_opt); 2767 } else { 2768 sctp_feature_off(inp, set_opt); 2769 } 2770 SCTP_INP_WUNLOCK(inp); 2771 break; 2772 case SCTP_REUSE_PORT: 2773 { 2774 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2775 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 2776 /* Can't set it after we are bound */ 2777 error = EINVAL; 2778 break; 2779 } 2780 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2781 /* Can't do this for a 1-m socket */ 2782 error = EINVAL; 2783 break; 2784 } 2785 if (optval) 2786 sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); 2787 else 2788 sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE); 2789 } 2790 break; 2791 case SCTP_PARTIAL_DELIVERY_POINT: 2792 { 2793 uint32_t *value; 2794 2795 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2796 if (*value > SCTP_SB_LIMIT_RCV(so)) { 2797 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2798 error = EINVAL; 2799 break; 2800 } 2801 inp->partial_delivery_point = *value; 2802 } 2803 break; 2804 case SCTP_FRAGMENT_INTERLEAVE: 2805 /* not yet until we re-write sctp_recvmsg() */ 2806 { 2807 uint32_t *level; 2808 2809 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize); 2810 if (*level == SCTP_FRAG_LEVEL_2) { 2811 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2812 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2813 } else if (*level == SCTP_FRAG_LEVEL_1) { 2814 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2815 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2816 } else if (*level == SCTP_FRAG_LEVEL_0) { 2817 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2818 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2819 2820 } else { 2821 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2822 error = EINVAL; 2823 } 2824 } 2825 break; 2826 case SCTP_CMT_ON_OFF: 2827 { 2828 struct sctp_assoc_value *av; 2829 2830 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2831 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 2832 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2833 if (stcb) { 2834 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; 2835 SCTP_TCB_UNLOCK(stcb); 2836 } else { 2837 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2838 error = ENOTCONN; 2839 } 2840 } else { 2841 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2842 error = ENOPROTOOPT; 
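				/*
				 * As in the corresponding getsockopt() case,
				 * per-association CMT can only be toggled when
				 * the sctp_cmt_on_off sysctl is enabled;
				 * otherwise the option is rejected with
				 * ENOPROTOOPT.
				 */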
2843 } 2844 } 2845 break; 2846 /* EY nr_sack_on_off socket option */ 2847 case SCTP_NR_SACK_ON_OFF: 2848 { 2849 struct sctp_assoc_value *av; 2850 2851 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2852 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) { 2853 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2854 if (stcb) { 2855 stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value; 2856 SCTP_TCB_UNLOCK(stcb); 2857 } else { 2858 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 2859 error = ENOTCONN; 2860 } 2861 } else { 2862 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 2863 error = ENOPROTOOPT; 2864 } 2865 } 2866 break; 2867 /* JRS - Set socket option for pluggable congestion control */ 2868 case SCTP_PLUGGABLE_CC: 2869 { 2870 struct sctp_assoc_value *av; 2871 2872 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2873 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2874 if (stcb) { 2875 switch (av->assoc_value) { 2876 /* 2877 * JRS - Standard TCP congestion 2878 * control 2879 */ 2880 case SCTP_CC_RFC2581: 2881 { 2882 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 2883 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2884 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 2885 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 2886 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2887 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2888 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2889 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2890 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2891 SCTP_TCB_UNLOCK(stcb); 2892 break; 2893 } 2894 /* 2895 * JRS - High Speed TCP congestion 2896 * control (Floyd) 2897 */ 2898 case SCTP_CC_HSTCP: 2899 { 2900 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP; 2901 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 2902 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack; 2903 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr; 2904 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 2905 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 2906 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2907 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2908 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 2909 SCTP_TCB_UNLOCK(stcb); 2910 break; 2911 } 2912 /* JRS - HTCP congestion control */ 2913 case SCTP_CC_HTCP: 2914 { 2915 stcb->asoc.congestion_control_module = SCTP_CC_HTCP; 2916 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param; 2917 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack; 2918 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr; 2919 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout; 2920 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo; 2921 
stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 2922 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 2923 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer; 2924 SCTP_TCB_UNLOCK(stcb); 2925 break; 2926 } 2927 /* 2928 * JRS - All other values are 2929 * invalid 2930 */ 2931 default: 2932 { 2933 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2934 error = EINVAL; 2935 SCTP_TCB_UNLOCK(stcb); 2936 break; 2937 } 2938 } 2939 } else { 2940 switch (av->assoc_value) { 2941 case SCTP_CC_RFC2581: 2942 case SCTP_CC_HSTCP: 2943 case SCTP_CC_HTCP: 2944 inp->sctp_ep.sctp_default_cc_module = av->assoc_value; 2945 break; 2946 default: 2947 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2948 error = EINVAL; 2949 break; 2950 }; 2951 } 2952 } 2953 break; 2954 case SCTP_CLR_STAT_LOG: 2955 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2956 error = EOPNOTSUPP; 2957 break; 2958 case SCTP_CONTEXT: 2959 { 2960 struct sctp_assoc_value *av; 2961 2962 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2963 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2964 2965 if (stcb) { 2966 stcb->asoc.context = av->assoc_value; 2967 SCTP_TCB_UNLOCK(stcb); 2968 } else { 2969 SCTP_INP_WLOCK(inp); 2970 inp->sctp_context = av->assoc_value; 2971 SCTP_INP_WUNLOCK(inp); 2972 } 2973 } 2974 break; 2975 case SCTP_VRF_ID: 2976 { 2977 uint32_t *default_vrfid; 2978 2979 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); 2980 if (*default_vrfid > SCTP_MAX_VRF_ID) { 2981 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 2982 error = EINVAL; 2983 break; 2984 } 2985 inp->def_vrf_id = *default_vrfid; 2986 break; 2987 } 2988 case SCTP_DEL_VRF_ID: 2989 { 2990 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2991 error = EOPNOTSUPP; 2992 break; 2993 } 2994 case SCTP_ADD_VRF_ID: 2995 { 2996 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 2997 error = EOPNOTSUPP; 2998 break; 2999 } 3000 case SCTP_DELAYED_SACK: 3001 { 3002 struct sctp_sack_info *sack; 3003 3004 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); 3005 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); 3006 if (sack->sack_delay) { 3007 if (sack->sack_delay > SCTP_MAX_SACK_DELAY) 3008 sack->sack_delay = SCTP_MAX_SACK_DELAY; 3009 } 3010 if (stcb) { 3011 if (sack->sack_delay) { 3012 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3013 sack->sack_delay = TICKS_TO_MSEC(1); 3014 } 3015 stcb->asoc.delayed_ack = sack->sack_delay; 3016 } 3017 if (sack->sack_freq) { 3018 stcb->asoc.sack_freq = sack->sack_freq; 3019 } 3020 SCTP_TCB_UNLOCK(stcb); 3021 } else { 3022 SCTP_INP_WLOCK(inp); 3023 if (sack->sack_delay) { 3024 if (MSEC_TO_TICKS(sack->sack_delay) < 1) { 3025 sack->sack_delay = TICKS_TO_MSEC(1); 3026 } 3027 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); 3028 } 3029 if (sack->sack_freq) { 3030 inp->sctp_ep.sctp_sack_freq = sack->sack_freq; 3031 } 3032 SCTP_INP_WUNLOCK(inp); 3033 } 3034 break; 3035 } 3036 case SCTP_AUTH_CHUNK: 3037 { 3038 struct sctp_authchunk *sauth; 3039 3040 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 3041 3042 SCTP_INP_WLOCK(inp); 3043 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { 3044 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3045 error = EINVAL; 3046 } 
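			/*
			 * If sctp_auth_add_chunk() refuses the chunk type,
			 * EINVAL is returned above. A hedged userland sketch of
			 * requesting authentication for a chunk type; "sd",
			 * the IPPROTO_SCTP level and the ASCONF chunk type
			 * (0xc1) are used purely for illustration:
			 *
			 *	struct sctp_authchunk ac;
			 *
			 *	ac.sauth_chunk = 0xc1;
			 *	(void)setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK,
			 *	    &ac, sizeof(ac));
			 */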
3047 SCTP_INP_WUNLOCK(inp); 3048 break; 3049 } 3050 case SCTP_AUTH_KEY: 3051 { 3052 struct sctp_authkey *sca; 3053 struct sctp_keyhead *shared_keys; 3054 sctp_sharedkey_t *shared_key; 3055 sctp_key_t *key = NULL; 3056 size_t size; 3057 3058 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 3059 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); 3060 size = optsize - sizeof(*sca); 3061 3062 if (stcb) { 3063 /* set it on the assoc */ 3064 shared_keys = &stcb->asoc.shared_keys; 3065 /* clear the cached keys for this key id */ 3066 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 3067 /* 3068 * create the new shared key and 3069 * insert/replace it 3070 */ 3071 if (size > 0) { 3072 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3073 if (key == NULL) { 3074 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3075 error = ENOMEM; 3076 SCTP_TCB_UNLOCK(stcb); 3077 break; 3078 } 3079 } 3080 shared_key = sctp_alloc_sharedkey(); 3081 if (shared_key == NULL) { 3082 sctp_free_key(key); 3083 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3084 error = ENOMEM; 3085 SCTP_TCB_UNLOCK(stcb); 3086 break; 3087 } 3088 shared_key->key = key; 3089 shared_key->keyid = sca->sca_keynumber; 3090 error = sctp_insert_sharedkey(shared_keys, shared_key); 3091 SCTP_TCB_UNLOCK(stcb); 3092 } else { 3093 /* set it on the endpoint */ 3094 SCTP_INP_WLOCK(inp); 3095 shared_keys = &inp->sctp_ep.shared_keys; 3096 /* 3097 * clear the cached keys on all assocs for 3098 * this key id 3099 */ 3100 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 3101 /* 3102 * create the new shared key and 3103 * insert/replace it 3104 */ 3105 if (size > 0) { 3106 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3107 if (key == NULL) { 3108 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3109 error = ENOMEM; 3110 SCTP_INP_WUNLOCK(inp); 3111 break; 3112 } 3113 } 3114 shared_key = sctp_alloc_sharedkey(); 3115 if (shared_key == NULL) { 3116 sctp_free_key(key); 3117 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3118 error = ENOMEM; 3119 SCTP_INP_WUNLOCK(inp); 3120 break; 3121 } 3122 shared_key->key = key; 3123 shared_key->keyid = sca->sca_keynumber; 3124 error = sctp_insert_sharedkey(shared_keys, shared_key); 3125 SCTP_INP_WUNLOCK(inp); 3126 } 3127 break; 3128 } 3129 case SCTP_HMAC_IDENT: 3130 { 3131 struct sctp_hmacalgo *shmac; 3132 sctp_hmaclist_t *hmaclist; 3133 uint16_t hmacid; 3134 uint32_t i; 3135 3136 size_t found; 3137 3138 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3139 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) { 3140 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3141 error = EINVAL; 3142 break; 3143 } 3144 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents); 3145 if (hmaclist == NULL) { 3146 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); 3147 error = ENOMEM; 3148 break; 3149 } 3150 for (i = 0; i < shmac->shmac_number_of_idents; i++) { 3151 hmacid = shmac->shmac_idents[i]; 3152 if (sctp_auth_add_hmacid(hmaclist, hmacid)) { 3153 /* invalid HMACs were found */ ; 3154 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3155 error = EINVAL; 3156 sctp_free_hmaclist(hmaclist); 3157 goto sctp_set_hmac_done; 3158 } 3159 } 3160 found = 0; 3161 for (i = 0; i < hmaclist->num_algo; i++) { 3162 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { 3163 /* already in list */ 3164 found = 1; 3165 } 3166 } 3167 if 
(!found) { 3168 sctp_free_hmaclist(hmaclist); 3169 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3170 error = EINVAL; 3171 break; 3172 } 3173 /* set it on the endpoint */ 3174 SCTP_INP_WLOCK(inp); 3175 if (inp->sctp_ep.local_hmacs) 3176 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3177 inp->sctp_ep.local_hmacs = hmaclist; 3178 SCTP_INP_WUNLOCK(inp); 3179 sctp_set_hmac_done: 3180 break; 3181 } 3182 case SCTP_AUTH_ACTIVE_KEY: 3183 { 3184 struct sctp_authkeyid *scact; 3185 3186 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, 3187 optsize); 3188 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3189 3190 /* set the active key on the right place */ 3191 if (stcb) { 3192 /* set the active key on the assoc */ 3193 if (sctp_auth_setactivekey(stcb, 3194 scact->scact_keynumber)) { 3195 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3196 SCTP_FROM_SCTP_USRREQ, 3197 EINVAL); 3198 error = EINVAL; 3199 } 3200 SCTP_TCB_UNLOCK(stcb); 3201 } else { 3202 /* set the active key on the endpoint */ 3203 SCTP_INP_WLOCK(inp); 3204 if (sctp_auth_setactivekey_ep(inp, 3205 scact->scact_keynumber)) { 3206 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3207 SCTP_FROM_SCTP_USRREQ, 3208 EINVAL); 3209 error = EINVAL; 3210 } 3211 SCTP_INP_WUNLOCK(inp); 3212 } 3213 break; 3214 } 3215 case SCTP_AUTH_DELETE_KEY: 3216 { 3217 struct sctp_authkeyid *scdel; 3218 3219 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, 3220 optsize); 3221 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3222 3223 /* delete the key from the right place */ 3224 if (stcb) { 3225 if (sctp_delete_sharedkey(stcb, 3226 scdel->scact_keynumber)) { 3227 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3228 SCTP_FROM_SCTP_USRREQ, 3229 EINVAL); 3230 error = EINVAL; 3231 } 3232 SCTP_TCB_UNLOCK(stcb); 3233 } else { 3234 SCTP_INP_WLOCK(inp); 3235 if (sctp_delete_sharedkey_ep(inp, 3236 scdel->scact_keynumber)) { 3237 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3238 SCTP_FROM_SCTP_USRREQ, 3239 EINVAL); 3240 error = EINVAL; 3241 } 3242 SCTP_INP_WUNLOCK(inp); 3243 } 3244 break; 3245 } 3246 case SCTP_AUTH_DEACTIVATE_KEY: 3247 { 3248 struct sctp_authkeyid *keyid; 3249 3250 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, 3251 optsize); 3252 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); 3253 3254 /* deactivate the key from the right place */ 3255 if (stcb) { 3256 if (sctp_deact_sharedkey(stcb, 3257 keyid->scact_keynumber)) { 3258 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3259 SCTP_FROM_SCTP_USRREQ, 3260 EINVAL); 3261 error = EINVAL; 3262 } 3263 SCTP_TCB_UNLOCK(stcb); 3264 } else { 3265 SCTP_INP_WLOCK(inp); 3266 if (sctp_deact_sharedkey_ep(inp, 3267 keyid->scact_keynumber)) { 3268 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 3269 SCTP_FROM_SCTP_USRREQ, 3270 EINVAL); 3271 error = EINVAL; 3272 } 3273 SCTP_INP_WUNLOCK(inp); 3274 } 3275 break; 3276 } 3277 3278 case SCTP_RESET_STREAMS: 3279 { 3280 struct sctp_stream_reset *strrst; 3281 uint8_t send_in = 0, send_tsn = 0, send_out = 0, 3282 addstream = 0; 3283 uint16_t addstrmcnt = 0; 3284 int i; 3285 3286 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3287 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3288 3289 if (stcb == NULL) { 3290 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3291 error = ENOENT; 3292 break; 3293 } 3294 if (stcb->asoc.peer_supports_strreset == 0) { 3295 /* 3296 * Peer does not support it, we return 3297 * protocol not supported since this is true 3298 * for this feature and this peer, not the 3299 * socket request in general. 
3300 */ 3301 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT); 3302 error = EPROTONOSUPPORT; 3303 SCTP_TCB_UNLOCK(stcb); 3304 break; 3305 } 3306 if (stcb->asoc.stream_reset_outstanding) { 3307 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3308 error = EALREADY; 3309 SCTP_TCB_UNLOCK(stcb); 3310 break; 3311 } 3312 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3313 send_in = 1; 3314 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3315 send_out = 1; 3316 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3317 send_in = 1; 3318 send_out = 1; 3319 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3320 send_tsn = 1; 3321 } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) { 3322 if (send_tsn || 3323 send_in || 3324 send_out) { 3325 /* We can't do that and add streams */ 3326 error = EINVAL; 3327 goto skip_stuff; 3328 } 3329 if (stcb->asoc.stream_reset_outstanding) { 3330 error = EBUSY; 3331 goto skip_stuff; 3332 } 3333 addstream = 1; 3334 /* We allocate here */ 3335 addstrmcnt = strrst->strrst_num_streams; 3336 if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) { 3337 /* You can't have more than 64k */ 3338 error = EINVAL; 3339 goto skip_stuff; 3340 } 3341 if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) { 3342 /* Need to allocate more */ 3343 struct sctp_stream_out *oldstream; 3344 struct sctp_stream_queue_pending *sp; 3345 int removed; 3346 3347 oldstream = stcb->asoc.strmout; 3348 /* get some more */ 3349 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, 3350 ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)), 3351 SCTP_M_STRMO); 3352 if (stcb->asoc.strmout == NULL) { 3353 stcb->asoc.strmout = oldstream; 3354 error = ENOMEM; 3355 goto skip_stuff; 3356 } 3357 /* 3358 * Ok now we proceed with copying 3359 * the old out stuff and 3360 * initializing the new stuff. 3361 */ 3362 SCTP_TCB_SEND_LOCK(stcb); 3363 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3364 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3365 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent; 3366 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; 3367 stcb->asoc.strmout[i].stream_no = i; 3368 if (oldstream[i].next_spoke.tqe_next) { 3369 sctp_remove_from_wheel(stcb, &stcb->asoc, &oldstream[i], 1); 3370 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3371 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3372 removed = 1; 3373 } else { 3374 /* not on out wheel */ 3375 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3376 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3377 removed = 0; 3378 } 3379 /* 3380 * now anything on those 3381 * queues? 3382 */ 3383 while (TAILQ_EMPTY(&oldstream[i].outqueue) == 0) { 3384 sp = TAILQ_FIRST(&oldstream[i].outqueue); 3385 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); 3386 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); 3387 } 3388 /* Did we disrupt the wheel? 
*/ 3389 if (removed) { 3390 sctp_insert_on_wheel(stcb, 3391 &stcb->asoc, 3392 &stcb->asoc.strmout[i], 3393 1); 3394 } 3395 /* 3396 * Now move assoc pointers 3397 * too 3398 */ 3399 if (stcb->asoc.last_out_stream == &oldstream[i]) { 3400 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i]; 3401 } 3402 if (stcb->asoc.locked_on_sending == &oldstream[i]) { 3403 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i]; 3404 } 3405 } 3406 /* now the new streams */ 3407 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) { 3408 stcb->asoc.strmout[i].next_sequence_sent = 0x0; 3409 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3410 stcb->asoc.strmout[i].stream_no = i; 3411 stcb->asoc.strmout[i].last_msg_incomplete = 0; 3412 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL; 3413 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL; 3414 } 3415 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt; 3416 SCTP_FREE(oldstream, SCTP_M_STRMO); 3417 } 3418 SCTP_TCB_SEND_UNLOCK(stcb); 3419 goto skip_stuff; 3420 } else { 3421 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3422 error = EINVAL; 3423 SCTP_TCB_UNLOCK(stcb); 3424 break; 3425 } 3426 for (i = 0; i < strrst->strrst_num_streams; i++) { 3427 if ((send_in) && 3428 3429 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3430 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3431 error = EINVAL; 3432 goto get_out; 3433 } 3434 if ((send_out) && 3435 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3436 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3437 error = EINVAL; 3438 goto get_out; 3439 } 3440 } 3441 skip_stuff: 3442 if (error) { 3443 get_out: 3444 SCTP_TCB_UNLOCK(stcb); 3445 break; 3446 } 3447 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3448 strrst->strrst_list, 3449 send_out, (stcb->asoc.str_reset_seq_in - 3), 3450 send_in, send_tsn, addstream, addstrmcnt); 3451 3452 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); 3453 SCTP_TCB_UNLOCK(stcb); 3454 } 3455 break; 3456 3457 case SCTP_CONNECT_X: 3458 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3459 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3460 error = EINVAL; 3461 break; 3462 } 3463 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3464 break; 3465 3466 case SCTP_CONNECT_X_DELAYED: 3467 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3468 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3469 error = EINVAL; 3470 break; 3471 } 3472 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3473 break; 3474 3475 case SCTP_CONNECT_X_COMPLETE: 3476 { 3477 struct sockaddr *sa; 3478 struct sctp_nets *net; 3479 3480 /* FIXME MT: check correct? */ 3481 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3482 3483 /* find tcb */ 3484 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3485 SCTP_INP_RLOCK(inp); 3486 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3487 if (stcb) { 3488 SCTP_TCB_LOCK(stcb); 3489 net = sctp_findnet(stcb, sa); 3490 } 3491 SCTP_INP_RUNLOCK(inp); 3492 } else { 3493 /* 3494 * We increment here since 3495 * sctp_findassociation_ep_addr() wil do a 3496 * decrement if it finds the stcb as long as 3497 * the locked tcb (last argument) is NOT a 3498 * TCB.. aka NULL. 
3499 */ 3500 SCTP_INP_INCR_REF(inp); 3501 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3502 if (stcb == NULL) { 3503 SCTP_INP_DECR_REF(inp); 3504 } 3505 } 3506 3507 if (stcb == NULL) { 3508 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 3509 error = ENOENT; 3510 break; 3511 } 3512 if (stcb->asoc.delayed_connection == 1) { 3513 stcb->asoc.delayed_connection = 0; 3514 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3515 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3516 stcb->asoc.primary_destination, 3517 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3518 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 3519 } else { 3520 /* 3521 * already expired or did not use delayed 3522 * connectx 3523 */ 3524 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 3525 error = EALREADY; 3526 } 3527 SCTP_TCB_UNLOCK(stcb); 3528 } 3529 break; 3530 case SCTP_MAX_BURST: 3531 { 3532 uint8_t *burst; 3533 3534 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 3535 3536 SCTP_INP_WLOCK(inp); 3537 if (*burst) { 3538 inp->sctp_ep.max_burst = *burst; 3539 } 3540 SCTP_INP_WUNLOCK(inp); 3541 } 3542 break; 3543 case SCTP_MAXSEG: 3544 { 3545 struct sctp_assoc_value *av; 3546 int ovh; 3547 3548 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 3549 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 3550 3551 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3552 ovh = SCTP_MED_OVERHEAD; 3553 } else { 3554 ovh = SCTP_MED_V4_OVERHEAD; 3555 } 3556 if (stcb) { 3557 if (av->assoc_value) { 3558 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); 3559 } else { 3560 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3561 } 3562 SCTP_TCB_UNLOCK(stcb); 3563 } else { 3564 SCTP_INP_WLOCK(inp); 3565 /* 3566 * FIXME MT: I think this is not in tune 3567 * with the API ID 3568 */ 3569 if (av->assoc_value) { 3570 inp->sctp_frag_point = (av->assoc_value + ovh); 3571 } else { 3572 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 3573 } 3574 SCTP_INP_WUNLOCK(inp); 3575 } 3576 } 3577 break; 3578 case SCTP_EVENTS: 3579 { 3580 struct sctp_event_subscribe *events; 3581 3582 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3583 3584 SCTP_INP_WLOCK(inp); 3585 if (events->sctp_data_io_event) { 3586 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3587 } else { 3588 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3589 } 3590 3591 if (events->sctp_association_event) { 3592 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3593 } else { 3594 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3595 } 3596 3597 if (events->sctp_address_event) { 3598 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3599 } else { 3600 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3601 } 3602 3603 if (events->sctp_send_failure_event) { 3604 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3605 } else { 3606 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3607 } 3608 3609 if (events->sctp_peer_error_event) { 3610 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3611 } else { 3612 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3613 } 3614 3615 if (events->sctp_shutdown_event) { 3616 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3617 } else { 3618 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3619 } 3620 3621 if (events->sctp_partial_delivery_event) { 3622 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3623 } else { 3624 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3625 } 3626 3627 if 
(events->sctp_adaptation_layer_event) { 3628 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3629 } else { 3630 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3631 } 3632 3633 if (events->sctp_authentication_event) { 3634 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3635 } else { 3636 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3637 } 3638 3639 if (events->sctp_sender_dry_event) { 3640 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); 3641 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3642 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3643 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3644 if (stcb) { 3645 SCTP_TCB_LOCK(stcb); 3646 } 3647 if (stcb && 3648 TAILQ_EMPTY(&stcb->asoc.send_queue) && 3649 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 3650 (stcb->asoc.stream_queue_cnt == 0)) { 3651 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); 3652 } 3653 if (stcb) { 3654 SCTP_TCB_UNLOCK(stcb); 3655 } 3656 } 3657 } else { 3658 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); 3659 } 3660 3661 if (events->sctp_stream_reset_event) { 3662 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3663 } else { 3664 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3665 } 3666 SCTP_INP_WUNLOCK(inp); 3667 } 3668 break; 3669 3670 case SCTP_ADAPTATION_LAYER: 3671 { 3672 struct sctp_setadaptation *adap_bits; 3673 3674 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3675 SCTP_INP_WLOCK(inp); 3676 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3677 SCTP_INP_WUNLOCK(inp); 3678 } 3679 break; 3680 #ifdef SCTP_DEBUG 3681 case SCTP_SET_INITIAL_DBG_SEQ: 3682 { 3683 uint32_t *vvv; 3684 3685 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3686 SCTP_INP_WLOCK(inp); 3687 inp->sctp_ep.initial_sequence_debug = *vvv; 3688 SCTP_INP_WUNLOCK(inp); 3689 } 3690 break; 3691 #endif 3692 case SCTP_DEFAULT_SEND_PARAM: 3693 { 3694 struct sctp_sndrcvinfo *s_info; 3695 3696 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3697 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3698 3699 if (stcb) { 3700 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3701 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); 3702 } else { 3703 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3704 error = EINVAL; 3705 } 3706 SCTP_TCB_UNLOCK(stcb); 3707 } else { 3708 SCTP_INP_WLOCK(inp); 3709 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); 3710 SCTP_INP_WUNLOCK(inp); 3711 } 3712 } 3713 break; 3714 case SCTP_PEER_ADDR_PARAMS: 3715 /* Applys to the specific association */ 3716 { 3717 struct sctp_paddrparams *paddrp; 3718 struct sctp_nets *net; 3719 3720 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3721 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3722 net = NULL; 3723 if (stcb) { 3724 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3725 } else { 3726 /* 3727 * We increment here since 3728 * sctp_findassociation_ep_addr() wil do a 3729 * decrement if it finds the stcb as long as 3730 * the locked tcb (last argument) is NOT a 3731 * TCB.. aka NULL. 
3732 */ 3733 SCTP_INP_INCR_REF(inp); 3734 stcb = sctp_findassociation_ep_addr(&inp, 3735 (struct sockaddr *)&paddrp->spp_address, 3736 &net, NULL, NULL); 3737 if (stcb == NULL) { 3738 SCTP_INP_DECR_REF(inp); 3739 } 3740 } 3741 if (stcb && (net == NULL)) { 3742 struct sockaddr *sa; 3743 3744 sa = (struct sockaddr *)&paddrp->spp_address; 3745 if (sa->sa_family == AF_INET) { 3746 struct sockaddr_in *sin; 3747 3748 sin = (struct sockaddr_in *)sa; 3749 if (sin->sin_addr.s_addr) { 3750 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3751 SCTP_TCB_UNLOCK(stcb); 3752 error = EINVAL; 3753 break; 3754 } 3755 } else if (sa->sa_family == AF_INET6) { 3756 struct sockaddr_in6 *sin6; 3757 3758 sin6 = (struct sockaddr_in6 *)sa; 3759 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3760 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3761 SCTP_TCB_UNLOCK(stcb); 3762 error = EINVAL; 3763 break; 3764 } 3765 } else { 3766 error = EAFNOSUPPORT; 3767 SCTP_TCB_UNLOCK(stcb); 3768 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 3769 break; 3770 } 3771 } 3772 /* sanity checks */ 3773 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { 3774 if (stcb) 3775 SCTP_TCB_UNLOCK(stcb); 3776 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3777 return (EINVAL); 3778 } 3779 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { 3780 if (stcb) 3781 SCTP_TCB_UNLOCK(stcb); 3782 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3783 return (EINVAL); 3784 } 3785 if (stcb) { 3786 /************************TCB SPECIFIC SET ******************/ 3787 /* 3788 * do we change the timer for HB, we run 3789 * only one? 3790 */ 3791 int ovh = 0; 3792 3793 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3794 ovh = SCTP_MED_OVERHEAD; 3795 } else { 3796 ovh = SCTP_MED_V4_OVERHEAD; 3797 } 3798 3799 if (paddrp->spp_hbinterval) 3800 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3801 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3802 stcb->asoc.heart_beat_delay = 0; 3803 3804 /* network sets ? 
*/ 3805 if (net) { 3806 /************************NET SPECIFIC SET ******************/ 3807 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3808 /* on demand HB */ 3809 if (sctp_send_hb(stcb, 1, net) < 0) { 3810 /* asoc destroyed */ 3811 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3812 error = EINVAL; 3813 break; 3814 } 3815 } 3816 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3817 net->dest_state |= SCTP_ADDR_NOHB; 3818 } 3819 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3820 net->dest_state &= ~SCTP_ADDR_NOHB; 3821 } 3822 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3823 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3824 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3825 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3826 } 3827 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3828 net->mtu = paddrp->spp_pathmtu + ovh; 3829 if (net->mtu < stcb->asoc.smallest_mtu) { 3830 #ifdef SCTP_PRINT_FOR_B_AND_M 3831 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3832 net->mtu); 3833 #endif 3834 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3835 } 3836 } 3837 } 3838 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3839 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3840 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3841 } 3842 } 3843 if (paddrp->spp_pathmaxrxt) 3844 net->failure_threshold = paddrp->spp_pathmaxrxt; 3845 #ifdef INET 3846 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3847 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3848 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3849 } 3850 } 3851 #endif 3852 #ifdef INET6 3853 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3854 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3855 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3856 } 3857 } 3858 #endif 3859 } else { 3860 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3861 if (paddrp->spp_pathmaxrxt) 3862 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3863 3864 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3865 /* Turn back on the timer */ 3866 stcb->asoc.hb_is_disabled = 0; 3867 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3868 } 3869 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { 3870 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3871 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3872 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3873 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3874 } 3875 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3876 net->mtu = paddrp->spp_pathmtu + ovh; 3877 if (net->mtu < stcb->asoc.smallest_mtu) { 3878 #ifdef SCTP_PRINT_FOR_B_AND_M 3879 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n", 3880 net->mtu); 3881 #endif 3882 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu); 3883 } 3884 } 3885 } 3886 } 3887 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3888 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3889 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3890 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3891 } 3892 } 3893 } 3894 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3895 int cnt_of_unconf = 0; 3896 struct sctp_nets *lnet; 3897 3898 stcb->asoc.hb_is_disabled = 1; 3899 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3900 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3901 cnt_of_unconf++; 3902 } 3903 } 3904 /* 3905 * stop the timer ONLY if we 3906 * have no unconfirmed 3907 * 
addresses 3908 */ 3909 if (cnt_of_unconf == 0) { 3910 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3911 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 3912 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3913 } 3914 } 3915 } 3916 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3917 /* start up the timer. */ 3918 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3919 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3920 } 3921 } 3922 #ifdef INET 3923 if (paddrp->spp_flags & SPP_IPV4_TOS) 3924 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3925 #endif 3926 #ifdef INET6 3927 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3928 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3929 #endif 3930 3931 } 3932 SCTP_TCB_UNLOCK(stcb); 3933 } else { 3934 /************************NO TCB, SET TO default stuff ******************/ 3935 SCTP_INP_WLOCK(inp); 3936 /* 3937 * For the TOS/FLOWLABEL stuff you set it 3938 * with the options on the socket 3939 */ 3940 if (paddrp->spp_pathmaxrxt) { 3941 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3942 } 3943 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3944 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; 3945 else if (paddrp->spp_hbinterval) { 3946 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) 3947 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; 3948 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3949 } 3950 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3951 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3952 3953 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3954 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3955 } 3956 SCTP_INP_WUNLOCK(inp); 3957 } 3958 } 3959 break; 3960 case SCTP_RTOINFO: 3961 { 3962 struct sctp_rtoinfo *srto; 3963 uint32_t new_init, new_min, new_max; 3964 3965 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3966 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3967 3968 if (stcb) { 3969 if (srto->srto_initial) 3970 new_init = srto->srto_initial; 3971 else 3972 new_init = stcb->asoc.initial_rto; 3973 if (srto->srto_max) 3974 new_max = srto->srto_max; 3975 else 3976 new_max = stcb->asoc.maxrto; 3977 if (srto->srto_min) 3978 new_min = srto->srto_min; 3979 else 3980 new_min = stcb->asoc.minrto; 3981 if ((new_min <= new_init) && (new_init <= new_max)) { 3982 stcb->asoc.initial_rto = new_init; 3983 stcb->asoc.maxrto = new_max; 3984 stcb->asoc.minrto = new_min; 3985 } else { 3986 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 3987 error = EINVAL; 3988 } 3989 SCTP_TCB_UNLOCK(stcb); 3990 } else { 3991 SCTP_INP_WLOCK(inp); 3992 if (srto->srto_initial) 3993 new_init = srto->srto_initial; 3994 else 3995 new_init = inp->sctp_ep.initial_rto; 3996 if (srto->srto_max) 3997 new_max = srto->srto_max; 3998 else 3999 new_max = inp->sctp_ep.sctp_maxrto; 4000 if (srto->srto_min) 4001 new_min = srto->srto_min; 4002 else 4003 new_min = inp->sctp_ep.sctp_minrto; 4004 if ((new_min <= new_init) && (new_init <= new_max)) { 4005 inp->sctp_ep.initial_rto = new_init; 4006 inp->sctp_ep.sctp_maxrto = new_max; 4007 inp->sctp_ep.sctp_minrto = new_min; 4008 } else { 4009 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4010 error = EINVAL; 4011 } 4012 SCTP_INP_WUNLOCK(inp); 4013 } 4014 } 4015 break; 4016 case SCTP_ASSOCINFO: 4017 { 4018 struct sctp_assocparams *sasoc; 4019 4020 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 4021 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 4022 if 
(sasoc->sasoc_cookie_life) { 4023 /* boundary check the cookie life */ 4024 if (sasoc->sasoc_cookie_life < 1000) 4025 sasoc->sasoc_cookie_life = 1000; 4026 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { 4027 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; 4028 } 4029 } 4030 if (stcb) { 4031 if (sasoc->sasoc_asocmaxrxt) 4032 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 4033 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 4034 sasoc->sasoc_peer_rwnd = 0; 4035 sasoc->sasoc_local_rwnd = 0; 4036 if (sasoc->sasoc_cookie_life) { 4037 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4038 } 4039 SCTP_TCB_UNLOCK(stcb); 4040 } else { 4041 SCTP_INP_WLOCK(inp); 4042 if (sasoc->sasoc_asocmaxrxt) 4043 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 4044 sasoc->sasoc_number_peer_destinations = 0; 4045 sasoc->sasoc_peer_rwnd = 0; 4046 sasoc->sasoc_local_rwnd = 0; 4047 if (sasoc->sasoc_cookie_life) { 4048 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); 4049 } 4050 SCTP_INP_WUNLOCK(inp); 4051 } 4052 } 4053 break; 4054 case SCTP_INITMSG: 4055 { 4056 struct sctp_initmsg *sinit; 4057 4058 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 4059 SCTP_INP_WLOCK(inp); 4060 if (sinit->sinit_num_ostreams) 4061 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 4062 4063 if (sinit->sinit_max_instreams) 4064 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 4065 4066 if (sinit->sinit_max_attempts) 4067 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 4068 4069 if (sinit->sinit_max_init_timeo) 4070 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 4071 SCTP_INP_WUNLOCK(inp); 4072 } 4073 break; 4074 case SCTP_PRIMARY_ADDR: 4075 { 4076 struct sctp_setprim *spa; 4077 struct sctp_nets *net, *lnet; 4078 4079 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 4080 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 4081 4082 net = NULL; 4083 if (stcb) { 4084 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 4085 } else { 4086 /* 4087 * We increment here since 4088 * sctp_findassociation_ep_addr() wil do a 4089 * decrement if it finds the stcb as long as 4090 * the locked tcb (last argument) is NOT a 4091 * TCB.. aka NULL. 4092 */ 4093 SCTP_INP_INCR_REF(inp); 4094 stcb = sctp_findassociation_ep_addr(&inp, 4095 (struct sockaddr *)&spa->ssp_addr, 4096 &net, NULL, NULL); 4097 if (stcb == NULL) { 4098 SCTP_INP_DECR_REF(inp); 4099 } 4100 } 4101 4102 if ((stcb) && (net)) { 4103 if ((net != stcb->asoc.primary_destination) && 4104 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 4105 /* Ok we need to set it */ 4106 lnet = stcb->asoc.primary_destination; 4107 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 4108 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 4109 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 4110 } 4111 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 4112 } 4113 } 4114 } else { 4115 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4116 error = EINVAL; 4117 } 4118 if (stcb) { 4119 SCTP_TCB_UNLOCK(stcb); 4120 } 4121 } 4122 break; 4123 case SCTP_SET_DYNAMIC_PRIMARY: 4124 { 4125 union sctp_sockstore *ss; 4126 4127 error = priv_check(curthread, 4128 PRIV_NETINET_RESERVEDPORT); 4129 if (error) 4130 break; 4131 4132 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); 4133 /* SUPER USER CHECK? 
*/ 4134 error = sctp_dynamic_set_primary(&ss->sa, vrf_id); 4135 } 4136 break; 4137 case SCTP_SET_PEER_PRIMARY_ADDR: 4138 { 4139 struct sctp_setpeerprim *sspp; 4140 4141 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 4142 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 4143 if (stcb != NULL) { 4144 struct sctp_ifa *ifa; 4145 4146 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr, 4147 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); 4148 if (ifa == NULL) { 4149 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4150 error = EINVAL; 4151 goto out_of_it; 4152 } 4153 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4154 /* 4155 * Must validate the ifa found is in 4156 * our ep 4157 */ 4158 struct sctp_laddr *laddr; 4159 int found = 0; 4160 4161 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4162 if (laddr->ifa == NULL) { 4163 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 4164 __FUNCTION__); 4165 continue; 4166 } 4167 if (laddr->ifa == ifa) { 4168 found = 1; 4169 break; 4170 } 4171 } 4172 if (!found) { 4173 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4174 error = EINVAL; 4175 goto out_of_it; 4176 } 4177 } 4178 if (sctp_set_primary_ip_address_sa(stcb, 4179 (struct sockaddr *)&sspp->sspp_addr) != 0) { 4180 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4181 error = EINVAL; 4182 } 4183 out_of_it: 4184 SCTP_TCB_UNLOCK(stcb); 4185 } else { 4186 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4187 error = EINVAL; 4188 } 4189 4190 } 4191 break; 4192 case SCTP_BINDX_ADD_ADDR: 4193 { 4194 struct sctp_getaddresses *addrs; 4195 size_t sz; 4196 struct thread *td; 4197 4198 td = (struct thread *)p; 4199 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, 4200 optsize); 4201 if (addrs->addr->sa_family == AF_INET) { 4202 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4203 if (optsize < sz) { 4204 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4205 error = EINVAL; 4206 break; 4207 } 4208 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4209 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4210 break; 4211 } 4212 #ifdef INET6 4213 } else if (addrs->addr->sa_family == AF_INET6) { 4214 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4215 if (optsize < sz) { 4216 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4217 error = EINVAL; 4218 break; 4219 } 4220 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4221 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4222 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4223 break; 4224 } 4225 #endif 4226 } else { 4227 error = EAFNOSUPPORT; 4228 break; 4229 } 4230 sctp_bindx_add_address(so, inp, addrs->addr, 4231 addrs->sget_assoc_id, vrf_id, 4232 &error, p); 4233 } 4234 break; 4235 case SCTP_BINDX_REM_ADDR: 4236 { 4237 struct sctp_getaddresses *addrs; 4238 size_t sz; 4239 struct thread *td; 4240 4241 td = (struct thread *)p; 4242 4243 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 4244 if (addrs->addr->sa_family == AF_INET) { 4245 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in); 4246 if (optsize < sz) { 4247 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 
4248 error = EINVAL; 4249 break; 4250 } 4251 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { 4252 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4253 break; 4254 } 4255 #ifdef INET6 4256 } else if (addrs->addr->sa_family == AF_INET6) { 4257 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6); 4258 if (optsize < sz) { 4259 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4260 error = EINVAL; 4261 break; 4262 } 4263 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), 4264 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 4265 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); 4266 break; 4267 } 4268 #endif 4269 } else { 4270 error = EAFNOSUPPORT; 4271 break; 4272 } 4273 sctp_bindx_delete_address(so, inp, addrs->addr, 4274 addrs->sget_assoc_id, vrf_id, 4275 &error); 4276 } 4277 break; 4278 default: 4279 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); 4280 error = ENOPROTOOPT; 4281 break; 4282 } /* end switch (opt) */ 4283 return (error); 4284 } 4285 4286 int 4287 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 4288 { 4289 void *optval = NULL; 4290 size_t optsize = 0; 4291 struct sctp_inpcb *inp; 4292 void *p; 4293 int error = 0; 4294 4295 inp = (struct sctp_inpcb *)so->so_pcb; 4296 if (inp == 0) { 4297 /* I made the same as TCP since we are not setup? */ 4298 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4299 return (ECONNRESET); 4300 } 4301 if (sopt->sopt_level != IPPROTO_SCTP) { 4302 /* wrong proto level... send back up to IP */ 4303 #ifdef INET6 4304 if (INP_CHECK_SOCKAF(so, AF_INET6)) 4305 error = ip6_ctloutput(so, sopt); 4306 else 4307 #endif /* INET6 */ 4308 error = ip_ctloutput(so, sopt); 4309 return (error); 4310 } 4311 optsize = sopt->sopt_valsize; 4312 if (optsize) { 4313 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); 4314 if (optval == NULL) { 4315 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); 4316 return (ENOBUFS); 4317 } 4318 error = sooptcopyin(sopt, optval, optsize, optsize); 4319 if (error) { 4320 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4321 goto out; 4322 } 4323 } 4324 p = (void *)sopt->sopt_td; 4325 if (sopt->sopt_dir == SOPT_SET) { 4326 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 4327 } else if (sopt->sopt_dir == SOPT_GET) { 4328 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 4329 } else { 4330 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4331 error = EINVAL; 4332 } 4333 if ((error == 0) && (optval != NULL)) { 4334 error = sooptcopyout(sopt, optval, optsize); 4335 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4336 } else if (optval != NULL) { 4337 SCTP_FREE(optval, SCTP_M_SOCKOPT); 4338 } 4339 out: 4340 return (error); 4341 } 4342 4343 4344 static int 4345 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 4346 { 4347 int error = 0; 4348 int create_lock_on = 0; 4349 uint32_t vrf_id; 4350 struct sctp_inpcb *inp; 4351 struct sctp_tcb *stcb = NULL; 4352 4353 inp = (struct sctp_inpcb *)so->so_pcb; 4354 if (inp == 0) { 4355 /* I made the same as TCP since we are not setup? 
*/ 4356 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4357 return (ECONNRESET); 4358 } 4359 if (addr == NULL) { 4360 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4361 return EINVAL; 4362 } 4363 #ifdef INET6 4364 if (addr->sa_family == AF_INET6) { 4365 struct sockaddr_in6 *sin6p; 4366 4367 if (addr->sa_len != sizeof(struct sockaddr_in6)) { 4368 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4369 return (EINVAL); 4370 } 4371 sin6p = (struct sockaddr_in6 *)addr; 4372 if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) { 4373 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 4374 return (error); 4375 } 4376 } else 4377 #endif 4378 if (addr->sa_family == AF_INET) { 4379 struct sockaddr_in *sinp; 4380 4381 if (addr->sa_len != sizeof(struct sockaddr_in)) { 4382 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4383 return (EINVAL); 4384 } 4385 sinp = (struct sockaddr_in *)addr; 4386 if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) { 4387 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); 4388 return (error); 4389 } 4390 } else { 4391 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); 4392 return (EAFNOSUPPORT); 4393 } 4394 SCTP_INP_INCR_REF(inp); 4395 SCTP_ASOC_CREATE_LOCK(inp); 4396 create_lock_on = 1; 4397 4398 4399 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4400 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4401 /* Should I really unlock? */ 4402 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); 4403 error = EFAULT; 4404 goto out_now; 4405 } 4406 #ifdef INET6 4407 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 4408 (addr->sa_family == AF_INET6)) { 4409 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4410 error = EINVAL; 4411 goto out_now; 4412 } 4413 #endif /* INET6 */ 4414 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 4415 SCTP_PCB_FLAGS_UNBOUND) { 4416 /* Bind an ephemeral port */ 4417 error = sctp_inpcb_bind(so, NULL, NULL, p); 4418 if (error) { 4419 goto out_now; 4420 } 4421 } 4422 /* Now do we connect? */ 4423 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 4424 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { 4425 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4426 error = EINVAL; 4427 goto out_now; 4428 } 4429 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4430 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4431 /* We are already connected AND the TCP model */ 4432 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4433 error = EADDRINUSE; 4434 goto out_now; 4435 } 4436 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4437 SCTP_INP_RLOCK(inp); 4438 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4439 SCTP_INP_RUNLOCK(inp); 4440 } else { 4441 /* 4442 * We increment here since sctp_findassociation_ep_addr() 4443 * will do a decrement if it finds the stcb as long as the 4444 * locked tcb (last argument) is NOT a TCB.. aka NULL. 
4445 */ 4446 SCTP_INP_INCR_REF(inp); 4447 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); 4448 if (stcb == NULL) { 4449 SCTP_INP_DECR_REF(inp); 4450 } else { 4451 SCTP_TCB_UNLOCK(stcb); 4452 } 4453 } 4454 if (stcb != NULL) { 4455 /* Already have or am bringing up an association */ 4456 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); 4457 error = EALREADY; 4458 goto out_now; 4459 } 4460 vrf_id = inp->def_vrf_id; 4461 /* We are GOOD to go */ 4462 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p); 4463 if (stcb == NULL) { 4464 /* Gak! no memory */ 4465 goto out_now; 4466 } 4467 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 4468 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 4469 /* Set the connected flag so we can queue data */ 4470 SOCKBUF_LOCK(&so->so_rcv); 4471 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 4472 SOCKBUF_UNLOCK(&so->so_rcv); 4473 SOCKBUF_LOCK(&so->so_snd); 4474 so->so_snd.sb_state &= ~SBS_CANTSENDMORE; 4475 SOCKBUF_UNLOCK(&so->so_snd); 4476 SOCK_LOCK(so); 4477 so->so_state &= ~SS_ISDISCONNECTING; 4478 SOCK_UNLOCK(so); 4479 soisconnecting(so); 4480 } 4481 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 4482 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 4483 4484 /* initialize authentication parameters for the assoc */ 4485 sctp_initialize_auth_params(inp, stcb); 4486 4487 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 4488 SCTP_TCB_UNLOCK(stcb); 4489 out_now: 4490 if (create_lock_on) { 4491 SCTP_ASOC_CREATE_UNLOCK(inp); 4492 } 4493 SCTP_INP_DECR_REF(inp); 4494 return error; 4495 } 4496 4497 int 4498 sctp_listen(struct socket *so, int backlog, struct thread *p) 4499 { 4500 /* 4501 * Note this module depends on the protocol processing being called 4502 * AFTER any socket level flags and backlog are applied to the 4503 * socket. The traditional way that the socket flags are applied is 4504 * AFTER protocol processing. We have made a change to the 4505 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 4506 * place if the socket API for SCTP is to work properly. 4507 */ 4508 4509 int error = 0; 4510 struct sctp_inpcb *inp; 4511 4512 inp = (struct sctp_inpcb *)so->so_pcb; 4513 if (inp == 0) { 4514 /* I made the same as TCP since we are not setup? */ 4515 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4516 return (ECONNRESET); 4517 } 4518 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) { 4519 /* See if we have a listener */ 4520 struct sctp_inpcb *tinp; 4521 union sctp_sockstore store, *sp; 4522 4523 sp = &store; 4524 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 4525 /* not bound all */ 4526 struct sctp_laddr *laddr; 4527 4528 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4529 memcpy(&store, &laddr->ifa->address, sizeof(store)); 4530 sp->sin.sin_port = inp->sctp_lport; 4531 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4532 if (tinp && (tinp != inp) && 4533 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4534 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4535 (tinp->sctp_socket->so_qlimit)) { 4536 /* 4537 * we have a listener already and 4538 * it's not this inp. 
*/ 4540 SCTP_INP_DECR_REF(tinp); 4541 return (EADDRINUSE); 4542 } else if (tinp) { 4543 SCTP_INP_DECR_REF(tinp); 4544 } 4545 } 4546 } else { 4547 /* Set up a local addr bound all */ 4548 memset(&store, 0, sizeof(store)); 4549 store.sin.sin_port = inp->sctp_lport; 4550 #ifdef INET6 4551 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4552 store.sa.sa_family = AF_INET6; 4553 store.sa.sa_len = sizeof(struct sockaddr_in6); 4554 } 4555 #endif 4556 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 4557 store.sa.sa_family = AF_INET; 4558 store.sa.sa_len = sizeof(struct sockaddr_in); 4559 } 4560 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id); 4561 if (tinp && (tinp != inp) && 4562 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && 4563 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 4564 (tinp->sctp_socket->so_qlimit)) { 4565 /* 4566 * we have a listener already and it's not 4567 * this inp. 4568 */ 4569 SCTP_INP_DECR_REF(tinp); 4570 return (EADDRINUSE); 4571 } else if (tinp) { 4572 SCTP_INP_DECR_REF(tinp); 4573 } 4574 } 4575 } 4576 SCTP_INP_RLOCK(inp); 4577 #ifdef SCTP_LOCK_LOGGING 4578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { 4579 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4580 } 4581 #endif 4582 SOCK_LOCK(so); 4583 error = solisten_proto_check(so); 4584 if (error) { 4585 SOCK_UNLOCK(so); 4586 SCTP_INP_RUNLOCK(inp); 4587 return (error); 4588 } 4589 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 4590 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4591 /* 4592 * The unlucky case: we are in the TCP pool with this inp, and 4593 * someone else is in the main inp slot. We must move this inp 4594 * (the listener) to the main slot and then move the former 4595 * listener into the TCP pool. 4596 */ 4597 if (sctp_swap_inpcb_for_listen(inp)) { 4598 goto in_use; 4599 } 4600 } 4601 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4602 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4603 /* We are already connected AND the TCP model */ 4604 in_use: 4605 SCTP_INP_RUNLOCK(inp); 4606 SOCK_UNLOCK(so); 4607 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); 4608 return (EADDRINUSE); 4609 } 4610 SCTP_INP_RUNLOCK(inp); 4611 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 4612 /* We must do a bind. */ 4613 SOCK_UNLOCK(so); 4614 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { 4615 /* bind error, probably perm */ 4616 return (error); 4617 } 4618 SOCK_LOCK(so); 4619 } 4620 /* It appears for 7.0 and on, we must always call this. 
*/ 4621 solisten_proto(so, backlog); 4622 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4623 /* remove the ACCEPTCONN flag for one-to-many sockets */ 4624 so->so_options &= ~SO_ACCEPTCONN; 4625 } 4626 if (backlog == 0) { 4627 /* turning off listen */ 4628 so->so_options &= ~SO_ACCEPTCONN; 4629 } 4630 SOCK_UNLOCK(so); 4631 return (error); 4632 } 4633 4634 static int sctp_defered_wakeup_cnt = 0; 4635 4636 int 4637 sctp_accept(struct socket *so, struct sockaddr **addr) 4638 { 4639 struct sctp_tcb *stcb; 4640 struct sctp_inpcb *inp; 4641 union sctp_sockstore store; 4642 4643 #ifdef INET6 4644 int error; 4645 4646 #endif 4647 inp = (struct sctp_inpcb *)so->so_pcb; 4648 4649 if (inp == 0) { 4650 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4651 return (ECONNRESET); 4652 } 4653 SCTP_INP_RLOCK(inp); 4654 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4655 SCTP_INP_RUNLOCK(inp); 4656 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); 4657 return (EOPNOTSUPP); 4658 } 4659 if (so->so_state & SS_ISDISCONNECTED) { 4660 SCTP_INP_RUNLOCK(inp); 4661 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); 4662 return (ECONNABORTED); 4663 } 4664 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4665 if (stcb == NULL) { 4666 SCTP_INP_RUNLOCK(inp); 4667 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4668 return (ECONNRESET); 4669 } 4670 SCTP_TCB_LOCK(stcb); 4671 SCTP_INP_RUNLOCK(inp); 4672 store = stcb->asoc.primary_destination->ro._l_addr; 4673 stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE; 4674 SCTP_TCB_UNLOCK(stcb); 4675 switch (store.sa.sa_family) { 4676 case AF_INET: 4677 { 4678 struct sockaddr_in *sin; 4679 4680 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4681 if (sin == NULL) 4682 return (ENOMEM); 4683 sin->sin_family = AF_INET; 4684 sin->sin_len = sizeof(*sin); 4685 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 4686 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 4687 *addr = (struct sockaddr *)sin; 4688 break; 4689 } 4690 #ifdef INET6 4691 case AF_INET6: 4692 { 4693 struct sockaddr_in6 *sin6; 4694 4695 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 4696 if (sin6 == NULL) 4697 return (ENOMEM); 4698 sin6->sin6_family = AF_INET6; 4699 sin6->sin6_len = sizeof(*sin6); 4700 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 4701 4702 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 4703 if ((error = sa6_recoverscope(sin6)) != 0) { 4704 SCTP_FREE_SONAME(sin6); 4705 return (error); 4706 } 4707 *addr = (struct sockaddr *)sin6; 4708 break; 4709 } 4710 #endif 4711 default: 4712 /* TSNH */ 4713 break; 4714 } 4715 /* Wake any delayed sleep action */ 4716 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 4717 SCTP_INP_WLOCK(inp); 4718 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 4719 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 4720 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 4721 SCTP_INP_WUNLOCK(inp); 4722 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 4723 if (sowriteable(inp->sctp_socket)) { 4724 sowwakeup_locked(inp->sctp_socket); 4725 } else { 4726 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 4727 } 4728 SCTP_INP_WLOCK(inp); 4729 } 4730 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 4731 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 4732 SCTP_INP_WUNLOCK(inp); 4733 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 4734 if (soreadable(inp->sctp_socket)) { 4735 sctp_defered_wakeup_cnt++; 4736 sorwakeup_locked(inp->sctp_socket); 4737 } else { 4738 
SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 4739 } 4740 SCTP_INP_WLOCK(inp); 4741 } 4742 SCTP_INP_WUNLOCK(inp); 4743 } 4744 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4745 SCTP_TCB_LOCK(stcb); 4746 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 4747 } 4748 return (0); 4749 } 4750 4751 int 4752 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 4753 { 4754 struct sockaddr_in *sin; 4755 uint32_t vrf_id; 4756 struct sctp_inpcb *inp; 4757 struct sctp_ifa *sctp_ifa; 4758 4759 /* 4760 * Do the malloc first in case it blocks. 4761 */ 4762 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4763 if (sin == NULL) 4764 return (ENOMEM); 4765 sin->sin_family = AF_INET; 4766 sin->sin_len = sizeof(*sin); 4767 inp = (struct sctp_inpcb *)so->so_pcb; 4768 if (!inp) { 4769 SCTP_FREE_SONAME(sin); 4770 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4771 return ECONNRESET; 4772 } 4773 SCTP_INP_RLOCK(inp); 4774 sin->sin_port = inp->sctp_lport; 4775 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 4776 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4777 struct sctp_tcb *stcb; 4778 struct sockaddr_in *sin_a; 4779 struct sctp_nets *net; 4780 int fnd; 4781 4782 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4783 if (stcb == NULL) { 4784 goto notConn; 4785 } 4786 fnd = 0; 4787 sin_a = NULL; 4788 SCTP_TCB_LOCK(stcb); 4789 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4790 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4791 if (sin_a == NULL) 4792 /* this will make coverity happy */ 4793 continue; 4794 4795 if (sin_a->sin_family == AF_INET) { 4796 fnd = 1; 4797 break; 4798 } 4799 } 4800 if ((!fnd) || (sin_a == NULL)) { 4801 /* punt */ 4802 SCTP_TCB_UNLOCK(stcb); 4803 goto notConn; 4804 } 4805 vrf_id = inp->def_vrf_id; 4806 sctp_ifa = sctp_source_address_selection(inp, 4807 stcb, 4808 (sctp_route_t *) & net->ro, 4809 net, 0, vrf_id); 4810 if (sctp_ifa) { 4811 sin->sin_addr = sctp_ifa->address.sin.sin_addr; 4812 sctp_free_ifa(sctp_ifa); 4813 } 4814 SCTP_TCB_UNLOCK(stcb); 4815 } else { 4816 /* For the bound all case you get back 0 */ 4817 notConn: 4818 sin->sin_addr.s_addr = 0; 4819 } 4820 4821 } else { 4822 /* Take the first IPv4 address in the list */ 4823 struct sctp_laddr *laddr; 4824 int fnd = 0; 4825 4826 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4827 if (laddr->ifa->address.sa.sa_family == AF_INET) { 4828 struct sockaddr_in *sin_a; 4829 4830 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa; 4831 sin->sin_addr = sin_a->sin_addr; 4832 fnd = 1; 4833 break; 4834 } 4835 } 4836 if (!fnd) { 4837 SCTP_FREE_SONAME(sin); 4838 SCTP_INP_RUNLOCK(inp); 4839 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 4840 return ENOENT; 4841 } 4842 } 4843 SCTP_INP_RUNLOCK(inp); 4844 (*addr) = (struct sockaddr *)sin; 4845 return (0); 4846 } 4847 4848 int 4849 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 4850 { 4851 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 4852 int fnd; 4853 struct sockaddr_in *sin_a; 4854 struct sctp_inpcb *inp; 4855 struct sctp_tcb *stcb; 4856 struct sctp_nets *net; 4857 4858 /* Do the malloc first in case it blocks. 
*/ 4859 inp = (struct sctp_inpcb *)so->so_pcb; 4860 if ((inp == NULL) || 4861 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 4862 /* UDP type and listeners will drop out here */ 4863 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); 4864 return (ENOTCONN); 4865 } 4866 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4867 if (sin == NULL) 4868 return (ENOMEM); 4869 sin->sin_family = AF_INET; 4870 sin->sin_len = sizeof(*sin); 4871 4872 /* We must recapture in case we blocked */ 4873 inp = (struct sctp_inpcb *)so->so_pcb; 4874 if (!inp) { 4875 SCTP_FREE_SONAME(sin); 4876 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4877 return ECONNRESET; 4878 } 4879 SCTP_INP_RLOCK(inp); 4880 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4881 if (stcb) { 4882 SCTP_TCB_LOCK(stcb); 4883 } 4884 SCTP_INP_RUNLOCK(inp); 4885 if (stcb == NULL) { 4886 SCTP_FREE_SONAME(sin); 4887 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); 4888 return ECONNRESET; 4889 } 4890 fnd = 0; 4891 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4892 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4893 if (sin_a->sin_family == AF_INET) { 4894 fnd = 1; 4895 sin->sin_port = stcb->rport; 4896 sin->sin_addr = sin_a->sin_addr; 4897 break; 4898 } 4899 } 4900 SCTP_TCB_UNLOCK(stcb); 4901 if (!fnd) { 4902 /* No IPv4 address */ 4903 SCTP_FREE_SONAME(sin); 4904 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); 4905 return ENOENT; 4906 } 4907 (*addr) = (struct sockaddr *)sin; 4908 return (0); 4909 } 4910 4911 struct pr_usrreqs sctp_usrreqs = { 4912 .pru_abort = sctp_abort, 4913 .pru_accept = sctp_accept, 4914 .pru_attach = sctp_attach, 4915 .pru_bind = sctp_bind, 4916 .pru_connect = sctp_connect, 4917 .pru_control = in_control, 4918 .pru_close = sctp_close, 4919 .pru_detach = sctp_close, 4920 .pru_sopoll = sopoll_generic, 4921 .pru_flush = sctp_flush, 4922 .pru_disconnect = sctp_disconnect, 4923 .pru_listen = sctp_listen, 4924 .pru_peeraddr = sctp_peeraddr, 4925 .pru_send = sctp_sendm, 4926 .pru_shutdown = sctp_shutdown, 4927 .pru_sockaddr = sctp_ingetaddr, 4928 .pru_sosend = sctp_sosend, 4929 .pru_soreceive = sctp_soreceive 4930 }; 4931
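/*
 * Illustrative user-space sketch (not part of the kernel build): the socket
 * options handled in sctp_setopt() above are reached through
 * sctp_ctloutput(), and the association setup goes through sctp_connect(),
 * so a one-to-one (SOCK_STREAM) SCTP client could exercise SCTP_RTOINFO and
 * SCTP_PEER_ADDR_PARAMS roughly as below.  The function name
 * sctp_client_example(), the peer address 192.0.2.1, port 5001, and the
 * timer values are placeholders chosen for illustration only.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	sctp_client_example(void)
 *	{
 *		struct sockaddr_in peer;
 *		struct sctp_rtoinfo rto;
 *		struct sctp_paddrparams pp;
 *		int fd;
 *
 *		fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *		if (fd < 0)
 *			return (-1);
 *
 *		memset(&rto, 0, sizeof(rto));
 *		rto.srto_initial = 1000;
 *		rto.srto_min = 500;
 *		rto.srto_max = 5000;
 *		(void)setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 *
 *		memset(&peer, 0, sizeof(peer));
 *		peer.sin_family = AF_INET;
 *		peer.sin_len = sizeof(peer);
 *		peer.sin_port = htons(5001);
 *		peer.sin_addr.s_addr = inet_addr("192.0.2.1");
 *		if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
 *			close(fd);
 *			return (-1);
 *		}
 *
 *		memset(&pp, 0, sizeof(pp));
 *		memcpy(&pp.spp_address, &peer, sizeof(peer));
 *		pp.spp_hbinterval = 5000;
 *		pp.spp_flags = SPP_HB_ENABLE;
 *		(void)setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));
 *
 *		close(fd);
 *		return (0);
 *	}
 *
 * Per the SCTP socket API the srto_* fields are expressed in milliseconds,
 * and sctp_setopt() above requires srto_min <= srto_initial <= srto_max or
 * it returns EINVAL; spp_hbinterval is likewise a millisecond heartbeat
 * interval applied to the peer address copied into spp_address.
 */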