1 /*- 2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 #include <netinet/sctp_os.h> 36 #include <sys/proc.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctp_header.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_output.h> 41 #include <netinet/sctp_bsd_addr.h> 42 #include <netinet/sctp_uio.h> 43 #include <netinet/sctp_asconf.h> 44 #include <netinet/sctputil.h> 45 #include <netinet/sctp_indata.h> 46 #include <netinet/sctp_asconf.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_auth.h> 49 50 51 52 /* 53 * sysctl tunable variables 54 */ 55 int sctp_sendspace = (128 * 1024); 56 int sctp_recvspace = 128 * (1024 + 57 #ifdef INET6 58 sizeof(struct sockaddr_in6) 59 #else 60 sizeof(struct sockaddr_in) 61 #endif 62 ); 63 int sctp_mbuf_threshold_count = SCTP_DEFAULT_MBUFS_IN_CHAIN; 64 int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF; 65 int sctp_ecn_enable = 1; 66 int sctp_ecn_nonce = 0; 67 int sctp_strict_sacks = 0; 68 int sctp_no_csum_on_loopback = 1; 69 int sctp_strict_init = 1; 70 int sctp_abort_if_one_2_one_hits_limit = 0; 71 int sctp_strict_data_order = 0; 72 73 int sctp_peer_chunk_oh = sizeof(struct mbuf); 74 int sctp_max_burst_default = SCTP_DEF_MAX_BURST; 75 int sctp_use_cwnd_based_maxburst = 1; 76 int sctp_do_drain = 1; 77 int sctp_warm_the_crc32_table = 0; 78 79 unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE; 80 unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC; 81 unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC; 82 unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC; 83 unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC; 84 unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC; 85 unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND; 86 unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND; 87 unsigned int sctp_rto_initial_default = SCTP_RTO_INITIAL; 88 unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND; 89 unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE; 90 unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT; 91 unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND; 92 unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_PATH_RTX; 93 unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL; 94 unsigned int sctp_add_more_threshold = SCTP_DEFAULT_ADD_MORE; 95 96 uint32_t sctp_asoc_free_resc_limit = SCTP_DEF_ASOC_RESC_LIMIT; 97 uint32_t sctp_system_free_resc_limit = SCTP_DEF_SYSTEM_RESC_LIMIT; 98 99 int sctp_min_split_point = SCTP_DEFAULT_SPLIT_POINT_MIN; 100 int sctp_pcbtblsize = SCTP_PCBHASHSIZE; 101 int sctp_hashtblsize = SCTP_TCBHASHSIZE; 102 int sctp_chunkscale = SCTP_CHUNKQUEUE_SCALE; 103 104 unsigned int sctp_cmt_on_off = 0; 105 unsigned int sctp_cmt_sockopt_on_off = 0; 106 unsigned int sctp_cmt_use_dac = 0; 107 108 int sctp_L2_abc_variable = 1; 109 unsigned int sctp_early_fr = 0; 110 unsigned int sctp_early_fr_msec = SCTP_MINFR_MSEC_TIMER; 111 unsigned int sctp_use_rttvar_cc = 0; 112 int sctp_says_check_for_deadlock = 0; 113 unsigned int sctp_asconf_auth_nochk = 0; 114 unsigned int sctp_nat_friendly = 1; 115 unsigned int sctp_auth_disable = 0; 116 unsigned int sctp_auth_random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT; 117 unsigned int sctp_auth_hmac_id_default = SCTP_AUTH_HMAC_ID_SHA1; 118 struct sctpstat sctpstat; 119 120 #ifdef SCTP_DEBUG 121 extern uint32_t sctp_debug_on; 122 123 #endif /* SCTP_DEBUG */ 124 125 126 void 127 sctp_init(void) 128 { 129 /* Init the SCTP pcb in sctp_pcb.c */ 130 u_long sb_max_adj; 131 132 sctp_pcb_init(); 133 134 if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) 135 sctp_max_chunks_on_queue = (nmbclusters / 8); 136 /* 137 * Allow a user to take no more than 1/2 the number of clusters or 138 * the SB_MAX whichever is smaller for the send window. 139 */ 140 sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); 141 sctp_sendspace = min((min(SB_MAX, sb_max_adj)), 142 ((nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT)); 143 /* 144 * Now for the recv window, should we take the same amount? or 145 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For 146 * now I will just copy. 147 */ 148 sctp_recvspace = sctp_sendspace; 149 150 151 } 152 153 154 155 /* 156 * cleanup of the sctppcbinfo structure. 157 * Assumes that the sctppcbinfo lock is held. 158 */ 159 void 160 sctp_pcbinfo_cleanup(void) 161 { 162 /* free the hash tables */ 163 if (sctppcbinfo.sctp_asochash != NULL) 164 SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark); 165 if (sctppcbinfo.sctp_ephash != NULL) 166 SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark); 167 if (sctppcbinfo.sctp_tcpephash != NULL) 168 SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark); 169 if (sctppcbinfo.sctp_restarthash != NULL) 170 SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark); 171 } 172 173 #ifdef INET6 174 void 175 ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip) 176 { 177 bzero(ip6, sizeof(*ip6)); 178 179 ip6->ip6_vfc = IPV6_VERSION; 180 ip6->ip6_plen = ip->ip_len; 181 ip6->ip6_nxt = ip->ip_p; 182 ip6->ip6_hlim = ip->ip_ttl; 183 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] = 184 IPV6_ADDR_INT32_SMP; 185 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr; 186 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr; 187 } 188 189 #endif /* INET6 */ 190 191 192 static void 193 sctp_pathmtu_adustment(struct sctp_inpcb *inp, 194 struct sctp_tcb *stcb, 195 struct sctp_nets *net, 196 uint16_t nxtsz) 197 { 198 struct sctp_tmit_chunk *chk; 199 200 /* Adjust that too */ 201 stcb->asoc.smallest_mtu = nxtsz; 202 /* now off to subtract IP_DF flag if needed */ 203 204 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) { 205 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) { 206 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 207 } 208 } 209 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 210 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) { 211 /* 212 * For this guy we also mark for immediate resend 213 * since we sent to big of chunk 214 */ 215 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 216 if (chk->sent != SCTP_DATAGRAM_RESEND) { 217 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 218 } 219 chk->sent = SCTP_DATAGRAM_RESEND; 220 chk->rec.data.doing_fast_retransmit = 0; 221 222 /* Clear any time so NO RTT is being done */ 223 chk->do_rtt = 0; 224 if (stcb->asoc.total_flight >= chk->book_size) 225 stcb->asoc.total_flight -= chk->book_size; 226 else 227 stcb->asoc.total_flight = 0; 228 if (stcb->asoc.total_flight_count > 0) 229 stcb->asoc.total_flight_count--; 230 if (net->flight_size >= chk->book_size) 231 net->flight_size -= chk->book_size; 232 else 233 net->flight_size = 0; 234 } 235 } 236 } 237 238 static void 239 sctp_notify_mbuf(struct sctp_inpcb *inp, 240 struct sctp_tcb *stcb, 241 struct sctp_nets *net, 242 struct ip *ip, 243 struct sctphdr *sh) 244 { 245 struct icmp *icmph; 246 int totsz, tmr_stopped = 0; 247 uint16_t nxtsz; 248 249 /* protection */ 250 if ((inp == NULL) || (stcb == NULL) || (net == NULL) || 251 (ip == NULL) || (sh == NULL)) { 252 if (stcb != NULL) 253 SCTP_TCB_UNLOCK(stcb); 254 return; 255 } 256 /* First job is to verify the vtag matches what I would send */ 257 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) { 258 SCTP_TCB_UNLOCK(stcb); 259 return; 260 } 261 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) - 262 sizeof(struct ip))); 263 if (icmph->icmp_type != ICMP_UNREACH) { 264 /* We only care about unreachable */ 265 SCTP_TCB_UNLOCK(stcb); 266 return; 267 } 268 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) { 269 /* not a unreachable message due to frag. */ 270 SCTP_TCB_UNLOCK(stcb); 271 return; 272 } 273 totsz = ip->ip_len; 274 275 nxtsz = ntohs(icmph->icmp_seq); 276 if (nxtsz == 0) { 277 /* 278 * old type router that does not tell us what the next size 279 * mtu is. Rats we will have to guess (in a educated fashion 280 * of course) 281 */ 282 nxtsz = find_next_best_mtu(totsz); 283 } 284 /* Stop any PMTU timer */ 285 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 286 tmr_stopped = 1; 287 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 288 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1); 289 } 290 /* Adjust destination size limit */ 291 if (net->mtu > nxtsz) { 292 net->mtu = nxtsz; 293 } 294 /* now what about the ep? */ 295 if (stcb->asoc.smallest_mtu > nxtsz) { 296 sctp_pathmtu_adustment(inp, stcb, net, nxtsz); 297 } 298 if (tmr_stopped) 299 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 300 301 SCTP_TCB_UNLOCK(stcb); 302 } 303 304 305 void 306 sctp_notify(struct sctp_inpcb *inp, 307 int errno, 308 struct sctphdr *sh, 309 struct sockaddr *to, 310 struct sctp_tcb *stcb, 311 struct sctp_nets *net) 312 { 313 /* protection */ 314 if ((inp == NULL) || (stcb == NULL) || (net == NULL) || 315 (sh == NULL) || (to == NULL)) { 316 return; 317 } 318 /* First job is to verify the vtag matches what I would send */ 319 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) { 320 return; 321 } 322 /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */ 323 324 if ((errno == EHOSTUNREACH) || /* Host is not reachable */ 325 (errno == EHOSTDOWN) || /* Host is down */ 326 (errno == ECONNREFUSED) || /* Host refused the connection, (not 327 * an abort?) */ 328 (errno == ENOPROTOOPT) /* SCTP is not present on host */ 329 ) { 330 /* 331 * Hmm reachablity problems we must examine closely. If its 332 * not reachable, we may have lost a network. Or if there is 333 * NO protocol at the other end named SCTP. well we consider 334 * it a OOTB abort. 335 */ 336 if ((errno == EHOSTUNREACH) || (errno == EHOSTDOWN)) { 337 if (net->dest_state & SCTP_ADDR_REACHABLE) { 338 /* Ok that destination is NOT reachable */ 339 net->dest_state &= ~SCTP_ADDR_REACHABLE; 340 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 341 net->error_count = net->failure_threshold + 1; 342 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 343 stcb, SCTP_FAILED_THRESHOLD, 344 (void *)net); 345 } 346 if (stcb) 347 SCTP_TCB_UNLOCK(stcb); 348 } else { 349 /* 350 * Here the peer is either playing tricks on us, 351 * including an address that belongs to someone who 352 * does not support SCTP OR was a userland 353 * implementation that shutdown and now is dead. In 354 * either case treat it like a OOTB abort with no 355 * TCB 356 */ 357 sctp_abort_notification(stcb, SCTP_PEER_FAULTY); 358 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); 359 /* no need to unlock here, since the TCB is gone */ 360 } 361 } else { 362 /* Send all others to the app */ 363 if (stcb) 364 SCTP_TCB_UNLOCK(stcb); 365 366 367 if (inp->sctp_socket) { 368 #ifdef SCTP_LOCK_LOGGING 369 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCK); 370 #endif 371 SOCK_LOCK(inp->sctp_socket); 372 inp->sctp_socket->so_error = errno; 373 sctp_sowwakeup(inp, inp->sctp_socket); 374 SOCK_UNLOCK(inp->sctp_socket); 375 } 376 } 377 } 378 379 void 380 sctp_ctlinput(cmd, sa, vip) 381 int cmd; 382 struct sockaddr *sa; 383 void *vip; 384 { 385 struct ip *ip = vip; 386 struct sctphdr *sh; 387 388 if (sa->sa_family != AF_INET || 389 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) { 390 return; 391 } 392 if (PRC_IS_REDIRECT(cmd)) { 393 ip = 0; 394 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { 395 return; 396 } 397 if (ip) { 398 struct sctp_inpcb *inp = NULL; 399 struct sctp_tcb *stcb = NULL; 400 struct sctp_nets *net = NULL; 401 struct sockaddr_in to, from; 402 403 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 404 bzero(&to, sizeof(to)); 405 bzero(&from, sizeof(from)); 406 from.sin_family = to.sin_family = AF_INET; 407 from.sin_len = to.sin_len = sizeof(to); 408 from.sin_port = sh->src_port; 409 from.sin_addr = ip->ip_src; 410 to.sin_port = sh->dest_port; 411 to.sin_addr = ip->ip_dst; 412 413 /* 414 * 'to' holds the dest of the packet that failed to be sent. 415 * 'from' holds our local endpoint address. Thus we reverse 416 * the to and the from in the lookup. 417 */ 418 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from, 419 (struct sockaddr *)&to, 420 &inp, &net, 1); 421 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) { 422 if (cmd != PRC_MSGSIZE) { 423 int cm; 424 425 if (cmd == PRC_HOSTDEAD) { 426 cm = EHOSTUNREACH; 427 } else { 428 cm = inetctlerrmap[cmd]; 429 } 430 sctp_notify(inp, cm, sh, 431 (struct sockaddr *)&to, stcb, 432 net); 433 } else { 434 /* handle possible ICMP size messages */ 435 sctp_notify_mbuf(inp, stcb, net, ip, sh); 436 } 437 } else { 438 if ((stcb == NULL) && (inp != NULL)) { 439 /* reduce ref-count */ 440 SCTP_INP_WLOCK(inp); 441 SCTP_INP_DECR_REF(inp); 442 SCTP_INP_WUNLOCK(inp); 443 } 444 } 445 } 446 return; 447 } 448 449 static int 450 sctp_getcred(SYSCTL_HANDLER_ARGS) 451 { 452 struct xucred xuc; 453 struct sockaddr_in addrs[2]; 454 struct sctp_inpcb *inp; 455 struct sctp_nets *net; 456 struct sctp_tcb *stcb; 457 int error; 458 459 /* 460 * XXXRW: Other instances of getcred use SUSER_ALLOWJAIL, as socket 461 * visibility is scoped using cr_canseesocket(), which it is not 462 * here. 463 */ 464 error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED, 0); 465 if (error) 466 return (error); 467 468 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 469 if (error) 470 return (error); 471 472 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]), 473 sintosa(&addrs[1]), 474 &inp, &net, 1); 475 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { 476 if ((inp != NULL) && (stcb == NULL)) { 477 /* reduce ref-count */ 478 SCTP_INP_WLOCK(inp); 479 SCTP_INP_DECR_REF(inp); 480 goto cred_can_cont; 481 } 482 error = ENOENT; 483 goto out; 484 } 485 SCTP_TCB_UNLOCK(stcb); 486 /* 487 * We use the write lock here, only since in the error leg we need 488 * it. If we used RLOCK, then we would have to 489 * wlock/decr/unlock/rlock. Which in theory could create a hole. 490 * Better to use higher wlock. 491 */ 492 SCTP_INP_WLOCK(inp); 493 cred_can_cont: 494 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); 495 if (error) { 496 SCTP_INP_WUNLOCK(inp); 497 goto out; 498 } 499 cru2x(inp->sctp_socket->so_cred, &xuc); 500 SCTP_INP_WUNLOCK(inp); 501 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 502 out: 503 return (error); 504 } 505 506 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 507 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); 508 509 static int 510 sctp_assoclist(SYSCTL_HANDLER_ARGS) 511 { 512 unsigned int number_of_endpoints; 513 unsigned int number_of_local_addresses; 514 unsigned int number_of_associations; 515 unsigned int number_of_remote_addresses; 516 unsigned int n; 517 int error; 518 struct sctp_inpcb *inp; 519 struct sctp_tcb *stcb; 520 struct sctp_nets *net; 521 struct sctp_laddr *laddr; 522 struct xsctp_inpcb xinpcb; 523 struct xsctp_tcb xstcb; 524 525 /* struct xsctp_laddr xladdr; */ 526 struct xsctp_raddr xraddr; 527 528 number_of_endpoints = 0; 529 number_of_local_addresses = 0; 530 number_of_associations = 0; 531 number_of_remote_addresses = 0; 532 533 SCTP_INP_INFO_RLOCK(); 534 if (req->oldptr == USER_ADDR_NULL) { 535 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) { 536 SCTP_INP_RLOCK(inp); 537 number_of_endpoints++; 538 /* FIXME MT */ 539 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 540 number_of_local_addresses++; 541 } 542 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 543 number_of_associations++; 544 /* FIXME MT */ 545 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) { 546 number_of_local_addresses++; 547 } 548 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 549 number_of_remote_addresses++; 550 } 551 } 552 SCTP_INP_RUNLOCK(inp); 553 } 554 SCTP_INP_INFO_RUNLOCK(); 555 n = (number_of_endpoints + 1) * sizeof(struct xsctp_inpcb) + 556 number_of_local_addresses * sizeof(struct xsctp_laddr) + 557 number_of_associations * sizeof(struct xsctp_tcb) + 558 number_of_remote_addresses * sizeof(struct xsctp_raddr); 559 #ifdef SCTP_DEBUG 560 printf("inps = %u, stcbs = %u, laddrs = %u, raddrs = %u\n", 561 number_of_endpoints, number_of_associations, 562 number_of_local_addresses, number_of_remote_addresses); 563 #endif 564 /* request some more memory than needed */ 565 req->oldidx = (n + n / 8); 566 return 0; 567 } 568 if (req->newptr != USER_ADDR_NULL) { 569 SCTP_INP_INFO_RUNLOCK(); 570 return EPERM; 571 } 572 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) { 573 SCTP_INP_RLOCK(inp); 574 number_of_local_addresses = 0; 575 number_of_associations = 0; 576 /* 577 * LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) 578 * { number_of_local_addresses++; } 579 */ 580 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 581 number_of_associations++; 582 } 583 xinpcb.last = 0; 584 xinpcb.local_port = ntohs(inp->sctp_lport); 585 xinpcb.number_local_addresses = number_of_local_addresses; 586 xinpcb.number_associations = number_of_associations; 587 xinpcb.flags = inp->sctp_flags; 588 xinpcb.features = inp->sctp_features; 589 xinpcb.total_sends = inp->total_sends; 590 xinpcb.total_recvs = inp->total_recvs; 591 xinpcb.total_nospaces = inp->total_nospaces; 592 SCTP_INP_INCR_REF(inp); 593 SCTP_INP_RUNLOCK(inp); 594 SCTP_INP_INFO_RUNLOCK(); 595 error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb)); 596 if (error) { 597 return error; 598 } 599 SCTP_INP_INFO_RLOCK(); 600 SCTP_INP_RLOCK(inp); 601 /* FIXME MT */ 602 /* 603 * LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) 604 * { error = SYSCTL_OUT(req, &xladdr, sizeof(struct 605 * xsctp_laddr)); if (error) { #if 606 * defined(SCTP_PER_SOCKET_LOCKING) 607 * SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1); 608 * SCTP_UNLOCK_SHARED(sctppcbinfo.ipi_ep_mtx); #endif 609 * SCTP_INP_RUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return 610 * error; } } 611 */ 612 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 613 SCTP_TCB_LOCK(stcb); 614 atomic_add_int(&stcb->asoc.refcnt, 1); 615 SCTP_TCB_UNLOCK(stcb); 616 number_of_local_addresses = 0; 617 number_of_remote_addresses = 0; 618 /* FIXME MT */ 619 /* 620 * LIST_FOREACH(laddr, 621 * &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) 622 * { number_of_local_addresses++; } 623 */ 624 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 625 number_of_remote_addresses++; 626 } 627 xstcb.LocalPort = ntohs(inp->sctp_lport); 628 xstcb.RemPort = ntohs(stcb->rport); 629 if (stcb->asoc.primary_destination != NULL) 630 xstcb.RemPrimAddr = stcb->asoc.primary_destination->ro._l_addr; 631 xstcb.HeartBeatInterval = stcb->asoc.heart_beat_delay; 632 xstcb.State = SCTP_GET_STATE(&stcb->asoc); /* FIXME */ 633 xstcb.InStreams = stcb->asoc.streamincnt; 634 xstcb.OutStreams = stcb->asoc.streamoutcnt; 635 xstcb.MaxRetr = stcb->asoc.overall_error_count; 636 xstcb.PrimProcess = 0; /* not really supported yet */ 637 xstcb.T1expireds = stcb->asoc.timoinit + stcb->asoc.timocookie; 638 xstcb.T2expireds = stcb->asoc.timoshutdown + stcb->asoc.timoshutdownack; 639 xstcb.RtxChunks = stcb->asoc.marked_retrans; 640 xstcb.StartTime = stcb->asoc.start_time; 641 xstcb.DiscontinuityTime = stcb->asoc.discontinuity_time; 642 643 xstcb.number_local_addresses = number_of_local_addresses; 644 xstcb.number_remote_addresses = number_of_remote_addresses; 645 xstcb.total_sends = stcb->total_sends; 646 xstcb.total_recvs = stcb->total_recvs; 647 xstcb.local_tag = stcb->asoc.my_vtag; 648 xstcb.remote_tag = stcb->asoc.peer_vtag; 649 xstcb.initial_tsn = stcb->asoc.init_seq_number; 650 xstcb.highest_tsn = stcb->asoc.sending_seq - 1; 651 xstcb.cumulative_tsn = stcb->asoc.last_acked_seq; 652 xstcb.cumulative_tsn_ack = stcb->asoc.cumulative_tsn; 653 SCTP_INP_RUNLOCK(inp); 654 SCTP_INP_INFO_RUNLOCK(); 655 error = SYSCTL_OUT(req, &xstcb, sizeof(struct xsctp_tcb)); 656 if (error) { 657 atomic_add_int(&stcb->asoc.refcnt, -1); 658 return error; 659 } 660 /* FIXME MT */ 661 /* 662 * LIST_FOREACH(laddr, 663 * &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) 664 * { error = SYSCTL_OUT(req, &xladdr, sizeof(struct 665 * xsctp_laddr)); if (error) { #if 666 * defined(SCTP_PER_SOCKET_LOCKING) 667 * SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1); 668 * SCTP_UNLOCK_SHARED(sctppcbinfo.ipi_ep_mtx); 669 * #endif SCTP_INP_RUNLOCK(inp); 670 * SCTP_INP_INFO_RUNLOCK(); return error; } 671 * */ 672 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 673 xraddr.RemAddr = net->ro._l_addr; 674 xraddr.RemAddrActive = ((net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE); 675 xraddr.RemAddrConfirmed = ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0); 676 xraddr.RemAddrHBActive = ((net->dest_state & SCTP_ADDR_NOHB) == 0); 677 xraddr.RemAddrRTO = net->RTO; 678 xraddr.RemAddrMaxPathRtx = net->failure_threshold; 679 xraddr.RemAddrRtx = net->marked_retrans; 680 xraddr.RemAddrErrorCounter = net->error_count; 681 xraddr.RemAddrCwnd = net->cwnd; 682 xraddr.RemAddrFlightSize = net->flight_size; 683 xraddr.RemAddrStartTime = net->start_time; 684 error = SYSCTL_OUT(req, &xraddr, sizeof(struct xsctp_raddr)); 685 if (error) { 686 atomic_add_int(&stcb->asoc.refcnt, -1); 687 return error; 688 } 689 } 690 atomic_add_int(&stcb->asoc.refcnt, -1); 691 SCTP_INP_INFO_RLOCK(); 692 SCTP_INP_RLOCK(inp); 693 } 694 SCTP_INP_DECR_REF(inp); 695 SCTP_INP_RUNLOCK(inp); 696 } 697 SCTP_INP_INFO_RUNLOCK(); 698 699 xinpcb.last = 1; 700 xinpcb.local_port = 0; 701 xinpcb.number_local_addresses = 0; 702 xinpcb.number_associations = 0; 703 xinpcb.flags = 0; 704 xinpcb.features = 0; 705 error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb)); 706 return error; 707 } 708 709 /* 710 * sysctl definitions 711 */ 712 713 SYSCTL_INT(_net_inet_sctp, OID_AUTO, sendspace, CTLFLAG_RW, 714 &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size"); 715 716 SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW, 717 &sctp_recvspace, 0, "Maximum incoming SCTP buffer size"); 718 719 SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW, 720 &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF"); 721 722 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW, 723 &sctp_ecn_enable, 0, "Enable SCTP ECN"); 724 725 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW, 726 &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce"); 727 728 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW, 729 &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking"); 730 731 SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW, 732 &sctp_no_csum_on_loopback, 0, 733 "Enable NO Csum on packets sent on loopback"); 734 735 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW, 736 &sctp_strict_init, 0, 737 "Enable strict INIT/INIT-ACK singleton enforcement"); 738 739 SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW, 740 &sctp_peer_chunk_oh, 0, 741 "Amount to debit peers rwnd per chunk sent"); 742 743 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW, 744 &sctp_max_burst_default, 0, 745 "Default max burst for sctp endpoints"); 746 747 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW, 748 &sctp_max_chunks_on_queue, 0, 749 "Default max chunks on queue per asoc"); 750 751 SYSCTL_INT(_net_inet_sctp, OID_AUTO, tcbhashsize, CTLFLAG_RW, 752 &sctp_hashtblsize, 0, 753 "Tuneable for Hash table sizes"); 754 755 SYSCTL_INT(_net_inet_sctp, OID_AUTO, min_split_point, CTLFLAG_RW, 756 &sctp_min_split_point, 0, 757 "Minimum size when splitting a chunk"); 758 759 SYSCTL_INT(_net_inet_sctp, OID_AUTO, pcbhashsize, CTLFLAG_RW, 760 &sctp_pcbtblsize, 0, 761 "Tuneable for PCB Hash table sizes"); 762 763 SYSCTL_INT(_net_inet_sctp, OID_AUTO, sys_resource, CTLFLAG_RW, 764 &sctp_system_free_resc_limit, 0, 765 "Max number of cached resources in the system"); 766 767 SYSCTL_INT(_net_inet_sctp, OID_AUTO, asoc_resource, CTLFLAG_RW, 768 &sctp_asoc_free_resc_limit, 0, 769 "Max number of cached resources in an asoc"); 770 771 SYSCTL_INT(_net_inet_sctp, OID_AUTO, chunkscale, CTLFLAG_RW, 772 &sctp_chunkscale, 0, 773 "Tuneable for Scaling of number of chunks and messages"); 774 775 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW, 776 &sctp_delayed_sack_time_default, 0, 777 "Default delayed SACK timer in msec"); 778 779 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW, 780 &sctp_heartbeat_interval_default, 0, 781 "Default heartbeat interval in msec"); 782 783 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW, 784 &sctp_pmtu_raise_time_default, 0, 785 "Default PMTU raise timer in sec"); 786 787 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW, 788 &sctp_shutdown_guard_time_default, 0, 789 "Default shutdown guard timer in sec"); 790 791 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW, 792 &sctp_secret_lifetime_default, 0, 793 "Default secret lifetime in sec"); 794 795 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW, 796 &sctp_rto_max_default, 0, 797 "Default maximum retransmission timeout in msec"); 798 799 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW, 800 &sctp_rto_min_default, 0, 801 "Default minimum retransmission timeout in msec"); 802 803 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, CTLFLAG_RW, 804 &sctp_rto_initial_default, 0, 805 "Default initial retransmission timeout in msec"); 806 807 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW, 808 &sctp_init_rto_max_default, 0, 809 "Default maximum retransmission timeout during association setup in msec"); 810 811 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW, 812 &sctp_valid_cookie_life_default, 0, 813 "Default cookie lifetime in sec"); 814 815 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW, 816 &sctp_init_rtx_max_default, 0, 817 "Default maximum number of retransmission for INIT chunks"); 818 819 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW, 820 &sctp_assoc_rtx_max_default, 0, 821 "Default maximum number of retransmissions per association"); 822 823 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW, 824 &sctp_path_rtx_max_default, 0, 825 "Default maximum of retransmissions per path"); 826 827 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, add_more_on_output, CTLFLAG_RW, 828 &sctp_add_more_threshold, 0, 829 "When space wise is it worthwhile to try to add more to a socket send buffer"); 830 831 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW, 832 &sctp_nr_outgoing_streams_default, 0, 833 "Default number of outgoing streams"); 834 835 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cmt_on_off, CTLFLAG_RW, 836 &sctp_cmt_on_off, 0, 837 "CMT ON/OFF flag"); 838 839 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cwnd_maxburst, CTLFLAG_RW, 840 &sctp_use_cwnd_based_maxburst, 0, 841 "Use a CWND adjusting maxburst"); 842 843 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, early_fast_retran, CTLFLAG_RW, 844 &sctp_early_fr, 0, 845 "Early Fast Retransmit with Timer"); 846 847 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, use_rttvar_congctrl, CTLFLAG_RW, 848 &sctp_use_rttvar_cc, 0, 849 "Use congestion control via rtt variation"); 850 851 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, deadlock_detect, CTLFLAG_RW, 852 &sctp_says_check_for_deadlock, 0, 853 "SMP Deadlock detection on/off"); 854 855 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, early_fast_retran_msec, CTLFLAG_RW, 856 &sctp_early_fr_msec, 0, 857 "Early Fast Retransmit minimum timer value"); 858 859 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, asconf_auth_nochk, CTLFLAG_RW, 860 &sctp_asconf_auth_nochk, 0, 861 "Disable SCTP ASCONF AUTH requirement"); 862 863 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_disable, CTLFLAG_RW, 864 &sctp_auth_disable, 0, 865 "Disable SCTP AUTH chunk requirement/function"); 866 867 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_random_len, CTLFLAG_RW, 868 &sctp_auth_random_len, 0, 869 "Length of AUTH RANDOMs"); 870 871 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_hmac_id, CTLFLAG_RW, 872 &sctp_auth_hmac_id_default, 0, 873 "Default HMAC Id for SCTP AUTHenthication"); 874 875 SYSCTL_INT(_net_inet_sctp, OID_AUTO, abc_l_var, CTLFLAG_RW, 876 &sctp_L2_abc_variable, 0, 877 "SCTP ABC max increase per SACK (L)"); 878 879 SYSCTL_INT(_net_inet_sctp, OID_AUTO, max_chained_mbufs, CTLFLAG_RW, 880 &sctp_mbuf_threshold_count, 0, 881 "Default max number of small mbufs on a chain"); 882 883 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cmt_use_dac, CTLFLAG_RW, 884 &sctp_cmt_use_dac, 0, 885 "CMT DAC ON/OFF flag"); 886 887 SYSCTL_INT(_net_inet_sctp, OID_AUTO, do_sctp_drain, CTLFLAG_RW, 888 &sctp_do_drain, 0, 889 "Should SCTP respond to the drain calls"); 890 891 SYSCTL_INT(_net_inet_sctp, OID_AUTO, warm_crc_table, CTLFLAG_RW, 892 &sctp_warm_the_crc32_table, 0, 893 "Should the CRC32c tables be warmed before checksum?"); 894 895 SYSCTL_INT(_net_inet_sctp, OID_AUTO, abort_at_limit, CTLFLAG_RW, 896 &sctp_abort_if_one_2_one_hits_limit, 0, 897 "When one-2-one hits qlimit abort"); 898 899 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_data_order, CTLFLAG_RW, 900 &sctp_strict_data_order, 0, 901 "Enforce strict data ordering, abort if control inside data"); 902 903 SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_RW, 904 &sctpstat, sctpstat, 905 "SCTP statistics (struct sctps_stat, netinet/sctp.h"); 906 907 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoclist, CTLFLAG_RD, 908 0, 0, sctp_assoclist, 909 "S,xassoc", "List of active SCTP associations"); 910 911 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nat_friendly, CTLFLAG_RW, 912 &sctp_nat_friendly, 0, 913 "SCTP NAT friendly operation"); 914 915 #ifdef SCTP_DEBUG 916 SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW, 917 &sctp_debug_on, 0, "Configure debug output"); 918 #endif /* SCTP_DEBUG */ 919 920 static void 921 sctp_abort(struct socket *so) 922 { 923 struct sctp_inpcb *inp; 924 uint32_t flags; 925 926 inp = (struct sctp_inpcb *)so->so_pcb; 927 if (inp == 0) 928 return; 929 930 sctp_must_try_again: 931 flags = inp->sctp_flags; 932 #ifdef SCTP_LOG_CLOSING 933 sctp_log_closing(inp, NULL, 17); 934 #endif 935 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 936 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 937 #ifdef SCTP_LOG_CLOSING 938 sctp_log_closing(inp, NULL, 16); 939 #endif 940 sctp_inpcb_free(inp, 1, 0); 941 SOCK_LOCK(so); 942 so->so_snd.sb_cc = 0; 943 so->so_snd.sb_mb = NULL; 944 so->so_snd.sb_mbcnt = 0; 945 946 /* 947 * same for the rcv ones, they are only here for the 948 * accounting/select. 949 */ 950 so->so_rcv.sb_cc = 0; 951 so->so_rcv.sb_mb = NULL; 952 so->so_rcv.sb_mbcnt = 0; 953 /* 954 * Now null out the reference, we are completely detached. 955 */ 956 so->so_pcb = NULL; 957 SOCK_UNLOCK(so); 958 959 } else { 960 flags = inp->sctp_flags; 961 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 962 goto sctp_must_try_again; 963 } 964 } 965 return; 966 } 967 968 static int 969 sctp_attach(struct socket *so, int proto, struct thread *p) 970 { 971 struct sctp_inpcb *inp; 972 struct inpcb *ip_inp; 973 int error; 974 975 #ifdef IPSEC 976 uint32_t flags; 977 978 #endif 979 inp = (struct sctp_inpcb *)so->so_pcb; 980 if (inp != 0) { 981 return EINVAL; 982 } 983 error = soreserve(so, sctp_sendspace, sctp_recvspace); 984 if (error) { 985 return error; 986 } 987 error = sctp_inpcb_alloc(so); 988 if (error) { 989 return error; 990 } 991 inp = (struct sctp_inpcb *)so->so_pcb; 992 SCTP_INP_WLOCK(inp); 993 994 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ 995 ip_inp = &inp->ip_inp.inp; 996 ip_inp->inp_vflag |= INP_IPV4; 997 ip_inp->inp_ip_ttl = ip_defttl; 998 999 #ifdef IPSEC 1000 error = ipsec_init_pcbpolicy(so, &ip_inp->inp_sp); 1001 #ifdef SCTP_LOG_CLOSING 1002 sctp_log_closing(inp, NULL, 17); 1003 #endif 1004 if (error != 0) { 1005 flags = inp->sctp_flags; 1006 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 1007 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 1008 #ifdef SCTP_LOG_CLOSING 1009 sctp_log_closing(inp, NULL, 15); 1010 #endif 1011 sctp_inpcb_free(inp, 1, 0); 1012 } 1013 return error; 1014 } 1015 #endif /* IPSEC */ 1016 SCTP_INP_WUNLOCK(inp); 1017 return 0; 1018 } 1019 1020 static int 1021 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 1022 { 1023 struct sctp_inpcb *inp; 1024 int error; 1025 1026 #ifdef INET6 1027 if (addr && addr->sa_family != AF_INET) 1028 /* must be a v4 address! */ 1029 return EINVAL; 1030 #endif /* INET6 */ 1031 1032 inp = (struct sctp_inpcb *)so->so_pcb; 1033 if (inp == 0) 1034 return EINVAL; 1035 1036 error = sctp_inpcb_bind(so, addr, p); 1037 return error; 1038 } 1039 1040 static void 1041 sctp_close(struct socket *so) 1042 { 1043 struct sctp_inpcb *inp; 1044 uint32_t flags; 1045 1046 inp = (struct sctp_inpcb *)so->so_pcb; 1047 if (inp == 0) 1048 return; 1049 1050 /* 1051 * Inform all the lower layer assoc that we are done. 1052 */ 1053 sctp_must_try_again: 1054 flags = inp->sctp_flags; 1055 #ifdef SCTP_LOG_CLOSING 1056 sctp_log_closing(inp, NULL, 17); 1057 #endif 1058 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 1059 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { 1060 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || 1061 (so->so_rcv.sb_cc > 0)) { 1062 #ifdef SCTP_LOG_CLOSING 1063 sctp_log_closing(inp, NULL, 13); 1064 #endif 1065 sctp_inpcb_free(inp, 1, 1); 1066 } else { 1067 #ifdef SCTP_LOG_CLOSING 1068 sctp_log_closing(inp, NULL, 14); 1069 #endif 1070 sctp_inpcb_free(inp, 0, 1); 1071 } 1072 /* 1073 * The socket is now detached, no matter what the state of 1074 * the SCTP association. 1075 */ 1076 SOCK_LOCK(so); 1077 so->so_snd.sb_cc = 0; 1078 so->so_snd.sb_mb = NULL; 1079 so->so_snd.sb_mbcnt = 0; 1080 1081 /* 1082 * same for the rcv ones, they are only here for the 1083 * accounting/select. 1084 */ 1085 so->so_rcv.sb_cc = 0; 1086 so->so_rcv.sb_mb = NULL; 1087 so->so_rcv.sb_mbcnt = 0; 1088 /* 1089 * Now null out the reference, we are completely detached. 1090 */ 1091 so->so_pcb = NULL; 1092 SOCK_UNLOCK(so); 1093 } else { 1094 flags = inp->sctp_flags; 1095 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 1096 goto sctp_must_try_again; 1097 } 1098 } 1099 return; 1100 } 1101 1102 1103 int 1104 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 1105 struct mbuf *control, struct thread *p); 1106 1107 1108 int 1109 sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 1110 struct mbuf *control, struct thread *p) 1111 { 1112 struct sctp_inpcb *inp; 1113 int error; 1114 1115 inp = (struct sctp_inpcb *)so->so_pcb; 1116 if (inp == 0) { 1117 if (control) { 1118 sctp_m_freem(control); 1119 control = NULL; 1120 } 1121 sctp_m_freem(m); 1122 return EINVAL; 1123 } 1124 /* Got to have an to address if we are NOT a connected socket */ 1125 if ((addr == NULL) && 1126 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 1127 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) 1128 ) { 1129 goto connected_type; 1130 } else if (addr == NULL) { 1131 error = EDESTADDRREQ; 1132 sctp_m_freem(m); 1133 if (control) { 1134 sctp_m_freem(control); 1135 control = NULL; 1136 } 1137 return (error); 1138 } 1139 #ifdef INET6 1140 if (addr->sa_family != AF_INET) { 1141 /* must be a v4 address! */ 1142 sctp_m_freem(m); 1143 if (control) { 1144 sctp_m_freem(control); 1145 control = NULL; 1146 } 1147 error = EDESTADDRREQ; 1148 return EINVAL; 1149 } 1150 #endif /* INET6 */ 1151 connected_type: 1152 /* now what about control */ 1153 if (control) { 1154 if (inp->control) { 1155 printf("huh? control set?\n"); 1156 sctp_m_freem(inp->control); 1157 inp->control = NULL; 1158 } 1159 inp->control = control; 1160 } 1161 /* Place the data */ 1162 if (inp->pkt) { 1163 SCTP_BUF_NEXT(inp->pkt_last) = m; 1164 inp->pkt_last = m; 1165 } else { 1166 inp->pkt_last = inp->pkt = m; 1167 } 1168 if ( 1169 /* FreeBSD uses a flag passed */ 1170 ((flags & PRUS_MORETOCOME) == 0) 1171 ) { 1172 /* 1173 * note with the current version this code will only be used 1174 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for 1175 * re-defining sosend to use the sctp_sosend. One can 1176 * optionally switch back to this code (by changing back the 1177 * definitions) but this is not advisable. This code is used 1178 * by FreeBSD when sending a file with sendfile() though. 1179 */ 1180 int ret; 1181 1182 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); 1183 inp->pkt = NULL; 1184 inp->control = NULL; 1185 return (ret); 1186 } else { 1187 return (0); 1188 } 1189 } 1190 1191 static int 1192 sctp_disconnect(struct socket *so) 1193 { 1194 struct sctp_inpcb *inp; 1195 1196 inp = (struct sctp_inpcb *)so->so_pcb; 1197 if (inp == NULL) { 1198 return (ENOTCONN); 1199 } 1200 SCTP_INP_RLOCK(inp); 1201 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1202 if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) { 1203 /* No connection */ 1204 SCTP_INP_RUNLOCK(inp); 1205 return (0); 1206 } else { 1207 struct sctp_association *asoc; 1208 struct sctp_tcb *stcb; 1209 1210 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1211 if (stcb == NULL) { 1212 SCTP_INP_RUNLOCK(inp); 1213 return (EINVAL); 1214 } 1215 SCTP_TCB_LOCK(stcb); 1216 asoc = &stcb->asoc; 1217 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1218 /* We are about to be freed, out of here */ 1219 SCTP_TCB_UNLOCK(stcb); 1220 SCTP_INP_RUNLOCK(inp); 1221 return (0); 1222 } 1223 if (((so->so_options & SO_LINGER) && 1224 (so->so_linger == 0)) || 1225 (so->so_rcv.sb_cc > 0)) { 1226 if (SCTP_GET_STATE(asoc) != 1227 SCTP_STATE_COOKIE_WAIT) { 1228 /* Left with Data unread */ 1229 struct mbuf *err; 1230 1231 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 1232 if (err) { 1233 /* 1234 * Fill in the user 1235 * initiated abort 1236 */ 1237 struct sctp_paramhdr *ph; 1238 1239 ph = mtod(err, struct sctp_paramhdr *); 1240 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr); 1241 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 1242 ph->param_length = htons(SCTP_BUF_LEN(err)); 1243 } 1244 sctp_send_abort_tcb(stcb, err); 1245 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 1246 } 1247 SCTP_INP_RUNLOCK(inp); 1248 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 1249 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1250 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1251 } 1252 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); 1253 /* No unlock tcb assoc is gone */ 1254 return (0); 1255 } 1256 if (TAILQ_EMPTY(&asoc->send_queue) && 1257 TAILQ_EMPTY(&asoc->sent_queue) && 1258 (asoc->stream_queue_cnt == 0)) { 1259 /* there is nothing queued to send, so done */ 1260 if (asoc->locked_on_sending) { 1261 goto abort_anyway; 1262 } 1263 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1264 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1265 /* only send SHUTDOWN 1st time thru */ 1266 sctp_stop_timers_for_shutdown(stcb); 1267 sctp_send_shutdown(stcb, 1268 stcb->asoc.primary_destination); 1269 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3); 1270 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1271 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1272 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1273 } 1274 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 1275 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1276 stcb->sctp_ep, stcb, 1277 asoc->primary_destination); 1278 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1279 stcb->sctp_ep, stcb, 1280 asoc->primary_destination); 1281 } 1282 } else { 1283 /* 1284 * we still got (or just got) data to send, 1285 * so set SHUTDOWN_PENDING 1286 */ 1287 /* 1288 * XXX sockets draft says that SCTP_EOF 1289 * should be sent with no data. currently, 1290 * we will allow user data to be sent first 1291 * and move to SHUTDOWN-PENDING 1292 */ 1293 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1294 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1295 asoc->primary_destination); 1296 if (asoc->locked_on_sending) { 1297 /* Locked to send out the data */ 1298 struct sctp_stream_queue_pending *sp; 1299 1300 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1301 if (sp == NULL) { 1302 printf("Error, sp is NULL, locked on sending is non-null strm:%d\n", 1303 asoc->locked_on_sending->stream_no); 1304 } else { 1305 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 1306 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1307 } 1308 } 1309 if (TAILQ_EMPTY(&asoc->send_queue) && 1310 TAILQ_EMPTY(&asoc->sent_queue) && 1311 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1312 struct mbuf *op_err; 1313 1314 abort_anyway: 1315 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1316 0, M_DONTWAIT, 1, MT_DATA); 1317 if (op_err) { 1318 /* 1319 * Fill in the user 1320 * initiated abort 1321 */ 1322 struct sctp_paramhdr *ph; 1323 uint32_t *ippp; 1324 1325 SCTP_BUF_LEN(op_err) = 1326 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); 1327 ph = mtod(op_err, 1328 struct sctp_paramhdr *); 1329 ph->param_type = htons( 1330 SCTP_CAUSE_USER_INITIATED_ABT); 1331 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1332 ippp = (uint32_t *) (ph + 1); 1333 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4); 1334 } 1335 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; 1336 sctp_send_abort_tcb(stcb, op_err); 1337 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 1338 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 1339 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1340 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1341 } 1342 SCTP_INP_RUNLOCK(inp); 1343 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); 1344 return (0); 1345 } 1346 } 1347 SCTP_TCB_UNLOCK(stcb); 1348 SCTP_INP_RUNLOCK(inp); 1349 return (0); 1350 } 1351 /* not reached */ 1352 } else { 1353 /* UDP model does not support this */ 1354 SCTP_INP_RUNLOCK(inp); 1355 return EOPNOTSUPP; 1356 } 1357 } 1358 1359 int 1360 sctp_shutdown(struct socket *so) 1361 { 1362 struct sctp_inpcb *inp; 1363 1364 inp = (struct sctp_inpcb *)so->so_pcb; 1365 if (inp == 0) { 1366 return EINVAL; 1367 } 1368 SCTP_INP_RLOCK(inp); 1369 /* For UDP model this is a invalid call */ 1370 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 1371 /* Restore the flags that the soshutdown took away. */ 1372 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; 1373 /* This proc will wakeup for read and do nothing (I hope) */ 1374 SCTP_INP_RUNLOCK(inp); 1375 return (EOPNOTSUPP); 1376 } 1377 /* 1378 * Ok if we reach here its the TCP model and it is either a SHUT_WR 1379 * or SHUT_RDWR. This means we put the shutdown flag against it. 1380 */ 1381 { 1382 struct sctp_tcb *stcb; 1383 struct sctp_association *asoc; 1384 1385 socantsendmore(so); 1386 1387 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1388 if (stcb == NULL) { 1389 /* 1390 * Ok we hit the case that the shutdown call was 1391 * made after an abort or something. Nothing to do 1392 * now. 1393 */ 1394 return (0); 1395 } 1396 SCTP_TCB_LOCK(stcb); 1397 asoc = &stcb->asoc; 1398 if (TAILQ_EMPTY(&asoc->send_queue) && 1399 TAILQ_EMPTY(&asoc->sent_queue) && 1400 (asoc->stream_queue_cnt == 0)) { 1401 if (asoc->locked_on_sending) { 1402 goto abort_anyway; 1403 } 1404 /* there is nothing queued to send, so I'm done... */ 1405 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1406 /* only send SHUTDOWN the first time through */ 1407 sctp_stop_timers_for_shutdown(stcb); 1408 sctp_send_shutdown(stcb, 1409 stcb->asoc.primary_destination); 1410 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3); 1411 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1412 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1413 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1414 } 1415 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 1416 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1417 stcb->sctp_ep, stcb, 1418 asoc->primary_destination); 1419 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1420 stcb->sctp_ep, stcb, 1421 asoc->primary_destination); 1422 } 1423 } else { 1424 /* 1425 * we still got (or just got) data to send, so set 1426 * SHUTDOWN_PENDING 1427 */ 1428 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1429 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1430 asoc->primary_destination); 1431 1432 if (asoc->locked_on_sending) { 1433 /* Locked to send out the data */ 1434 struct sctp_stream_queue_pending *sp; 1435 1436 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1437 if (sp == NULL) { 1438 printf("Error, sp is NULL, locked on sending is non-null strm:%d\n", 1439 asoc->locked_on_sending->stream_no); 1440 } else { 1441 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 1442 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1443 } 1444 } 1445 } 1446 if (TAILQ_EMPTY(&asoc->send_queue) && 1447 TAILQ_EMPTY(&asoc->sent_queue) && 1448 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1449 struct mbuf *op_err; 1450 1451 abort_anyway: 1452 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1453 0, M_DONTWAIT, 1, MT_DATA); 1454 if (op_err) { 1455 /* Fill in the user initiated abort */ 1456 struct sctp_paramhdr *ph; 1457 uint32_t *ippp; 1458 1459 SCTP_BUF_LEN(op_err) = 1460 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 1461 ph = mtod(op_err, 1462 struct sctp_paramhdr *); 1463 ph->param_type = htons( 1464 SCTP_CAUSE_USER_INITIATED_ABT); 1465 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1466 ippp = (uint32_t *) (ph + 1); 1467 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6); 1468 } 1469 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; 1470 sctp_abort_an_association(stcb->sctp_ep, stcb, 1471 SCTP_RESPONSE_TO_USER_REQ, 1472 op_err); 1473 goto skip_unlock; 1474 } 1475 } 1476 SCTP_TCB_UNLOCK(stcb); 1477 } 1478 skip_unlock: 1479 SCTP_INP_RUNLOCK(inp); 1480 return 0; 1481 } 1482 1483 /* 1484 * copies a "user" presentable address and removes embedded scope, etc. 1485 * returns 0 on success, 1 on error 1486 */ 1487 static uint32_t 1488 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) 1489 { 1490 struct sockaddr_in6 lsa6; 1491 1492 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, 1493 &lsa6); 1494 memcpy(ss, sa, sa->sa_len); 1495 return (0); 1496 } 1497 1498 1499 1500 static size_t 1501 sctp_fill_up_addresses(struct sctp_inpcb *inp, 1502 struct sctp_tcb *stcb, 1503 size_t limit, 1504 struct sockaddr_storage *sas) 1505 { 1506 struct ifnet *ifn; 1507 struct ifaddr *ifa; 1508 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 1509 size_t actual; 1510 int ipv4_addr_legal, ipv6_addr_legal; 1511 1512 actual = 0; 1513 if (limit <= 0) 1514 return (actual); 1515 1516 if (stcb) { 1517 /* Turn on all the appropriate scope */ 1518 loopback_scope = stcb->asoc.loopback_scope; 1519 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 1520 local_scope = stcb->asoc.local_scope; 1521 site_scope = stcb->asoc.site_scope; 1522 } else { 1523 /* Turn on ALL scope, since we look at the EP */ 1524 loopback_scope = ipv4_local_scope = local_scope = 1525 site_scope = 1; 1526 } 1527 ipv4_addr_legal = ipv6_addr_legal = 0; 1528 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1529 ipv6_addr_legal = 1; 1530 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1531 ipv4_addr_legal = 1; 1532 } 1533 } else { 1534 ipv4_addr_legal = 1; 1535 } 1536 1537 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1538 TAILQ_FOREACH(ifn, &ifnet, if_list) { 1539 if ((loopback_scope == 0) && 1540 (ifn->if_type == IFT_LOOP)) { 1541 /* Skip loopback if loopback_scope not set */ 1542 continue; 1543 } 1544 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) { 1545 if (stcb) { 1546 /* 1547 * For the BOUND-ALL case, the list 1548 * associated with a TCB is Always 1549 * considered a reverse list.. i.e. 1550 * it lists addresses that are NOT 1551 * part of the association. If this 1552 * is one of those we must skip it. 1553 */ 1554 if (sctp_is_addr_restricted(stcb, 1555 ifa->ifa_addr)) { 1556 continue; 1557 } 1558 } 1559 if ((ifa->ifa_addr->sa_family == AF_INET) && 1560 (ipv4_addr_legal)) { 1561 struct sockaddr_in *sin; 1562 1563 sin = (struct sockaddr_in *)ifa->ifa_addr; 1564 if (sin->sin_addr.s_addr == 0) { 1565 /* 1566 * we skip unspecifed 1567 * addresses 1568 */ 1569 continue; 1570 } 1571 if ((ipv4_local_scope == 0) && 1572 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1573 continue; 1574 } 1575 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) { 1576 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); 1577 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1578 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); 1579 actual += sizeof(sizeof(struct sockaddr_in6)); 1580 } else { 1581 memcpy(sas, sin, sizeof(*sin)); 1582 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; 1583 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); 1584 actual += sizeof(*sin); 1585 } 1586 if (actual >= limit) { 1587 return (actual); 1588 } 1589 } else if ((ifa->ifa_addr->sa_family == AF_INET6) && 1590 (ipv6_addr_legal)) { 1591 struct sockaddr_in6 *sin6; 1592 1593 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr; 1594 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1595 /* 1596 * we skip unspecifed 1597 * addresses 1598 */ 1599 continue; 1600 } 1601 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1602 if (local_scope == 0) 1603 continue; 1604 if (sin6->sin6_scope_id == 0) { 1605 if (sa6_recoverscope(sin6) != 0) 1606 /* 1607 * bad link 1608 * local 1609 * address 1610 */ 1611 continue; 1612 } 1613 } 1614 if ((site_scope == 0) && 1615 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1616 continue; 1617 } 1618 memcpy(sas, sin6, sizeof(*sin6)); 1619 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1620 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); 1621 actual += sizeof(*sin6); 1622 if (actual >= limit) { 1623 return (actual); 1624 } 1625 } 1626 } 1627 } 1628 } else { 1629 struct sctp_laddr *laddr; 1630 1631 /* 1632 * If we have a TCB and we do NOT support ASCONF (it's 1633 * turned off or otherwise) then the list is always the true 1634 * list of addresses (the else case below). Otherwise the 1635 * list on the association is a list of addresses that are 1636 * NOT part of the association. 1637 */ 1638 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) { 1639 /* The list is a NEGATIVE list */ 1640 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1641 if (stcb) { 1642 if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) { 1643 continue; 1644 } 1645 } 1646 if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr)) 1647 continue; 1648 1649 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1650 sas = (struct sockaddr_storage *)((caddr_t)sas + 1651 laddr->ifa->ifa_addr->sa_len); 1652 actual += laddr->ifa->ifa_addr->sa_len; 1653 if (actual >= limit) { 1654 return (actual); 1655 } 1656 } 1657 } else { 1658 /* The list is a positive list if present */ 1659 if (stcb) { 1660 /* Must use the specific association list */ 1661 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, 1662 sctp_nxt_addr) { 1663 if (sctp_fill_user_address(sas, 1664 laddr->ifa->ifa_addr)) 1665 continue; 1666 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1667 sas = (struct sockaddr_storage *)((caddr_t)sas + 1668 laddr->ifa->ifa_addr->sa_len); 1669 actual += laddr->ifa->ifa_addr->sa_len; 1670 if (actual >= limit) { 1671 return (actual); 1672 } 1673 } 1674 } else { 1675 /* 1676 * No endpoint so use the endpoints 1677 * individual list 1678 */ 1679 LIST_FOREACH(laddr, &inp->sctp_addr_list, 1680 sctp_nxt_addr) { 1681 if (sctp_fill_user_address(sas, 1682 laddr->ifa->ifa_addr)) 1683 continue; 1684 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; 1685 sas = (struct sockaddr_storage *)((caddr_t)sas + 1686 laddr->ifa->ifa_addr->sa_len); 1687 actual += laddr->ifa->ifa_addr->sa_len; 1688 if (actual >= limit) { 1689 return (actual); 1690 } 1691 } 1692 } 1693 } 1694 } 1695 return (actual); 1696 } 1697 1698 static int 1699 sctp_count_max_addresses(struct sctp_inpcb *inp) 1700 { 1701 int cnt = 0; 1702 1703 /* 1704 * In both sub-set bound an bound_all cases we return the MAXIMUM 1705 * number of addresses that you COULD get. In reality the sub-set 1706 * bound may have an exclusion list for a given TCB OR in the 1707 * bound-all case a TCB may NOT include the loopback or other 1708 * addresses as well. 1709 */ 1710 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1711 struct ifnet *ifn; 1712 struct ifaddr *ifa; 1713 1714 TAILQ_FOREACH(ifn, &ifnet, if_list) { 1715 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) { 1716 /* Count them if they are the right type */ 1717 if (ifa->ifa_addr->sa_family == AF_INET) { 1718 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) 1719 cnt += sizeof(struct sockaddr_in6); 1720 else 1721 cnt += sizeof(struct sockaddr_in); 1722 1723 } else if (ifa->ifa_addr->sa_family == AF_INET6) 1724 cnt += sizeof(struct sockaddr_in6); 1725 } 1726 } 1727 } else { 1728 struct sctp_laddr *laddr; 1729 1730 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1731 if (laddr->ifa->ifa_addr->sa_family == AF_INET) { 1732 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) 1733 cnt += sizeof(struct sockaddr_in6); 1734 else 1735 cnt += sizeof(struct sockaddr_in); 1736 1737 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET6) 1738 cnt += sizeof(struct sockaddr_in6); 1739 } 1740 } 1741 return (cnt); 1742 } 1743 1744 1745 static int 1746 sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, 1747 size_t optsize, void *p, int delay) 1748 { 1749 int error = 0; 1750 int creat_lock_on = 0; 1751 struct sctp_tcb *stcb = NULL; 1752 struct sockaddr *sa; 1753 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr, i; 1754 size_t incr, at; 1755 1756 #ifdef SCTP_DEBUG 1757 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 1758 printf("Connectx called\n"); 1759 } 1760 #endif /* SCTP_DEBUG */ 1761 1762 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1763 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 1764 /* We are already connected AND the TCP model */ 1765 return (EADDRINUSE); 1766 } 1767 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 1768 return (EINVAL); 1769 } 1770 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 1771 SCTP_INP_RLOCK(inp); 1772 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1773 SCTP_INP_RUNLOCK(inp); 1774 } 1775 if (stcb) { 1776 return (EALREADY); 1777 } 1778 SCTP_INP_INCR_REF(inp); 1779 SCTP_ASOC_CREATE_LOCK(inp); 1780 creat_lock_on = 1; 1781 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1782 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1783 error = EFAULT; 1784 goto out_now; 1785 } 1786 totaddrp = (int *)optval; 1787 totaddr = *totaddrp; 1788 sa = (struct sockaddr *)(totaddrp + 1); 1789 at = incr = 0; 1790 /* account and validate addresses */ 1791 for (i = 0; i < totaddr; i++) { 1792 if (sa->sa_family == AF_INET) { 1793 num_v4++; 1794 incr = sizeof(struct sockaddr_in); 1795 } else if (sa->sa_family == AF_INET6) { 1796 struct sockaddr_in6 *sin6; 1797 1798 sin6 = (struct sockaddr_in6 *)sa; 1799 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 1800 /* Must be non-mapped for connectx */ 1801 error = EINVAL; 1802 goto out_now; 1803 } 1804 num_v6++; 1805 incr = sizeof(struct sockaddr_in6); 1806 } else { 1807 totaddr = i; 1808 break; 1809 } 1810 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 1811 if (stcb != NULL) { 1812 /* Already have or am bring up an association */ 1813 SCTP_ASOC_CREATE_UNLOCK(inp); 1814 creat_lock_on = 0; 1815 SCTP_TCB_UNLOCK(stcb); 1816 error = EALREADY; 1817 goto out_now; 1818 } 1819 if ((at + incr) > optsize) { 1820 totaddr = i; 1821 break; 1822 } 1823 sa = (struct sockaddr *)((caddr_t)sa + incr); 1824 } 1825 sa = (struct sockaddr *)(totaddrp + 1); 1826 #ifdef INET6 1827 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1828 (num_v6 > 0)) { 1829 error = EINVAL; 1830 goto out_now; 1831 } 1832 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1833 (num_v4 > 0)) { 1834 struct in6pcb *inp6; 1835 1836 inp6 = (struct in6pcb *)inp; 1837 if (SCTP_IPV6_V6ONLY(inp6)) { 1838 /* 1839 * if IPV6_V6ONLY flag, ignore connections destined 1840 * to a v4 addr or v4-mapped addr 1841 */ 1842 error = EINVAL; 1843 goto out_now; 1844 } 1845 } 1846 #endif /* INET6 */ 1847 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 1848 SCTP_PCB_FLAGS_UNBOUND) { 1849 /* Bind a ephemeral port */ 1850 SCTP_INP_WUNLOCK(inp); 1851 error = sctp_inpcb_bind(so, NULL, p); 1852 if (error) { 1853 goto out_now; 1854 } 1855 } else { 1856 SCTP_INP_WUNLOCK(inp); 1857 } 1858 1859 /* We are GOOD to go */ 1860 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0); 1861 if (stcb == NULL) { 1862 /* Gak! no memory */ 1863 goto out_now; 1864 } 1865 /* move to second address */ 1866 if (sa->sa_family == AF_INET) 1867 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); 1868 else 1869 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); 1870 1871 for (i = 1; i < totaddr; i++) { 1872 if (sa->sa_family == AF_INET) { 1873 incr = sizeof(struct sockaddr_in); 1874 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 1875 /* assoc gone no un-lock */ 1876 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 1877 error = ENOBUFS; 1878 goto out_now; 1879 } 1880 } else if (sa->sa_family == AF_INET6) { 1881 incr = sizeof(struct sockaddr_in6); 1882 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 1883 /* assoc gone no un-lock */ 1884 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8); 1885 error = ENOBUFS; 1886 goto out_now; 1887 } 1888 } 1889 sa = (struct sockaddr *)((caddr_t)sa + incr); 1890 } 1891 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 1892 1893 /* initialize authentication parameters for the assoc */ 1894 sctp_initialize_auth_params(inp, stcb); 1895 1896 if (delay) { 1897 /* doing delayed connection */ 1898 stcb->asoc.delayed_connection = 1; 1899 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); 1900 } else { 1901 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1902 sctp_send_initiate(inp, stcb); 1903 } 1904 SCTP_TCB_UNLOCK(stcb); 1905 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1906 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1907 /* Set the connected flag so we can queue data */ 1908 soisconnecting(so); 1909 } 1910 out_now: 1911 if (creat_lock_on) 1912 SCTP_ASOC_CREATE_UNLOCK(inp); 1913 SCTP_INP_DECR_REF(inp); 1914 return error; 1915 } 1916 1917 #define SCTP_FIND_STCB(inp, stcb, assoc_id) \ 1918 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { \ 1919 SCTP_INP_RLOCK(inp); \ 1920 stcb = LIST_FIRST(&inp->sctp_asoc_list); \ 1921 if (stcb) \ 1922 SCTP_TCB_LOCK(stcb); \ 1923 SCTP_INP_RUNLOCK(inp); \ 1924 } else if (assoc_id != 0) { \ 1925 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ 1926 if (stcb == NULL) { \ 1927 error = ENOENT; \ 1928 break; \ 1929 } \ 1930 } else { \ 1931 stcb = NULL; \ 1932 } 1933 1934 #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) \ 1935 if (size < sizeof(type)) { \ 1936 error = EINVAL; \ 1937 break; \ 1938 } else { \ 1939 destp = (type *)srcp; \ 1940 } 1941 1942 1943 static int 1944 sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, 1945 void *p) 1946 { 1947 struct sctp_inpcb *inp; 1948 int error, val = 0; 1949 struct sctp_tcb *stcb = NULL; 1950 1951 if (optval == NULL) { 1952 return (EINVAL); 1953 } 1954 inp = (struct sctp_inpcb *)so->so_pcb; 1955 if (inp == 0) 1956 return EINVAL; 1957 error = 0; 1958 1959 switch (optname) { 1960 case SCTP_NODELAY: 1961 case SCTP_AUTOCLOSE: 1962 case SCTP_EXPLICIT_EOR: 1963 case SCTP_AUTO_ASCONF: 1964 case SCTP_DISABLE_FRAGMENTS: 1965 case SCTP_I_WANT_MAPPED_V4_ADDR: 1966 case SCTP_USE_EXT_RCVINFO: 1967 SCTP_INP_RLOCK(inp); 1968 switch (optname) { 1969 case SCTP_DISABLE_FRAGMENTS: 1970 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); 1971 break; 1972 case SCTP_I_WANT_MAPPED_V4_ADDR: 1973 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); 1974 break; 1975 case SCTP_AUTO_ASCONF: 1976 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1977 break; 1978 case SCTP_EXPLICIT_EOR: 1979 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1980 break; 1981 case SCTP_NODELAY: 1982 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); 1983 break; 1984 case SCTP_USE_EXT_RCVINFO: 1985 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); 1986 break; 1987 case SCTP_AUTOCLOSE: 1988 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) 1989 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); 1990 else 1991 val = 0; 1992 break; 1993 1994 default: 1995 error = ENOPROTOOPT; 1996 } /* end switch (sopt->sopt_name) */ 1997 if (optname != SCTP_AUTOCLOSE) { 1998 /* make it an "on/off" value */ 1999 val = (val != 0); 2000 } 2001 if (*optsize < sizeof(val)) { 2002 error = EINVAL; 2003 } 2004 SCTP_INP_RUNLOCK(inp); 2005 if (error == 0) { 2006 /* return the option value */ 2007 *(int *)optval = val; 2008 *optsize = sizeof(val); 2009 } 2010 break; 2011 case SCTP_PARTIAL_DELIVERY_POINT: 2012 { 2013 uint32_t *value; 2014 2015 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2016 *value = inp->partial_delivery_point; 2017 *optsize = sizeof(uint32_t); 2018 } 2019 break; 2020 case SCTP_FRAGMENT_INTERLEAVE: 2021 { 2022 uint32_t *value; 2023 2024 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2025 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2026 *optsize = sizeof(uint32_t); 2027 } 2028 break; 2029 case SCTP_CMT_ON_OFF: 2030 { 2031 struct sctp_assoc_value *av; 2032 2033 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 2034 if (sctp_cmt_on_off) { 2035 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2036 if (stcb) { 2037 av->assoc_value = stcb->asoc.sctp_cmt_on_off; 2038 SCTP_TCB_UNLOCK(stcb); 2039 2040 } else { 2041 error = ENOTCONN; 2042 } 2043 } else { 2044 error = ENOPROTOOPT; 2045 } 2046 *optsize = sizeof(*av); 2047 } 2048 break; 2049 case SCTP_GET_ADDR_LEN: 2050 { 2051 struct sctp_assoc_value *av; 2052 2053 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 2054 error = EINVAL; 2055 #ifdef AF_INET 2056 if (av->assoc_value == AF_INET) { 2057 av->assoc_value = sizeof(struct sockaddr_in); 2058 error = 0; 2059 } 2060 #endif 2061 #ifdef AF_INET6 2062 if (av->assoc_value == AF_INET6) { 2063 av->assoc_value = sizeof(struct sockaddr_in6); 2064 error = 0; 2065 } 2066 #endif 2067 *optsize = sizeof(*av); 2068 } 2069 break; 2070 case SCTP_GET_ASOC_ID_LIST: 2071 { 2072 struct sctp_assoc_ids *ids; 2073 int cnt, at; 2074 uint16_t orig; 2075 2076 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); 2077 cnt = 0; 2078 SCTP_INP_RLOCK(inp); 2079 stcb = LIST_FIRST(&inp->sctp_asoc_list); 2080 if (stcb == NULL) { 2081 none_out_now: 2082 ids->asls_numb_present = 0; 2083 ids->asls_more_to_get = 0; 2084 SCTP_INP_RUNLOCK(inp); 2085 break; 2086 } 2087 orig = ids->asls_assoc_start; 2088 stcb = LIST_FIRST(&inp->sctp_asoc_list); 2089 while (orig) { 2090 stcb = LIST_NEXT(stcb, sctp_tcblist); 2091 orig--; 2092 cnt--; 2093 if (stcb == NULL) 2094 goto none_out_now; 2095 } 2096 if (stcb == NULL) 2097 goto none_out_now; 2098 2099 at = 0; 2100 ids->asls_numb_present = 0; 2101 ids->asls_more_to_get = 1; 2102 while (at < MAX_ASOC_IDS_RET) { 2103 ids->asls_assoc_id[at] = sctp_get_associd(stcb); 2104 at++; 2105 ids->asls_numb_present++; 2106 stcb = LIST_NEXT(stcb, sctp_tcblist); 2107 if (stcb == NULL) { 2108 ids->asls_more_to_get = 0; 2109 break; 2110 } 2111 } 2112 SCTP_INP_RUNLOCK(inp); 2113 } 2114 break; 2115 case SCTP_CONTEXT: 2116 { 2117 struct sctp_assoc_value *av; 2118 2119 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); 2120 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2121 2122 if (stcb) { 2123 av->assoc_value = stcb->asoc.context; 2124 SCTP_TCB_UNLOCK(stcb); 2125 } else { 2126 SCTP_INP_RLOCK(inp); 2127 av->assoc_value = inp->sctp_context; 2128 SCTP_INP_RUNLOCK(inp); 2129 } 2130 *optsize = sizeof(*av); 2131 } 2132 break; 2133 case SCTP_GET_NONCE_VALUES: 2134 { 2135 struct sctp_get_nonce_values *gnv; 2136 2137 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); 2138 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); 2139 2140 if (stcb) { 2141 gnv->gn_peers_tag = stcb->asoc.peer_vtag; 2142 gnv->gn_local_tag = stcb->asoc.my_vtag; 2143 SCTP_TCB_UNLOCK(stcb); 2144 } else { 2145 error = ENOTCONN; 2146 } 2147 *optsize = sizeof(*gnv); 2148 } 2149 break; 2150 case SCTP_DELAYED_ACK_TIME: 2151 { 2152 struct sctp_assoc_value *tm; 2153 2154 SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, *optsize); 2155 SCTP_FIND_STCB(inp, stcb, tm->assoc_id); 2156 2157 if (stcb) { 2158 tm->assoc_value = stcb->asoc.delayed_ack; 2159 SCTP_TCB_UNLOCK(stcb); 2160 } else { 2161 SCTP_INP_RLOCK(inp); 2162 tm->assoc_value = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 2163 SCTP_INP_RUNLOCK(inp); 2164 } 2165 *optsize = sizeof(*tm); 2166 } 2167 break; 2168 2169 case SCTP_GET_SNDBUF_USE: 2170 { 2171 struct sctp_sockstat *ss; 2172 2173 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); 2174 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id); 2175 2176 if (stcb) { 2177 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; 2178 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + 2179 stcb->asoc.size_on_all_streams); 2180 SCTP_TCB_UNLOCK(stcb); 2181 } else { 2182 error = ENOTCONN; 2183 } 2184 *optsize = sizeof(struct sctp_sockstat); 2185 } 2186 break; 2187 case SCTP_MAXBURST: 2188 { 2189 uint8_t *value; 2190 2191 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); 2192 2193 SCTP_INP_RLOCK(inp); 2194 *value = inp->sctp_ep.max_burst; 2195 SCTP_INP_RUNLOCK(inp); 2196 *optsize = sizeof(uint8_t); 2197 } 2198 break; 2199 /* 2200 * FIXME MT: Should this be done as the association level by 2201 * using sctp_get_frag_point? 2202 */ 2203 case SCTP_MAXSEG: 2204 { 2205 uint32_t *segsize; 2206 int ovh; 2207 2208 SCTP_CHECK_AND_CAST(segsize, optval, uint32_t, *optsize); 2209 2210 SCTP_INP_RLOCK(inp); 2211 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2212 ovh = SCTP_MED_OVERHEAD; 2213 } else { 2214 ovh = SCTP_MED_V4_OVERHEAD; 2215 } 2216 *segsize = inp->sctp_frag_point - ovh; 2217 SCTP_INP_RUNLOCK(inp); 2218 *optsize = sizeof(uint32_t); 2219 } 2220 break; 2221 #if 0 2222 /* FIXME MT: How does this work? */ 2223 case SCTP_GET_STAT_LOG: 2224 #ifdef SCTP_STAT_LOGGING 2225 error = sctp_fill_stat_log(m); 2226 #else /* SCTP_DEBUG */ 2227 error = EOPNOTSUPP; 2228 #endif 2229 break; 2230 #endif 2231 case SCTP_EVENTS: 2232 { 2233 struct sctp_event_subscribe *events; 2234 2235 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); 2236 memset(events, 0, sizeof(*events)); 2237 SCTP_INP_RLOCK(inp); 2238 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) 2239 events->sctp_data_io_event = 1; 2240 2241 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) 2242 events->sctp_association_event = 1; 2243 2244 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2245 events->sctp_address_event = 1; 2246 2247 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2248 events->sctp_send_failure_event = 1; 2249 2250 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) 2251 events->sctp_peer_error_event = 1; 2252 2253 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 2254 events->sctp_shutdown_event = 1; 2255 2256 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) 2257 events->sctp_partial_delivery_event = 1; 2258 2259 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2260 events->sctp_adaptation_layer_event = 1; 2261 2262 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) 2263 events->sctp_authentication_event = 1; 2264 2265 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2266 events->sctp_stream_reset_events = 1; 2267 SCTP_INP_RUNLOCK(inp); 2268 *optsize = sizeof(struct sctp_event_subscribe); 2269 } 2270 break; 2271 2272 case SCTP_ADAPTATION_LAYER: 2273 { 2274 uint32_t *value; 2275 2276 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2277 2278 SCTP_INP_RLOCK(inp); 2279 *value = inp->sctp_ep.adaptation_layer_indicator; 2280 SCTP_INP_RUNLOCK(inp); 2281 *optsize = sizeof(uint32_t); 2282 } 2283 break; 2284 case SCTP_SET_INITIAL_DBG_SEQ: 2285 { 2286 uint32_t *value; 2287 2288 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2289 SCTP_INP_RLOCK(inp); 2290 *value = inp->sctp_ep.initial_sequence_debug; 2291 SCTP_INP_RUNLOCK(inp); 2292 *optsize = sizeof(uint32_t); 2293 } 2294 break; 2295 case SCTP_GET_LOCAL_ADDR_SIZE: 2296 { 2297 uint32_t *value; 2298 2299 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2300 SCTP_INP_RLOCK(inp); 2301 *value = sctp_count_max_addresses(inp); 2302 SCTP_INP_RUNLOCK(inp); 2303 *optsize = sizeof(uint32_t); 2304 } 2305 break; 2306 case SCTP_GET_REMOTE_ADDR_SIZE: 2307 { 2308 uint32_t *value; 2309 size_t size; 2310 struct sctp_nets *net; 2311 2312 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); 2313 /* FIXME MT: change to sctp_assoc_value? */ 2314 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value); 2315 2316 if (stcb) { 2317 size = 0; 2318 /* Count the sizes */ 2319 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2320 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || 2321 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2322 size += sizeof(struct sockaddr_in6); 2323 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2324 size += sizeof(struct sockaddr_in); 2325 } else { 2326 /* huh */ 2327 break; 2328 } 2329 } 2330 SCTP_TCB_UNLOCK(stcb); 2331 *value = (uint32_t) size; 2332 } else { 2333 error = ENOTCONN; 2334 } 2335 *optsize = sizeof(uint32_t); 2336 } 2337 break; 2338 case SCTP_GET_PEER_ADDRESSES: 2339 /* 2340 * Get the address information, an array is passed in to 2341 * fill up we pack it. 2342 */ 2343 { 2344 size_t cpsz, left; 2345 struct sockaddr_storage *sas; 2346 struct sctp_nets *net; 2347 struct sctp_getaddresses *saddr; 2348 2349 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2350 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2351 2352 if (stcb) { 2353 left = (*optsize) - sizeof(struct sctp_getaddresses); 2354 *optsize = sizeof(struct sctp_getaddresses); 2355 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2356 2357 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2358 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || 2359 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { 2360 cpsz = sizeof(struct sockaddr_in6); 2361 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 2362 cpsz = sizeof(struct sockaddr_in); 2363 } else { 2364 /* huh */ 2365 break; 2366 } 2367 if (left < cpsz) { 2368 /* not enough room. */ 2369 break; 2370 } 2371 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 2372 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { 2373 /* Must map the address */ 2374 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, 2375 (struct sockaddr_in6 *)sas); 2376 } else { 2377 memcpy(sas, &net->ro._l_addr, cpsz); 2378 } 2379 ((struct sockaddr_in *)sas)->sin_port = stcb->rport; 2380 2381 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); 2382 left -= cpsz; 2383 *optsize += cpsz; 2384 } 2385 SCTP_TCB_UNLOCK(stcb); 2386 } else { 2387 error = ENOENT; 2388 } 2389 } 2390 break; 2391 case SCTP_GET_LOCAL_ADDRESSES: 2392 { 2393 size_t limit, actual; 2394 struct sockaddr_storage *sas; 2395 struct sctp_getaddresses *saddr; 2396 2397 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); 2398 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); 2399 2400 sas = (struct sockaddr_storage *)&saddr->addr[0]; 2401 limit = *optsize - sizeof(sctp_assoc_t); 2402 actual = sctp_fill_up_addresses(inp, stcb, limit, sas); 2403 if (stcb) 2404 SCTP_TCB_UNLOCK(stcb); 2405 *optsize = sizeof(struct sockaddr_storage) + actual; 2406 } 2407 break; 2408 case SCTP_PEER_ADDR_PARAMS: 2409 { 2410 struct sctp_paddrparams *paddrp; 2411 struct sctp_nets *net; 2412 2413 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize); 2414 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 2415 2416 net = NULL; 2417 if (stcb) { 2418 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 2419 } else { 2420 /* 2421 * We increment here since 2422 * sctp_findassociation_ep_addr() wil do a 2423 * decrement if it finds the stcb as long as 2424 * the locked tcb (last argument) is NOT a 2425 * TCB.. aka NULL. 2426 */ 2427 SCTP_INP_INCR_REF(inp); 2428 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); 2429 if (stcb == NULL) { 2430 SCTP_INP_DECR_REF(inp); 2431 } 2432 } 2433 2434 if (stcb) { 2435 /* Applys to the specific association */ 2436 paddrp->spp_flags = 0; 2437 if (net) { 2438 paddrp->spp_pathmaxrxt = net->failure_threshold; 2439 paddrp->spp_pathmtu = net->mtu; 2440 /* get flags for HB */ 2441 if (net->dest_state & SCTP_ADDR_NOHB) 2442 paddrp->spp_flags |= SPP_HB_DISABLE; 2443 else 2444 paddrp->spp_flags |= SPP_HB_ENABLE; 2445 /* get flags for PMTU */ 2446 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 2447 paddrp->spp_flags |= SPP_PMTUD_ENABLE; 2448 } else { 2449 paddrp->spp_flags |= SPP_PMTUD_DISABLE; 2450 } 2451 #ifdef AF_INET 2452 if (net->ro._l_addr.sin.sin_family == AF_INET) { 2453 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; 2454 paddrp->spp_flags |= SPP_IPV4_TOS; 2455 } 2456 #endif 2457 #ifdef AF_INET6 2458 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 2459 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; 2460 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2461 } 2462 #endif 2463 } else { 2464 /* 2465 * No destination so return default 2466 * value 2467 */ 2468 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; 2469 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); 2470 #ifdef AF_INET 2471 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; 2472 paddrp->spp_flags |= SPP_IPV4_TOS; 2473 #endif 2474 #ifdef AF_INET6 2475 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; 2476 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2477 #endif 2478 /* default settings should be these */ 2479 if (sctp_is_hb_timer_running(stcb)) { 2480 paddrp->spp_flags |= SPP_HB_ENABLE; 2481 } 2482 } 2483 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; 2484 paddrp->spp_sackdelay = stcb->asoc.delayed_ack; 2485 /* 2486 * Currently we don't support no sack delay 2487 * aka SPP_SACKDELAY_DISABLE. 2488 */ 2489 paddrp->spp_flags |= SPP_SACKDELAY_ENABLE; 2490 paddrp->spp_assoc_id = sctp_get_associd(stcb); 2491 SCTP_TCB_UNLOCK(stcb); 2492 } else { 2493 /* Use endpoint defaults */ 2494 SCTP_INP_RLOCK(inp); 2495 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; 2496 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 2497 paddrp->spp_sackdelay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 2498 paddrp->spp_assoc_id = (sctp_assoc_t) 0; 2499 /* get inp's default */ 2500 #ifdef AF_INET 2501 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; 2502 paddrp->spp_flags |= SPP_IPV4_TOS; 2503 #endif 2504 #ifdef AF_INET6 2505 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2506 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2507 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; 2508 } 2509 #endif 2510 /* can't return this */ 2511 paddrp->spp_pathmaxrxt = 0; 2512 paddrp->spp_pathmtu = 0; 2513 /* default behavior, no stcb */ 2514 paddrp->spp_flags = SPP_HB_ENABLE | SPP_SACKDELAY_ENABLE | SPP_PMTUD_ENABLE; 2515 2516 SCTP_INP_RUNLOCK(inp); 2517 } 2518 *optsize = sizeof(struct sctp_paddrparams); 2519 } 2520 break; 2521 case SCTP_GET_PEER_ADDR_INFO: 2522 { 2523 struct sctp_paddrinfo *paddri; 2524 struct sctp_nets *net; 2525 2526 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize); 2527 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id); 2528 2529 net = NULL; 2530 if (stcb) { 2531 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); 2532 } else { 2533 /* 2534 * We increment here since 2535 * sctp_findassociation_ep_addr() wil do a 2536 * decrement if it finds the stcb as long as 2537 * the locked tcb (last argument) is NOT a 2538 * TCB.. aka NULL. 2539 */ 2540 SCTP_INP_INCR_REF(inp); 2541 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); 2542 if (stcb == NULL) { 2543 SCTP_INP_DECR_REF(inp); 2544 } 2545 } 2546 2547 if ((stcb) && (net)) { 2548 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); 2549 paddri->spinfo_cwnd = net->cwnd; 2550 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 2551 paddri->spinfo_rto = net->RTO; 2552 paddri->spinfo_assoc_id = sctp_get_associd(stcb); 2553 SCTP_TCB_UNLOCK(stcb); 2554 } else { 2555 if (stcb) { 2556 SCTP_TCB_UNLOCK(stcb); 2557 } 2558 error = ENOENT; 2559 } 2560 *optsize = sizeof(struct sctp_paddrinfo); 2561 } 2562 break; 2563 case SCTP_PCB_STATUS: 2564 { 2565 struct sctp_pcbinfo *spcb; 2566 2567 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize); 2568 sctp_fill_pcbinfo(spcb); 2569 *optsize = sizeof(struct sctp_pcbinfo); 2570 } 2571 break; 2572 case SCTP_STATUS: 2573 { 2574 struct sctp_nets *net; 2575 struct sctp_status *sstat; 2576 2577 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize); 2578 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id); 2579 2580 if (stcb == NULL) { 2581 error = EINVAL; 2582 break; 2583 } 2584 /* 2585 * I think passing the state is fine since 2586 * sctp_constants.h will be available to the user 2587 * land. 2588 */ 2589 sstat->sstat_state = stcb->asoc.state; 2590 sstat->sstat_rwnd = stcb->asoc.peers_rwnd; 2591 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; 2592 /* 2593 * We can't include chunks that have been passed to 2594 * the socket layer. Only things in queue. 2595 */ 2596 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + 2597 stcb->asoc.cnt_on_all_streams); 2598 2599 2600 sstat->sstat_instrms = stcb->asoc.streamincnt; 2601 sstat->sstat_outstrms = stcb->asoc.streamoutcnt; 2602 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); 2603 memcpy(&sstat->sstat_primary.spinfo_address, 2604 &stcb->asoc.primary_destination->ro._l_addr, 2605 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); 2606 net = stcb->asoc.primary_destination; 2607 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; 2608 /* 2609 * Again the user can get info from sctp_constants.h 2610 * for what the state of the network is. 2611 */ 2612 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; 2613 sstat->sstat_primary.spinfo_cwnd = net->cwnd; 2614 sstat->sstat_primary.spinfo_srtt = net->lastsa; 2615 sstat->sstat_primary.spinfo_rto = net->RTO; 2616 sstat->sstat_primary.spinfo_mtu = net->mtu; 2617 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); 2618 SCTP_TCB_UNLOCK(stcb); 2619 *optsize = sizeof(*sstat); 2620 } 2621 break; 2622 case SCTP_RTOINFO: 2623 { 2624 struct sctp_rtoinfo *srto; 2625 2626 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); 2627 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 2628 2629 if (stcb) { 2630 srto->srto_initial = stcb->asoc.initial_rto; 2631 srto->srto_max = stcb->asoc.maxrto; 2632 srto->srto_min = stcb->asoc.minrto; 2633 SCTP_TCB_UNLOCK(stcb); 2634 } else { 2635 SCTP_INP_RLOCK(inp); 2636 srto->srto_initial = inp->sctp_ep.initial_rto; 2637 srto->srto_max = inp->sctp_ep.sctp_maxrto; 2638 srto->srto_min = inp->sctp_ep.sctp_minrto; 2639 SCTP_INP_RUNLOCK(inp); 2640 } 2641 *optsize = sizeof(*srto); 2642 } 2643 break; 2644 case SCTP_ASSOCINFO: 2645 { 2646 struct sctp_assocparams *sasoc; 2647 2648 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); 2649 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 2650 2651 if (stcb) { 2652 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 2653 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 2654 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; 2655 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; 2656 sasoc->sasoc_cookie_life = stcb->asoc.cookie_life; 2657 SCTP_TCB_UNLOCK(stcb); 2658 } else { 2659 SCTP_INP_RLOCK(inp); 2660 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; 2661 sasoc->sasoc_number_peer_destinations = 0; 2662 sasoc->sasoc_peer_rwnd = 0; 2663 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); 2664 sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life; 2665 SCTP_INP_RUNLOCK(inp); 2666 } 2667 *optsize = sizeof(*sasoc); 2668 } 2669 break; 2670 case SCTP_DEFAULT_SEND_PARAM: 2671 { 2672 struct sctp_sndrcvinfo *s_info; 2673 2674 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); 2675 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 2676 2677 if (stcb) { 2678 *s_info = stcb->asoc.def_send; 2679 SCTP_TCB_UNLOCK(stcb); 2680 } else { 2681 SCTP_INP_RLOCK(inp); 2682 *s_info = inp->def_send; 2683 SCTP_INP_RUNLOCK(inp); 2684 } 2685 *optsize = sizeof(*s_info); 2686 } 2687 break; 2688 case SCTP_INITMSG: 2689 { 2690 struct sctp_initmsg *sinit; 2691 2692 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); 2693 SCTP_INP_RLOCK(inp); 2694 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; 2695 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 2696 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; 2697 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; 2698 SCTP_INP_RUNLOCK(inp); 2699 *optsize = sizeof(*sinit); 2700 } 2701 break; 2702 case SCTP_PRIMARY_ADDR: 2703 /* we allow a "get" operation on this */ 2704 { 2705 struct sctp_setprim *ssp; 2706 2707 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); 2708 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); 2709 2710 if (stcb) { 2711 /* simply copy out the sockaddr_storage... */ 2712 memcpy(&ssp->ssp_addr, &stcb->asoc.primary_destination->ro._l_addr, 2713 ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len); 2714 SCTP_TCB_UNLOCK(stcb); 2715 } else { 2716 error = EINVAL; 2717 } 2718 *optsize = sizeof(*ssp); 2719 } 2720 break; 2721 2722 case SCTP_HMAC_IDENT: 2723 { 2724 struct sctp_hmacalgo *shmac; 2725 sctp_hmaclist_t *hmaclist; 2726 uint32_t size; 2727 int i; 2728 2729 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); 2730 2731 SCTP_INP_RLOCK(inp); 2732 hmaclist = inp->sctp_ep.local_hmacs; 2733 if (hmaclist == NULL) { 2734 /* no HMACs to return */ 2735 *optsize = sizeof(*shmac); 2736 break; 2737 } 2738 /* is there room for all of the hmac ids? */ 2739 size = sizeof(*shmac) + (hmaclist->num_algo * 2740 sizeof(shmac->shmac_idents[0])); 2741 if ((size_t)(*optsize) < size) { 2742 error = EINVAL; 2743 SCTP_INP_RUNLOCK(inp); 2744 break; 2745 } 2746 /* copy in the list */ 2747 for (i = 0; i < hmaclist->num_algo; i++) 2748 shmac->shmac_idents[i] = hmaclist->hmac[i]; 2749 SCTP_INP_RUNLOCK(inp); 2750 *optsize = size; 2751 break; 2752 } 2753 case SCTP_AUTH_ACTIVE_KEY: 2754 { 2755 struct sctp_authkeyid *scact; 2756 2757 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); 2758 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 2759 2760 if (stcb) { 2761 /* get the active key on the assoc */ 2762 scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid; 2763 SCTP_TCB_UNLOCK(stcb); 2764 } else { 2765 /* get the endpoint active key */ 2766 SCTP_INP_RLOCK(inp); 2767 scact->scact_keynumber = inp->sctp_ep.default_keyid; 2768 SCTP_INP_RUNLOCK(inp); 2769 } 2770 *optsize = sizeof(*scact); 2771 break; 2772 } 2773 case SCTP_LOCAL_AUTH_CHUNKS: 2774 { 2775 struct sctp_authchunks *sac; 2776 sctp_auth_chklist_t *chklist = NULL; 2777 size_t size = 0; 2778 2779 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2780 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2781 2782 if (stcb) { 2783 /* get off the assoc */ 2784 chklist = stcb->asoc.local_auth_chunks; 2785 /* is there enough space? */ 2786 size = sctp_auth_get_chklist_size(chklist); 2787 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2788 error = EINVAL; 2789 } else { 2790 /* copy in the chunks */ 2791 sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2792 } 2793 SCTP_TCB_UNLOCK(stcb); 2794 } else { 2795 /* get off the endpoint */ 2796 SCTP_INP_RLOCK(inp); 2797 chklist = inp->sctp_ep.local_auth_chunks; 2798 /* is there enough space? */ 2799 size = sctp_auth_get_chklist_size(chklist); 2800 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2801 error = EINVAL; 2802 } else { 2803 /* copy in the chunks */ 2804 sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2805 } 2806 SCTP_INP_RUNLOCK(inp); 2807 } 2808 *optsize = sizeof(struct sctp_authchunks) + size; 2809 break; 2810 } 2811 case SCTP_PEER_AUTH_CHUNKS: 2812 { 2813 struct sctp_authchunks *sac; 2814 sctp_auth_chklist_t *chklist = NULL; 2815 size_t size = 0; 2816 2817 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); 2818 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); 2819 2820 if (stcb) { 2821 /* get off the assoc */ 2822 chklist = stcb->asoc.peer_auth_chunks; 2823 /* is there enough space? */ 2824 size = sctp_auth_get_chklist_size(chklist); 2825 if (*optsize < (sizeof(struct sctp_authchunks) + size)) { 2826 error = EINVAL; 2827 } else { 2828 /* copy in the chunks */ 2829 sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); 2830 } 2831 SCTP_TCB_UNLOCK(stcb); 2832 } else { 2833 error = ENOENT; 2834 } 2835 *optsize = sizeof(struct sctp_authchunks) + size; 2836 break; 2837 } 2838 2839 2840 default: 2841 error = ENOPROTOOPT; 2842 *optsize = 0; 2843 break; 2844 } /* end switch (sopt->sopt_name) */ 2845 return (error); 2846 } 2847 2848 2849 static int 2850 sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, 2851 void *p) 2852 { 2853 int error, set_opt; 2854 uint32_t *mopt; 2855 struct sctp_tcb *stcb = NULL; 2856 struct sctp_inpcb *inp; 2857 2858 if (optval == NULL) { 2859 return (EINVAL); 2860 } 2861 inp = (struct sctp_inpcb *)so->so_pcb; 2862 if (inp == 0) 2863 return EINVAL; 2864 2865 error = 0; 2866 switch (optname) { 2867 case SCTP_NODELAY: 2868 case SCTP_AUTOCLOSE: 2869 case SCTP_AUTO_ASCONF: 2870 case SCTP_EXPLICIT_EOR: 2871 case SCTP_DISABLE_FRAGMENTS: 2872 case SCTP_USE_EXT_RCVINFO: 2873 case SCTP_I_WANT_MAPPED_V4_ADDR: 2874 /* copy in the option value */ 2875 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); 2876 set_opt = 0; 2877 if (error) 2878 break; 2879 switch (optname) { 2880 case SCTP_DISABLE_FRAGMENTS: 2881 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; 2882 break; 2883 case SCTP_AUTO_ASCONF: 2884 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; 2885 break; 2886 case SCTP_EXPLICIT_EOR: 2887 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; 2888 break; 2889 case SCTP_USE_EXT_RCVINFO: 2890 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; 2891 break; 2892 case SCTP_I_WANT_MAPPED_V4_ADDR: 2893 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2894 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; 2895 } else { 2896 return (EINVAL); 2897 } 2898 break; 2899 case SCTP_NODELAY: 2900 set_opt = SCTP_PCB_FLAGS_NODELAY; 2901 break; 2902 case SCTP_AUTOCLOSE: 2903 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; 2904 /* 2905 * The value is in ticks. Note this does not effect 2906 * old associations, only new ones. 2907 */ 2908 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); 2909 break; 2910 } 2911 SCTP_INP_WLOCK(inp); 2912 if (*mopt != 0) { 2913 sctp_feature_on(inp, set_opt); 2914 } else { 2915 sctp_feature_off(inp, set_opt); 2916 } 2917 SCTP_INP_WUNLOCK(inp); 2918 break; 2919 case SCTP_PARTIAL_DELIVERY_POINT: 2920 { 2921 uint32_t *value; 2922 2923 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); 2924 inp->partial_delivery_point = *value; 2925 } 2926 break; 2927 case SCTP_FRAGMENT_INTERLEAVE: 2928 /* not yet until we re-write sctp_recvmsg() */ 2929 { 2930 uint32_t *on_off; 2931 2932 SCTP_CHECK_AND_CAST(on_off, optval, uint32_t, optsize); 2933 if (*on_off) { 2934 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2935 } else { 2936 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2937 } 2938 } 2939 break; 2940 case SCTP_CMT_ON_OFF: 2941 { 2942 struct sctp_assoc_value *av; 2943 2944 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2945 if (sctp_cmt_on_off) { 2946 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2947 if (stcb) { 2948 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; 2949 SCTP_TCB_UNLOCK(stcb); 2950 } else { 2951 error = ENOTCONN; 2952 } 2953 } else { 2954 error = ENOPROTOOPT; 2955 } 2956 } 2957 break; 2958 case SCTP_CLR_STAT_LOG: 2959 #ifdef SCTP_STAT_LOGGING 2960 sctp_clr_stat_log(); 2961 #else 2962 error = EOPNOTSUPP; 2963 #endif 2964 break; 2965 case SCTP_CONTEXT: 2966 { 2967 struct sctp_assoc_value *av; 2968 2969 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); 2970 SCTP_FIND_STCB(inp, stcb, av->assoc_id); 2971 2972 if (stcb) { 2973 stcb->asoc.context = av->assoc_value; 2974 SCTP_TCB_UNLOCK(stcb); 2975 } else { 2976 SCTP_INP_WLOCK(inp); 2977 inp->sctp_context = av->assoc_value; 2978 SCTP_INP_WUNLOCK(inp); 2979 } 2980 } 2981 break; 2982 case SCTP_DELAYED_ACK_TIME: 2983 { 2984 struct sctp_assoc_value *tm; 2985 2986 SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, optsize); 2987 SCTP_FIND_STCB(inp, stcb, tm->assoc_id); 2988 2989 if (stcb) { 2990 stcb->asoc.delayed_ack = tm->assoc_value; 2991 SCTP_TCB_UNLOCK(stcb); 2992 } else { 2993 SCTP_INP_WLOCK(inp); 2994 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value); 2995 SCTP_INP_WUNLOCK(inp); 2996 } 2997 break; 2998 } 2999 case SCTP_AUTH_CHUNK: 3000 { 3001 struct sctp_authchunk *sauth; 3002 3003 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); 3004 3005 SCTP_INP_WLOCK(inp); 3006 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) 3007 error = EINVAL; 3008 SCTP_INP_WUNLOCK(inp); 3009 break; 3010 } 3011 case SCTP_AUTH_KEY: 3012 { 3013 struct sctp_authkey *sca; 3014 struct sctp_keyhead *shared_keys; 3015 sctp_sharedkey_t *shared_key; 3016 sctp_key_t *key = NULL; 3017 size_t size; 3018 3019 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); 3020 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id) 3021 size = optsize - sizeof(*sca); 3022 3023 if (stcb) { 3024 /* set it on the assoc */ 3025 shared_keys = &stcb->asoc.shared_keys; 3026 /* clear the cached keys for this key id */ 3027 sctp_clear_cachedkeys(stcb, sca->sca_keynumber); 3028 /* 3029 * create the new shared key and 3030 * insert/replace it 3031 */ 3032 if (size > 0) { 3033 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3034 if (key == NULL) { 3035 error = ENOMEM; 3036 SCTP_TCB_UNLOCK(stcb); 3037 break; 3038 } 3039 } 3040 shared_key = sctp_alloc_sharedkey(); 3041 if (shared_key == NULL) { 3042 sctp_free_key(key); 3043 error = ENOMEM; 3044 SCTP_TCB_UNLOCK(stcb); 3045 break; 3046 } 3047 shared_key->key = key; 3048 shared_key->keyid = sca->sca_keynumber; 3049 sctp_insert_sharedkey(shared_keys, shared_key); 3050 SCTP_TCB_UNLOCK(stcb); 3051 } else { 3052 /* set it on the endpoint */ 3053 SCTP_INP_WLOCK(inp); 3054 shared_keys = &inp->sctp_ep.shared_keys; 3055 /* 3056 * clear the cached keys on all assocs for 3057 * this key id 3058 */ 3059 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); 3060 /* 3061 * create the new shared key and 3062 * insert/replace it 3063 */ 3064 if (size > 0) { 3065 key = sctp_set_key(sca->sca_key, (uint32_t) size); 3066 if (key == NULL) { 3067 error = ENOMEM; 3068 SCTP_INP_WUNLOCK(inp); 3069 break; 3070 } 3071 } 3072 shared_key = sctp_alloc_sharedkey(); 3073 if (shared_key == NULL) { 3074 sctp_free_key(key); 3075 error = ENOMEM; 3076 SCTP_INP_WUNLOCK(inp); 3077 break; 3078 } 3079 shared_key->key = key; 3080 shared_key->keyid = sca->sca_keynumber; 3081 sctp_insert_sharedkey(shared_keys, shared_key); 3082 SCTP_INP_WUNLOCK(inp); 3083 } 3084 break; 3085 } 3086 case SCTP_HMAC_IDENT: 3087 { 3088 struct sctp_hmacalgo *shmac; 3089 sctp_hmaclist_t *hmaclist; 3090 uint32_t hmacid; 3091 size_t size, i; 3092 3093 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); 3094 size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]); 3095 hmaclist = sctp_alloc_hmaclist(size); 3096 if (hmaclist == NULL) { 3097 error = ENOMEM; 3098 break; 3099 } 3100 for (i = 0; i < size; i++) { 3101 hmacid = shmac->shmac_idents[i]; 3102 if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) { 3103 /* invalid HMACs were found */ ; 3104 error = EINVAL; 3105 sctp_free_hmaclist(hmaclist); 3106 goto sctp_set_hmac_done; 3107 } 3108 } 3109 /* set it on the endpoint */ 3110 SCTP_INP_WLOCK(inp); 3111 if (inp->sctp_ep.local_hmacs) 3112 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3113 inp->sctp_ep.local_hmacs = hmaclist; 3114 SCTP_INP_WUNLOCK(inp); 3115 sctp_set_hmac_done: 3116 break; 3117 } 3118 case SCTP_AUTH_ACTIVE_KEY: 3119 { 3120 struct sctp_authkeyid *scact; 3121 3122 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize); 3123 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); 3124 3125 /* set the active key on the right place */ 3126 if (stcb) { 3127 /* set the active key on the assoc */ 3128 if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) 3129 error = EINVAL; 3130 SCTP_TCB_UNLOCK(stcb); 3131 } else { 3132 /* set the active key on the endpoint */ 3133 SCTP_INP_WLOCK(inp); 3134 if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) 3135 error = EINVAL; 3136 SCTP_INP_WUNLOCK(inp); 3137 } 3138 break; 3139 } 3140 case SCTP_AUTH_DELETE_KEY: 3141 { 3142 struct sctp_authkeyid *scdel; 3143 3144 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize); 3145 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); 3146 3147 /* delete the key from the right place */ 3148 if (stcb) { 3149 if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) 3150 error = EINVAL; 3151 SCTP_TCB_UNLOCK(stcb); 3152 } else { 3153 SCTP_INP_WLOCK(inp); 3154 if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) 3155 error = EINVAL; 3156 SCTP_INP_WUNLOCK(inp); 3157 } 3158 break; 3159 } 3160 3161 case SCTP_RESET_STREAMS: 3162 { 3163 struct sctp_stream_reset *strrst; 3164 uint8_t send_in = 0, send_tsn = 0, send_out = 0; 3165 int i; 3166 3167 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize); 3168 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id); 3169 3170 if (stcb == NULL) { 3171 error = ENOENT; 3172 break; 3173 } 3174 if (stcb->asoc.peer_supports_strreset == 0) { 3175 /* 3176 * Peer does not support it, we return 3177 * protocol not supported since this is true 3178 * for this feature and this peer, not the 3179 * socket request in general. 3180 */ 3181 error = EPROTONOSUPPORT; 3182 SCTP_TCB_UNLOCK(stcb); 3183 break; 3184 } 3185 if (stcb->asoc.stream_reset_outstanding) { 3186 error = EALREADY; 3187 SCTP_TCB_UNLOCK(stcb); 3188 break; 3189 } 3190 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { 3191 send_in = 1; 3192 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { 3193 send_out = 1; 3194 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { 3195 send_in = 1; 3196 send_out = 1; 3197 } else if (strrst->strrst_flags == SCTP_RESET_TSN) { 3198 send_tsn = 1; 3199 } else { 3200 error = EINVAL; 3201 SCTP_TCB_UNLOCK(stcb); 3202 break; 3203 } 3204 for (i = 0; i < strrst->strrst_num_streams; i++) { 3205 if ((send_in) && 3206 3207 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { 3208 error = EINVAL; 3209 goto get_out; 3210 } 3211 if ((send_out) && 3212 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { 3213 error = EINVAL; 3214 goto get_out; 3215 } 3216 } 3217 if (error) { 3218 get_out: 3219 SCTP_TCB_UNLOCK(stcb); 3220 break; 3221 } 3222 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, 3223 strrst->strrst_list, 3224 send_out, (stcb->asoc.str_reset_seq_in - 3), 3225 send_in, send_tsn); 3226 3227 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ); 3228 SCTP_TCB_UNLOCK(stcb); 3229 } 3230 break; 3231 3232 case SCTP_CONNECT_X: 3233 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3234 error = EINVAL; 3235 break; 3236 } 3237 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); 3238 break; 3239 3240 case SCTP_CONNECT_X_DELAYED: 3241 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { 3242 error = EINVAL; 3243 break; 3244 } 3245 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); 3246 break; 3247 3248 case SCTP_CONNECT_X_COMPLETE: 3249 { 3250 struct sockaddr *sa; 3251 struct sctp_nets *net; 3252 3253 /* FIXME MT: check correct? */ 3254 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); 3255 3256 /* find tcb */ 3257 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3258 SCTP_INP_RLOCK(inp); 3259 stcb = LIST_FIRST(&inp->sctp_asoc_list); 3260 if (stcb) { 3261 SCTP_TCB_LOCK(stcb); 3262 net = sctp_findnet(stcb, sa); 3263 } 3264 SCTP_INP_RUNLOCK(inp); 3265 } else { 3266 /* 3267 * We increment here since 3268 * sctp_findassociation_ep_addr() wil do a 3269 * decrement if it finds the stcb as long as 3270 * the locked tcb (last argument) is NOT a 3271 * TCB.. aka NULL. 3272 */ 3273 SCTP_INP_INCR_REF(inp); 3274 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); 3275 if (stcb == NULL) { 3276 SCTP_INP_DECR_REF(inp); 3277 } 3278 } 3279 3280 if (stcb == NULL) { 3281 error = ENOENT; 3282 break; 3283 } 3284 if (stcb->asoc.delayed_connection == 1) { 3285 stcb->asoc.delayed_connection = 0; 3286 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 3287 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, 3288 stcb->asoc.primary_destination, 3289 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); 3290 sctp_send_initiate(inp, stcb); 3291 } else { 3292 /* 3293 * already expired or did not use delayed 3294 * connectx 3295 */ 3296 error = EALREADY; 3297 } 3298 SCTP_TCB_UNLOCK(stcb); 3299 } 3300 break; 3301 case SCTP_MAXBURST: 3302 { 3303 uint8_t *burst; 3304 3305 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); 3306 3307 SCTP_INP_WLOCK(inp); 3308 if (*burst) { 3309 inp->sctp_ep.max_burst = *burst; 3310 } 3311 SCTP_INP_WUNLOCK(inp); 3312 } 3313 break; 3314 case SCTP_MAXSEG: 3315 { 3316 uint32_t *segsize; 3317 int ovh; 3318 3319 SCTP_CHECK_AND_CAST(segsize, optval, uint32_t, optsize); 3320 3321 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3322 ovh = SCTP_MED_OVERHEAD; 3323 } else { 3324 ovh = SCTP_MED_V4_OVERHEAD; 3325 } 3326 SCTP_INP_WLOCK(inp); 3327 /* FIXME MT: Why is this not allowed? */ 3328 if (*segsize) { 3329 inp->sctp_frag_point = (*segsize + ovh); 3330 } else { 3331 error = EINVAL; 3332 } 3333 SCTP_INP_WUNLOCK(inp); 3334 } 3335 break; 3336 case SCTP_EVENTS: 3337 { 3338 struct sctp_event_subscribe *events; 3339 3340 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); 3341 3342 SCTP_INP_WLOCK(inp); 3343 if (events->sctp_data_io_event) { 3344 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3345 } else { 3346 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 3347 } 3348 3349 if (events->sctp_association_event) { 3350 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3351 } else { 3352 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); 3353 } 3354 3355 if (events->sctp_address_event) { 3356 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3357 } else { 3358 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); 3359 } 3360 3361 if (events->sctp_send_failure_event) { 3362 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3363 } else { 3364 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); 3365 } 3366 3367 if (events->sctp_peer_error_event) { 3368 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3369 } else { 3370 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); 3371 } 3372 3373 if (events->sctp_shutdown_event) { 3374 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3375 } else { 3376 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); 3377 } 3378 3379 if (events->sctp_partial_delivery_event) { 3380 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3381 } else { 3382 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); 3383 } 3384 3385 if (events->sctp_adaptation_layer_event) { 3386 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3387 } else { 3388 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); 3389 } 3390 3391 if (events->sctp_authentication_event) { 3392 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3393 } else { 3394 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); 3395 } 3396 3397 if (events->sctp_stream_reset_events) { 3398 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3399 } else { 3400 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); 3401 } 3402 SCTP_INP_WUNLOCK(inp); 3403 } 3404 break; 3405 3406 case SCTP_ADAPTATION_LAYER: 3407 { 3408 struct sctp_setadaptation *adap_bits; 3409 3410 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); 3411 SCTP_INP_WLOCK(inp); 3412 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; 3413 SCTP_INP_WUNLOCK(inp); 3414 } 3415 break; 3416 #ifdef SCTP_DEBUG 3417 case SCTP_SET_INITIAL_DBG_SEQ: 3418 { 3419 uint32_t *vvv; 3420 3421 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); 3422 SCTP_INP_WLOCK(inp); 3423 inp->sctp_ep.initial_sequence_debug = *vvv; 3424 SCTP_INP_WUNLOCK(inp); 3425 } 3426 break; 3427 #endif 3428 case SCTP_DEFAULT_SEND_PARAM: 3429 { 3430 struct sctp_sndrcvinfo *s_info; 3431 3432 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); 3433 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); 3434 3435 if (stcb) { 3436 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) { 3437 stcb->asoc.def_send = *s_info; 3438 } else { 3439 error = EINVAL; 3440 } 3441 SCTP_TCB_UNLOCK(stcb); 3442 } else { 3443 SCTP_INP_WLOCK(inp); 3444 inp->def_send = *s_info; 3445 SCTP_INP_WUNLOCK(inp); 3446 } 3447 } 3448 break; 3449 case SCTP_PEER_ADDR_PARAMS: 3450 /* Applys to the specific association */ 3451 { 3452 struct sctp_paddrparams *paddrp; 3453 struct sctp_nets *net; 3454 3455 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); 3456 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); 3457 net = NULL; 3458 if (stcb) { 3459 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); 3460 } else { 3461 /* 3462 * We increment here since 3463 * sctp_findassociation_ep_addr() wil do a 3464 * decrement if it finds the stcb as long as 3465 * the locked tcb (last argument) is NOT a 3466 * TCB.. aka NULL. 3467 */ 3468 SCTP_INP_INCR_REF(inp); 3469 stcb = sctp_findassociation_ep_addr(&inp, 3470 (struct sockaddr *)&paddrp->spp_address, 3471 &net, NULL, NULL); 3472 if (stcb == NULL) { 3473 SCTP_INP_DECR_REF(inp); 3474 } 3475 } 3476 3477 3478 if (stcb) { 3479 /************************TCB SPECIFIC SET ******************/ 3480 /* sack delay first */ 3481 if (paddrp->spp_flags & SPP_SACKDELAY_ENABLE) { 3482 /* 3483 * we do NOT support turning it off 3484 * (yet). only setting the delay. 3485 */ 3486 if (paddrp->spp_sackdelay >= SCTP_CLOCK_GRANULARITY) 3487 stcb->asoc.delayed_ack = paddrp->spp_sackdelay; 3488 else 3489 stcb->asoc.delayed_ack = SCTP_CLOCK_GRANULARITY; 3490 3491 } else if (paddrp->spp_flags & SPP_SACKDELAY_DISABLE) { 3492 stcb->asoc.delayed_ack = 0; 3493 } 3494 /* 3495 * do we change the timer for HB, we run 3496 * only one? 3497 */ 3498 if (paddrp->spp_hbinterval) 3499 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; 3500 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) 3501 stcb->asoc.heart_beat_delay = 0; 3502 3503 /* network sets ? */ 3504 if (net) { 3505 /************************NET SPECIFIC SET ******************/ 3506 if (paddrp->spp_flags & SPP_HB_DEMAND) { 3507 /* on demand HB */ 3508 sctp_send_hb(stcb, 1, net); 3509 } 3510 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3511 net->dest_state |= SCTP_ADDR_NOHB; 3512 } 3513 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3514 net->dest_state &= ~SCTP_ADDR_NOHB; 3515 } 3516 if (paddrp->spp_flags & SPP_PMTUD_DISABLE) { 3517 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3518 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 3519 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); 3520 } 3521 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { 3522 net->mtu = paddrp->spp_pathmtu; 3523 if (net->mtu < stcb->asoc.smallest_mtu) 3524 sctp_pathmtu_adustment(inp, stcb, net, net->mtu); 3525 } 3526 } 3527 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { 3528 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { 3529 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 3530 } 3531 } 3532 if (paddrp->spp_pathmaxrxt) 3533 net->failure_threshold = paddrp->spp_pathmaxrxt; 3534 #ifdef AF_INET 3535 if (paddrp->spp_flags & SPP_IPV4_TOS) { 3536 if (net->ro._l_addr.sin.sin_family == AF_INET) { 3537 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; 3538 } 3539 } 3540 #endif 3541 #ifdef AF_INET6 3542 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { 3543 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 3544 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; 3545 } 3546 } 3547 #endif 3548 } else { 3549 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ 3550 if (paddrp->spp_pathmaxrxt) 3551 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; 3552 3553 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3554 /* Turn back on the timer */ 3555 stcb->asoc.hb_is_disabled = 0; 3556 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3557 } 3558 if (paddrp->spp_flags & SPP_HB_DISABLE) { 3559 int cnt_of_unconf = 0; 3560 struct sctp_nets *lnet; 3561 3562 stcb->asoc.hb_is_disabled = 1; 3563 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 3564 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { 3565 cnt_of_unconf++; 3566 } 3567 } 3568 /* 3569 * stop the timer ONLY if we 3570 * have no unconfirmed 3571 * addresses 3572 */ 3573 if (cnt_of_unconf == 0) { 3574 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); 3575 } 3576 } 3577 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3578 /* start up the timer. */ 3579 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 3580 } 3581 #ifdef AF_INET 3582 if (paddrp->spp_flags & SPP_IPV4_TOS) 3583 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; 3584 #endif 3585 #ifdef AF_INET6 3586 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) 3587 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; 3588 #endif 3589 3590 } 3591 SCTP_TCB_UNLOCK(stcb); 3592 } else { 3593 /************************NO TCB, SET TO default stuff ******************/ 3594 SCTP_INP_WLOCK(inp); 3595 /* 3596 * For the TOS/FLOWLABEL stuff you set it 3597 * with the options on the socket 3598 */ 3599 if (paddrp->spp_pathmaxrxt) { 3600 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; 3601 } 3602 if (paddrp->spp_flags & SPP_HB_ENABLE) { 3603 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); 3604 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3605 } else if (paddrp->spp_flags & SPP_HB_DISABLE) { 3606 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); 3607 } 3608 if (paddrp->spp_flags & SPP_SACKDELAY_ENABLE) { 3609 if (paddrp->spp_sackdelay > SCTP_CLOCK_GRANULARITY) 3610 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(paddrp->spp_sackdelay); 3611 else 3612 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_CLOCK_GRANULARITY); 3613 3614 } else if (paddrp->spp_flags & SPP_SACKDELAY_DISABLE) { 3615 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = 0; 3616 } 3617 SCTP_INP_WUNLOCK(inp); 3618 } 3619 } 3620 break; 3621 case SCTP_RTOINFO: 3622 { 3623 struct sctp_rtoinfo *srto; 3624 3625 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); 3626 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); 3627 3628 if (stcb) { 3629 /* Set in ms we hope :-) */ 3630 if (srto->srto_initial > 10) 3631 stcb->asoc.initial_rto = srto->srto_initial; 3632 if (srto->srto_max > 10) 3633 stcb->asoc.maxrto = srto->srto_max; 3634 if (srto->srto_min > 10) 3635 stcb->asoc.minrto = srto->srto_min; 3636 SCTP_TCB_UNLOCK(stcb); 3637 } else { 3638 SCTP_INP_WLOCK(inp); 3639 /* 3640 * If we have a null asoc, its default for 3641 * the endpoint 3642 */ 3643 if (srto->srto_initial > 10) 3644 inp->sctp_ep.initial_rto = srto->srto_initial; 3645 if (srto->srto_max > 10) 3646 inp->sctp_ep.sctp_maxrto = srto->srto_max; 3647 if (srto->srto_min > 10) 3648 inp->sctp_ep.sctp_minrto = srto->srto_min; 3649 SCTP_INP_WUNLOCK(inp); 3650 } 3651 } 3652 break; 3653 case SCTP_ASSOCINFO: 3654 { 3655 struct sctp_assocparams *sasoc; 3656 3657 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); 3658 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); 3659 3660 if (stcb) { 3661 if (sasoc->sasoc_asocmaxrxt) 3662 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; 3663 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 3664 sasoc->sasoc_peer_rwnd = 0; 3665 sasoc->sasoc_local_rwnd = 0; 3666 if (stcb->asoc.cookie_life) 3667 stcb->asoc.cookie_life = sasoc->sasoc_cookie_life; 3668 SCTP_TCB_UNLOCK(stcb); 3669 } else { 3670 SCTP_INP_WLOCK(inp); 3671 if (sasoc->sasoc_asocmaxrxt) 3672 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; 3673 sasoc->sasoc_number_peer_destinations = 0; 3674 sasoc->sasoc_peer_rwnd = 0; 3675 sasoc->sasoc_local_rwnd = 0; 3676 if (sasoc->sasoc_cookie_life) 3677 inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life; 3678 SCTP_INP_WUNLOCK(inp); 3679 } 3680 } 3681 break; 3682 case SCTP_INITMSG: 3683 { 3684 struct sctp_initmsg *sinit; 3685 3686 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); 3687 SCTP_INP_WLOCK(inp); 3688 if (sinit->sinit_num_ostreams) 3689 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; 3690 3691 if (sinit->sinit_max_instreams) 3692 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; 3693 3694 if (sinit->sinit_max_attempts) 3695 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; 3696 3697 if (sinit->sinit_max_init_timeo > 10) 3698 /* 3699 * We must be at least a 100ms (we set in 3700 * ticks) 3701 */ 3702 /* FIXME MT: What is this? */ 3703 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; 3704 SCTP_INP_WUNLOCK(inp); 3705 } 3706 break; 3707 case SCTP_PRIMARY_ADDR: 3708 { 3709 struct sctp_setprim *spa; 3710 struct sctp_nets *net, *lnet; 3711 3712 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize); 3713 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); 3714 3715 net = NULL; 3716 if (stcb) { 3717 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); 3718 } else { 3719 /* 3720 * We increment here since 3721 * sctp_findassociation_ep_addr() wil do a 3722 * decrement if it finds the stcb as long as 3723 * the locked tcb (last argument) is NOT a 3724 * TCB.. aka NULL. 3725 */ 3726 SCTP_INP_INCR_REF(inp); 3727 stcb = sctp_findassociation_ep_addr(&inp, 3728 (struct sockaddr *)&spa->ssp_addr, 3729 &net, NULL, NULL); 3730 if (stcb == NULL) { 3731 SCTP_INP_DECR_REF(inp); 3732 } 3733 } 3734 3735 if ((stcb) && (net)) { 3736 if ((net != stcb->asoc.primary_destination) && 3737 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 3738 /* Ok we need to set it */ 3739 lnet = stcb->asoc.primary_destination; 3740 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { 3741 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 3742 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; 3743 } 3744 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; 3745 } 3746 } 3747 } else { 3748 error = EINVAL; 3749 } 3750 if (stcb) { 3751 SCTP_TCB_UNLOCK(stcb); 3752 } 3753 } 3754 break; 3755 3756 case SCTP_SET_PEER_PRIMARY_ADDR: 3757 { 3758 struct sctp_setpeerprim *sspp; 3759 3760 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); 3761 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); 3762 3763 if (stcb) { 3764 if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) { 3765 error = EINVAL; 3766 } 3767 } else { 3768 error = EINVAL; 3769 } 3770 SCTP_TCB_UNLOCK(stcb); 3771 } 3772 break; 3773 case SCTP_BINDX_ADD_ADDR: 3774 { 3775 struct sctp_getaddresses *addrs; 3776 struct sockaddr *addr_touse; 3777 struct sockaddr_in sin; 3778 3779 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 3780 3781 /* see if we're bound all already! */ 3782 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3783 error = EINVAL; 3784 break; 3785 } 3786 addr_touse = addrs->addr; 3787 if (addrs->addr->sa_family == AF_INET6) { 3788 struct sockaddr_in6 *sin6; 3789 3790 sin6 = (struct sockaddr_in6 *)addr_touse; 3791 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 3792 in6_sin6_2_sin(&sin, sin6); 3793 addr_touse = (struct sockaddr *)&sin; 3794 } 3795 } 3796 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 3797 if (p == NULL) { 3798 /* Can't get proc for Net/Open BSD */ 3799 error = EINVAL; 3800 break; 3801 } 3802 error = sctp_inpcb_bind(so, addr_touse, p); 3803 break; 3804 } 3805 /* 3806 * No locks required here since bind and mgmt_ep_sa 3807 * all do their own locking. If we do something for 3808 * the FIX: below we may need to lock in that case. 3809 */ 3810 if (addrs->sget_assoc_id == 0) { 3811 /* add the address */ 3812 struct sctp_inpcb *lep; 3813 3814 ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport; 3815 lep = sctp_pcb_findep(addr_touse, 1, 0); 3816 if (lep != NULL) { 3817 /* 3818 * We must decrement the refcount 3819 * since we have the ep already and 3820 * are binding. No remove going on 3821 * here. 3822 */ 3823 SCTP_INP_DECR_REF(inp); 3824 } 3825 if (lep == inp) { 3826 /* already bound to it.. ok */ 3827 break; 3828 } else if (lep == NULL) { 3829 ((struct sockaddr_in *)addr_touse)->sin_port = 0; 3830 error = sctp_addr_mgmt_ep_sa(inp, addr_touse, 3831 SCTP_ADD_IP_ADDRESS); 3832 } else { 3833 error = EADDRNOTAVAIL; 3834 } 3835 if (error) 3836 break; 3837 3838 } else { 3839 /* 3840 * FIX: decide whether we allow assoc based 3841 * bindx 3842 */ 3843 } 3844 } 3845 break; 3846 case SCTP_BINDX_REM_ADDR: 3847 { 3848 struct sctp_getaddresses *addrs; 3849 struct sockaddr *addr_touse; 3850 struct sockaddr_in sin; 3851 3852 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); 3853 /* see if we're bound all already! */ 3854 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3855 error = EINVAL; 3856 break; 3857 } 3858 addr_touse = addrs->addr; 3859 if (addrs->addr->sa_family == AF_INET6) { 3860 struct sockaddr_in6 *sin6; 3861 3862 sin6 = (struct sockaddr_in6 *)addr_touse; 3863 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 3864 in6_sin6_2_sin(&sin, sin6); 3865 addr_touse = (struct sockaddr *)&sin; 3866 } 3867 } 3868 /* 3869 * No lock required mgmt_ep_sa does its own locking. 3870 * If the FIX: below is ever changed we may need to 3871 * lock before calling association level binding. 3872 */ 3873 if (addrs->sget_assoc_id == 0) { 3874 /* delete the address */ 3875 sctp_addr_mgmt_ep_sa(inp, addr_touse, 3876 SCTP_DEL_IP_ADDRESS); 3877 } else { 3878 /* 3879 * FIX: decide whether we allow assoc based 3880 * bindx 3881 */ 3882 } 3883 } 3884 break; 3885 default: 3886 error = ENOPROTOOPT; 3887 break; 3888 } /* end switch (opt) */ 3889 return (error); 3890 } 3891 3892 3893 3894 extern int sctp_chatty_mbuf; 3895 3896 int 3897 sctp_ctloutput(struct socket *so, struct sockopt *sopt) 3898 { 3899 void *optval = NULL; 3900 size_t optsize = 0; 3901 struct sctp_inpcb *inp; 3902 void *p; 3903 int error = 0; 3904 3905 inp = (struct sctp_inpcb *)so->so_pcb; 3906 if (inp == 0) { 3907 /* I made the same as TCP since we are not setup? */ 3908 return (ECONNRESET); 3909 } 3910 if (sopt->sopt_level != IPPROTO_SCTP) { 3911 /* wrong proto level... send back up to IP */ 3912 #ifdef INET6 3913 if (INP_CHECK_SOCKAF(so, AF_INET6)) 3914 error = ip6_ctloutput(so, sopt); 3915 else 3916 #endif /* INET6 */ 3917 error = ip_ctloutput(so, sopt); 3918 return (error); 3919 } 3920 optsize = sopt->sopt_valsize; 3921 if (optsize) { 3922 SCTP_MALLOC(optval, void *, optsize, "SCTPSockOpt"); 3923 if (optval == NULL) { 3924 return (ENOBUFS); 3925 } 3926 error = sooptcopyin(sopt, optval, optsize, optsize); 3927 if (error) { 3928 SCTP_FREE(optval); 3929 goto out; 3930 } 3931 } 3932 p = (void *)sopt->sopt_td; 3933 if (sopt->sopt_dir == SOPT_SET) { 3934 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); 3935 } else if (sopt->sopt_dir == SOPT_GET) { 3936 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); 3937 } else { 3938 error = EINVAL; 3939 } 3940 if ((error == 0) && (optval != NULL)) { 3941 error = sooptcopyout(sopt, optval, optsize); 3942 SCTP_FREE(optval); 3943 } else if (optval != NULL) { 3944 SCTP_FREE(optval); 3945 } 3946 out: 3947 return (error); 3948 } 3949 3950 3951 static int 3952 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) 3953 { 3954 int error = 0; 3955 int create_lock_on = 0; 3956 struct sctp_inpcb *inp; 3957 struct sctp_tcb *stcb = NULL; 3958 3959 inp = (struct sctp_inpcb *)so->so_pcb; 3960 if (inp == 0) { 3961 /* I made the same as TCP since we are not setup? */ 3962 return (ECONNRESET); 3963 } 3964 SCTP_ASOC_CREATE_LOCK(inp); 3965 create_lock_on = 1; 3966 3967 SCTP_INP_INCR_REF(inp); 3968 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3969 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3970 /* Should I really unlock ? */ 3971 error = EFAULT; 3972 goto out_now; 3973 } 3974 #ifdef INET6 3975 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 3976 (addr->sa_family == AF_INET6)) { 3977 error = EINVAL; 3978 goto out_now; 3979 } 3980 #endif /* INET6 */ 3981 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 3982 SCTP_PCB_FLAGS_UNBOUND) { 3983 /* Bind a ephemeral port */ 3984 error = sctp_inpcb_bind(so, NULL, p); 3985 if (error) { 3986 goto out_now; 3987 } 3988 } 3989 /* Now do we connect? */ 3990 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 3991 error = EINVAL; 3992 goto out_now; 3993 } 3994 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3995 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 3996 /* We are already connected AND the TCP model */ 3997 error = EADDRINUSE; 3998 goto out_now; 3999 } 4000 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4001 SCTP_INP_RLOCK(inp); 4002 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4003 if (stcb) 4004 SCTP_TCB_UNLOCK(stcb); 4005 SCTP_INP_RUNLOCK(inp); 4006 } else { 4007 /* 4008 * We increment here since sctp_findassociation_ep_addr() 4009 * wil do a decrement if it finds the stcb as long as the 4010 * locked tcb (last argument) is NOT a TCB.. aka NULL. 4011 */ 4012 SCTP_INP_INCR_REF(inp); 4013 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); 4014 if (stcb == NULL) { 4015 SCTP_INP_DECR_REF(inp); 4016 } 4017 } 4018 if (stcb != NULL) { 4019 /* Already have or am bring up an association */ 4020 error = EALREADY; 4021 goto out_now; 4022 } 4023 /* We are GOOD to go */ 4024 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0); 4025 if (stcb == NULL) { 4026 /* Gak! no memory */ 4027 return (error); 4028 } 4029 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 4030 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 4031 /* Set the connected flag so we can queue data */ 4032 soisconnecting(so); 4033 } 4034 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 4035 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 4036 4037 /* initialize authentication parameters for the assoc */ 4038 sctp_initialize_auth_params(inp, stcb); 4039 4040 sctp_send_initiate(inp, stcb); 4041 out_now: 4042 if (create_lock_on) 4043 SCTP_ASOC_CREATE_UNLOCK(inp); 4044 4045 if (stcb) 4046 SCTP_TCB_UNLOCK(stcb); 4047 SCTP_INP_DECR_REF(inp); 4048 return error; 4049 } 4050 4051 int 4052 sctp_listen(struct socket *so, int backlog, struct thread *p) 4053 { 4054 /* 4055 * Note this module depends on the protocol processing being called 4056 * AFTER any socket level flags and backlog are applied to the 4057 * socket. The traditional way that the socket flags are applied is 4058 * AFTER protocol processing. We have made a change to the 4059 * sys/kern/uipc_socket.c module to reverse this but this MUST be in 4060 * place if the socket API for SCTP is to work properly. 4061 */ 4062 4063 int error = 0; 4064 struct sctp_inpcb *inp; 4065 4066 inp = (struct sctp_inpcb *)so->so_pcb; 4067 if (inp == 0) { 4068 /* I made the same as TCP since we are not setup? */ 4069 return (ECONNRESET); 4070 } 4071 SCTP_INP_RLOCK(inp); 4072 #ifdef SCTP_LOCK_LOGGING 4073 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); 4074 #endif 4075 SOCK_LOCK(so); 4076 error = solisten_proto_check(so); 4077 if (error) { 4078 SOCK_UNLOCK(so); 4079 return (error); 4080 } 4081 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4082 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { 4083 /* We are already connected AND the TCP model */ 4084 SCTP_INP_RUNLOCK(inp); 4085 SOCK_UNLOCK(so); 4086 return (EADDRINUSE); 4087 } 4088 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 4089 /* We must do a bind. */ 4090 SOCK_UNLOCK(so); 4091 SCTP_INP_RUNLOCK(inp); 4092 if ((error = sctp_inpcb_bind(so, NULL, p))) { 4093 /* bind error, probably perm */ 4094 return (error); 4095 } 4096 SOCK_LOCK(so); 4097 } else { 4098 SCTP_INP_RUNLOCK(inp); 4099 } 4100 /* It appears for 7.0 and on, we must always call this. */ 4101 solisten_proto(so, backlog); 4102 4103 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4104 /* remove the ACCEPTCONN flag for one-to-many sockets */ 4105 so->so_options &= ~SO_ACCEPTCONN; 4106 } 4107 if (backlog == 0) { 4108 /* turning off listen */ 4109 so->so_options &= ~SO_ACCEPTCONN; 4110 } 4111 SOCK_UNLOCK(so); 4112 return (error); 4113 } 4114 4115 static int sctp_defered_wakeup_cnt = 0; 4116 4117 int 4118 sctp_accept(struct socket *so, struct sockaddr **addr) 4119 { 4120 struct sctp_tcb *stcb; 4121 struct sctp_inpcb *inp; 4122 union sctp_sockstore store; 4123 4124 int error; 4125 4126 inp = (struct sctp_inpcb *)so->so_pcb; 4127 4128 if (inp == 0) { 4129 return (ECONNRESET); 4130 } 4131 SCTP_INP_RLOCK(inp); 4132 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 4133 return (ENOTSUP); 4134 } 4135 if (so->so_state & SS_ISDISCONNECTED) { 4136 SCTP_INP_RUNLOCK(inp); 4137 return (ECONNABORTED); 4138 } 4139 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4140 if (stcb == NULL) { 4141 SCTP_INP_RUNLOCK(inp); 4142 return (ECONNRESET); 4143 } 4144 SCTP_TCB_LOCK(stcb); 4145 SCTP_INP_RUNLOCK(inp); 4146 store = stcb->asoc.primary_destination->ro._l_addr; 4147 SCTP_TCB_UNLOCK(stcb); 4148 if (store.sa.sa_family == AF_INET) { 4149 struct sockaddr_in *sin; 4150 4151 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4152 sin->sin_family = AF_INET; 4153 sin->sin_len = sizeof(*sin); 4154 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; 4155 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; 4156 *addr = (struct sockaddr *)sin; 4157 } else { 4158 struct sockaddr_in6 *sin6; 4159 4160 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); 4161 sin6->sin6_family = AF_INET6; 4162 sin6->sin6_len = sizeof(*sin6); 4163 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; 4164 4165 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; 4166 if ((error = sa6_recoverscope(sin6)) != 0) { 4167 SCTP_FREE_SONAME(sin6); 4168 return (error); 4169 } 4170 *addr = (struct sockaddr *)sin6; 4171 } 4172 /* Wake any delayed sleep action */ 4173 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { 4174 SCTP_INP_WLOCK(inp); 4175 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; 4176 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { 4177 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 4178 SCTP_INP_WUNLOCK(inp); 4179 SOCKBUF_LOCK(&inp->sctp_socket->so_snd); 4180 if (sowriteable(inp->sctp_socket)) { 4181 sowwakeup_locked(inp->sctp_socket); 4182 } else { 4183 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); 4184 } 4185 SCTP_INP_WLOCK(inp); 4186 } 4187 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { 4188 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 4189 SCTP_INP_WUNLOCK(inp); 4190 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); 4191 if (soreadable(inp->sctp_socket)) { 4192 sctp_defered_wakeup_cnt++; 4193 sorwakeup_locked(inp->sctp_socket); 4194 } else { 4195 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); 4196 } 4197 SCTP_INP_WLOCK(inp); 4198 } 4199 SCTP_INP_WUNLOCK(inp); 4200 } 4201 return (0); 4202 } 4203 4204 int 4205 sctp_ingetaddr(struct socket *so, struct sockaddr **addr) 4206 { 4207 struct sockaddr_in *sin; 4208 4209 struct sctp_inpcb *inp; 4210 4211 /* 4212 * Do the malloc first in case it blocks. 4213 */ 4214 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4215 sin->sin_family = AF_INET; 4216 sin->sin_len = sizeof(*sin); 4217 inp = (struct sctp_inpcb *)so->so_pcb; 4218 if (!inp) { 4219 SCTP_FREE_SONAME(sin); 4220 return ECONNRESET; 4221 } 4222 SCTP_INP_RLOCK(inp); 4223 sin->sin_port = inp->sctp_lport; 4224 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 4225 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 4226 struct sctp_tcb *stcb; 4227 struct sockaddr_in *sin_a; 4228 struct sctp_nets *net; 4229 int fnd; 4230 4231 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4232 if (stcb == NULL) { 4233 goto notConn; 4234 } 4235 fnd = 0; 4236 sin_a = NULL; 4237 SCTP_TCB_LOCK(stcb); 4238 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4239 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4240 if (sin_a == NULL) 4241 /* this will make coverity happy */ 4242 continue; 4243 4244 if (sin_a->sin_family == AF_INET) { 4245 fnd = 1; 4246 break; 4247 } 4248 } 4249 if ((!fnd) || (sin_a == NULL)) { 4250 /* punt */ 4251 SCTP_TCB_UNLOCK(stcb); 4252 goto notConn; 4253 } 4254 sin->sin_addr = sctp_ipv4_source_address_selection(inp, 4255 stcb, (struct route *)&net->ro, net, 0); 4256 SCTP_TCB_UNLOCK(stcb); 4257 } else { 4258 /* For the bound all case you get back 0 */ 4259 notConn: 4260 sin->sin_addr.s_addr = 0; 4261 } 4262 4263 } else { 4264 /* Take the first IPv4 address in the list */ 4265 struct sctp_laddr *laddr; 4266 int fnd = 0; 4267 4268 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4269 if (laddr->ifa->ifa_addr->sa_family == AF_INET) { 4270 struct sockaddr_in *sin_a; 4271 4272 sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr; 4273 sin->sin_addr = sin_a->sin_addr; 4274 fnd = 1; 4275 break; 4276 } 4277 } 4278 if (!fnd) { 4279 SCTP_FREE_SONAME(sin); 4280 SCTP_INP_RUNLOCK(inp); 4281 return ENOENT; 4282 } 4283 } 4284 SCTP_INP_RUNLOCK(inp); 4285 (*addr) = (struct sockaddr *)sin; 4286 return (0); 4287 } 4288 4289 int 4290 sctp_peeraddr(struct socket *so, struct sockaddr **addr) 4291 { 4292 struct sockaddr_in *sin = (struct sockaddr_in *)*addr; 4293 4294 int fnd; 4295 struct sockaddr_in *sin_a; 4296 struct sctp_inpcb *inp; 4297 struct sctp_tcb *stcb; 4298 struct sctp_nets *net; 4299 4300 4301 /* Do the malloc first in case it blocks. */ 4302 inp = (struct sctp_inpcb *)so->so_pcb; 4303 if ((inp == NULL) || 4304 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 4305 /* UDP type and listeners will drop out here */ 4306 return (ENOTCONN); 4307 } 4308 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); 4309 sin->sin_family = AF_INET; 4310 sin->sin_len = sizeof(*sin); 4311 4312 /* We must recapture incase we blocked */ 4313 inp = (struct sctp_inpcb *)so->so_pcb; 4314 if (!inp) { 4315 SCTP_FREE_SONAME(sin); 4316 return ECONNRESET; 4317 } 4318 SCTP_INP_RLOCK(inp); 4319 stcb = LIST_FIRST(&inp->sctp_asoc_list); 4320 if (stcb) 4321 SCTP_TCB_LOCK(stcb); 4322 SCTP_INP_RUNLOCK(inp); 4323 if (stcb == NULL) { 4324 SCTP_FREE_SONAME(sin); 4325 return ECONNRESET; 4326 } 4327 fnd = 0; 4328 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4329 sin_a = (struct sockaddr_in *)&net->ro._l_addr; 4330 if (sin_a->sin_family == AF_INET) { 4331 fnd = 1; 4332 sin->sin_port = stcb->rport; 4333 sin->sin_addr = sin_a->sin_addr; 4334 break; 4335 } 4336 } 4337 SCTP_TCB_UNLOCK(stcb); 4338 if (!fnd) { 4339 /* No IPv4 address */ 4340 SCTP_FREE_SONAME(sin); 4341 return ENOENT; 4342 } 4343 (*addr) = (struct sockaddr *)sin; 4344 return (0); 4345 } 4346 4347 struct pr_usrreqs sctp_usrreqs = { 4348 .pru_abort = sctp_abort, 4349 .pru_accept = sctp_accept, 4350 .pru_attach = sctp_attach, 4351 .pru_bind = sctp_bind, 4352 .pru_connect = sctp_connect, 4353 .pru_control = in_control, 4354 .pru_close = sctp_close, 4355 .pru_detach = sctp_close, 4356 .pru_sopoll = sopoll_generic, 4357 .pru_disconnect = sctp_disconnect, 4358 .pru_listen = sctp_listen, 4359 .pru_peeraddr = sctp_peeraddr, 4360 .pru_send = sctp_sendm, 4361 .pru_shutdown = sctp_shutdown, 4362 .pru_sockaddr = sctp_ingetaddr, 4363 .pru_sosend = sctp_sosend, 4364 .pru_soreceive = sctp_soreceive 4365 }; 4366