/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
			      struct sctp_association *, sctp_socket_type_t);

static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}

/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd       - the socket descriptor returned by socket().
 *   addr     - the address structure (struct sockaddr_in or struct
 *              sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);
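
/* A minimal user-space sketch (not part of this file) of the UDP-style
 * bind() described above, assuming the usual <netinet/in.h> definitions
 * and SCTP support in the running kernel:
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 *
 * A second bind() on the same socket fails with EINVAL, as enforced by
 * sctp_bind() above; additional local addresses go through sctp_bindx()
 * instead.
 */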

/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped addresses are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
				 SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF.  Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    sizeof(saveaddr),
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				sctp_transport_dst_release(trans);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}

/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
				    transports) {
			sctp_transport_dst_release(transport);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * addresses, a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause an
 * endpoint to send the appropriate message to the peer to change the
 * peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}
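
/* A user-space usage sketch (not part of this file) of the sctp_bindx()
 * call that is tunneled through the setsockopt() handler above, assuming
 * the sctp_bindx(3) wrapper from libsctp and <netinet/sctp.h>:
 *
 *	struct sockaddr_in extra = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),	// must match the bound port
 *	};
 *	inet_pton(AF_INET, "192.0.2.10", &extra.sin_addr);
 *
 *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *		       SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 *
 * With ADD-IP enabled (net.sctp.addip_enable), the kernel may also send
 * an ASCONF Add IP to the peers of established associations, as done by
 * sctp_send_asconf_add_ip() above.
 */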

/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	sctp_scope_t scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port <
				    inet_prot_sock(net) &&
				    !ns_capable(net->user_ns,
						CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures. */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	if (assoc_id)
		*assoc_id = asoc->assoc_id;
	err = sctp_wait_for_connect(asoc, &timeo);
	/* Note: the asoc may be freed after the return of
	 * sctp_wait_for_connect.
	 */

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table.  Try to unhash it, just in case; it's a
		 * noop if it wasn't hashed, so we're safe.
		 */
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking __sctp_connect(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	gfp_t gfp = GFP_KERNEL;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	if (sk->sk_socket->file)
		gfp = GFP_USER | __GFP_NOWARN;
	kaddrs = kmalloc(addrs_size, gfp);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}
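
/* A user-space usage sketch (not part of this file) of sctp_connectx(),
 * which reaches __sctp_setsockopt_connectx() above, assuming the
 * sctp_connectx(3) wrapper from libsctp and a peer reachable on two
 * addresses packed back to back:
 *
 *	struct sockaddr_in peers[2] = {
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *	};
 *	sctp_assoc_t aid;
 *
 *	inet_pton(AF_INET, "198.51.100.1", &peers[0].sin_addr);
 *	inet_pton(AF_INET, "198.51.100.2", &peers[1].sin_addr);
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &aid) < 0)
 *		perror("sctp_connectx");
 *
 * Both addresses must carry the same destination port, mirroring the
 * asoc->peer.port check in __sctp_connect().
 */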

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations. The only difference is that we
 * store the actual length of the address buffer into the addrs_num
 * structure member. That way we can re-use the existing code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}
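
/* A user-space sketch (not part of this file) of the SO_LINGER behaviour
 * documented above for TCP-style sockets: l_onoff = 1 with l_linger = 0
 * turns close() into an ABORT, which corresponds to the
 * sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime case in sctp_close():
 *
 *	struct linger lin = {
 *		.l_onoff  = 1,
 *		.l_linger = 0,
 *	};
 *
 *	if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)) < 0)
 *		perror("setsockopt(SO_LINGER)");
 *	close(sd);		// sends ABORT instead of SHUTDOWN
 */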

/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
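
/* A user-space sketch (not part of this file) of the UDP-style sendmsg()
 * described above, carrying a struct sctp_sndrcvinfo in SCTP_SNDRCV
 * ancillary data (as parsed by sctp_msghdr_parse()), assuming
 * <netinet/sctp.h>:
 *
 *	struct sockaddr_in peer = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *	};
 *	char data[] = "hello";
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))] = { 0 };
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr mh = {
 *		.msg_name = &peer, .msg_namelen = sizeof(peer),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
 *	struct sctp_sndrcvinfo *si;
 *
 *	inet_pton(AF_INET, "198.51.100.1", &peer.sin_addr);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_SNDRCV;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(*si));
 *	si = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	si->sinfo_stream = 1;		// send on stream 1
 *
 *	if (sendmsg(sd, &mh, 0) < 0)
 *		perror("sendmsg");
 *
 * On a one-to-many socket, msg_name selects (or implicitly creates) the
 * association; setting sinfo_flags to SCTP_EOF with no data instead shuts
 * the association down gracefully, as handled in sctp_sendmsg() below.
 */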

static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	sctp_scope_t scope;
	bool fill_sinfo_ttl = false, wait_connect = false;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;
	__u16 sinfo_flags = 0;
	long timeo;
	int err;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs. */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinit = cmsgs.init;
	if (cmsgs.sinfo != NULL) {
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
		default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
		default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
		default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
		default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;

		sinfo = &default_sinfo;
		fill_sinfo_ttl = true;
	} else {
		sinfo = cmsgs.srinfo;
	}
	/* Did the user specify SNDINFO/SNDRCVINFO? */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);

		/* If we could not find a matching association on the
		 * endpoint, make sure that it is not a TCP-style
		 * socket that already has an association or there is
		 * no peeled-off association on another socket.
		 */
		if (!asoc &&
		    ((sctp_style(sk, TCP) &&
		      (sctp_sstate(sk, ESTABLISHED) ||
		       sctp_sstate(sk, CLOSING))) ||
		     sctp_endpoint_is_peeled_off(ep, &to))) {
			err = -EADDRNOTAVAIL;
			goto out_unlock;
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || !sinit->sinit_num_ostreams) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}
1814 */ 1815 if (!ep->base.bind_addr.port) { 1816 if (sctp_autobind(sk)) { 1817 err = -EAGAIN; 1818 goto out_unlock; 1819 } 1820 } else { 1821 /* 1822 * If an unprivileged user inherits a one-to-many 1823 * style socket with open associations on a privileged 1824 * port, it MAY be permitted to accept new associations, 1825 * but it SHOULD NOT be permitted to open new 1826 * associations. 1827 */ 1828 if (ep->base.bind_addr.port < inet_prot_sock(net) && 1829 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1830 err = -EACCES; 1831 goto out_unlock; 1832 } 1833 } 1834 1835 scope = sctp_scope(&to); 1836 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1837 if (!new_asoc) { 1838 err = -ENOMEM; 1839 goto out_unlock; 1840 } 1841 asoc = new_asoc; 1842 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1843 if (err < 0) { 1844 err = -ENOMEM; 1845 goto out_free; 1846 } 1847 1848 /* If the SCTP_INIT ancillary data is specified, set all 1849 * the association init values accordingly. 1850 */ 1851 if (sinit) { 1852 if (sinit->sinit_num_ostreams) { 1853 asoc->c.sinit_num_ostreams = 1854 sinit->sinit_num_ostreams; 1855 } 1856 if (sinit->sinit_max_instreams) { 1857 asoc->c.sinit_max_instreams = 1858 sinit->sinit_max_instreams; 1859 } 1860 if (sinit->sinit_max_attempts) { 1861 asoc->max_init_attempts 1862 = sinit->sinit_max_attempts; 1863 } 1864 if (sinit->sinit_max_init_timeo) { 1865 asoc->max_init_timeo = 1866 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1867 } 1868 } 1869 1870 /* Prime the peer's transport structures. */ 1871 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1872 if (!transport) { 1873 err = -ENOMEM; 1874 goto out_free; 1875 } 1876 } 1877 1878 /* ASSERT: we have a valid association at this point. */ 1879 pr_debug("%s: we have a valid association\n", __func__); 1880 1881 if (!sinfo) { 1882 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up 1883 * one with some defaults. 1884 */ 1885 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1886 default_sinfo.sinfo_stream = asoc->default_stream; 1887 default_sinfo.sinfo_flags = asoc->default_flags; 1888 default_sinfo.sinfo_ppid = asoc->default_ppid; 1889 default_sinfo.sinfo_context = asoc->default_context; 1890 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1891 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1892 1893 sinfo = &default_sinfo; 1894 } else if (fill_sinfo_ttl) { 1895 /* In case SNDINFO was specified, we still need to fill 1896 * it with a default ttl from the assoc here. 1897 */ 1898 sinfo->sinfo_timetolive = asoc->default_timetolive; 1899 } 1900 1901 /* API 7.1.7, the sndbuf size per association bounds the 1902 * maximum size of data that can be sent in a single send call. 1903 */ 1904 if (msg_len > sk->sk_sndbuf) { 1905 err = -EMSGSIZE; 1906 goto out_free; 1907 } 1908 1909 if (asoc->pmtu_pending) 1910 sctp_assoc_pending_pmtu(asoc); 1911 1912 /* If fragmentation is disabled and the message length exceeds the 1913 * association fragmentation point, return EMSGSIZE. The I-D 1914 * does not specify what this error is, but this looks like 1915 * a great fit. 1916 */ 1917 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1918 err = -EMSGSIZE; 1919 goto out_free; 1920 } 1921 1922 /* Check for invalid stream. 
*/ 1923 if (sinfo->sinfo_stream >= asoc->stream->outcnt) { 1924 err = -EINVAL; 1925 goto out_free; 1926 } 1927 1928 if (sctp_wspace(asoc) < msg_len) 1929 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); 1930 1931 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1932 if (!sctp_wspace(asoc)) { 1933 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1934 if (err) 1935 goto out_free; 1936 } 1937 1938 /* If an address is passed with the sendto/sendmsg call, it is used 1939 * to override the primary destination address in the TCP model, or 1940 * when SCTP_ADDR_OVER flag is set in the UDP model. 1941 */ 1942 if ((sctp_style(sk, TCP) && msg_name) || 1943 (sinfo_flags & SCTP_ADDR_OVER)) { 1944 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1945 if (!chunk_tp) { 1946 err = -EINVAL; 1947 goto out_free; 1948 } 1949 } else 1950 chunk_tp = NULL; 1951 1952 /* Auto-connect, if we aren't connected already. */ 1953 if (sctp_state(asoc, CLOSED)) { 1954 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1955 if (err < 0) 1956 goto out_free; 1957 1958 wait_connect = true; 1959 pr_debug("%s: we associated primitively\n", __func__); 1960 } 1961 1962 /* Break the message into multiple chunks of maximum size. */ 1963 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); 1964 if (IS_ERR(datamsg)) { 1965 err = PTR_ERR(datamsg); 1966 goto out_free; 1967 } 1968 asoc->force_delay = !!(msg->msg_flags & MSG_MORE); 1969 1970 /* Now send the (possibly) fragmented message. */ 1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1972 sctp_chunk_hold(chunk); 1973 1974 /* Do accounting for the write space. */ 1975 sctp_set_owner_w(chunk); 1976 1977 chunk->transport = chunk_tp; 1978 } 1979 1980 /* Send it to the lower layers. Note: all chunks 1981 * must either fail or succeed. The lower layer 1982 * works that way today. Keep it that way or this 1983 * breaks. 1984 */ 1985 err = sctp_primitive_SEND(net, asoc, datamsg); 1986 /* Did the lower layer accept the chunk? */ 1987 if (err) { 1988 sctp_datamsg_free(datamsg); 1989 goto out_free; 1990 } 1991 1992 pr_debug("%s: we sent primitively\n", __func__); 1993 1994 sctp_datamsg_put(datamsg); 1995 err = msg_len; 1996 1997 if (unlikely(wait_connect)) { 1998 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); 1999 sctp_wait_for_connect(asoc, &timeo); 2000 } 2001 2002 /* If we are already past ASSOCIATE, the lower 2003 * layers are responsible for association cleanup. 2004 */ 2005 goto out_unlock; 2006 2007 out_free: 2008 if (new_asoc) 2009 sctp_association_free(asoc); 2010 out_unlock: 2011 release_sock(sk); 2012 2013 out_nounlock: 2014 return sctp_error(sk, msg_flags, err); 2015 2016 #if 0 2017 do_sock_err: 2018 if (msg_len) 2019 err = msg_len; 2020 else 2021 err = sock_error(sk); 2022 goto out; 2023 2024 do_interrupted: 2025 if (msg_len) 2026 err = msg_len; 2027 goto out; 2028 #endif /* 0 */ 2029 } 2030 2031 /* This is an extended version of skb_pull() that removes the data from the 2032 * start of a skb even when data is spread across the list of skb's in the 2033 * frag_list. len specifies the total amount of data that needs to be removed. 2034 * when 'len' bytes could be removed from the skb, it returns 0. 2035 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2036 * could not be removed. 
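 *
 * For example, if the head skb holds 100 bytes of linear data and 'len' is
 * 150, the first 100 bytes are removed from the head and the remaining 50
 * from the skbs on the frag_list; if the whole chain only holds 120 bytes,
 * the function returns 30.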
2037 */ 2038 static int sctp_skb_pull(struct sk_buff *skb, int len) 2039 { 2040 struct sk_buff *list; 2041 int skb_len = skb_headlen(skb); 2042 int rlen; 2043 2044 if (len <= skb_len) { 2045 __skb_pull(skb, len); 2046 return 0; 2047 } 2048 len -= skb_len; 2049 __skb_pull(skb, skb_len); 2050 2051 skb_walk_frags(skb, list) { 2052 rlen = sctp_skb_pull(list, len); 2053 skb->len -= (len-rlen); 2054 skb->data_len -= (len-rlen); 2055 2056 if (!rlen) 2057 return 0; 2058 2059 len = rlen; 2060 } 2061 2062 return len; 2063 } 2064 2065 /* API 3.1.3 recvmsg() - UDP Style Syntax 2066 * 2067 * ssize_t recvmsg(int socket, struct msghdr *message, 2068 * int flags); 2069 * 2070 * socket - the socket descriptor of the endpoint. 2071 * message - pointer to the msghdr structure which contains a single 2072 * user message and possibly some ancillary data. 2073 * 2074 * See Section 5 for complete description of the data 2075 * structures. 2076 * 2077 * flags - flags sent or received with the user message, see Section 2078 * 5 for complete description of the flags. 2079 */ 2080 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2081 int noblock, int flags, int *addr_len) 2082 { 2083 struct sctp_ulpevent *event = NULL; 2084 struct sctp_sock *sp = sctp_sk(sk); 2085 struct sk_buff *skb, *head_skb; 2086 int copied; 2087 int err = 0; 2088 int skb_len; 2089 2090 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2091 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2092 addr_len); 2093 2094 lock_sock(sk); 2095 2096 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) && 2097 !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) { 2098 err = -ENOTCONN; 2099 goto out; 2100 } 2101 2102 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2103 if (!skb) 2104 goto out; 2105 2106 /* Get the total length of the skb including any skb's in the 2107 * frag_list. 2108 */ 2109 skb_len = skb->len; 2110 2111 copied = skb_len; 2112 if (copied > len) 2113 copied = len; 2114 2115 err = skb_copy_datagram_msg(skb, 0, msg, copied); 2116 2117 event = sctp_skb2event(skb); 2118 2119 if (err) 2120 goto out_free; 2121 2122 if (event->chunk && event->chunk->head_skb) 2123 head_skb = event->chunk->head_skb; 2124 else 2125 head_skb = skb; 2126 sock_recv_ts_and_drops(msg, sk, head_skb); 2127 if (sctp_ulpevent_is_notification(event)) { 2128 msg->msg_flags |= MSG_NOTIFICATION; 2129 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2130 } else { 2131 sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len); 2132 } 2133 2134 /* Check if we allow SCTP_NXTINFO. */ 2135 if (sp->recvnxtinfo) 2136 sctp_ulpevent_read_nxtinfo(event, msg, sk); 2137 /* Check if we allow SCTP_RCVINFO. */ 2138 if (sp->recvrcvinfo) 2139 sctp_ulpevent_read_rcvinfo(event, msg); 2140 /* Check if we allow SCTP_SNDRCVINFO. */ 2141 if (sp->subscribe.sctp_data_io_event) 2142 sctp_ulpevent_read_sndrcvinfo(event, msg); 2143 2144 err = copied; 2145 2146 /* If skb's length exceeds the user's buffer, update the skb and 2147 * push it back to the receive_queue so that the next call to 2148 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2149 */ 2150 if (skb_len > copied) { 2151 msg->msg_flags &= ~MSG_EOR; 2152 if (flags & MSG_PEEK) 2153 goto out_free; 2154 sctp_skb_pull(skb, copied); 2155 skb_queue_head(&sk->sk_receive_queue, skb); 2156 2157 /* When only partial message is copied to the user, increase 2158 * rwnd by that amount. If all the data in the skb is read, 2159 * rwnd is updated when the event is freed. 
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	release_sock(sk);
	return err;
}

/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag. If enabled, no SCTP message
 * fragmentation will be performed. Instead, if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * an error will be indicated to the user.
 */
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}

static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_association *asoc;
	struct sctp_ulpevent *event;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
		return -EFAULT;

	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmitted, the stack will
	 * immediately send up this notification.
	 */
	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
				       &sctp_sk(sk)->subscribe)) {
		asoc = sctp_id2assoc(sk, 0);

		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_ATOMIC);
			if (!event)
				return -ENOMEM;

			sctp_ulpq_tail_event(&asoc->ulpq, event);
		}
	}

	return 0;
}

/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only. When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close. An association
 * being idle is defined as an association that has NOT sent or received
 * user data. The special value of '0' indicates that no automatic
 * close of any associations should be performed. The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
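 *
 * A minimal user-space sketch (assuming a one-to-many socket 'sd' opened
 * with IPPROTO_SCTP and the declarations from <netinet/sctp.h>); note that
 * the kernel clamps the value to the net.sctp.max_autoclose sysctl:
 *
 *	int secs = 120;		// close idle associations after two minutes
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *		       &secs, sizeof(secs)) < 0)
 *		perror("SCTP_AUTOCLOSE");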
2254 */ 2255 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2256 unsigned int optlen) 2257 { 2258 struct sctp_sock *sp = sctp_sk(sk); 2259 struct net *net = sock_net(sk); 2260 2261 /* Applicable to UDP-style socket only */ 2262 if (sctp_style(sk, TCP)) 2263 return -EOPNOTSUPP; 2264 if (optlen != sizeof(int)) 2265 return -EINVAL; 2266 if (copy_from_user(&sp->autoclose, optval, optlen)) 2267 return -EFAULT; 2268 2269 if (sp->autoclose > net->sctp.max_autoclose) 2270 sp->autoclose = net->sctp.max_autoclose; 2271 2272 return 0; 2273 } 2274 2275 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2276 * 2277 * Applications can enable or disable heartbeats for any peer address of 2278 * an association, modify an address's heartbeat interval, force a 2279 * heartbeat to be sent immediately, and adjust the address's maximum 2280 * number of retransmissions sent before an address is considered 2281 * unreachable. The following structure is used to access and modify an 2282 * address's parameters: 2283 * 2284 * struct sctp_paddrparams { 2285 * sctp_assoc_t spp_assoc_id; 2286 * struct sockaddr_storage spp_address; 2287 * uint32_t spp_hbinterval; 2288 * uint16_t spp_pathmaxrxt; 2289 * uint32_t spp_pathmtu; 2290 * uint32_t spp_sackdelay; 2291 * uint32_t spp_flags; 2292 * }; 2293 * 2294 * spp_assoc_id - (one-to-many style socket) This is filled in the 2295 * application, and identifies the association for 2296 * this query. 2297 * spp_address - This specifies which address is of interest. 2298 * spp_hbinterval - This contains the value of the heartbeat interval, 2299 * in milliseconds. If a value of zero 2300 * is present in this field then no changes are to 2301 * be made to this parameter. 2302 * spp_pathmaxrxt - This contains the maximum number of 2303 * retransmissions before this address shall be 2304 * considered unreachable. If a value of zero 2305 * is present in this field then no changes are to 2306 * be made to this parameter. 2307 * spp_pathmtu - When Path MTU discovery is disabled the value 2308 * specified here will be the "fixed" path mtu. 2309 * Note that if the spp_address field is empty 2310 * then all associations on this address will 2311 * have this fixed path mtu set upon them. 2312 * 2313 * spp_sackdelay - When delayed sack is enabled, this value specifies 2314 * the number of milliseconds that sacks will be delayed 2315 * for. This value will apply to all addresses of an 2316 * association if the spp_address field is empty. Note 2317 * also, that if delayed sack is enabled and this 2318 * value is set to 0, no change is made to the last 2319 * recorded delayed sack timer value. 2320 * 2321 * spp_flags - These flags are used to control various features 2322 * on an association. The flag field may contain 2323 * zero or more of the following options. 2324 * 2325 * SPP_HB_ENABLE - Enable heartbeats on the 2326 * specified address. Note that if the address 2327 * field is empty all addresses for the association 2328 * have heartbeats enabled upon them. 2329 * 2330 * SPP_HB_DISABLE - Disable heartbeats on the 2331 * speicifed address. Note that if the address 2332 * field is empty all addresses for the association 2333 * will have their heartbeats disabled. Note also 2334 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2335 * mutually exclusive, only one of these two should 2336 * be specified. Enabling both fields will have 2337 * undetermined results. 2338 * 2339 * SPP_HB_DEMAND - Request a user initiated heartbeat 2340 * to be made immediately. 
 *
 *     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *     heartbeat delay is to be set to the value of 0
 *     milliseconds.
 *
 *     SPP_PMTUD_ENABLE - This field will enable PMTU
 *     discovery upon the specified address. Note that
 *     if the address field is empty then all addresses
 *     on the association are affected.
 *
 *     SPP_PMTUD_DISABLE - This field will disable PMTU
 *     discovery upon the specified address. Note that
 *     if the address field is empty then all addresses
 *     on the association are affected. Note also that
 *     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *     exclusive. Enabling both will have undetermined
 *     results.
 *
 *     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *     on delayed sack. The time specified in spp_sackdelay
 *     is used to specify the sack delay for this address. Note
 *     that if spp_address is empty then all addresses will
 *     enable delayed sack and take on the sack delay
 *     value specified in spp_sackdelay.
 *
 *     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *     off delayed sack. If the spp_address field is blank then
 *     delayed sack is disabled for the entire association. Note
 *     also that this field is mutually exclusive with
 *     SPP_SACKDELAY_ENABLE; setting both will have undefined
 *     results.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock *sp,
				       int hb_change,
				       int pmtud_change,
				       int sackdelay_change)
{
	int error;

	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		struct net *net = sock_net(trans->asoc->base.sk);

		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored. Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if SPP_HB_TIME_IS_ZERO is
		 * set. This lets us use a value of 0 when this flag
		 * is set.
		 */
		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
			params->spp_hbinterval = 0;

		if (params->spp_hbinterval ||
		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
			if (trans) {
				trans->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else if (asoc) {
				asoc->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else {
				sp->hbinterval = params->spp_hbinterval;
			}
		}
	}

	if (hb_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_HB) | hb_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_HB) | hb_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_HB) | hb_change;
		}
	}

	/* When Path MTU discovery is disabled the value specified here will
	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
	 * include the flag SPP_PMTUD_DISABLE for this field to have any
	 * effect).
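	 *
	 * A user-space sketch of pinning the MTU for every destination of an
	 * association (assuming <netinet/sctp.h>, a socket 'sd' and an
	 * association id 'aid'; spp_address is left as the wildcard so the
	 * setting applies association-wide):
	 *
	 *	struct sctp_paddrparams pp;
	 *
	 *	memset(&pp, 0, sizeof(pp));
	 *	pp.spp_assoc_id = aid;
	 *	pp.spp_flags = SPP_PMTUD_DISABLE;
	 *	pp.spp_pathmtu = 1200;	// must be >= SCTP_DEFAULT_MINSEGMENT
	 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
	 *		   &pp, sizeof(pp));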
2434 */ 2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2436 if (trans) { 2437 trans->pathmtu = params->spp_pathmtu; 2438 sctp_assoc_sync_pmtu(asoc); 2439 } else if (asoc) { 2440 asoc->pathmtu = params->spp_pathmtu; 2441 } else { 2442 sp->pathmtu = params->spp_pathmtu; 2443 } 2444 } 2445 2446 if (pmtud_change) { 2447 if (trans) { 2448 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2449 (params->spp_flags & SPP_PMTUD_ENABLE); 2450 trans->param_flags = 2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2452 if (update) { 2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2454 sctp_assoc_sync_pmtu(asoc); 2455 } 2456 } else if (asoc) { 2457 asoc->param_flags = 2458 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2459 } else { 2460 sp->param_flags = 2461 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2462 } 2463 } 2464 2465 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2466 * value of this field is ignored. Note also that a value of zero 2467 * indicates the current setting should be left unchanged. 2468 */ 2469 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2470 if (trans) { 2471 trans->sackdelay = 2472 msecs_to_jiffies(params->spp_sackdelay); 2473 } else if (asoc) { 2474 asoc->sackdelay = 2475 msecs_to_jiffies(params->spp_sackdelay); 2476 } else { 2477 sp->sackdelay = params->spp_sackdelay; 2478 } 2479 } 2480 2481 if (sackdelay_change) { 2482 if (trans) { 2483 trans->param_flags = 2484 (trans->param_flags & ~SPP_SACKDELAY) | 2485 sackdelay_change; 2486 } else if (asoc) { 2487 asoc->param_flags = 2488 (asoc->param_flags & ~SPP_SACKDELAY) | 2489 sackdelay_change; 2490 } else { 2491 sp->param_flags = 2492 (sp->param_flags & ~SPP_SACKDELAY) | 2493 sackdelay_change; 2494 } 2495 } 2496 2497 /* Note that a value of zero indicates the current setting should be 2498 left unchanged. 2499 */ 2500 if (params->spp_pathmaxrxt) { 2501 if (trans) { 2502 trans->pathmaxrxt = params->spp_pathmaxrxt; 2503 } else if (asoc) { 2504 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2505 } else { 2506 sp->pathmaxrxt = params->spp_pathmaxrxt; 2507 } 2508 } 2509 2510 return 0; 2511 } 2512 2513 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2514 char __user *optval, 2515 unsigned int optlen) 2516 { 2517 struct sctp_paddrparams params; 2518 struct sctp_transport *trans = NULL; 2519 struct sctp_association *asoc = NULL; 2520 struct sctp_sock *sp = sctp_sk(sk); 2521 int error; 2522 int hb_change, pmtud_change, sackdelay_change; 2523 2524 if (optlen != sizeof(struct sctp_paddrparams)) 2525 return -EINVAL; 2526 2527 if (copy_from_user(¶ms, optval, optlen)) 2528 return -EFAULT; 2529 2530 /* Validate flags and value parameters. */ 2531 hb_change = params.spp_flags & SPP_HB; 2532 pmtud_change = params.spp_flags & SPP_PMTUD; 2533 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2534 2535 if (hb_change == SPP_HB || 2536 pmtud_change == SPP_PMTUD || 2537 sackdelay_change == SPP_SACKDELAY || 2538 params.spp_sackdelay > 500 || 2539 (params.spp_pathmtu && 2540 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2541 return -EINVAL; 2542 2543 /* If an address other than INADDR_ANY is specified, and 2544 * no transport is found, then the request is invalid. 
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans)
			return -EINVAL;
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Heartbeat demand can only be sent on a transport or
	 * association, but not a socket.
	 */
	if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
		return -EINVAL;

	/* Process parameters. */
	error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
					    hb_change, pmtud_change,
					    sackdelay_change);

	if (error)
		return error;

	/* If changes are for association, also apply parameters to each
	 * transport.
	 */
	if (!trans && asoc) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				    transports) {
			sctp_apply_peer_addr_params(&params, trans, asoc, sp,
						    hb_change, pmtud_change,
						    sackdelay_change);
		}
	}

	return 0;
}

static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
}

static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}

/*
 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will affect the way delayed acks are performed. This
 * option allows you to get or set the delayed ack time, in
 * milliseconds. It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm. If
 * the assoc_id is 0, then this sets or gets the endpoint's default
 * values. If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model). Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t sack_assoc_id;
 *     uint32_t     sack_delay;
 *     uint32_t     sack_freq;
 * };
 *
 * sack_assoc_id - This parameter indicates which association the user
 *    is performing an action upon. Note that if this field's value is
 *    zero then the endpoint's default value is changed (affecting future
 *    associations only).
 *
 * sack_delay - This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to. Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq - This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire. The default value for this is 2; setting this
 *    value to 1 will disable the delayed sack algorithm.
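 *
 * A hedged user-space example (assuming <netinet/sctp.h> and a one-to-many
 * socket 'sd'): apply a 100 ms delay and a frequency of 2 to the endpoint
 * defaults:
 *
 *	struct sctp_sack_info si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.sack_assoc_id = 0;	// endpoint defaults
 *	si.sack_delay = 100;	// milliseconds, must not exceed 500
 *	si.sack_freq = 2;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));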
2634 */ 2635 2636 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2637 char __user *optval, unsigned int optlen) 2638 { 2639 struct sctp_sack_info params; 2640 struct sctp_transport *trans = NULL; 2641 struct sctp_association *asoc = NULL; 2642 struct sctp_sock *sp = sctp_sk(sk); 2643 2644 if (optlen == sizeof(struct sctp_sack_info)) { 2645 if (copy_from_user(¶ms, optval, optlen)) 2646 return -EFAULT; 2647 2648 if (params.sack_delay == 0 && params.sack_freq == 0) 2649 return 0; 2650 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2651 pr_warn_ratelimited(DEPRECATED 2652 "%s (pid %d) " 2653 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2654 "Use struct sctp_sack_info instead\n", 2655 current->comm, task_pid_nr(current)); 2656 if (copy_from_user(¶ms, optval, optlen)) 2657 return -EFAULT; 2658 2659 if (params.sack_delay == 0) 2660 params.sack_freq = 1; 2661 else 2662 params.sack_freq = 0; 2663 } else 2664 return -EINVAL; 2665 2666 /* Validate value parameter. */ 2667 if (params.sack_delay > 500) 2668 return -EINVAL; 2669 2670 /* Get association, if sack_assoc_id != 0 and the socket is a one 2671 * to many style socket, and an association was not found, then 2672 * the id was invalid. 2673 */ 2674 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2675 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2676 return -EINVAL; 2677 2678 if (params.sack_delay) { 2679 if (asoc) { 2680 asoc->sackdelay = 2681 msecs_to_jiffies(params.sack_delay); 2682 asoc->param_flags = 2683 sctp_spp_sackdelay_enable(asoc->param_flags); 2684 } else { 2685 sp->sackdelay = params.sack_delay; 2686 sp->param_flags = 2687 sctp_spp_sackdelay_enable(sp->param_flags); 2688 } 2689 } 2690 2691 if (params.sack_freq == 1) { 2692 if (asoc) { 2693 asoc->param_flags = 2694 sctp_spp_sackdelay_disable(asoc->param_flags); 2695 } else { 2696 sp->param_flags = 2697 sctp_spp_sackdelay_disable(sp->param_flags); 2698 } 2699 } else if (params.sack_freq > 1) { 2700 if (asoc) { 2701 asoc->sackfreq = params.sack_freq; 2702 asoc->param_flags = 2703 sctp_spp_sackdelay_enable(asoc->param_flags); 2704 } else { 2705 sp->sackfreq = params.sack_freq; 2706 sp->param_flags = 2707 sctp_spp_sackdelay_enable(sp->param_flags); 2708 } 2709 } 2710 2711 /* If change is for association, also apply to each transport. */ 2712 if (asoc) { 2713 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2714 transports) { 2715 if (params.sack_delay) { 2716 trans->sackdelay = 2717 msecs_to_jiffies(params.sack_delay); 2718 trans->param_flags = 2719 sctp_spp_sackdelay_enable(trans->param_flags); 2720 } 2721 if (params.sack_freq == 1) { 2722 trans->param_flags = 2723 sctp_spp_sackdelay_disable(trans->param_flags); 2724 } else if (params.sack_freq > 1) { 2725 trans->sackfreq = params.sack_freq; 2726 trans->param_flags = 2727 sctp_spp_sackdelay_enable(trans->param_flags); 2728 } 2729 } 2730 } 2731 2732 return 0; 2733 } 2734 2735 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2736 * 2737 * Applications can specify protocol parameters for the default association 2738 * initialization. The option name argument to setsockopt() and getsockopt() 2739 * is SCTP_INITMSG. 2740 * 2741 * Setting initialization parameters is effective only on an unconnected 2742 * socket (for UDP-style sockets only future associations are effected 2743 * by the change). With TCP-style sockets, this option is inherited by 2744 * sockets derived from a listener socket. 
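 *
 * A minimal sketch from user space (assuming <netinet/sctp.h> and an
 * unconnected SCTP socket 'sd'); zero fields leave the corresponding
 * default untouched:
 *
 *	struct sctp_initmsg im;
 *
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams = 10;
 *	im.sinit_max_instreams = 10;
 *	im.sinit_max_attempts = 4;
 *	im.sinit_max_init_timeo = 3000;	// milliseconds
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));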
2745 */ 2746 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2747 { 2748 struct sctp_initmsg sinit; 2749 struct sctp_sock *sp = sctp_sk(sk); 2750 2751 if (optlen != sizeof(struct sctp_initmsg)) 2752 return -EINVAL; 2753 if (copy_from_user(&sinit, optval, optlen)) 2754 return -EFAULT; 2755 2756 if (sinit.sinit_num_ostreams) 2757 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2758 if (sinit.sinit_max_instreams) 2759 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2760 if (sinit.sinit_max_attempts) 2761 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2762 if (sinit.sinit_max_init_timeo) 2763 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2764 2765 return 0; 2766 } 2767 2768 /* 2769 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2770 * 2771 * Applications that wish to use the sendto() system call may wish to 2772 * specify a default set of parameters that would normally be supplied 2773 * through the inclusion of ancillary data. This socket option allows 2774 * such an application to set the default sctp_sndrcvinfo structure. 2775 * The application that wishes to use this socket option simply passes 2776 * in to this call the sctp_sndrcvinfo structure defined in Section 2777 * 5.2.2) The input parameters accepted by this call include 2778 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2779 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2780 * to this call if the caller is using the UDP model. 2781 */ 2782 static int sctp_setsockopt_default_send_param(struct sock *sk, 2783 char __user *optval, 2784 unsigned int optlen) 2785 { 2786 struct sctp_sock *sp = sctp_sk(sk); 2787 struct sctp_association *asoc; 2788 struct sctp_sndrcvinfo info; 2789 2790 if (optlen != sizeof(info)) 2791 return -EINVAL; 2792 if (copy_from_user(&info, optval, optlen)) 2793 return -EFAULT; 2794 if (info.sinfo_flags & 2795 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2796 SCTP_ABORT | SCTP_EOF)) 2797 return -EINVAL; 2798 2799 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2800 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2801 return -EINVAL; 2802 if (asoc) { 2803 asoc->default_stream = info.sinfo_stream; 2804 asoc->default_flags = info.sinfo_flags; 2805 asoc->default_ppid = info.sinfo_ppid; 2806 asoc->default_context = info.sinfo_context; 2807 asoc->default_timetolive = info.sinfo_timetolive; 2808 } else { 2809 sp->default_stream = info.sinfo_stream; 2810 sp->default_flags = info.sinfo_flags; 2811 sp->default_ppid = info.sinfo_ppid; 2812 sp->default_context = info.sinfo_context; 2813 sp->default_timetolive = info.sinfo_timetolive; 2814 } 2815 2816 return 0; 2817 } 2818 2819 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 2820 * (SCTP_DEFAULT_SNDINFO) 2821 */ 2822 static int sctp_setsockopt_default_sndinfo(struct sock *sk, 2823 char __user *optval, 2824 unsigned int optlen) 2825 { 2826 struct sctp_sock *sp = sctp_sk(sk); 2827 struct sctp_association *asoc; 2828 struct sctp_sndinfo info; 2829 2830 if (optlen != sizeof(info)) 2831 return -EINVAL; 2832 if (copy_from_user(&info, optval, optlen)) 2833 return -EFAULT; 2834 if (info.snd_flags & 2835 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2836 SCTP_ABORT | SCTP_EOF)) 2837 return -EINVAL; 2838 2839 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 2840 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 2841 return -EINVAL; 2842 if (asoc) { 2843 asoc->default_stream = info.snd_sid; 2844 asoc->default_flags = info.snd_flags; 2845 asoc->default_ppid = info.snd_ppid; 2846 asoc->default_context = info.snd_context; 2847 } else { 2848 sp->default_stream = info.snd_sid; 2849 sp->default_flags = info.snd_flags; 2850 sp->default_ppid = info.snd_ppid; 2851 sp->default_context = info.snd_context; 2852 } 2853 2854 return 0; 2855 } 2856 2857 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2858 * 2859 * Requests that the local SCTP stack use the enclosed peer address as 2860 * the association primary. The enclosed address must be one of the 2861 * association peer's addresses. 2862 */ 2863 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2864 unsigned int optlen) 2865 { 2866 struct sctp_prim prim; 2867 struct sctp_transport *trans; 2868 2869 if (optlen != sizeof(struct sctp_prim)) 2870 return -EINVAL; 2871 2872 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2873 return -EFAULT; 2874 2875 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2876 if (!trans) 2877 return -EINVAL; 2878 2879 sctp_assoc_set_primary(trans->asoc, trans); 2880 2881 return 0; 2882 } 2883 2884 /* 2885 * 7.1.5 SCTP_NODELAY 2886 * 2887 * Turn on/off any Nagle-like algorithm. This means that packets are 2888 * generally sent as soon as possible and no unnecessary delays are 2889 * introduced, at the cost of more packets in the network. Expects an 2890 * integer boolean flag. 2891 */ 2892 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2893 unsigned int optlen) 2894 { 2895 int val; 2896 2897 if (optlen < sizeof(int)) 2898 return -EINVAL; 2899 if (get_user(val, (int __user *)optval)) 2900 return -EFAULT; 2901 2902 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2903 return 0; 2904 } 2905 2906 /* 2907 * 2908 * 7.1.1 SCTP_RTOINFO 2909 * 2910 * The protocol parameters used to initialize and bound retransmission 2911 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2912 * and modify these parameters. 2913 * All parameters are time values, in milliseconds. A value of 0, when 2914 * modifying the parameters, indicates that the current value should not 2915 * be changed. 
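 *
 * For illustration, a user-space call along these lines (assuming
 * <netinet/sctp.h> and a socket 'sd') tightens the RTO bounds for the
 * endpoint defaults:
 *
 *	struct sctp_rtoinfo rto;
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = 0;	// endpoint defaults
 *	rto.srto_initial = 300;	// milliseconds
 *	rto.srto_min = 200;
 *	rto.srto_max = 2000;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));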
2916 * 2917 */ 2918 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2919 { 2920 struct sctp_rtoinfo rtoinfo; 2921 struct sctp_association *asoc; 2922 unsigned long rto_min, rto_max; 2923 struct sctp_sock *sp = sctp_sk(sk); 2924 2925 if (optlen != sizeof (struct sctp_rtoinfo)) 2926 return -EINVAL; 2927 2928 if (copy_from_user(&rtoinfo, optval, optlen)) 2929 return -EFAULT; 2930 2931 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2932 2933 /* Set the values to the specific association */ 2934 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2935 return -EINVAL; 2936 2937 rto_max = rtoinfo.srto_max; 2938 rto_min = rtoinfo.srto_min; 2939 2940 if (rto_max) 2941 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2942 else 2943 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2944 2945 if (rto_min) 2946 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2947 else 2948 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2949 2950 if (rto_min > rto_max) 2951 return -EINVAL; 2952 2953 if (asoc) { 2954 if (rtoinfo.srto_initial != 0) 2955 asoc->rto_initial = 2956 msecs_to_jiffies(rtoinfo.srto_initial); 2957 asoc->rto_max = rto_max; 2958 asoc->rto_min = rto_min; 2959 } else { 2960 /* If there is no association or the association-id = 0 2961 * set the values to the endpoint. 2962 */ 2963 if (rtoinfo.srto_initial != 0) 2964 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2965 sp->rtoinfo.srto_max = rto_max; 2966 sp->rtoinfo.srto_min = rto_min; 2967 } 2968 2969 return 0; 2970 } 2971 2972 /* 2973 * 2974 * 7.1.2 SCTP_ASSOCINFO 2975 * 2976 * This option is used to tune the maximum retransmission attempts 2977 * of the association. 2978 * Returns an error if the new association retransmission value is 2979 * greater than the sum of the retransmission value of the peer. 2980 * See [SCTP] for more information. 2981 * 2982 */ 2983 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2984 { 2985 2986 struct sctp_assocparams assocparams; 2987 struct sctp_association *asoc; 2988 2989 if (optlen != sizeof(struct sctp_assocparams)) 2990 return -EINVAL; 2991 if (copy_from_user(&assocparams, optval, optlen)) 2992 return -EFAULT; 2993 2994 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2995 2996 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2997 return -EINVAL; 2998 2999 /* Set the values to the specific association */ 3000 if (asoc) { 3001 if (assocparams.sasoc_asocmaxrxt != 0) { 3002 __u32 path_sum = 0; 3003 int paths = 0; 3004 struct sctp_transport *peer_addr; 3005 3006 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 3007 transports) { 3008 path_sum += peer_addr->pathmaxrxt; 3009 paths++; 3010 } 3011 3012 /* Only validate asocmaxrxt if we have more than 3013 * one path/transport. We do this because path 3014 * retransmissions are only counted when we have more 3015 * then one path. 
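			 *
			 * For example, an association with two reachable peer
			 * addresses whose path maximum retransmissions are
			 * both 5 will accept a sasoc_asocmaxrxt of at most 10.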
3016 */ 3017 if (paths > 1 && 3018 assocparams.sasoc_asocmaxrxt > path_sum) 3019 return -EINVAL; 3020 3021 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 3022 } 3023 3024 if (assocparams.sasoc_cookie_life != 0) 3025 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 3026 } else { 3027 /* Set the values to the endpoint */ 3028 struct sctp_sock *sp = sctp_sk(sk); 3029 3030 if (assocparams.sasoc_asocmaxrxt != 0) 3031 sp->assocparams.sasoc_asocmaxrxt = 3032 assocparams.sasoc_asocmaxrxt; 3033 if (assocparams.sasoc_cookie_life != 0) 3034 sp->assocparams.sasoc_cookie_life = 3035 assocparams.sasoc_cookie_life; 3036 } 3037 return 0; 3038 } 3039 3040 /* 3041 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 3042 * 3043 * This socket option is a boolean flag which turns on or off mapped V4 3044 * addresses. If this option is turned on and the socket is type 3045 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 3046 * If this option is turned off, then no mapping will be done of V4 3047 * addresses and a user will receive both PF_INET6 and PF_INET type 3048 * addresses on the socket. 3049 */ 3050 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 3051 { 3052 int val; 3053 struct sctp_sock *sp = sctp_sk(sk); 3054 3055 if (optlen < sizeof(int)) 3056 return -EINVAL; 3057 if (get_user(val, (int __user *)optval)) 3058 return -EFAULT; 3059 if (val) 3060 sp->v4mapped = 1; 3061 else 3062 sp->v4mapped = 0; 3063 3064 return 0; 3065 } 3066 3067 /* 3068 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 3069 * This option will get or set the maximum size to put in any outgoing 3070 * SCTP DATA chunk. If a message is larger than this size it will be 3071 * fragmented by SCTP into the specified size. Note that the underlying 3072 * SCTP implementation may fragment into smaller sized chunks when the 3073 * PMTU of the underlying association is smaller than the value set by 3074 * the user. The default value for this option is '0' which indicates 3075 * the user is NOT limiting fragmentation and only the PMTU will effect 3076 * SCTP's choice of DATA chunk size. Note also that values set larger 3077 * than the maximum size of an IP datagram will effectively let SCTP 3078 * control fragmentation (i.e. the same as setting this option to 0). 3079 * 3080 * The following structure is used to access and modify this parameter: 3081 * 3082 * struct sctp_assoc_value { 3083 * sctp_assoc_t assoc_id; 3084 * uint32_t assoc_value; 3085 * }; 3086 * 3087 * assoc_id: This parameter is ignored for one-to-one style sockets. 3088 * For one-to-many style sockets this parameter indicates which 3089 * association the user is performing an action upon. Note that if 3090 * this field's value is zero then the endpoints default value is 3091 * changed (effecting future associations only). 3092 * assoc_value: This parameter specifies the maximum size in bytes. 
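 *
 * A user-space sketch (assuming <netinet/sctp.h> and a socket 'sd'): cap
 * outgoing DATA chunks at 1400 bytes for the endpoint defaults:
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = 0;	// endpoint default
 *	av.assoc_value = 1400;	// bytes
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));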
3093 */ 3094 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3095 { 3096 struct sctp_assoc_value params; 3097 struct sctp_association *asoc; 3098 struct sctp_sock *sp = sctp_sk(sk); 3099 int val; 3100 3101 if (optlen == sizeof(int)) { 3102 pr_warn_ratelimited(DEPRECATED 3103 "%s (pid %d) " 3104 "Use of int in maxseg socket option.\n" 3105 "Use struct sctp_assoc_value instead\n", 3106 current->comm, task_pid_nr(current)); 3107 if (copy_from_user(&val, optval, optlen)) 3108 return -EFAULT; 3109 params.assoc_id = 0; 3110 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3111 if (copy_from_user(¶ms, optval, optlen)) 3112 return -EFAULT; 3113 val = params.assoc_value; 3114 } else 3115 return -EINVAL; 3116 3117 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3118 return -EINVAL; 3119 3120 asoc = sctp_id2assoc(sk, params.assoc_id); 3121 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3122 return -EINVAL; 3123 3124 if (asoc) { 3125 if (val == 0) { 3126 val = asoc->pathmtu; 3127 val -= sp->pf->af->net_header_len; 3128 val -= sizeof(struct sctphdr) + 3129 sizeof(struct sctp_data_chunk); 3130 } 3131 asoc->user_frag = val; 3132 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3133 } else { 3134 sp->user_frag = val; 3135 } 3136 3137 return 0; 3138 } 3139 3140 3141 /* 3142 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3143 * 3144 * Requests that the peer mark the enclosed address as the association 3145 * primary. The enclosed address must be one of the association's 3146 * locally bound addresses. The following structure is used to make a 3147 * set primary request: 3148 */ 3149 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3150 unsigned int optlen) 3151 { 3152 struct net *net = sock_net(sk); 3153 struct sctp_sock *sp; 3154 struct sctp_association *asoc = NULL; 3155 struct sctp_setpeerprim prim; 3156 struct sctp_chunk *chunk; 3157 struct sctp_af *af; 3158 int err; 3159 3160 sp = sctp_sk(sk); 3161 3162 if (!net->sctp.addip_enable) 3163 return -EPERM; 3164 3165 if (optlen != sizeof(struct sctp_setpeerprim)) 3166 return -EINVAL; 3167 3168 if (copy_from_user(&prim, optval, optlen)) 3169 return -EFAULT; 3170 3171 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3172 if (!asoc) 3173 return -EINVAL; 3174 3175 if (!asoc->peer.asconf_capable) 3176 return -EPERM; 3177 3178 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3179 return -EPERM; 3180 3181 if (!sctp_state(asoc, ESTABLISHED)) 3182 return -ENOTCONN; 3183 3184 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3185 if (!af) 3186 return -EINVAL; 3187 3188 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3189 return -EADDRNOTAVAIL; 3190 3191 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3192 return -EADDRNOTAVAIL; 3193 3194 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3195 chunk = sctp_make_asconf_set_prim(asoc, 3196 (union sctp_addr *)&prim.sspp_addr); 3197 if (!chunk) 3198 return -ENOMEM; 3199 3200 err = sctp_send_asconf(asoc, chunk); 3201 3202 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3203 3204 return err; 3205 } 3206 3207 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3208 unsigned int optlen) 3209 { 3210 struct sctp_setadaptation adaptation; 3211 3212 if (optlen != sizeof(struct sctp_setadaptation)) 3213 return -EINVAL; 3214 if (copy_from_user(&adaptation, optval, optlen)) 3215 return -EFAULT; 3216 
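	/* Remember the indication; it is advertised to the peer in the
	 * (optional) Adaptation Layer Indication parameter of INIT and
	 * INIT ACK chunks and surfaces on the remote side as an
	 * SCTP_ADAPTATION_INDICATION notification.
	 */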
3217 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3218 3219 return 0; 3220 } 3221 3222 /* 3223 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3224 * 3225 * The context field in the sctp_sndrcvinfo structure is normally only 3226 * used when a failed message is retrieved holding the value that was 3227 * sent down on the actual send call. This option allows the setting of 3228 * a default context on an association basis that will be received on 3229 * reading messages from the peer. This is especially helpful in the 3230 * one-2-many model for an application to keep some reference to an 3231 * internal state machine that is processing messages on the 3232 * association. Note that the setting of this value only effects 3233 * received messages from the peer and does not effect the value that is 3234 * saved with outbound messages. 3235 */ 3236 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3237 unsigned int optlen) 3238 { 3239 struct sctp_assoc_value params; 3240 struct sctp_sock *sp; 3241 struct sctp_association *asoc; 3242 3243 if (optlen != sizeof(struct sctp_assoc_value)) 3244 return -EINVAL; 3245 if (copy_from_user(¶ms, optval, optlen)) 3246 return -EFAULT; 3247 3248 sp = sctp_sk(sk); 3249 3250 if (params.assoc_id != 0) { 3251 asoc = sctp_id2assoc(sk, params.assoc_id); 3252 if (!asoc) 3253 return -EINVAL; 3254 asoc->default_rcv_context = params.assoc_value; 3255 } else { 3256 sp->default_rcv_context = params.assoc_value; 3257 } 3258 3259 return 0; 3260 } 3261 3262 /* 3263 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3264 * 3265 * This options will at a minimum specify if the implementation is doing 3266 * fragmented interleave. Fragmented interleave, for a one to many 3267 * socket, is when subsequent calls to receive a message may return 3268 * parts of messages from different associations. Some implementations 3269 * may allow you to turn this value on or off. If so, when turned off, 3270 * no fragment interleave will occur (which will cause a head of line 3271 * blocking amongst multiple associations sharing the same one to many 3272 * socket). When this option is turned on, then each receive call may 3273 * come from a different association (thus the user must receive data 3274 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3275 * association each receive belongs to. 3276 * 3277 * This option takes a boolean value. A non-zero value indicates that 3278 * fragmented interleave is on. A value of zero indicates that 3279 * fragmented interleave is off. 3280 * 3281 * Note that it is important that an implementation that allows this 3282 * option to be turned on, have it off by default. Otherwise an unaware 3283 * application using the one to many model may become confused and act 3284 * incorrectly. 3285 */ 3286 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3287 char __user *optval, 3288 unsigned int optlen) 3289 { 3290 int val; 3291 3292 if (optlen != sizeof(int)) 3293 return -EINVAL; 3294 if (get_user(val, (int __user *)optval)) 3295 return -EFAULT; 3296 3297 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3298 3299 return 0; 3300 } 3301 3302 /* 3303 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3304 * (SCTP_PARTIAL_DELIVERY_POINT) 3305 * 3306 * This option will set or get the SCTP partial delivery point. This 3307 * point is the size of a message where the partial delivery API will be 3308 * invoked to help free up rwnd space for the peer. 
Setting this to a 3309 * lower value will cause partial deliveries to happen more often. The 3310 * calls argument is an integer that sets or gets the partial delivery 3311 * point. Note also that the call will fail if the user attempts to set 3312 * this value larger than the socket receive buffer size. 3313 * 3314 * Note that any single message having a length smaller than or equal to 3315 * the SCTP partial delivery point will be delivered in one single read 3316 * call as long as the user provided buffer is large enough to hold the 3317 * message. 3318 */ 3319 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3320 char __user *optval, 3321 unsigned int optlen) 3322 { 3323 u32 val; 3324 3325 if (optlen != sizeof(u32)) 3326 return -EINVAL; 3327 if (get_user(val, (int __user *)optval)) 3328 return -EFAULT; 3329 3330 /* Note: We double the receive buffer from what the user sets 3331 * it to be, also initial rwnd is based on rcvbuf/2. 3332 */ 3333 if (val > (sk->sk_rcvbuf >> 1)) 3334 return -EINVAL; 3335 3336 sctp_sk(sk)->pd_point = val; 3337 3338 return 0; /* is this the right error code? */ 3339 } 3340 3341 /* 3342 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3343 * 3344 * This option will allow a user to change the maximum burst of packets 3345 * that can be emitted by this association. Note that the default value 3346 * is 4, and some implementations may restrict this setting so that it 3347 * can only be lowered. 3348 * 3349 * NOTE: This text doesn't seem right. Do this on a socket basis with 3350 * future associations inheriting the socket value. 3351 */ 3352 static int sctp_setsockopt_maxburst(struct sock *sk, 3353 char __user *optval, 3354 unsigned int optlen) 3355 { 3356 struct sctp_assoc_value params; 3357 struct sctp_sock *sp; 3358 struct sctp_association *asoc; 3359 int val; 3360 int assoc_id = 0; 3361 3362 if (optlen == sizeof(int)) { 3363 pr_warn_ratelimited(DEPRECATED 3364 "%s (pid %d) " 3365 "Use of int in max_burst socket option deprecated.\n" 3366 "Use struct sctp_assoc_value instead\n", 3367 current->comm, task_pid_nr(current)); 3368 if (copy_from_user(&val, optval, optlen)) 3369 return -EFAULT; 3370 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3371 if (copy_from_user(¶ms, optval, optlen)) 3372 return -EFAULT; 3373 val = params.assoc_value; 3374 assoc_id = params.assoc_id; 3375 } else 3376 return -EINVAL; 3377 3378 sp = sctp_sk(sk); 3379 3380 if (assoc_id != 0) { 3381 asoc = sctp_id2assoc(sk, assoc_id); 3382 if (!asoc) 3383 return -EINVAL; 3384 asoc->max_burst = val; 3385 } else 3386 sp->max_burst = val; 3387 3388 return 0; 3389 } 3390 3391 /* 3392 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3393 * 3394 * This set option adds a chunk type that the user is requesting to be 3395 * received only in an authenticated way. Changes to the list of chunks 3396 * will only effect future associations on the socket. 
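 *
 * A user-space sketch (assuming <netinet/sctp.h>, a socket 'sd' and SCTP
 * AUTH support enabled via the net.sctp.auth_enable sysctl, without which
 * the call fails with EACCES): require DATA chunks to be authenticated:
 *
 *	struct sctp_authchunk ac;
 *
 *	ac.sauth_chunk = 0;	// chunk type 0 is DATA
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));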
3397 */ 3398 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3399 char __user *optval, 3400 unsigned int optlen) 3401 { 3402 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3403 struct sctp_authchunk val; 3404 3405 if (!ep->auth_enable) 3406 return -EACCES; 3407 3408 if (optlen != sizeof(struct sctp_authchunk)) 3409 return -EINVAL; 3410 if (copy_from_user(&val, optval, optlen)) 3411 return -EFAULT; 3412 3413 switch (val.sauth_chunk) { 3414 case SCTP_CID_INIT: 3415 case SCTP_CID_INIT_ACK: 3416 case SCTP_CID_SHUTDOWN_COMPLETE: 3417 case SCTP_CID_AUTH: 3418 return -EINVAL; 3419 } 3420 3421 /* add this chunk id to the endpoint */ 3422 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3423 } 3424 3425 /* 3426 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3427 * 3428 * This option gets or sets the list of HMAC algorithms that the local 3429 * endpoint requires the peer to use. 3430 */ 3431 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3432 char __user *optval, 3433 unsigned int optlen) 3434 { 3435 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3436 struct sctp_hmacalgo *hmacs; 3437 u32 idents; 3438 int err; 3439 3440 if (!ep->auth_enable) 3441 return -EACCES; 3442 3443 if (optlen < sizeof(struct sctp_hmacalgo)) 3444 return -EINVAL; 3445 3446 hmacs = memdup_user(optval, optlen); 3447 if (IS_ERR(hmacs)) 3448 return PTR_ERR(hmacs); 3449 3450 idents = hmacs->shmac_num_idents; 3451 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3452 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3453 err = -EINVAL; 3454 goto out; 3455 } 3456 3457 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3458 out: 3459 kfree(hmacs); 3460 return err; 3461 } 3462 3463 /* 3464 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3465 * 3466 * This option will set a shared secret key which is used to build an 3467 * association shared key. 3468 */ 3469 static int sctp_setsockopt_auth_key(struct sock *sk, 3470 char __user *optval, 3471 unsigned int optlen) 3472 { 3473 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3474 struct sctp_authkey *authkey; 3475 struct sctp_association *asoc; 3476 int ret; 3477 3478 if (!ep->auth_enable) 3479 return -EACCES; 3480 3481 if (optlen <= sizeof(struct sctp_authkey)) 3482 return -EINVAL; 3483 3484 authkey = memdup_user(optval, optlen); 3485 if (IS_ERR(authkey)) 3486 return PTR_ERR(authkey); 3487 3488 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3489 ret = -EINVAL; 3490 goto out; 3491 } 3492 3493 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3494 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3495 ret = -EINVAL; 3496 goto out; 3497 } 3498 3499 ret = sctp_auth_set_key(ep, asoc, authkey); 3500 out: 3501 kzfree(authkey); 3502 return ret; 3503 } 3504 3505 /* 3506 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3507 * 3508 * This option will get or set the active shared key to be used to build 3509 * the association shared key. 
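 *
 * A minimal user-space sketch (assuming <netinet/sctp.h>, a socket 'sd',
 * an association id 'aid' and a key previously installed with
 * SCTP_AUTH_KEY under key number 1):
 *
 *	struct sctp_authkeyid k;
 *
 *	memset(&k, 0, sizeof(k));
 *	k.scact_assoc_id = aid;
 *	k.scact_keynumber = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY, &k, sizeof(k));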
3510 */ 3511 static int sctp_setsockopt_active_key(struct sock *sk, 3512 char __user *optval, 3513 unsigned int optlen) 3514 { 3515 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3516 struct sctp_authkeyid val; 3517 struct sctp_association *asoc; 3518 3519 if (!ep->auth_enable) 3520 return -EACCES; 3521 3522 if (optlen != sizeof(struct sctp_authkeyid)) 3523 return -EINVAL; 3524 if (copy_from_user(&val, optval, optlen)) 3525 return -EFAULT; 3526 3527 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3528 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3529 return -EINVAL; 3530 3531 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3532 } 3533 3534 /* 3535 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3536 * 3537 * This set option will delete a shared secret key from use. 3538 */ 3539 static int sctp_setsockopt_del_key(struct sock *sk, 3540 char __user *optval, 3541 unsigned int optlen) 3542 { 3543 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3544 struct sctp_authkeyid val; 3545 struct sctp_association *asoc; 3546 3547 if (!ep->auth_enable) 3548 return -EACCES; 3549 3550 if (optlen != sizeof(struct sctp_authkeyid)) 3551 return -EINVAL; 3552 if (copy_from_user(&val, optval, optlen)) 3553 return -EFAULT; 3554 3555 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3556 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3557 return -EINVAL; 3558 3559 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3560 3561 } 3562 3563 /* 3564 * 8.1.23 SCTP_AUTO_ASCONF 3565 * 3566 * This option will enable or disable the use of the automatic generation of 3567 * ASCONF chunks to add and delete addresses to an existing association. Note 3568 * that this option has two caveats namely: a) it only affects sockets that 3569 * are bound to all addresses available to the SCTP stack, and b) the system 3570 * administrator may have an overriding control that turns the ASCONF feature 3571 * off no matter what setting the socket option may have. 3572 * This option expects an integer boolean flag, where a non-zero value turns on 3573 * the option, and a zero value turns off the option. 3574 * Note. In this implementation, socket operation overrides default parameter 3575 * being set by sysctl as well as FreeBSD implementation 3576 */ 3577 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3578 unsigned int optlen) 3579 { 3580 int val; 3581 struct sctp_sock *sp = sctp_sk(sk); 3582 3583 if (optlen < sizeof(int)) 3584 return -EINVAL; 3585 if (get_user(val, (int __user *)optval)) 3586 return -EFAULT; 3587 if (!sctp_is_ep_boundall(sk) && val) 3588 return -EINVAL; 3589 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3590 return 0; 3591 3592 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3593 if (val == 0 && sp->do_auto_asconf) { 3594 list_del(&sp->auto_asconf_list); 3595 sp->do_auto_asconf = 0; 3596 } else if (val && !sp->do_auto_asconf) { 3597 list_add_tail(&sp->auto_asconf_list, 3598 &sock_net(sk)->sctp.auto_asconf_splist); 3599 sp->do_auto_asconf = 1; 3600 } 3601 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3602 return 0; 3603 } 3604 3605 /* 3606 * SCTP_PEER_ADDR_THLDS 3607 * 3608 * This option allows us to alter the partially failed threshold for one or all 3609 * transports in an association. 
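 *
 * A user-space sketch (assuming a socket 'sd', an association id 'aid' and
 * the struct sctp_paddrthlds declaration from the lksctp headers): mark
 * destinations Potentially Failed after two timeouts and unreachable after
 * five, for every transport of the association (spt_address left as the
 * wildcard):
 *
 *	struct sctp_paddrthlds th;
 *
 *	memset(&th, 0, sizeof(th));
 *	th.spt_assoc_id = aid;
 *	th.spt_pathpfthld = 2;
 *	th.spt_pathmaxrxt = 5;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &th, sizeof(th));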
See Section 6.1 of: 3610 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3611 */ 3612 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3613 char __user *optval, 3614 unsigned int optlen) 3615 { 3616 struct sctp_paddrthlds val; 3617 struct sctp_transport *trans; 3618 struct sctp_association *asoc; 3619 3620 if (optlen < sizeof(struct sctp_paddrthlds)) 3621 return -EINVAL; 3622 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3623 sizeof(struct sctp_paddrthlds))) 3624 return -EFAULT; 3625 3626 3627 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3628 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3629 if (!asoc) 3630 return -ENOENT; 3631 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3632 transports) { 3633 if (val.spt_pathmaxrxt) 3634 trans->pathmaxrxt = val.spt_pathmaxrxt; 3635 trans->pf_retrans = val.spt_pathpfthld; 3636 } 3637 3638 if (val.spt_pathmaxrxt) 3639 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3640 asoc->pf_retrans = val.spt_pathpfthld; 3641 } else { 3642 trans = sctp_addr_id2transport(sk, &val.spt_address, 3643 val.spt_assoc_id); 3644 if (!trans) 3645 return -ENOENT; 3646 3647 if (val.spt_pathmaxrxt) 3648 trans->pathmaxrxt = val.spt_pathmaxrxt; 3649 trans->pf_retrans = val.spt_pathpfthld; 3650 } 3651 3652 return 0; 3653 } 3654 3655 static int sctp_setsockopt_recvrcvinfo(struct sock *sk, 3656 char __user *optval, 3657 unsigned int optlen) 3658 { 3659 int val; 3660 3661 if (optlen < sizeof(int)) 3662 return -EINVAL; 3663 if (get_user(val, (int __user *) optval)) 3664 return -EFAULT; 3665 3666 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; 3667 3668 return 0; 3669 } 3670 3671 static int sctp_setsockopt_recvnxtinfo(struct sock *sk, 3672 char __user *optval, 3673 unsigned int optlen) 3674 { 3675 int val; 3676 3677 if (optlen < sizeof(int)) 3678 return -EINVAL; 3679 if (get_user(val, (int __user *) optval)) 3680 return -EFAULT; 3681 3682 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 
0 : 1; 3683 3684 return 0; 3685 } 3686 3687 static int sctp_setsockopt_pr_supported(struct sock *sk, 3688 char __user *optval, 3689 unsigned int optlen) 3690 { 3691 struct sctp_assoc_value params; 3692 struct sctp_association *asoc; 3693 int retval = -EINVAL; 3694 3695 if (optlen != sizeof(params)) 3696 goto out; 3697 3698 if (copy_from_user(¶ms, optval, optlen)) { 3699 retval = -EFAULT; 3700 goto out; 3701 } 3702 3703 asoc = sctp_id2assoc(sk, params.assoc_id); 3704 if (asoc) { 3705 asoc->prsctp_enable = !!params.assoc_value; 3706 } else if (!params.assoc_id) { 3707 struct sctp_sock *sp = sctp_sk(sk); 3708 3709 sp->ep->prsctp_enable = !!params.assoc_value; 3710 } else { 3711 goto out; 3712 } 3713 3714 retval = 0; 3715 3716 out: 3717 return retval; 3718 } 3719 3720 static int sctp_setsockopt_default_prinfo(struct sock *sk, 3721 char __user *optval, 3722 unsigned int optlen) 3723 { 3724 struct sctp_default_prinfo info; 3725 struct sctp_association *asoc; 3726 int retval = -EINVAL; 3727 3728 if (optlen != sizeof(info)) 3729 goto out; 3730 3731 if (copy_from_user(&info, optval, sizeof(info))) { 3732 retval = -EFAULT; 3733 goto out; 3734 } 3735 3736 if (info.pr_policy & ~SCTP_PR_SCTP_MASK) 3737 goto out; 3738 3739 if (info.pr_policy == SCTP_PR_SCTP_NONE) 3740 info.pr_value = 0; 3741 3742 asoc = sctp_id2assoc(sk, info.pr_assoc_id); 3743 if (asoc) { 3744 SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); 3745 asoc->default_timetolive = info.pr_value; 3746 } else if (!info.pr_assoc_id) { 3747 struct sctp_sock *sp = sctp_sk(sk); 3748 3749 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); 3750 sp->default_timetolive = info.pr_value; 3751 } else { 3752 goto out; 3753 } 3754 3755 retval = 0; 3756 3757 out: 3758 return retval; 3759 } 3760 3761 static int sctp_setsockopt_reconfig_supported(struct sock *sk, 3762 char __user *optval, 3763 unsigned int optlen) 3764 { 3765 struct sctp_assoc_value params; 3766 struct sctp_association *asoc; 3767 int retval = -EINVAL; 3768 3769 if (optlen != sizeof(params)) 3770 goto out; 3771 3772 if (copy_from_user(¶ms, optval, optlen)) { 3773 retval = -EFAULT; 3774 goto out; 3775 } 3776 3777 asoc = sctp_id2assoc(sk, params.assoc_id); 3778 if (asoc) { 3779 asoc->reconf_enable = !!params.assoc_value; 3780 } else if (!params.assoc_id) { 3781 struct sctp_sock *sp = sctp_sk(sk); 3782 3783 sp->ep->reconf_enable = !!params.assoc_value; 3784 } else { 3785 goto out; 3786 } 3787 3788 retval = 0; 3789 3790 out: 3791 return retval; 3792 } 3793 3794 static int sctp_setsockopt_enable_strreset(struct sock *sk, 3795 char __user *optval, 3796 unsigned int optlen) 3797 { 3798 struct sctp_assoc_value params; 3799 struct sctp_association *asoc; 3800 int retval = -EINVAL; 3801 3802 if (optlen != sizeof(params)) 3803 goto out; 3804 3805 if (copy_from_user(¶ms, optval, optlen)) { 3806 retval = -EFAULT; 3807 goto out; 3808 } 3809 3810 if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK)) 3811 goto out; 3812 3813 asoc = sctp_id2assoc(sk, params.assoc_id); 3814 if (asoc) { 3815 asoc->strreset_enable = params.assoc_value; 3816 } else if (!params.assoc_id) { 3817 struct sctp_sock *sp = sctp_sk(sk); 3818 3819 sp->ep->strreset_enable = params.assoc_value; 3820 } else { 3821 goto out; 3822 } 3823 3824 retval = 0; 3825 3826 out: 3827 return retval; 3828 } 3829 3830 static int sctp_setsockopt_reset_streams(struct sock *sk, 3831 char __user *optval, 3832 unsigned int optlen) 3833 { 3834 struct sctp_reset_streams *params; 3835 struct sctp_association *asoc; 3836 int retval = -EINVAL; 3837 3838 
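	/* The request may carry a variable-length list of stream ids after
	 * the fixed-size header, so the whole user buffer is duplicated
	 * below instead of copying a fixed-size structure.
	 */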
if (optlen < sizeof(struct sctp_reset_streams)) 3839 return -EINVAL; 3840 3841 params = memdup_user(optval, optlen); 3842 if (IS_ERR(params)) 3843 return PTR_ERR(params); 3844 3845 asoc = sctp_id2assoc(sk, params->srs_assoc_id); 3846 if (!asoc) 3847 goto out; 3848 3849 retval = sctp_send_reset_streams(asoc, params); 3850 3851 out: 3852 kfree(params); 3853 return retval; 3854 } 3855 3856 static int sctp_setsockopt_reset_assoc(struct sock *sk, 3857 char __user *optval, 3858 unsigned int optlen) 3859 { 3860 struct sctp_association *asoc; 3861 sctp_assoc_t associd; 3862 int retval = -EINVAL; 3863 3864 if (optlen != sizeof(associd)) 3865 goto out; 3866 3867 if (copy_from_user(&associd, optval, optlen)) { 3868 retval = -EFAULT; 3869 goto out; 3870 } 3871 3872 asoc = sctp_id2assoc(sk, associd); 3873 if (!asoc) 3874 goto out; 3875 3876 retval = sctp_send_reset_assoc(asoc); 3877 3878 out: 3879 return retval; 3880 } 3881 3882 static int sctp_setsockopt_add_streams(struct sock *sk, 3883 char __user *optval, 3884 unsigned int optlen) 3885 { 3886 struct sctp_association *asoc; 3887 struct sctp_add_streams params; 3888 int retval = -EINVAL; 3889 3890 if (optlen != sizeof(params)) 3891 goto out; 3892 3893 if (copy_from_user(¶ms, optval, optlen)) { 3894 retval = -EFAULT; 3895 goto out; 3896 } 3897 3898 asoc = sctp_id2assoc(sk, params.sas_assoc_id); 3899 if (!asoc) 3900 goto out; 3901 3902 retval = sctp_send_add_streams(asoc, ¶ms); 3903 3904 out: 3905 return retval; 3906 } 3907 3908 /* API 6.2 setsockopt(), getsockopt() 3909 * 3910 * Applications use setsockopt() and getsockopt() to set or retrieve 3911 * socket options. Socket options are used to change the default 3912 * behavior of sockets calls. They are described in Section 7. 3913 * 3914 * The syntax is: 3915 * 3916 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3917 * int __user *optlen); 3918 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3919 * int optlen); 3920 * 3921 * sd - the socket descript. 3922 * level - set to IPPROTO_SCTP for all SCTP options. 3923 * optname - the option name. 3924 * optval - the buffer to store the value of the option. 3925 * optlen - the size of the buffer. 3926 */ 3927 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3928 char __user *optval, unsigned int optlen) 3929 { 3930 int retval = 0; 3931 3932 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3933 3934 /* I can hardly begin to describe how wrong this is. This is 3935 * so broken as to be worse than useless. The API draft 3936 * REALLY is NOT helpful here... I am not convinced that the 3937 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3938 * are at all well-founded. 3939 */ 3940 if (level != SOL_SCTP) { 3941 struct sctp_af *af = sctp_sk(sk)->pf->af; 3942 retval = af->setsockopt(sk, level, optname, optval, optlen); 3943 goto out_nounlock; 3944 } 3945 3946 lock_sock(sk); 3947 3948 switch (optname) { 3949 case SCTP_SOCKOPT_BINDX_ADD: 3950 /* 'optlen' is the size of the addresses buffer. */ 3951 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3952 optlen, SCTP_BINDX_ADD_ADDR); 3953 break; 3954 3955 case SCTP_SOCKOPT_BINDX_REM: 3956 /* 'optlen' is the size of the addresses buffer. */ 3957 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3958 optlen, SCTP_BINDX_REM_ADDR); 3959 break; 3960 3961 case SCTP_SOCKOPT_CONNECTX_OLD: 3962 /* 'optlen' is the size of the addresses buffer. 
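		 * (a packed array of struct sockaddr_in / sockaddr_in6
		 * entries, as passed in by the sctp_connectx(3) library
		 * wrapper).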
*/ 3963 retval = sctp_setsockopt_connectx_old(sk, 3964 (struct sockaddr __user *)optval, 3965 optlen); 3966 break; 3967 3968 case SCTP_SOCKOPT_CONNECTX: 3969 /* 'optlen' is the size of the addresses buffer. */ 3970 retval = sctp_setsockopt_connectx(sk, 3971 (struct sockaddr __user *)optval, 3972 optlen); 3973 break; 3974 3975 case SCTP_DISABLE_FRAGMENTS: 3976 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3977 break; 3978 3979 case SCTP_EVENTS: 3980 retval = sctp_setsockopt_events(sk, optval, optlen); 3981 break; 3982 3983 case SCTP_AUTOCLOSE: 3984 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3985 break; 3986 3987 case SCTP_PEER_ADDR_PARAMS: 3988 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3989 break; 3990 3991 case SCTP_DELAYED_SACK: 3992 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3993 break; 3994 case SCTP_PARTIAL_DELIVERY_POINT: 3995 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3996 break; 3997 3998 case SCTP_INITMSG: 3999 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 4000 break; 4001 case SCTP_DEFAULT_SEND_PARAM: 4002 retval = sctp_setsockopt_default_send_param(sk, optval, 4003 optlen); 4004 break; 4005 case SCTP_DEFAULT_SNDINFO: 4006 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); 4007 break; 4008 case SCTP_PRIMARY_ADDR: 4009 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 4010 break; 4011 case SCTP_SET_PEER_PRIMARY_ADDR: 4012 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 4013 break; 4014 case SCTP_NODELAY: 4015 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 4016 break; 4017 case SCTP_RTOINFO: 4018 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 4019 break; 4020 case SCTP_ASSOCINFO: 4021 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 4022 break; 4023 case SCTP_I_WANT_MAPPED_V4_ADDR: 4024 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 4025 break; 4026 case SCTP_MAXSEG: 4027 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 4028 break; 4029 case SCTP_ADAPTATION_LAYER: 4030 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 4031 break; 4032 case SCTP_CONTEXT: 4033 retval = sctp_setsockopt_context(sk, optval, optlen); 4034 break; 4035 case SCTP_FRAGMENT_INTERLEAVE: 4036 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 4037 break; 4038 case SCTP_MAX_BURST: 4039 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 4040 break; 4041 case SCTP_AUTH_CHUNK: 4042 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 4043 break; 4044 case SCTP_HMAC_IDENT: 4045 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 4046 break; 4047 case SCTP_AUTH_KEY: 4048 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 4049 break; 4050 case SCTP_AUTH_ACTIVE_KEY: 4051 retval = sctp_setsockopt_active_key(sk, optval, optlen); 4052 break; 4053 case SCTP_AUTH_DELETE_KEY: 4054 retval = sctp_setsockopt_del_key(sk, optval, optlen); 4055 break; 4056 case SCTP_AUTO_ASCONF: 4057 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 4058 break; 4059 case SCTP_PEER_ADDR_THLDS: 4060 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 4061 break; 4062 case SCTP_RECVRCVINFO: 4063 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); 4064 break; 4065 case SCTP_RECVNXTINFO: 4066 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); 4067 break; 4068 case SCTP_PR_SUPPORTED: 4069 retval = sctp_setsockopt_pr_supported(sk, optval, optlen); 4070 break; 4071 case SCTP_DEFAULT_PRINFO: 4072 retval = 
sctp_setsockopt_default_prinfo(sk, optval, optlen); 4073 break; 4074 case SCTP_RECONFIG_SUPPORTED: 4075 retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen); 4076 break; 4077 case SCTP_ENABLE_STREAM_RESET: 4078 retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); 4079 break; 4080 case SCTP_RESET_STREAMS: 4081 retval = sctp_setsockopt_reset_streams(sk, optval, optlen); 4082 break; 4083 case SCTP_RESET_ASSOC: 4084 retval = sctp_setsockopt_reset_assoc(sk, optval, optlen); 4085 break; 4086 case SCTP_ADD_STREAMS: 4087 retval = sctp_setsockopt_add_streams(sk, optval, optlen); 4088 break; 4089 default: 4090 retval = -ENOPROTOOPT; 4091 break; 4092 } 4093 4094 release_sock(sk); 4095 4096 out_nounlock: 4097 return retval; 4098 } 4099 4100 /* API 3.1.6 connect() - UDP Style Syntax 4101 * 4102 * An application may use the connect() call in the UDP model to initiate an 4103 * association without sending data. 4104 * 4105 * The syntax is: 4106 * 4107 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 4108 * 4109 * sd: the socket descriptor to have a new association added to. 4110 * 4111 * nam: the address structure (either struct sockaddr_in or struct 4112 * sockaddr_in6 defined in RFC2553 [7]). 4113 * 4114 * len: the size of the address. 4115 */ 4116 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 4117 int addr_len) 4118 { 4119 int err = 0; 4120 struct sctp_af *af; 4121 4122 lock_sock(sk); 4123 4124 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 4125 addr, addr_len); 4126 4127 /* Validate addr_len before calling common connect/connectx routine. */ 4128 af = sctp_get_af_specific(addr->sa_family); 4129 if (!af || addr_len < af->sockaddr_len) { 4130 err = -EINVAL; 4131 } else { 4132 /* Pass correct addr len to common routine (so it knows there 4133 * is only one address being passed. 4134 */ 4135 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 4136 } 4137 4138 release_sock(sk); 4139 return err; 4140 } 4141 4142 /* FIXME: Write comments. */ 4143 static int sctp_disconnect(struct sock *sk, int flags) 4144 { 4145 return -EOPNOTSUPP; /* STUB */ 4146 } 4147 4148 /* 4.1.4 accept() - TCP Style Syntax 4149 * 4150 * Applications use accept() call to remove an established SCTP 4151 * association from the accept queue of the endpoint. A new socket 4152 * descriptor will be returned from accept() to represent the newly 4153 * formed association. 4154 */ 4155 static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern) 4156 { 4157 struct sctp_sock *sp; 4158 struct sctp_endpoint *ep; 4159 struct sock *newsk = NULL; 4160 struct sctp_association *asoc; 4161 long timeo; 4162 int error = 0; 4163 4164 lock_sock(sk); 4165 4166 sp = sctp_sk(sk); 4167 ep = sp->ep; 4168 4169 if (!sctp_style(sk, TCP)) { 4170 error = -EOPNOTSUPP; 4171 goto out; 4172 } 4173 4174 if (!sctp_sstate(sk, LISTENING)) { 4175 error = -EINVAL; 4176 goto out; 4177 } 4178 4179 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 4180 4181 error = sctp_wait_for_accept(sk, timeo); 4182 if (error) 4183 goto out; 4184 4185 /* We treat the list of associations on the endpoint as the accept 4186 * queue and pick the first association on the list. 4187 */ 4188 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 4189 4190 newsk = sp->pf->create_accept_sk(sk, asoc, kern); 4191 if (!newsk) { 4192 error = -ENOMEM; 4193 goto out; 4194 } 4195 4196 /* Populate the fields of the newsk from the oldsk and migrate the 4197 * asoc to the newsk. 
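	 * The migration also removes the association from the listener's
	 * accept list, so the next accept() call picks up the next one.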
4198 */ 4199 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 4200 4201 out: 4202 release_sock(sk); 4203 *err = error; 4204 return newsk; 4205 } 4206 4207 /* The SCTP ioctl handler. */ 4208 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 4209 { 4210 int rc = -ENOTCONN; 4211 4212 lock_sock(sk); 4213 4214 /* 4215 * SEQPACKET-style sockets in LISTENING state are valid, for 4216 * SCTP, so only discard TCP-style sockets in LISTENING state. 4217 */ 4218 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 4219 goto out; 4220 4221 switch (cmd) { 4222 case SIOCINQ: { 4223 struct sk_buff *skb; 4224 unsigned int amount = 0; 4225 4226 skb = skb_peek(&sk->sk_receive_queue); 4227 if (skb != NULL) { 4228 /* 4229 * We will only return the amount of this packet since 4230 * that is all that will be read. 4231 */ 4232 amount = skb->len; 4233 } 4234 rc = put_user(amount, (int __user *)arg); 4235 break; 4236 } 4237 default: 4238 rc = -ENOIOCTLCMD; 4239 break; 4240 } 4241 out: 4242 release_sock(sk); 4243 return rc; 4244 } 4245 4246 /* This is the function which gets called during socket creation to 4247 * initialized the SCTP-specific portion of the sock. 4248 * The sock structure should already be zero-filled memory. 4249 */ 4250 static int sctp_init_sock(struct sock *sk) 4251 { 4252 struct net *net = sock_net(sk); 4253 struct sctp_sock *sp; 4254 4255 pr_debug("%s: sk:%p\n", __func__, sk); 4256 4257 sp = sctp_sk(sk); 4258 4259 /* Initialize the SCTP per socket area. */ 4260 switch (sk->sk_type) { 4261 case SOCK_SEQPACKET: 4262 sp->type = SCTP_SOCKET_UDP; 4263 break; 4264 case SOCK_STREAM: 4265 sp->type = SCTP_SOCKET_TCP; 4266 break; 4267 default: 4268 return -ESOCKTNOSUPPORT; 4269 } 4270 4271 sk->sk_gso_type = SKB_GSO_SCTP; 4272 4273 /* Initialize default send parameters. These parameters can be 4274 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4275 */ 4276 sp->default_stream = 0; 4277 sp->default_ppid = 0; 4278 sp->default_flags = 0; 4279 sp->default_context = 0; 4280 sp->default_timetolive = 0; 4281 4282 sp->default_rcv_context = 0; 4283 sp->max_burst = net->sctp.max_burst; 4284 4285 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 4286 4287 /* Initialize default setup parameters. These parameters 4288 * can be modified with the SCTP_INITMSG socket option or 4289 * overridden by the SCTP_INIT CMSG. 4290 */ 4291 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 4292 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 4293 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 4294 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 4295 4296 /* Initialize default RTO related parameters. These parameters can 4297 * be modified for with the SCTP_RTOINFO socket option. 4298 */ 4299 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 4300 sp->rtoinfo.srto_max = net->sctp.rto_max; 4301 sp->rtoinfo.srto_min = net->sctp.rto_min; 4302 4303 /* Initialize default association related parameters. These parameters 4304 * can be modified with the SCTP_ASSOCINFO socket option. 4305 */ 4306 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 4307 sp->assocparams.sasoc_number_peer_destinations = 0; 4308 sp->assocparams.sasoc_peer_rwnd = 0; 4309 sp->assocparams.sasoc_local_rwnd = 0; 4310 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 4311 4312 /* Initialize default event subscriptions. By default, all the 4313 * options are off. 
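	 * They can be enabled later with the SCTP_EVENTS socket option.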
4314 */ 4315 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 4316 4317 /* Default Peer Address Parameters. These defaults can 4318 * be modified via SCTP_PEER_ADDR_PARAMS 4319 */ 4320 sp->hbinterval = net->sctp.hb_interval; 4321 sp->pathmaxrxt = net->sctp.max_retrans_path; 4322 sp->pathmtu = 0; /* allow default discovery */ 4323 sp->sackdelay = net->sctp.sack_timeout; 4324 sp->sackfreq = 2; 4325 sp->param_flags = SPP_HB_ENABLE | 4326 SPP_PMTUD_ENABLE | 4327 SPP_SACKDELAY_ENABLE; 4328 4329 /* If enabled no SCTP message fragmentation will be performed. 4330 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 4331 */ 4332 sp->disable_fragments = 0; 4333 4334 /* Enable Nagle algorithm by default. */ 4335 sp->nodelay = 0; 4336 4337 sp->recvrcvinfo = 0; 4338 sp->recvnxtinfo = 0; 4339 4340 /* Enable by default. */ 4341 sp->v4mapped = 1; 4342 4343 /* Auto-close idle associations after the configured 4344 * number of seconds. A value of 0 disables this 4345 * feature. Configure through the SCTP_AUTOCLOSE socket option, 4346 * for UDP-style sockets only. 4347 */ 4348 sp->autoclose = 0; 4349 4350 /* User specified fragmentation limit. */ 4351 sp->user_frag = 0; 4352 4353 sp->adaptation_ind = 0; 4354 4355 sp->pf = sctp_get_pf_specific(sk->sk_family); 4356 4357 /* Control variables for partial data delivery. */ 4358 atomic_set(&sp->pd_mode, 0); 4359 skb_queue_head_init(&sp->pd_lobby); 4360 sp->frag_interleave = 0; 4361 4362 /* Create a per socket endpoint structure. Even if we 4363 * change the data structure relationships, this may still 4364 * be useful for storing pre-connect address information. 4365 */ 4366 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4367 if (!sp->ep) 4368 return -ENOMEM; 4369 4370 sp->hmac = NULL; 4371 4372 sk->sk_destruct = sctp_destruct_sock; 4373 4374 SCTP_DBG_OBJCNT_INC(sock); 4375 4376 local_bh_disable(); 4377 percpu_counter_inc(&sctp_sockets_allocated); 4378 sock_prot_inuse_add(net, sk->sk_prot, 1); 4379 4380 /* Nothing can fail after this block, otherwise 4381 * sctp_destroy_sock() will be called without addr_wq_lock held 4382 */ 4383 if (net->sctp.default_auto_asconf) { 4384 spin_lock(&sock_net(sk)->sctp.addr_wq_lock); 4385 list_add_tail(&sp->auto_asconf_list, 4386 &net->sctp.auto_asconf_splist); 4387 sp->do_auto_asconf = 1; 4388 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); 4389 } else { 4390 sp->do_auto_asconf = 0; 4391 } 4392 4393 local_bh_enable(); 4394 4395 return 0; 4396 } 4397 4398 /* Cleanup any SCTP per socket resources. Must be called with 4399 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true 4400 */ 4401 static void sctp_destroy_sock(struct sock *sk) 4402 { 4403 struct sctp_sock *sp; 4404 4405 pr_debug("%s: sk:%p\n", __func__, sk); 4406 4407 /* Release our hold on the endpoint. */ 4408 sp = sctp_sk(sk); 4409 /* This could happen during socket init, thus we bail out 4410 * early, since the rest of the below is not setup either. 4411 */ 4412 if (sp->ep == NULL) 4413 return; 4414 4415 if (sp->do_auto_asconf) { 4416 sp->do_auto_asconf = 0; 4417 list_del(&sp->auto_asconf_list); 4418 } 4419 sctp_endpoint_free(sp->ep); 4420 local_bh_disable(); 4421 percpu_counter_dec(&sctp_sockets_allocated); 4422 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4423 local_bh_enable(); 4424 } 4425 4426 /* Triggered when there are no references on the socket anymore */ 4427 static void sctp_destruct_sock(struct sock *sk) 4428 { 4429 struct sctp_sock *sp = sctp_sk(sk); 4430 4431 /* Free up the HMAC transform. 
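	 * It is allocated lazily when the socket starts listening, so it
	 * may still be NULL at this point.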
*/ 4432 crypto_free_shash(sp->hmac); 4433 4434 inet_sock_destruct(sk); 4435 } 4436 4437 /* API 4.1.7 shutdown() - TCP Style Syntax 4438 * int shutdown(int socket, int how); 4439 * 4440 * sd - the socket descriptor of the association to be closed. 4441 * how - Specifies the type of shutdown. The values are 4442 * as follows: 4443 * SHUT_RD 4444 * Disables further receive operations. No SCTP 4445 * protocol action is taken. 4446 * SHUT_WR 4447 * Disables further send operations, and initiates 4448 * the SCTP shutdown sequence. 4449 * SHUT_RDWR 4450 * Disables further send and receive operations 4451 * and initiates the SCTP shutdown sequence. 4452 */ 4453 static void sctp_shutdown(struct sock *sk, int how) 4454 { 4455 struct net *net = sock_net(sk); 4456 struct sctp_endpoint *ep; 4457 4458 if (!sctp_style(sk, TCP)) 4459 return; 4460 4461 ep = sctp_sk(sk)->ep; 4462 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { 4463 struct sctp_association *asoc; 4464 4465 sk->sk_state = SCTP_SS_CLOSING; 4466 asoc = list_entry(ep->asocs.next, 4467 struct sctp_association, asocs); 4468 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4469 } 4470 } 4471 4472 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, 4473 struct sctp_info *info) 4474 { 4475 struct sctp_transport *prim; 4476 struct list_head *pos; 4477 int mask; 4478 4479 memset(info, 0, sizeof(*info)); 4480 if (!asoc) { 4481 struct sctp_sock *sp = sctp_sk(sk); 4482 4483 info->sctpi_s_autoclose = sp->autoclose; 4484 info->sctpi_s_adaptation_ind = sp->adaptation_ind; 4485 info->sctpi_s_pd_point = sp->pd_point; 4486 info->sctpi_s_nodelay = sp->nodelay; 4487 info->sctpi_s_disable_fragments = sp->disable_fragments; 4488 info->sctpi_s_v4mapped = sp->v4mapped; 4489 info->sctpi_s_frag_interleave = sp->frag_interleave; 4490 info->sctpi_s_type = sp->type; 4491 4492 return 0; 4493 } 4494 4495 info->sctpi_tag = asoc->c.my_vtag; 4496 info->sctpi_state = asoc->state; 4497 info->sctpi_rwnd = asoc->a_rwnd; 4498 info->sctpi_unackdata = asoc->unack_data; 4499 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4500 info->sctpi_instrms = asoc->stream->incnt; 4501 info->sctpi_outstrms = asoc->stream->outcnt; 4502 list_for_each(pos, &asoc->base.inqueue.in_chunk_list) 4503 info->sctpi_inqueue++; 4504 list_for_each(pos, &asoc->outqueue.out_chunk_list) 4505 info->sctpi_outqueue++; 4506 info->sctpi_overall_error = asoc->overall_error_count; 4507 info->sctpi_max_burst = asoc->max_burst; 4508 info->sctpi_maxseg = asoc->frag_point; 4509 info->sctpi_peer_rwnd = asoc->peer.rwnd; 4510 info->sctpi_peer_tag = asoc->c.peer_vtag; 4511 4512 mask = asoc->peer.ecn_capable << 1; 4513 mask = (mask | asoc->peer.ipv4_address) << 1; 4514 mask = (mask | asoc->peer.ipv6_address) << 1; 4515 mask = (mask | asoc->peer.hostname_address) << 1; 4516 mask = (mask | asoc->peer.asconf_capable) << 1; 4517 mask = (mask | asoc->peer.prsctp_capable) << 1; 4518 mask = (mask | asoc->peer.auth_capable); 4519 info->sctpi_peer_capable = mask; 4520 mask = asoc->peer.sack_needed << 1; 4521 mask = (mask | asoc->peer.sack_generation) << 1; 4522 mask = (mask | asoc->peer.zero_window_announced); 4523 info->sctpi_peer_sack = mask; 4524 4525 info->sctpi_isacks = asoc->stats.isacks; 4526 info->sctpi_osacks = asoc->stats.osacks; 4527 info->sctpi_opackets = asoc->stats.opackets; 4528 info->sctpi_ipackets = asoc->stats.ipackets; 4529 info->sctpi_rtxchunks = asoc->stats.rtxchunks; 4530 info->sctpi_outofseqtsns = asoc->stats.outofseqtsns; 4531 info->sctpi_idupchunks = asoc->stats.idupchunks; 4532 
info->sctpi_gapcnt = asoc->stats.gapcnt; 4533 info->sctpi_ouodchunks = asoc->stats.ouodchunks; 4534 info->sctpi_iuodchunks = asoc->stats.iuodchunks; 4535 info->sctpi_oodchunks = asoc->stats.oodchunks; 4536 info->sctpi_iodchunks = asoc->stats.iodchunks; 4537 info->sctpi_octrlchunks = asoc->stats.octrlchunks; 4538 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; 4539 4540 prim = asoc->peer.primary_path; 4541 memcpy(&info->sctpi_p_address, &prim->ipaddr, 4542 sizeof(struct sockaddr_storage)); 4543 info->sctpi_p_state = prim->state; 4544 info->sctpi_p_cwnd = prim->cwnd; 4545 info->sctpi_p_srtt = prim->srtt; 4546 info->sctpi_p_rto = jiffies_to_msecs(prim->rto); 4547 info->sctpi_p_hbinterval = prim->hbinterval; 4548 info->sctpi_p_pathmaxrxt = prim->pathmaxrxt; 4549 info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay); 4550 info->sctpi_p_ssthresh = prim->ssthresh; 4551 info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked; 4552 info->sctpi_p_flight_size = prim->flight_size; 4553 info->sctpi_p_error = prim->error_count; 4554 4555 return 0; 4556 } 4557 EXPORT_SYMBOL_GPL(sctp_get_sctp_info); 4558 4559 /* use callback to avoid exporting the core structure */ 4560 int sctp_transport_walk_start(struct rhashtable_iter *iter) 4561 { 4562 int err; 4563 4564 rhltable_walk_enter(&sctp_transport_hashtable, iter); 4565 4566 err = rhashtable_walk_start(iter); 4567 if (err && err != -EAGAIN) { 4568 rhashtable_walk_stop(iter); 4569 rhashtable_walk_exit(iter); 4570 return err; 4571 } 4572 4573 return 0; 4574 } 4575 4576 void sctp_transport_walk_stop(struct rhashtable_iter *iter) 4577 { 4578 rhashtable_walk_stop(iter); 4579 rhashtable_walk_exit(iter); 4580 } 4581 4582 struct sctp_transport *sctp_transport_get_next(struct net *net, 4583 struct rhashtable_iter *iter) 4584 { 4585 struct sctp_transport *t; 4586 4587 t = rhashtable_walk_next(iter); 4588 for (; t; t = rhashtable_walk_next(iter)) { 4589 if (IS_ERR(t)) { 4590 if (PTR_ERR(t) == -EAGAIN) 4591 continue; 4592 break; 4593 } 4594 4595 if (net_eq(sock_net(t->asoc->base.sk), net) && 4596 t->asoc->peer.primary_path == t) 4597 break; 4598 } 4599 4600 return t; 4601 } 4602 4603 struct sctp_transport *sctp_transport_get_idx(struct net *net, 4604 struct rhashtable_iter *iter, 4605 int pos) 4606 { 4607 void *obj = SEQ_START_TOKEN; 4608 4609 while (pos && (obj = sctp_transport_get_next(net, iter)) && 4610 !IS_ERR(obj)) 4611 pos--; 4612 4613 return obj; 4614 } 4615 4616 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), 4617 void *p) { 4618 int err = 0; 4619 int hash = 0; 4620 struct sctp_ep_common *epb; 4621 struct sctp_hashbucket *head; 4622 4623 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; 4624 hash++, head++) { 4625 read_lock(&head->lock); 4626 sctp_for_each_hentry(epb, &head->chain) { 4627 err = cb(sctp_ep(epb), p); 4628 if (err) 4629 break; 4630 } 4631 read_unlock(&head->lock); 4632 } 4633 4634 return err; 4635 } 4636 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint); 4637 4638 int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), 4639 struct net *net, 4640 const union sctp_addr *laddr, 4641 const union sctp_addr *paddr, void *p) 4642 { 4643 struct sctp_transport *transport; 4644 int err; 4645 4646 rcu_read_lock(); 4647 transport = sctp_addrs_lookup_transport(net, laddr, paddr); 4648 rcu_read_unlock(); 4649 if (!transport) 4650 return -ENOENT; 4651 4652 err = cb(transport, p); 4653 sctp_transport_put(transport); 4654 4655 return err; 4656 } 4657 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); 4658 
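/* Illustrative sketch (not part of the build) of how a dump-style caller,
 * e.g. the sctp_diag module, can walk every association's primary transport
 * in a namespace with the helpers above/below; the counting callback here
 * is hypothetical:
 *
 *	static int sctp_count_one(struct sctp_transport *t, void *p)
 *	{
 *		(*(unsigned int *)p)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *	sctp_for_each_transport(sctp_count_one, net, 0, &count);
 */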
4659 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), 4660 struct net *net, int pos, void *p) { 4661 struct rhashtable_iter hti; 4662 void *obj; 4663 int err; 4664 4665 err = sctp_transport_walk_start(&hti); 4666 if (err) 4667 return err; 4668 4669 sctp_transport_get_idx(net, &hti, pos); 4670 obj = sctp_transport_get_next(net, &hti); 4671 for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { 4672 struct sctp_transport *transport = obj; 4673 4674 if (!sctp_transport_hold(transport)) 4675 continue; 4676 err = cb(transport, p); 4677 sctp_transport_put(transport); 4678 if (err) 4679 break; 4680 } 4681 sctp_transport_walk_stop(&hti); 4682 4683 return err; 4684 } 4685 EXPORT_SYMBOL_GPL(sctp_for_each_transport); 4686 4687 /* 7.2.1 Association Status (SCTP_STATUS) 4688 4689 * Applications can retrieve current status information about an 4690 * association, including association state, peer receiver window size, 4691 * number of unacked data chunks, and number of data chunks pending 4692 * receipt. This information is read-only. 4693 */ 4694 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4695 char __user *optval, 4696 int __user *optlen) 4697 { 4698 struct sctp_status status; 4699 struct sctp_association *asoc = NULL; 4700 struct sctp_transport *transport; 4701 sctp_assoc_t associd; 4702 int retval = 0; 4703 4704 if (len < sizeof(status)) { 4705 retval = -EINVAL; 4706 goto out; 4707 } 4708 4709 len = sizeof(status); 4710 if (copy_from_user(&status, optval, len)) { 4711 retval = -EFAULT; 4712 goto out; 4713 } 4714 4715 associd = status.sstat_assoc_id; 4716 asoc = sctp_id2assoc(sk, associd); 4717 if (!asoc) { 4718 retval = -EINVAL; 4719 goto out; 4720 } 4721 4722 transport = asoc->peer.primary_path; 4723 4724 status.sstat_assoc_id = sctp_assoc2id(asoc); 4725 status.sstat_state = sctp_assoc_to_state(asoc); 4726 status.sstat_rwnd = asoc->peer.rwnd; 4727 status.sstat_unackdata = asoc->unack_data; 4728 4729 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4730 status.sstat_instrms = asoc->stream->incnt; 4731 status.sstat_outstrms = asoc->stream->outcnt; 4732 status.sstat_fragmentation_point = asoc->frag_point; 4733 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4734 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4735 transport->af_specific->sockaddr_len); 4736 /* Map ipv4 address into v4-mapped-on-v6 address. 
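	 * when this is an AF_INET6 socket with v4-mapping enabled; otherwise
	 * addr_to_user() leaves the address untouched.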
*/ 4737 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 4738 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4739 status.sstat_primary.spinfo_state = transport->state; 4740 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4741 status.sstat_primary.spinfo_srtt = transport->srtt; 4742 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4743 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4744 4745 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4746 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4747 4748 if (put_user(len, optlen)) { 4749 retval = -EFAULT; 4750 goto out; 4751 } 4752 4753 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4754 __func__, len, status.sstat_state, status.sstat_rwnd, 4755 status.sstat_assoc_id); 4756 4757 if (copy_to_user(optval, &status, len)) { 4758 retval = -EFAULT; 4759 goto out; 4760 } 4761 4762 out: 4763 return retval; 4764 } 4765 4766 4767 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4768 * 4769 * Applications can retrieve information about a specific peer address 4770 * of an association, including its reachability state, congestion 4771 * window, and retransmission timer values. This information is 4772 * read-only. 4773 */ 4774 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4775 char __user *optval, 4776 int __user *optlen) 4777 { 4778 struct sctp_paddrinfo pinfo; 4779 struct sctp_transport *transport; 4780 int retval = 0; 4781 4782 if (len < sizeof(pinfo)) { 4783 retval = -EINVAL; 4784 goto out; 4785 } 4786 4787 len = sizeof(pinfo); 4788 if (copy_from_user(&pinfo, optval, len)) { 4789 retval = -EFAULT; 4790 goto out; 4791 } 4792 4793 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4794 pinfo.spinfo_assoc_id); 4795 if (!transport) 4796 return -EINVAL; 4797 4798 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4799 pinfo.spinfo_state = transport->state; 4800 pinfo.spinfo_cwnd = transport->cwnd; 4801 pinfo.spinfo_srtt = transport->srtt; 4802 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4803 pinfo.spinfo_mtu = transport->pathmtu; 4804 4805 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4806 pinfo.spinfo_state = SCTP_ACTIVE; 4807 4808 if (put_user(len, optlen)) { 4809 retval = -EFAULT; 4810 goto out; 4811 } 4812 4813 if (copy_to_user(optval, &pinfo, len)) { 4814 retval = -EFAULT; 4815 goto out; 4816 } 4817 4818 out: 4819 return retval; 4820 } 4821 4822 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4823 * 4824 * This option is a on/off flag. If enabled no SCTP message 4825 * fragmentation will be performed. Instead if a message being sent 4826 * exceeds the current PMTU size, the message will NOT be sent and 4827 * instead a error will be indicated to the user. 4828 */ 4829 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4830 char __user *optval, int __user *optlen) 4831 { 4832 int val; 4833 4834 if (len < sizeof(int)) 4835 return -EINVAL; 4836 4837 len = sizeof(int); 4838 val = (sctp_sk(sk)->disable_fragments == 1); 4839 if (put_user(len, optlen)) 4840 return -EFAULT; 4841 if (copy_to_user(optval, &val, len)) 4842 return -EFAULT; 4843 return 0; 4844 } 4845 4846 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4847 * 4848 * This socket option is used to specify various notifications and 4849 * ancillary data the user wishes to receive. 
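 *
 * A minimal user-space sketch (not part of the kernel build) that reads
 * the current subscriptions and then also enables association change
 * notifications; "sd" is an assumed SCTP socket descriptor and the
 * structure is struct sctp_event_subscribe from the uapi header:
 *
 *	struct sctp_event_subscribe ev;
 *	socklen_t len = sizeof(ev);
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, &len);
 *	ev.sctp_association_event = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));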
4850 */ 4851 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4852 int __user *optlen) 4853 { 4854 if (len == 0) 4855 return -EINVAL; 4856 if (len > sizeof(struct sctp_event_subscribe)) 4857 len = sizeof(struct sctp_event_subscribe); 4858 if (put_user(len, optlen)) 4859 return -EFAULT; 4860 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4861 return -EFAULT; 4862 return 0; 4863 } 4864 4865 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4866 * 4867 * This socket option is applicable to the UDP-style socket only. When 4868 * set it will cause associations that are idle for more than the 4869 * specified number of seconds to automatically close. An association 4870 * being idle is defined an association that has NOT sent or received 4871 * user data. The special value of '0' indicates that no automatic 4872 * close of any associations should be performed. The option expects an 4873 * integer defining the number of seconds of idle time before an 4874 * association is closed. 4875 */ 4876 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4877 { 4878 /* Applicable to UDP-style socket only */ 4879 if (sctp_style(sk, TCP)) 4880 return -EOPNOTSUPP; 4881 if (len < sizeof(int)) 4882 return -EINVAL; 4883 len = sizeof(int); 4884 if (put_user(len, optlen)) 4885 return -EFAULT; 4886 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4887 return -EFAULT; 4888 return 0; 4889 } 4890 4891 /* Helper routine to branch off an association to a new socket. */ 4892 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4893 { 4894 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4895 struct sctp_sock *sp = sctp_sk(sk); 4896 struct socket *sock; 4897 int err = 0; 4898 4899 if (!asoc) 4900 return -EINVAL; 4901 4902 /* If there is a thread waiting on more sndbuf space for 4903 * sending on this asoc, it cannot be peeled. 4904 */ 4905 if (waitqueue_active(&asoc->wait)) 4906 return -EBUSY; 4907 4908 /* An association cannot be branched off from an already peeled-off 4909 * socket, nor is this supported for tcp style sockets. 4910 */ 4911 if (!sctp_style(sk, UDP)) 4912 return -EINVAL; 4913 4914 /* Create a new socket. */ 4915 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4916 if (err < 0) 4917 return err; 4918 4919 sctp_copy_sock(sock->sk, sk, asoc); 4920 4921 /* Make peeled-off sockets more like 1-1 accepted sockets. 4922 * Set the daddr and initialize id to something more random 4923 */ 4924 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); 4925 4926 /* Populate the fields of the newsk from the oldsk and migrate the 4927 * asoc to the newsk. 4928 */ 4929 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4930 4931 *sockp = sock; 4932 4933 return err; 4934 } 4935 EXPORT_SYMBOL(sctp_do_peeloff); 4936 4937 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4938 { 4939 sctp_peeloff_arg_t peeloff; 4940 struct socket *newsock; 4941 struct file *newfile; 4942 int retval = 0; 4943 4944 if (len < sizeof(sctp_peeloff_arg_t)) 4945 return -EINVAL; 4946 len = sizeof(sctp_peeloff_arg_t); 4947 if (copy_from_user(&peeloff, optval, len)) 4948 return -EFAULT; 4949 4950 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4951 if (retval < 0) 4952 goto out; 4953 4954 /* Map the socket to an unused fd that can be returned to the user. 
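	 * This follows the usual get_unused_fd_flags() / sock_alloc_file() /
	 * fd_install() sequence, undoing each step on failure.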
*/ 4955 retval = get_unused_fd_flags(0); 4956 if (retval < 0) { 4957 sock_release(newsock); 4958 goto out; 4959 } 4960 4961 newfile = sock_alloc_file(newsock, 0, NULL); 4962 if (IS_ERR(newfile)) { 4963 put_unused_fd(retval); 4964 sock_release(newsock); 4965 return PTR_ERR(newfile); 4966 } 4967 4968 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4969 retval); 4970 4971 /* Return the fd mapped to the new socket. */ 4972 if (put_user(len, optlen)) { 4973 fput(newfile); 4974 put_unused_fd(retval); 4975 return -EFAULT; 4976 } 4977 peeloff.sd = retval; 4978 if (copy_to_user(optval, &peeloff, len)) { 4979 fput(newfile); 4980 put_unused_fd(retval); 4981 return -EFAULT; 4982 } 4983 fd_install(retval, newfile); 4984 out: 4985 return retval; 4986 } 4987 4988 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4989 * 4990 * Applications can enable or disable heartbeats for any peer address of 4991 * an association, modify an address's heartbeat interval, force a 4992 * heartbeat to be sent immediately, and adjust the address's maximum 4993 * number of retransmissions sent before an address is considered 4994 * unreachable. The following structure is used to access and modify an 4995 * address's parameters: 4996 * 4997 * struct sctp_paddrparams { 4998 * sctp_assoc_t spp_assoc_id; 4999 * struct sockaddr_storage spp_address; 5000 * uint32_t spp_hbinterval; 5001 * uint16_t spp_pathmaxrxt; 5002 * uint32_t spp_pathmtu; 5003 * uint32_t spp_sackdelay; 5004 * uint32_t spp_flags; 5005 * }; 5006 * 5007 * spp_assoc_id - (one-to-many style socket) This is filled in the 5008 * application, and identifies the association for 5009 * this query. 5010 * spp_address - This specifies which address is of interest. 5011 * spp_hbinterval - This contains the value of the heartbeat interval, 5012 * in milliseconds. If a value of zero 5013 * is present in this field then no changes are to 5014 * be made to this parameter. 5015 * spp_pathmaxrxt - This contains the maximum number of 5016 * retransmissions before this address shall be 5017 * considered unreachable. If a value of zero 5018 * is present in this field then no changes are to 5019 * be made to this parameter. 5020 * spp_pathmtu - When Path MTU discovery is disabled the value 5021 * specified here will be the "fixed" path mtu. 5022 * Note that if the spp_address field is empty 5023 * then all associations on this address will 5024 * have this fixed path mtu set upon them. 5025 * 5026 * spp_sackdelay - When delayed sack is enabled, this value specifies 5027 * the number of milliseconds that sacks will be delayed 5028 * for. This value will apply to all addresses of an 5029 * association if the spp_address field is empty. Note 5030 * also, that if delayed sack is enabled and this 5031 * value is set to 0, no change is made to the last 5032 * recorded delayed sack timer value. 5033 * 5034 * spp_flags - These flags are used to control various features 5035 * on an association. The flag field may contain 5036 * zero or more of the following options. 5037 * 5038 * SPP_HB_ENABLE - Enable heartbeats on the 5039 * specified address. Note that if the address 5040 * field is empty all addresses for the association 5041 * have heartbeats enabled upon them. 5042 * 5043 * SPP_HB_DISABLE - Disable heartbeats on the 5044 * speicifed address. Note that if the address 5045 * field is empty all addresses for the association 5046 * will have their heartbeats disabled. 
Note also 5047 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 5048 * mutually exclusive, only one of these two should 5049 * be specified. Enabling both fields will have 5050 * undetermined results. 5051 * 5052 * SPP_HB_DEMAND - Request a user initiated heartbeat 5053 * to be made immediately. 5054 * 5055 * SPP_PMTUD_ENABLE - This field will enable PMTU 5056 * discovery upon the specified address. Note that 5057 * if the address feild is empty then all addresses 5058 * on the association are effected. 5059 * 5060 * SPP_PMTUD_DISABLE - This field will disable PMTU 5061 * discovery upon the specified address. Note that 5062 * if the address feild is empty then all addresses 5063 * on the association are effected. Not also that 5064 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 5065 * exclusive. Enabling both will have undetermined 5066 * results. 5067 * 5068 * SPP_SACKDELAY_ENABLE - Setting this flag turns 5069 * on delayed sack. The time specified in spp_sackdelay 5070 * is used to specify the sack delay for this address. Note 5071 * that if spp_address is empty then all addresses will 5072 * enable delayed sack and take on the sack delay 5073 * value specified in spp_sackdelay. 5074 * SPP_SACKDELAY_DISABLE - Setting this flag turns 5075 * off delayed sack. If the spp_address field is blank then 5076 * delayed sack is disabled for the entire association. Note 5077 * also that this field is mutually exclusive to 5078 * SPP_SACKDELAY_ENABLE, setting both will have undefined 5079 * results. 5080 */ 5081 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 5082 char __user *optval, int __user *optlen) 5083 { 5084 struct sctp_paddrparams params; 5085 struct sctp_transport *trans = NULL; 5086 struct sctp_association *asoc = NULL; 5087 struct sctp_sock *sp = sctp_sk(sk); 5088 5089 if (len < sizeof(struct sctp_paddrparams)) 5090 return -EINVAL; 5091 len = sizeof(struct sctp_paddrparams); 5092 if (copy_from_user(¶ms, optval, len)) 5093 return -EFAULT; 5094 5095 /* If an address other than INADDR_ANY is specified, and 5096 * no transport is found, then the request is invalid. 5097 */ 5098 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { 5099 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 5100 params.spp_assoc_id); 5101 if (!trans) { 5102 pr_debug("%s: failed no transport\n", __func__); 5103 return -EINVAL; 5104 } 5105 } 5106 5107 /* Get association, if assoc_id != 0 and the socket is a one 5108 * to many style socket, and an association was not found, then 5109 * the id was invalid. 5110 */ 5111 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 5112 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 5113 pr_debug("%s: failed no association\n", __func__); 5114 return -EINVAL; 5115 } 5116 5117 if (trans) { 5118 /* Fetch transport values. */ 5119 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 5120 params.spp_pathmtu = trans->pathmtu; 5121 params.spp_pathmaxrxt = trans->pathmaxrxt; 5122 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 5123 5124 /*draft-11 doesn't say what to return in spp_flags*/ 5125 params.spp_flags = trans->param_flags; 5126 } else if (asoc) { 5127 /* Fetch association values. 
*/ 5128 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 5129 params.spp_pathmtu = asoc->pathmtu; 5130 params.spp_pathmaxrxt = asoc->pathmaxrxt; 5131 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 5132 5133 /*draft-11 doesn't say what to return in spp_flags*/ 5134 params.spp_flags = asoc->param_flags; 5135 } else { 5136 /* Fetch socket values. */ 5137 params.spp_hbinterval = sp->hbinterval; 5138 params.spp_pathmtu = sp->pathmtu; 5139 params.spp_sackdelay = sp->sackdelay; 5140 params.spp_pathmaxrxt = sp->pathmaxrxt; 5141 5142 /*draft-11 doesn't say what to return in spp_flags*/ 5143 params.spp_flags = sp->param_flags; 5144 } 5145 5146 if (copy_to_user(optval, ¶ms, len)) 5147 return -EFAULT; 5148 5149 if (put_user(len, optlen)) 5150 return -EFAULT; 5151 5152 return 0; 5153 } 5154 5155 /* 5156 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 5157 * 5158 * This option will effect the way delayed acks are performed. This 5159 * option allows you to get or set the delayed ack time, in 5160 * milliseconds. It also allows changing the delayed ack frequency. 5161 * Changing the frequency to 1 disables the delayed sack algorithm. If 5162 * the assoc_id is 0, then this sets or gets the endpoints default 5163 * values. If the assoc_id field is non-zero, then the set or get 5164 * effects the specified association for the one to many model (the 5165 * assoc_id field is ignored by the one to one model). Note that if 5166 * sack_delay or sack_freq are 0 when setting this option, then the 5167 * current values will remain unchanged. 5168 * 5169 * struct sctp_sack_info { 5170 * sctp_assoc_t sack_assoc_id; 5171 * uint32_t sack_delay; 5172 * uint32_t sack_freq; 5173 * }; 5174 * 5175 * sack_assoc_id - This parameter, indicates which association the user 5176 * is performing an action upon. Note that if this field's value is 5177 * zero then the endpoints default value is changed (effecting future 5178 * associations only). 5179 * 5180 * sack_delay - This parameter contains the number of milliseconds that 5181 * the user is requesting the delayed ACK timer be set to. Note that 5182 * this value is defined in the standard to be between 200 and 500 5183 * milliseconds. 5184 * 5185 * sack_freq - This parameter contains the number of packets that must 5186 * be received before a sack is sent without waiting for the delay 5187 * timer to expire. The default value for this is 2, setting this 5188 * value to 1 will disable the delayed sack algorithm. 5189 */ 5190 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 5191 char __user *optval, 5192 int __user *optlen) 5193 { 5194 struct sctp_sack_info params; 5195 struct sctp_association *asoc = NULL; 5196 struct sctp_sock *sp = sctp_sk(sk); 5197 5198 if (len >= sizeof(struct sctp_sack_info)) { 5199 len = sizeof(struct sctp_sack_info); 5200 5201 if (copy_from_user(¶ms, optval, len)) 5202 return -EFAULT; 5203 } else if (len == sizeof(struct sctp_assoc_value)) { 5204 pr_warn_ratelimited(DEPRECATED 5205 "%s (pid %d) " 5206 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 5207 "Use struct sctp_sack_info instead\n", 5208 current->comm, task_pid_nr(current)); 5209 if (copy_from_user(¶ms, optval, len)) 5210 return -EFAULT; 5211 } else 5212 return -EINVAL; 5213 5214 /* Get association, if sack_assoc_id != 0 and the socket is a one 5215 * to many style socket, and an association was not found, then 5216 * the id was invalid. 
5217 */ 5218 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 5219 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 5220 return -EINVAL; 5221 5222 if (asoc) { 5223 /* Fetch association values. */ 5224 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 5225 params.sack_delay = jiffies_to_msecs( 5226 asoc->sackdelay); 5227 params.sack_freq = asoc->sackfreq; 5228 5229 } else { 5230 params.sack_delay = 0; 5231 params.sack_freq = 1; 5232 } 5233 } else { 5234 /* Fetch socket values. */ 5235 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 5236 params.sack_delay = sp->sackdelay; 5237 params.sack_freq = sp->sackfreq; 5238 } else { 5239 params.sack_delay = 0; 5240 params.sack_freq = 1; 5241 } 5242 } 5243 5244 if (copy_to_user(optval, ¶ms, len)) 5245 return -EFAULT; 5246 5247 if (put_user(len, optlen)) 5248 return -EFAULT; 5249 5250 return 0; 5251 } 5252 5253 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 5254 * 5255 * Applications can specify protocol parameters for the default association 5256 * initialization. The option name argument to setsockopt() and getsockopt() 5257 * is SCTP_INITMSG. 5258 * 5259 * Setting initialization parameters is effective only on an unconnected 5260 * socket (for UDP-style sockets only future associations are effected 5261 * by the change). With TCP-style sockets, this option is inherited by 5262 * sockets derived from a listener socket. 5263 */ 5264 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 5265 { 5266 if (len < sizeof(struct sctp_initmsg)) 5267 return -EINVAL; 5268 len = sizeof(struct sctp_initmsg); 5269 if (put_user(len, optlen)) 5270 return -EFAULT; 5271 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 5272 return -EFAULT; 5273 return 0; 5274 } 5275 5276 5277 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 5278 char __user *optval, int __user *optlen) 5279 { 5280 struct sctp_association *asoc; 5281 int cnt = 0; 5282 struct sctp_getaddrs getaddrs; 5283 struct sctp_transport *from; 5284 void __user *to; 5285 union sctp_addr temp; 5286 struct sctp_sock *sp = sctp_sk(sk); 5287 int addrlen; 5288 size_t space_left; 5289 int bytes_copied; 5290 5291 if (len < sizeof(struct sctp_getaddrs)) 5292 return -EINVAL; 5293 5294 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 5295 return -EFAULT; 5296 5297 /* For UDP-style sockets, id specifies the association to query. 
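	 * For a TCP-style socket the id is ignored and the lone established
	 * association, if any, is used.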
*/ 5298 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 5299 if (!asoc) 5300 return -EINVAL; 5301 5302 to = optval + offsetof(struct sctp_getaddrs, addrs); 5303 space_left = len - offsetof(struct sctp_getaddrs, addrs); 5304 5305 list_for_each_entry(from, &asoc->peer.transport_addr_list, 5306 transports) { 5307 memcpy(&temp, &from->ipaddr, sizeof(temp)); 5308 addrlen = sctp_get_pf_specific(sk->sk_family) 5309 ->addr_to_user(sp, &temp); 5310 if (space_left < addrlen) 5311 return -ENOMEM; 5312 if (copy_to_user(to, &temp, addrlen)) 5313 return -EFAULT; 5314 to += addrlen; 5315 cnt++; 5316 space_left -= addrlen; 5317 } 5318 5319 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 5320 return -EFAULT; 5321 bytes_copied = ((char __user *)to) - optval; 5322 if (put_user(bytes_copied, optlen)) 5323 return -EFAULT; 5324 5325 return 0; 5326 } 5327 5328 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 5329 size_t space_left, int *bytes_copied) 5330 { 5331 struct sctp_sockaddr_entry *addr; 5332 union sctp_addr temp; 5333 int cnt = 0; 5334 int addrlen; 5335 struct net *net = sock_net(sk); 5336 5337 rcu_read_lock(); 5338 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 5339 if (!addr->valid) 5340 continue; 5341 5342 if ((PF_INET == sk->sk_family) && 5343 (AF_INET6 == addr->a.sa.sa_family)) 5344 continue; 5345 if ((PF_INET6 == sk->sk_family) && 5346 inet_v6_ipv6only(sk) && 5347 (AF_INET == addr->a.sa.sa_family)) 5348 continue; 5349 memcpy(&temp, &addr->a, sizeof(temp)); 5350 if (!temp.v4.sin_port) 5351 temp.v4.sin_port = htons(port); 5352 5353 addrlen = sctp_get_pf_specific(sk->sk_family) 5354 ->addr_to_user(sctp_sk(sk), &temp); 5355 5356 if (space_left < addrlen) { 5357 cnt = -ENOMEM; 5358 break; 5359 } 5360 memcpy(to, &temp, addrlen); 5361 5362 to += addrlen; 5363 cnt++; 5364 space_left -= addrlen; 5365 *bytes_copied += addrlen; 5366 } 5367 rcu_read_unlock(); 5368 5369 return cnt; 5370 } 5371 5372 5373 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 5374 char __user *optval, int __user *optlen) 5375 { 5376 struct sctp_bind_addr *bp; 5377 struct sctp_association *asoc; 5378 int cnt = 0; 5379 struct sctp_getaddrs getaddrs; 5380 struct sctp_sockaddr_entry *addr; 5381 void __user *to; 5382 union sctp_addr temp; 5383 struct sctp_sock *sp = sctp_sk(sk); 5384 int addrlen; 5385 int err = 0; 5386 size_t space_left; 5387 int bytes_copied = 0; 5388 void *addrs; 5389 void *buf; 5390 5391 if (len < sizeof(struct sctp_getaddrs)) 5392 return -EINVAL; 5393 5394 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 5395 return -EFAULT; 5396 5397 /* 5398 * For UDP-style sockets, id specifies the association to query. 5399 * If the id field is set to the value '0' then the locally bound 5400 * addresses are returned without regard to any particular 5401 * association. 5402 */ 5403 if (0 == getaddrs.assoc_id) { 5404 bp = &sctp_sk(sk)->ep->base.bind_addr; 5405 } else { 5406 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 5407 if (!asoc) 5408 return -EINVAL; 5409 bp = &asoc->base.bind_addr; 5410 } 5411 5412 to = optval + offsetof(struct sctp_getaddrs, addrs); 5413 space_left = len - offsetof(struct sctp_getaddrs, addrs); 5414 5415 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); 5416 if (!addrs) 5417 return -ENOMEM; 5418 5419 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 5420 * addresses from the global local address list. 
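	 * (filtered by this socket's address family in sctp_copy_laddrs()
	 * above).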
5421 */ 5422 if (sctp_list_single_entry(&bp->address_list)) { 5423 addr = list_entry(bp->address_list.next, 5424 struct sctp_sockaddr_entry, list); 5425 if (sctp_is_any(sk, &addr->a)) { 5426 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 5427 space_left, &bytes_copied); 5428 if (cnt < 0) { 5429 err = cnt; 5430 goto out; 5431 } 5432 goto copy_getaddrs; 5433 } 5434 } 5435 5436 buf = addrs; 5437 /* Protection on the bound address list is not needed since 5438 * in the socket option context we hold a socket lock and 5439 * thus the bound address list can't change. 5440 */ 5441 list_for_each_entry(addr, &bp->address_list, list) { 5442 memcpy(&temp, &addr->a, sizeof(temp)); 5443 addrlen = sctp_get_pf_specific(sk->sk_family) 5444 ->addr_to_user(sp, &temp); 5445 if (space_left < addrlen) { 5446 err = -ENOMEM; /*fixme: right error?*/ 5447 goto out; 5448 } 5449 memcpy(buf, &temp, addrlen); 5450 buf += addrlen; 5451 bytes_copied += addrlen; 5452 cnt++; 5453 space_left -= addrlen; 5454 } 5455 5456 copy_getaddrs: 5457 if (copy_to_user(to, addrs, bytes_copied)) { 5458 err = -EFAULT; 5459 goto out; 5460 } 5461 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 5462 err = -EFAULT; 5463 goto out; 5464 } 5465 if (put_user(bytes_copied, optlen)) 5466 err = -EFAULT; 5467 out: 5468 kfree(addrs); 5469 return err; 5470 } 5471 5472 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 5473 * 5474 * Requests that the local SCTP stack use the enclosed peer address as 5475 * the association primary. The enclosed address must be one of the 5476 * association peer's addresses. 5477 */ 5478 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 5479 char __user *optval, int __user *optlen) 5480 { 5481 struct sctp_prim prim; 5482 struct sctp_association *asoc; 5483 struct sctp_sock *sp = sctp_sk(sk); 5484 5485 if (len < sizeof(struct sctp_prim)) 5486 return -EINVAL; 5487 5488 len = sizeof(struct sctp_prim); 5489 5490 if (copy_from_user(&prim, optval, len)) 5491 return -EFAULT; 5492 5493 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 5494 if (!asoc) 5495 return -EINVAL; 5496 5497 if (!asoc->peer.primary_path) 5498 return -ENOTCONN; 5499 5500 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 5501 asoc->peer.primary_path->af_specific->sockaddr_len); 5502 5503 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, 5504 (union sctp_addr *)&prim.ssp_addr); 5505 5506 if (put_user(len, optlen)) 5507 return -EFAULT; 5508 if (copy_to_user(optval, &prim, len)) 5509 return -EFAULT; 5510 5511 return 0; 5512 } 5513 5514 /* 5515 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 5516 * 5517 * Requests that the local endpoint set the specified Adaptation Layer 5518 * Indication parameter for all future INIT and INIT-ACK exchanges. 
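 *
 * A minimal user-space sketch (not part of the kernel build) that reads
 * the indication currently configured on the socket; "sd" is an assumed
 * SCTP socket descriptor and the field name matches struct
 * sctp_setadaptation as used below:
 *
 *	struct sctp_setadaptation a;
 *	socklen_t len = sizeof(a);
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_ADAPTATION_LAYER, &a, &len);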
5519 */ 5520 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 5521 char __user *optval, int __user *optlen) 5522 { 5523 struct sctp_setadaptation adaptation; 5524 5525 if (len < sizeof(struct sctp_setadaptation)) 5526 return -EINVAL; 5527 5528 len = sizeof(struct sctp_setadaptation); 5529 5530 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 5531 5532 if (put_user(len, optlen)) 5533 return -EFAULT; 5534 if (copy_to_user(optval, &adaptation, len)) 5535 return -EFAULT; 5536 5537 return 0; 5538 } 5539 5540 /* 5541 * 5542 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 5543 * 5544 * Applications that wish to use the sendto() system call may wish to 5545 * specify a default set of parameters that would normally be supplied 5546 * through the inclusion of ancillary data. This socket option allows 5547 * such an application to set the default sctp_sndrcvinfo structure. 5548 5549 5550 * The application that wishes to use this socket option simply passes 5551 * in to this call the sctp_sndrcvinfo structure defined in Section 5552 * 5.2.2) The input parameters accepted by this call include 5553 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 5554 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 5555 * to this call if the caller is using the UDP model. 5556 * 5557 * For getsockopt, it get the default sctp_sndrcvinfo structure. 5558 */ 5559 static int sctp_getsockopt_default_send_param(struct sock *sk, 5560 int len, char __user *optval, 5561 int __user *optlen) 5562 { 5563 struct sctp_sock *sp = sctp_sk(sk); 5564 struct sctp_association *asoc; 5565 struct sctp_sndrcvinfo info; 5566 5567 if (len < sizeof(info)) 5568 return -EINVAL; 5569 5570 len = sizeof(info); 5571 5572 if (copy_from_user(&info, optval, len)) 5573 return -EFAULT; 5574 5575 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 5576 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 5577 return -EINVAL; 5578 if (asoc) { 5579 info.sinfo_stream = asoc->default_stream; 5580 info.sinfo_flags = asoc->default_flags; 5581 info.sinfo_ppid = asoc->default_ppid; 5582 info.sinfo_context = asoc->default_context; 5583 info.sinfo_timetolive = asoc->default_timetolive; 5584 } else { 5585 info.sinfo_stream = sp->default_stream; 5586 info.sinfo_flags = sp->default_flags; 5587 info.sinfo_ppid = sp->default_ppid; 5588 info.sinfo_context = sp->default_context; 5589 info.sinfo_timetolive = sp->default_timetolive; 5590 } 5591 5592 if (put_user(len, optlen)) 5593 return -EFAULT; 5594 if (copy_to_user(optval, &info, len)) 5595 return -EFAULT; 5596 5597 return 0; 5598 } 5599 5600 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 5601 * (SCTP_DEFAULT_SNDINFO) 5602 */ 5603 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, 5604 char __user *optval, 5605 int __user *optlen) 5606 { 5607 struct sctp_sock *sp = sctp_sk(sk); 5608 struct sctp_association *asoc; 5609 struct sctp_sndinfo info; 5610 5611 if (len < sizeof(info)) 5612 return -EINVAL; 5613 5614 len = sizeof(info); 5615 5616 if (copy_from_user(&info, optval, len)) 5617 return -EFAULT; 5618 5619 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 5620 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 5621 return -EINVAL; 5622 if (asoc) { 5623 info.snd_sid = asoc->default_stream; 5624 info.snd_flags = asoc->default_flags; 5625 info.snd_ppid = asoc->default_ppid; 5626 info.snd_context = asoc->default_context; 5627 } else { 5628 info.snd_sid = sp->default_stream; 5629 info.snd_flags = sp->default_flags; 5630 info.snd_ppid = sp->default_ppid; 5631 info.snd_context = sp->default_context; 5632 } 5633 5634 if (put_user(len, optlen)) 5635 return -EFAULT; 5636 if (copy_to_user(optval, &info, len)) 5637 return -EFAULT; 5638 5639 return 0; 5640 } 5641 5642 /* 5643 * 5644 * 7.1.5 SCTP_NODELAY 5645 * 5646 * Turn on/off any Nagle-like algorithm. This means that packets are 5647 * generally sent as soon as possible and no unnecessary delays are 5648 * introduced, at the cost of more packets in the network. Expects an 5649 * integer boolean flag. 5650 */ 5651 5652 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5653 char __user *optval, int __user *optlen) 5654 { 5655 int val; 5656 5657 if (len < sizeof(int)) 5658 return -EINVAL; 5659 5660 len = sizeof(int); 5661 val = (sctp_sk(sk)->nodelay == 1); 5662 if (put_user(len, optlen)) 5663 return -EFAULT; 5664 if (copy_to_user(optval, &val, len)) 5665 return -EFAULT; 5666 return 0; 5667 } 5668 5669 /* 5670 * 5671 * 7.1.1 SCTP_RTOINFO 5672 * 5673 * The protocol parameters used to initialize and bound retransmission 5674 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5675 * and modify these parameters. 5676 * All parameters are time values, in milliseconds. A value of 0, when 5677 * modifying the parameters, indicates that the current value should not 5678 * be changed. 5679 * 5680 */ 5681 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5682 char __user *optval, 5683 int __user *optlen) { 5684 struct sctp_rtoinfo rtoinfo; 5685 struct sctp_association *asoc; 5686 5687 if (len < sizeof (struct sctp_rtoinfo)) 5688 return -EINVAL; 5689 5690 len = sizeof(struct sctp_rtoinfo); 5691 5692 if (copy_from_user(&rtoinfo, optval, len)) 5693 return -EFAULT; 5694 5695 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5696 5697 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5698 return -EINVAL; 5699 5700 /* Values corresponding to the specific association. */ 5701 if (asoc) { 5702 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5703 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5704 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5705 } else { 5706 /* Values corresponding to the endpoint. 
*/ 5707 struct sctp_sock *sp = sctp_sk(sk); 5708 5709 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5710 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5711 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5712 } 5713 5714 if (put_user(len, optlen)) 5715 return -EFAULT; 5716 5717 if (copy_to_user(optval, &rtoinfo, len)) 5718 return -EFAULT; 5719 5720 return 0; 5721 } 5722 5723 /* 5724 * 5725 * 7.1.2 SCTP_ASSOCINFO 5726 * 5727 * This option is used to tune the maximum retransmission attempts 5728 * of the association. 5729 * Returns an error if the new association retransmission value is 5730 * greater than the sum of the retransmission value of the peer. 5731 * See [SCTP] for more information. 5732 * 5733 */ 5734 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5735 char __user *optval, 5736 int __user *optlen) 5737 { 5738 5739 struct sctp_assocparams assocparams; 5740 struct sctp_association *asoc; 5741 struct list_head *pos; 5742 int cnt = 0; 5743 5744 if (len < sizeof (struct sctp_assocparams)) 5745 return -EINVAL; 5746 5747 len = sizeof(struct sctp_assocparams); 5748 5749 if (copy_from_user(&assocparams, optval, len)) 5750 return -EFAULT; 5751 5752 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5753 5754 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5755 return -EINVAL; 5756 5757 /* Values correspoinding to the specific association */ 5758 if (asoc) { 5759 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5760 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5761 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5762 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5763 5764 list_for_each(pos, &asoc->peer.transport_addr_list) { 5765 cnt++; 5766 } 5767 5768 assocparams.sasoc_number_peer_destinations = cnt; 5769 } else { 5770 /* Values corresponding to the endpoint */ 5771 struct sctp_sock *sp = sctp_sk(sk); 5772 5773 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5774 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5775 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5776 assocparams.sasoc_cookie_life = 5777 sp->assocparams.sasoc_cookie_life; 5778 assocparams.sasoc_number_peer_destinations = 5779 sp->assocparams. 5780 sasoc_number_peer_destinations; 5781 } 5782 5783 if (put_user(len, optlen)) 5784 return -EFAULT; 5785 5786 if (copy_to_user(optval, &assocparams, len)) 5787 return -EFAULT; 5788 5789 return 0; 5790 } 5791 5792 /* 5793 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5794 * 5795 * This socket option is a boolean flag which turns on or off mapped V4 5796 * addresses. If this option is turned on and the socket is type 5797 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5798 * If this option is turned off, then no mapping will be done of V4 5799 * addresses and a user will receive both PF_INET6 and PF_INET type 5800 * addresses on the socket. 5801 */ 5802 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5803 char __user *optval, int __user *optlen) 5804 { 5805 int val; 5806 struct sctp_sock *sp = sctp_sk(sk); 5807 5808 if (len < sizeof(int)) 5809 return -EINVAL; 5810 5811 len = sizeof(int); 5812 val = sp->v4mapped; 5813 if (put_user(len, optlen)) 5814 return -EFAULT; 5815 if (copy_to_user(optval, &val, len)) 5816 return -EFAULT; 5817 5818 return 0; 5819 } 5820 5821 /* 5822 * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) 5823 * (chapter and verse is quoted at sctp_setsockopt_context()) 5824 */ 5825 static int sctp_getsockopt_context(struct sock *sk, int len, 5826 char __user *optval, int __user *optlen) 5827 { 5828 struct sctp_assoc_value params; 5829 struct sctp_sock *sp; 5830 struct sctp_association *asoc; 5831 5832 if (len < sizeof(struct sctp_assoc_value)) 5833 return -EINVAL; 5834 5835 len = sizeof(struct sctp_assoc_value); 5836 5837 if (copy_from_user(¶ms, optval, len)) 5838 return -EFAULT; 5839 5840 sp = sctp_sk(sk); 5841 5842 if (params.assoc_id != 0) { 5843 asoc = sctp_id2assoc(sk, params.assoc_id); 5844 if (!asoc) 5845 return -EINVAL; 5846 params.assoc_value = asoc->default_rcv_context; 5847 } else { 5848 params.assoc_value = sp->default_rcv_context; 5849 } 5850 5851 if (put_user(len, optlen)) 5852 return -EFAULT; 5853 if (copy_to_user(optval, ¶ms, len)) 5854 return -EFAULT; 5855 5856 return 0; 5857 } 5858 5859 /* 5860 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5861 * This option will get or set the maximum size to put in any outgoing 5862 * SCTP DATA chunk. If a message is larger than this size it will be 5863 * fragmented by SCTP into the specified size. Note that the underlying 5864 * SCTP implementation may fragment into smaller sized chunks when the 5865 * PMTU of the underlying association is smaller than the value set by 5866 * the user. The default value for this option is '0' which indicates 5867 * the user is NOT limiting fragmentation and only the PMTU will effect 5868 * SCTP's choice of DATA chunk size. Note also that values set larger 5869 * than the maximum size of an IP datagram will effectively let SCTP 5870 * control fragmentation (i.e. the same as setting this option to 0). 5871 * 5872 * The following structure is used to access and modify this parameter: 5873 * 5874 * struct sctp_assoc_value { 5875 * sctp_assoc_t assoc_id; 5876 * uint32_t assoc_value; 5877 * }; 5878 * 5879 * assoc_id: This parameter is ignored for one-to-one style sockets. 5880 * For one-to-many style sockets this parameter indicates which 5881 * association the user is performing an action upon. Note that if 5882 * this field's value is zero then the endpoints default value is 5883 * changed (effecting future associations only). 5884 * assoc_value: This parameter specifies the maximum size in bytes. 
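 *
 * Illustrative user-space sketch (not part of the kernel build); the
 * deprecated plain-int form is still accepted below, but new code
 * should pass struct sctp_assoc_value:
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1200 };
 *	socklen_t len = sizeof(av);
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &len);
 *
 * On return av.assoc_value holds the effective fragmentation point;
 * 1200 above is just an example value.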
5885 */ 5886 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5887 char __user *optval, int __user *optlen) 5888 { 5889 struct sctp_assoc_value params; 5890 struct sctp_association *asoc; 5891 5892 if (len == sizeof(int)) { 5893 pr_warn_ratelimited(DEPRECATED 5894 "%s (pid %d) " 5895 "Use of int in maxseg socket option.\n" 5896 "Use struct sctp_assoc_value instead\n", 5897 current->comm, task_pid_nr(current)); 5898 params.assoc_id = 0; 5899 } else if (len >= sizeof(struct sctp_assoc_value)) { 5900 len = sizeof(struct sctp_assoc_value); 5901 if (copy_from_user(¶ms, optval, sizeof(params))) 5902 return -EFAULT; 5903 } else 5904 return -EINVAL; 5905 5906 asoc = sctp_id2assoc(sk, params.assoc_id); 5907 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5908 return -EINVAL; 5909 5910 if (asoc) 5911 params.assoc_value = asoc->frag_point; 5912 else 5913 params.assoc_value = sctp_sk(sk)->user_frag; 5914 5915 if (put_user(len, optlen)) 5916 return -EFAULT; 5917 if (len == sizeof(int)) { 5918 if (copy_to_user(optval, ¶ms.assoc_value, len)) 5919 return -EFAULT; 5920 } else { 5921 if (copy_to_user(optval, ¶ms, len)) 5922 return -EFAULT; 5923 } 5924 5925 return 0; 5926 } 5927 5928 /* 5929 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5930 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5931 */ 5932 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5933 char __user *optval, int __user *optlen) 5934 { 5935 int val; 5936 5937 if (len < sizeof(int)) 5938 return -EINVAL; 5939 5940 len = sizeof(int); 5941 5942 val = sctp_sk(sk)->frag_interleave; 5943 if (put_user(len, optlen)) 5944 return -EFAULT; 5945 if (copy_to_user(optval, &val, len)) 5946 return -EFAULT; 5947 5948 return 0; 5949 } 5950 5951 /* 5952 * 7.1.25. Set or Get the sctp partial delivery point 5953 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5954 */ 5955 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5956 char __user *optval, 5957 int __user *optlen) 5958 { 5959 u32 val; 5960 5961 if (len < sizeof(u32)) 5962 return -EINVAL; 5963 5964 len = sizeof(u32); 5965 5966 val = sctp_sk(sk)->pd_point; 5967 if (put_user(len, optlen)) 5968 return -EFAULT; 5969 if (copy_to_user(optval, &val, len)) 5970 return -EFAULT; 5971 5972 return 0; 5973 } 5974 5975 /* 5976 * 7.1.28. 
Set or Get the maximum burst (SCTP_MAX_BURST) 5977 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5978 */ 5979 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5980 char __user *optval, 5981 int __user *optlen) 5982 { 5983 struct sctp_assoc_value params; 5984 struct sctp_sock *sp; 5985 struct sctp_association *asoc; 5986 5987 if (len == sizeof(int)) { 5988 pr_warn_ratelimited(DEPRECATED 5989 "%s (pid %d) " 5990 "Use of int in max_burst socket option.\n" 5991 "Use struct sctp_assoc_value instead\n", 5992 current->comm, task_pid_nr(current)); 5993 params.assoc_id = 0; 5994 } else if (len >= sizeof(struct sctp_assoc_value)) { 5995 len = sizeof(struct sctp_assoc_value); 5996 if (copy_from_user(¶ms, optval, len)) 5997 return -EFAULT; 5998 } else 5999 return -EINVAL; 6000 6001 sp = sctp_sk(sk); 6002 6003 if (params.assoc_id != 0) { 6004 asoc = sctp_id2assoc(sk, params.assoc_id); 6005 if (!asoc) 6006 return -EINVAL; 6007 params.assoc_value = asoc->max_burst; 6008 } else 6009 params.assoc_value = sp->max_burst; 6010 6011 if (len == sizeof(int)) { 6012 if (copy_to_user(optval, ¶ms.assoc_value, len)) 6013 return -EFAULT; 6014 } else { 6015 if (copy_to_user(optval, ¶ms, len)) 6016 return -EFAULT; 6017 } 6018 6019 return 0; 6020 6021 } 6022 6023 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 6024 char __user *optval, int __user *optlen) 6025 { 6026 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6027 struct sctp_hmacalgo __user *p = (void __user *)optval; 6028 struct sctp_hmac_algo_param *hmacs; 6029 __u16 data_len = 0; 6030 u32 num_idents; 6031 int i; 6032 6033 if (!ep->auth_enable) 6034 return -EACCES; 6035 6036 hmacs = ep->auth_hmacs_list; 6037 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 6038 6039 if (len < sizeof(struct sctp_hmacalgo) + data_len) 6040 return -EINVAL; 6041 6042 len = sizeof(struct sctp_hmacalgo) + data_len; 6043 num_idents = data_len / sizeof(u16); 6044 6045 if (put_user(len, optlen)) 6046 return -EFAULT; 6047 if (put_user(num_idents, &p->shmac_num_idents)) 6048 return -EFAULT; 6049 for (i = 0; i < num_idents; i++) { 6050 __u16 hmacid = ntohs(hmacs->hmac_ids[i]); 6051 6052 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16))) 6053 return -EFAULT; 6054 } 6055 return 0; 6056 } 6057 6058 static int sctp_getsockopt_active_key(struct sock *sk, int len, 6059 char __user *optval, int __user *optlen) 6060 { 6061 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6062 struct sctp_authkeyid val; 6063 struct sctp_association *asoc; 6064 6065 if (!ep->auth_enable) 6066 return -EACCES; 6067 6068 if (len < sizeof(struct sctp_authkeyid)) 6069 return -EINVAL; 6070 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 6071 return -EFAULT; 6072 6073 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 6074 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 6075 return -EINVAL; 6076 6077 if (asoc) 6078 val.scact_keynumber = asoc->active_key_id; 6079 else 6080 val.scact_keynumber = ep->active_key_id; 6081 6082 len = sizeof(struct sctp_authkeyid); 6083 if (put_user(len, optlen)) 6084 return -EFAULT; 6085 if (copy_to_user(optval, &val, len)) 6086 return -EFAULT; 6087 6088 return 0; 6089 } 6090 6091 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 6092 char __user *optval, int __user *optlen) 6093 { 6094 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6095 struct sctp_authchunks __user *p = (void __user *)optval; 6096 struct sctp_authchunks val; 6097 struct sctp_association *asoc; 6098 struct sctp_chunks_param 
*ch; 6099 u32 num_chunks = 0; 6100 char __user *to; 6101 6102 if (!ep->auth_enable) 6103 return -EACCES; 6104 6105 if (len < sizeof(struct sctp_authchunks)) 6106 return -EINVAL; 6107 6108 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6109 return -EFAULT; 6110 6111 to = p->gauth_chunks; 6112 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 6113 if (!asoc) 6114 return -EINVAL; 6115 6116 ch = asoc->peer.peer_chunks; 6117 if (!ch) 6118 goto num; 6119 6120 /* See if the user provided enough room for all the data */ 6121 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 6122 if (len < num_chunks) 6123 return -EINVAL; 6124 6125 if (copy_to_user(to, ch->chunks, num_chunks)) 6126 return -EFAULT; 6127 num: 6128 len = sizeof(struct sctp_authchunks) + num_chunks; 6129 if (put_user(len, optlen)) 6130 return -EFAULT; 6131 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 6132 return -EFAULT; 6133 return 0; 6134 } 6135 6136 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 6137 char __user *optval, int __user *optlen) 6138 { 6139 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6140 struct sctp_authchunks __user *p = (void __user *)optval; 6141 struct sctp_authchunks val; 6142 struct sctp_association *asoc; 6143 struct sctp_chunks_param *ch; 6144 u32 num_chunks = 0; 6145 char __user *to; 6146 6147 if (!ep->auth_enable) 6148 return -EACCES; 6149 6150 if (len < sizeof(struct sctp_authchunks)) 6151 return -EINVAL; 6152 6153 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6154 return -EFAULT; 6155 6156 to = p->gauth_chunks; 6157 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 6158 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 6159 return -EINVAL; 6160 6161 if (asoc) 6162 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 6163 else 6164 ch = ep->auth_chunk_list; 6165 6166 if (!ch) 6167 goto num; 6168 6169 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 6170 if (len < sizeof(struct sctp_authchunks) + num_chunks) 6171 return -EINVAL; 6172 6173 if (copy_to_user(to, ch->chunks, num_chunks)) 6174 return -EFAULT; 6175 num: 6176 len = sizeof(struct sctp_authchunks) + num_chunks; 6177 if (put_user(len, optlen)) 6178 return -EFAULT; 6179 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 6180 return -EFAULT; 6181 6182 return 0; 6183 } 6184 6185 /* 6186 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 6187 * This option gets the current number of associations that are attached 6188 * to a one-to-many style socket. The option value is an uint32_t. 
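 *
 * Illustrative user-space sketch (not part of the kernel build), only
 * meaningful on a one-to-many (SOCK_SEQPACKET) socket:
 *
 *	uint32_t assocs;
 *	socklen_t len = sizeof(assocs);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &assocs, &len) == 0)
 *		printf("%u active associations\n", assocs);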
6189 */ 6190 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 6191 char __user *optval, int __user *optlen) 6192 { 6193 struct sctp_sock *sp = sctp_sk(sk); 6194 struct sctp_association *asoc; 6195 u32 val = 0; 6196 6197 if (sctp_style(sk, TCP)) 6198 return -EOPNOTSUPP; 6199 6200 if (len < sizeof(u32)) 6201 return -EINVAL; 6202 6203 len = sizeof(u32); 6204 6205 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6206 val++; 6207 } 6208 6209 if (put_user(len, optlen)) 6210 return -EFAULT; 6211 if (copy_to_user(optval, &val, len)) 6212 return -EFAULT; 6213 6214 return 0; 6215 } 6216 6217 /* 6218 * 8.1.23 SCTP_AUTO_ASCONF 6219 * See the corresponding setsockopt entry as description 6220 */ 6221 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 6222 char __user *optval, int __user *optlen) 6223 { 6224 int val = 0; 6225 6226 if (len < sizeof(int)) 6227 return -EINVAL; 6228 6229 len = sizeof(int); 6230 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 6231 val = 1; 6232 if (put_user(len, optlen)) 6233 return -EFAULT; 6234 if (copy_to_user(optval, &val, len)) 6235 return -EFAULT; 6236 return 0; 6237 } 6238 6239 /* 6240 * 8.2.6. Get the Current Identifiers of Associations 6241 * (SCTP_GET_ASSOC_ID_LIST) 6242 * 6243 * This option gets the current list of SCTP association identifiers of 6244 * the SCTP associations handled by a one-to-many style socket. 6245 */ 6246 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 6247 char __user *optval, int __user *optlen) 6248 { 6249 struct sctp_sock *sp = sctp_sk(sk); 6250 struct sctp_association *asoc; 6251 struct sctp_assoc_ids *ids; 6252 u32 num = 0; 6253 6254 if (sctp_style(sk, TCP)) 6255 return -EOPNOTSUPP; 6256 6257 if (len < sizeof(struct sctp_assoc_ids)) 6258 return -EINVAL; 6259 6260 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6261 num++; 6262 } 6263 6264 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 6265 return -EINVAL; 6266 6267 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 6268 6269 ids = kmalloc(len, GFP_USER | __GFP_NOWARN); 6270 if (unlikely(!ids)) 6271 return -ENOMEM; 6272 6273 ids->gaids_number_of_ids = num; 6274 num = 0; 6275 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6276 ids->gaids_assoc_id[num++] = asoc->assoc_id; 6277 } 6278 6279 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 6280 kfree(ids); 6281 return -EFAULT; 6282 } 6283 6284 kfree(ids); 6285 return 0; 6286 } 6287 6288 /* 6289 * SCTP_PEER_ADDR_THLDS 6290 * 6291 * This option allows us to fetch the partially failed threshold for one or all 6292 * transports in an association. 
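 *
 * Illustrative user-space sketch (not part of the kernel build): a
 * wildcard spt_address returns the association-wide values, a specific
 * peer address returns that transport's values.
 *
 *	struct sctp_paddrthlds th;
 *	socklen_t len = sizeof(th);
 *
 *	memset(&th, 0, sizeof(th));	(wildcard address)
 *	th.spt_assoc_id = assoc_id;	(hypothetical association id)
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &th, &len) == 0)
 *		printf("pf threshold %u, path max rxt %u\n",
 *		       th.spt_pathpfthld, th.spt_pathmaxrxt);
 *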
See Section 6.1 of: 6293 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 6294 */ 6295 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 6296 char __user *optval, 6297 int len, 6298 int __user *optlen) 6299 { 6300 struct sctp_paddrthlds val; 6301 struct sctp_transport *trans; 6302 struct sctp_association *asoc; 6303 6304 if (len < sizeof(struct sctp_paddrthlds)) 6305 return -EINVAL; 6306 len = sizeof(struct sctp_paddrthlds); 6307 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 6308 return -EFAULT; 6309 6310 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 6311 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 6312 if (!asoc) 6313 return -ENOENT; 6314 6315 val.spt_pathpfthld = asoc->pf_retrans; 6316 val.spt_pathmaxrxt = asoc->pathmaxrxt; 6317 } else { 6318 trans = sctp_addr_id2transport(sk, &val.spt_address, 6319 val.spt_assoc_id); 6320 if (!trans) 6321 return -ENOENT; 6322 6323 val.spt_pathmaxrxt = trans->pathmaxrxt; 6324 val.spt_pathpfthld = trans->pf_retrans; 6325 } 6326 6327 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 6328 return -EFAULT; 6329 6330 return 0; 6331 } 6332 6333 /* 6334 * SCTP_GET_ASSOC_STATS 6335 * 6336 * This option retrieves local per endpoint statistics. It is modeled 6337 * after OpenSolaris' implementation 6338 */ 6339 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 6340 char __user *optval, 6341 int __user *optlen) 6342 { 6343 struct sctp_assoc_stats sas; 6344 struct sctp_association *asoc = NULL; 6345 6346 /* User must provide at least the assoc id */ 6347 if (len < sizeof(sctp_assoc_t)) 6348 return -EINVAL; 6349 6350 /* Allow the struct to grow and fill in as much as possible */ 6351 len = min_t(size_t, len, sizeof(sas)); 6352 6353 if (copy_from_user(&sas, optval, len)) 6354 return -EFAULT; 6355 6356 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 6357 if (!asoc) 6358 return -EINVAL; 6359 6360 sas.sas_rtxchunks = asoc->stats.rtxchunks; 6361 sas.sas_gapcnt = asoc->stats.gapcnt; 6362 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 6363 sas.sas_osacks = asoc->stats.osacks; 6364 sas.sas_isacks = asoc->stats.isacks; 6365 sas.sas_octrlchunks = asoc->stats.octrlchunks; 6366 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 6367 sas.sas_oodchunks = asoc->stats.oodchunks; 6368 sas.sas_iodchunks = asoc->stats.iodchunks; 6369 sas.sas_ouodchunks = asoc->stats.ouodchunks; 6370 sas.sas_iuodchunks = asoc->stats.iuodchunks; 6371 sas.sas_idupchunks = asoc->stats.idupchunks; 6372 sas.sas_opackets = asoc->stats.opackets; 6373 sas.sas_ipackets = asoc->stats.ipackets; 6374 6375 /* New high max rto observed, will return 0 if not a single 6376 * RTO update took place. 
obs_rto_ipaddr will be bogus 6377 * in such a case 6378 */ 6379 sas.sas_maxrto = asoc->stats.max_obs_rto; 6380 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 6381 sizeof(struct sockaddr_storage)); 6382 6383 /* Mark beginning of a new observation period */ 6384 asoc->stats.max_obs_rto = asoc->rto_min; 6385 6386 if (put_user(len, optlen)) 6387 return -EFAULT; 6388 6389 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 6390 6391 if (copy_to_user(optval, &sas, len)) 6392 return -EFAULT; 6393 6394 return 0; 6395 } 6396 6397 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, 6398 char __user *optval, 6399 int __user *optlen) 6400 { 6401 int val = 0; 6402 6403 if (len < sizeof(int)) 6404 return -EINVAL; 6405 6406 len = sizeof(int); 6407 if (sctp_sk(sk)->recvrcvinfo) 6408 val = 1; 6409 if (put_user(len, optlen)) 6410 return -EFAULT; 6411 if (copy_to_user(optval, &val, len)) 6412 return -EFAULT; 6413 6414 return 0; 6415 } 6416 6417 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, 6418 char __user *optval, 6419 int __user *optlen) 6420 { 6421 int val = 0; 6422 6423 if (len < sizeof(int)) 6424 return -EINVAL; 6425 6426 len = sizeof(int); 6427 if (sctp_sk(sk)->recvnxtinfo) 6428 val = 1; 6429 if (put_user(len, optlen)) 6430 return -EFAULT; 6431 if (copy_to_user(optval, &val, len)) 6432 return -EFAULT; 6433 6434 return 0; 6435 } 6436 6437 static int sctp_getsockopt_pr_supported(struct sock *sk, int len, 6438 char __user *optval, 6439 int __user *optlen) 6440 { 6441 struct sctp_assoc_value params; 6442 struct sctp_association *asoc; 6443 int retval = -EFAULT; 6444 6445 if (len < sizeof(params)) { 6446 retval = -EINVAL; 6447 goto out; 6448 } 6449 6450 len = sizeof(params); 6451 if (copy_from_user(¶ms, optval, len)) 6452 goto out; 6453 6454 asoc = sctp_id2assoc(sk, params.assoc_id); 6455 if (asoc) { 6456 params.assoc_value = asoc->prsctp_enable; 6457 } else if (!params.assoc_id) { 6458 struct sctp_sock *sp = sctp_sk(sk); 6459 6460 params.assoc_value = sp->ep->prsctp_enable; 6461 } else { 6462 retval = -EINVAL; 6463 goto out; 6464 } 6465 6466 if (put_user(len, optlen)) 6467 goto out; 6468 6469 if (copy_to_user(optval, ¶ms, len)) 6470 goto out; 6471 6472 retval = 0; 6473 6474 out: 6475 return retval; 6476 } 6477 6478 static int sctp_getsockopt_default_prinfo(struct sock *sk, int len, 6479 char __user *optval, 6480 int __user *optlen) 6481 { 6482 struct sctp_default_prinfo info; 6483 struct sctp_association *asoc; 6484 int retval = -EFAULT; 6485 6486 if (len < sizeof(info)) { 6487 retval = -EINVAL; 6488 goto out; 6489 } 6490 6491 len = sizeof(info); 6492 if (copy_from_user(&info, optval, len)) 6493 goto out; 6494 6495 asoc = sctp_id2assoc(sk, info.pr_assoc_id); 6496 if (asoc) { 6497 info.pr_policy = SCTP_PR_POLICY(asoc->default_flags); 6498 info.pr_value = asoc->default_timetolive; 6499 } else if (!info.pr_assoc_id) { 6500 struct sctp_sock *sp = sctp_sk(sk); 6501 6502 info.pr_policy = SCTP_PR_POLICY(sp->default_flags); 6503 info.pr_value = sp->default_timetolive; 6504 } else { 6505 retval = -EINVAL; 6506 goto out; 6507 } 6508 6509 if (put_user(len, optlen)) 6510 goto out; 6511 6512 if (copy_to_user(optval, &info, len)) 6513 goto out; 6514 6515 retval = 0; 6516 6517 out: 6518 return retval; 6519 } 6520 6521 static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, 6522 char __user *optval, 6523 int __user *optlen) 6524 { 6525 struct sctp_prstatus params; 6526 struct sctp_association *asoc; 6527 int policy; 6528 int retval = -EINVAL; 
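	/* Illustrative user-space sketch (not part of the kernel build) of
	 * querying the abandoned-chunk counters computed below; passing
	 * SCTP_PR_SCTP_NONE as the policy aggregates across all policies:
	 *
	 *	struct sctp_prstatus ps;
	 *	socklen_t len = sizeof(ps);
	 *
	 *	memset(&ps, 0, sizeof(ps));
	 *	ps.sprstat_assoc_id = assoc_id;	(hypothetical association id)
	 *	ps.sprstat_policy = SCTP_PR_SCTP_TTL;
	 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PR_ASSOC_STATUS,
	 *		       &ps, &len) == 0)
	 *		use ps.sprstat_abandoned_sent and
	 *		    ps.sprstat_abandoned_unsent
	 */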
6529 6530 if (len < sizeof(params)) 6531 goto out; 6532 6533 len = sizeof(params); 6534 if (copy_from_user(¶ms, optval, len)) { 6535 retval = -EFAULT; 6536 goto out; 6537 } 6538 6539 policy = params.sprstat_policy; 6540 if (policy & ~SCTP_PR_SCTP_MASK) 6541 goto out; 6542 6543 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); 6544 if (!asoc) 6545 goto out; 6546 6547 if (policy == SCTP_PR_SCTP_NONE) { 6548 params.sprstat_abandoned_unsent = 0; 6549 params.sprstat_abandoned_sent = 0; 6550 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { 6551 params.sprstat_abandoned_unsent += 6552 asoc->abandoned_unsent[policy]; 6553 params.sprstat_abandoned_sent += 6554 asoc->abandoned_sent[policy]; 6555 } 6556 } else { 6557 params.sprstat_abandoned_unsent = 6558 asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)]; 6559 params.sprstat_abandoned_sent = 6560 asoc->abandoned_sent[__SCTP_PR_INDEX(policy)]; 6561 } 6562 6563 if (put_user(len, optlen)) { 6564 retval = -EFAULT; 6565 goto out; 6566 } 6567 6568 if (copy_to_user(optval, ¶ms, len)) { 6569 retval = -EFAULT; 6570 goto out; 6571 } 6572 6573 retval = 0; 6574 6575 out: 6576 return retval; 6577 } 6578 6579 static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, 6580 char __user *optval, 6581 int __user *optlen) 6582 { 6583 struct sctp_stream_out *streamout; 6584 struct sctp_association *asoc; 6585 struct sctp_prstatus params; 6586 int retval = -EINVAL; 6587 int policy; 6588 6589 if (len < sizeof(params)) 6590 goto out; 6591 6592 len = sizeof(params); 6593 if (copy_from_user(¶ms, optval, len)) { 6594 retval = -EFAULT; 6595 goto out; 6596 } 6597 6598 policy = params.sprstat_policy; 6599 if (policy & ~SCTP_PR_SCTP_MASK) 6600 goto out; 6601 6602 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); 6603 if (!asoc || params.sprstat_sid >= asoc->stream->outcnt) 6604 goto out; 6605 6606 streamout = &asoc->stream->out[params.sprstat_sid]; 6607 if (policy == SCTP_PR_SCTP_NONE) { 6608 params.sprstat_abandoned_unsent = 0; 6609 params.sprstat_abandoned_sent = 0; 6610 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { 6611 params.sprstat_abandoned_unsent += 6612 streamout->abandoned_unsent[policy]; 6613 params.sprstat_abandoned_sent += 6614 streamout->abandoned_sent[policy]; 6615 } 6616 } else { 6617 params.sprstat_abandoned_unsent = 6618 streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)]; 6619 params.sprstat_abandoned_sent = 6620 streamout->abandoned_sent[__SCTP_PR_INDEX(policy)]; 6621 } 6622 6623 if (put_user(len, optlen) || copy_to_user(optval, ¶ms, len)) { 6624 retval = -EFAULT; 6625 goto out; 6626 } 6627 6628 retval = 0; 6629 6630 out: 6631 return retval; 6632 } 6633 6634 static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len, 6635 char __user *optval, 6636 int __user *optlen) 6637 { 6638 struct sctp_assoc_value params; 6639 struct sctp_association *asoc; 6640 int retval = -EFAULT; 6641 6642 if (len < sizeof(params)) { 6643 retval = -EINVAL; 6644 goto out; 6645 } 6646 6647 len = sizeof(params); 6648 if (copy_from_user(¶ms, optval, len)) 6649 goto out; 6650 6651 asoc = sctp_id2assoc(sk, params.assoc_id); 6652 if (asoc) { 6653 params.assoc_value = asoc->reconf_enable; 6654 } else if (!params.assoc_id) { 6655 struct sctp_sock *sp = sctp_sk(sk); 6656 6657 params.assoc_value = sp->ep->reconf_enable; 6658 } else { 6659 retval = -EINVAL; 6660 goto out; 6661 } 6662 6663 if (put_user(len, optlen)) 6664 goto out; 6665 6666 if (copy_to_user(optval, ¶ms, len)) 6667 goto out; 6668 6669 retval = 0; 6670 6671 out: 6672 return retval; 6673 
} 6674 6675 static int sctp_getsockopt_enable_strreset(struct sock *sk, int len, 6676 char __user *optval, 6677 int __user *optlen) 6678 { 6679 struct sctp_assoc_value params; 6680 struct sctp_association *asoc; 6681 int retval = -EFAULT; 6682 6683 if (len < sizeof(params)) { 6684 retval = -EINVAL; 6685 goto out; 6686 } 6687 6688 len = sizeof(params); 6689 if (copy_from_user(¶ms, optval, len)) 6690 goto out; 6691 6692 asoc = sctp_id2assoc(sk, params.assoc_id); 6693 if (asoc) { 6694 params.assoc_value = asoc->strreset_enable; 6695 } else if (!params.assoc_id) { 6696 struct sctp_sock *sp = sctp_sk(sk); 6697 6698 params.assoc_value = sp->ep->strreset_enable; 6699 } else { 6700 retval = -EINVAL; 6701 goto out; 6702 } 6703 6704 if (put_user(len, optlen)) 6705 goto out; 6706 6707 if (copy_to_user(optval, ¶ms, len)) 6708 goto out; 6709 6710 retval = 0; 6711 6712 out: 6713 return retval; 6714 } 6715 6716 static int sctp_getsockopt(struct sock *sk, int level, int optname, 6717 char __user *optval, int __user *optlen) 6718 { 6719 int retval = 0; 6720 int len; 6721 6722 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 6723 6724 /* I can hardly begin to describe how wrong this is. This is 6725 * so broken as to be worse than useless. The API draft 6726 * REALLY is NOT helpful here... I am not convinced that the 6727 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 6728 * are at all well-founded. 6729 */ 6730 if (level != SOL_SCTP) { 6731 struct sctp_af *af = sctp_sk(sk)->pf->af; 6732 6733 retval = af->getsockopt(sk, level, optname, optval, optlen); 6734 return retval; 6735 } 6736 6737 if (get_user(len, optlen)) 6738 return -EFAULT; 6739 6740 if (len < 0) 6741 return -EINVAL; 6742 6743 lock_sock(sk); 6744 6745 switch (optname) { 6746 case SCTP_STATUS: 6747 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 6748 break; 6749 case SCTP_DISABLE_FRAGMENTS: 6750 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 6751 optlen); 6752 break; 6753 case SCTP_EVENTS: 6754 retval = sctp_getsockopt_events(sk, len, optval, optlen); 6755 break; 6756 case SCTP_AUTOCLOSE: 6757 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 6758 break; 6759 case SCTP_SOCKOPT_PEELOFF: 6760 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 6761 break; 6762 case SCTP_PEER_ADDR_PARAMS: 6763 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 6764 optlen); 6765 break; 6766 case SCTP_DELAYED_SACK: 6767 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 6768 optlen); 6769 break; 6770 case SCTP_INITMSG: 6771 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 6772 break; 6773 case SCTP_GET_PEER_ADDRS: 6774 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 6775 optlen); 6776 break; 6777 case SCTP_GET_LOCAL_ADDRS: 6778 retval = sctp_getsockopt_local_addrs(sk, len, optval, 6779 optlen); 6780 break; 6781 case SCTP_SOCKOPT_CONNECTX3: 6782 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 6783 break; 6784 case SCTP_DEFAULT_SEND_PARAM: 6785 retval = sctp_getsockopt_default_send_param(sk, len, 6786 optval, optlen); 6787 break; 6788 case SCTP_DEFAULT_SNDINFO: 6789 retval = sctp_getsockopt_default_sndinfo(sk, len, 6790 optval, optlen); 6791 break; 6792 case SCTP_PRIMARY_ADDR: 6793 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 6794 break; 6795 case SCTP_NODELAY: 6796 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 6797 break; 6798 case SCTP_RTOINFO: 6799 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 6800 break; 6801 
case SCTP_ASSOCINFO: 6802 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 6803 break; 6804 case SCTP_I_WANT_MAPPED_V4_ADDR: 6805 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 6806 break; 6807 case SCTP_MAXSEG: 6808 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 6809 break; 6810 case SCTP_GET_PEER_ADDR_INFO: 6811 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 6812 optlen); 6813 break; 6814 case SCTP_ADAPTATION_LAYER: 6815 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, 6816 optlen); 6817 break; 6818 case SCTP_CONTEXT: 6819 retval = sctp_getsockopt_context(sk, len, optval, optlen); 6820 break; 6821 case SCTP_FRAGMENT_INTERLEAVE: 6822 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 6823 optlen); 6824 break; 6825 case SCTP_PARTIAL_DELIVERY_POINT: 6826 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 6827 optlen); 6828 break; 6829 case SCTP_MAX_BURST: 6830 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 6831 break; 6832 case SCTP_AUTH_KEY: 6833 case SCTP_AUTH_CHUNK: 6834 case SCTP_AUTH_DELETE_KEY: 6835 retval = -EOPNOTSUPP; 6836 break; 6837 case SCTP_HMAC_IDENT: 6838 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 6839 break; 6840 case SCTP_AUTH_ACTIVE_KEY: 6841 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 6842 break; 6843 case SCTP_PEER_AUTH_CHUNKS: 6844 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 6845 optlen); 6846 break; 6847 case SCTP_LOCAL_AUTH_CHUNKS: 6848 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 6849 optlen); 6850 break; 6851 case SCTP_GET_ASSOC_NUMBER: 6852 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 6853 break; 6854 case SCTP_GET_ASSOC_ID_LIST: 6855 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 6856 break; 6857 case SCTP_AUTO_ASCONF: 6858 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 6859 break; 6860 case SCTP_PEER_ADDR_THLDS: 6861 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 6862 break; 6863 case SCTP_GET_ASSOC_STATS: 6864 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 6865 break; 6866 case SCTP_RECVRCVINFO: 6867 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); 6868 break; 6869 case SCTP_RECVNXTINFO: 6870 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); 6871 break; 6872 case SCTP_PR_SUPPORTED: 6873 retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen); 6874 break; 6875 case SCTP_DEFAULT_PRINFO: 6876 retval = sctp_getsockopt_default_prinfo(sk, len, optval, 6877 optlen); 6878 break; 6879 case SCTP_PR_ASSOC_STATUS: 6880 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, 6881 optlen); 6882 break; 6883 case SCTP_PR_STREAM_STATUS: 6884 retval = sctp_getsockopt_pr_streamstatus(sk, len, optval, 6885 optlen); 6886 break; 6887 case SCTP_RECONFIG_SUPPORTED: 6888 retval = sctp_getsockopt_reconfig_supported(sk, len, optval, 6889 optlen); 6890 break; 6891 case SCTP_ENABLE_STREAM_RESET: 6892 retval = sctp_getsockopt_enable_strreset(sk, len, optval, 6893 optlen); 6894 break; 6895 default: 6896 retval = -ENOPROTOOPT; 6897 break; 6898 } 6899 6900 release_sock(sk); 6901 return retval; 6902 } 6903 6904 static int sctp_hash(struct sock *sk) 6905 { 6906 /* STUB */ 6907 return 0; 6908 } 6909 6910 static void sctp_unhash(struct sock *sk) 6911 { 6912 /* STUB */ 6913 } 6914 6915 /* Check if port is acceptable. Possibly find first available port. 
6916 * 6917 * The port hash table (contained in the 'global' SCTP protocol storage 6918 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 6919 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 6920 * list (the list number is the port number hashed out, so as you 6921 * would expect from a hash function, all the ports in a given list have 6922 * such a number that hashes out to the same list number; you were 6923 * expecting that, right?); so each list has a set of ports, with a 6924 * link to the socket (struct sock) that uses it, the port number and 6925 * a fastreuse flag (FIXME: NPI ipg). 6926 */ 6927 static struct sctp_bind_bucket *sctp_bucket_create( 6928 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 6929 6930 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 6931 { 6932 struct sctp_bind_hashbucket *head; /* hash list */ 6933 struct sctp_bind_bucket *pp; 6934 unsigned short snum; 6935 int ret; 6936 6937 snum = ntohs(addr->v4.sin_port); 6938 6939 pr_debug("%s: begins, snum:%d\n", __func__, snum); 6940 6941 local_bh_disable(); 6942 6943 if (snum == 0) { 6944 /* Search for an available port. */ 6945 int low, high, remaining, index; 6946 unsigned int rover; 6947 struct net *net = sock_net(sk); 6948 6949 inet_get_local_port_range(net, &low, &high); 6950 remaining = (high - low) + 1; 6951 rover = prandom_u32() % remaining + low; 6952 6953 do { 6954 rover++; 6955 if ((rover < low) || (rover > high)) 6956 rover = low; 6957 if (inet_is_local_reserved_port(net, rover)) 6958 continue; 6959 index = sctp_phashfn(sock_net(sk), rover); 6960 head = &sctp_port_hashtable[index]; 6961 spin_lock(&head->lock); 6962 sctp_for_each_hentry(pp, &head->chain) 6963 if ((pp->port == rover) && 6964 net_eq(sock_net(sk), pp->net)) 6965 goto next; 6966 break; 6967 next: 6968 spin_unlock(&head->lock); 6969 } while (--remaining > 0); 6970 6971 /* Exhausted local port range during search? */ 6972 ret = 1; 6973 if (remaining <= 0) 6974 goto fail; 6975 6976 /* OK, here is the one we will use. HEAD (the port 6977 * hash table list entry) is non-NULL and we hold it's 6978 * mutex. 6979 */ 6980 snum = rover; 6981 } else { 6982 /* We are given an specific port number; we verify 6983 * that it is not being used. If it is used, we will 6984 * exahust the search in the hash list corresponding 6985 * to the port number (snum) - we detect that with the 6986 * port iterator, pp being NULL. 6987 */ 6988 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 6989 spin_lock(&head->lock); 6990 sctp_for_each_hentry(pp, &head->chain) { 6991 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 6992 goto pp_found; 6993 } 6994 } 6995 pp = NULL; 6996 goto pp_not_found; 6997 pp_found: 6998 if (!hlist_empty(&pp->owner)) { 6999 /* We had a port hash table hit - there is an 7000 * available port (pp != NULL) and it is being 7001 * used by other socket (pp->owner not empty); that other 7002 * socket is going to be sk2. 7003 */ 7004 int reuse = sk->sk_reuse; 7005 struct sock *sk2; 7006 7007 pr_debug("%s: found a possible match\n", __func__); 7008 7009 if (pp->fastreuse && sk->sk_reuse && 7010 sk->sk_state != SCTP_SS_LISTENING) 7011 goto success; 7012 7013 /* Run through the list of sockets bound to the port 7014 * (pp->port) [via the pointers bind_next and 7015 * bind_pprev in the struct sock *sk2 (pp->sk)]. 
On each one, 7016 * we get the endpoint they describe and run through 7017 * the endpoint's list of IP (v4 or v6) addresses, 7018 * comparing each of the addresses with the address of 7019 * the socket sk. If we find a match, then that means 7020 * that this port/socket (sk) combination are already 7021 * in an endpoint. 7022 */ 7023 sk_for_each_bound(sk2, &pp->owner) { 7024 struct sctp_endpoint *ep2; 7025 ep2 = sctp_sk(sk2)->ep; 7026 7027 if (sk == sk2 || 7028 (reuse && sk2->sk_reuse && 7029 sk2->sk_state != SCTP_SS_LISTENING)) 7030 continue; 7031 7032 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 7033 sctp_sk(sk2), sctp_sk(sk))) { 7034 ret = (long)sk2; 7035 goto fail_unlock; 7036 } 7037 } 7038 7039 pr_debug("%s: found a match\n", __func__); 7040 } 7041 pp_not_found: 7042 /* If there was a hash table miss, create a new port. */ 7043 ret = 1; 7044 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 7045 goto fail_unlock; 7046 7047 /* In either case (hit or miss), make sure fastreuse is 1 only 7048 * if sk->sk_reuse is too (that is, if the caller requested 7049 * SO_REUSEADDR on this socket -sk-). 7050 */ 7051 if (hlist_empty(&pp->owner)) { 7052 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 7053 pp->fastreuse = 1; 7054 else 7055 pp->fastreuse = 0; 7056 } else if (pp->fastreuse && 7057 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 7058 pp->fastreuse = 0; 7059 7060 /* We are set, so fill up all the data in the hash table 7061 * entry, tie the socket list information with the rest of the 7062 * sockets FIXME: Blurry, NPI (ipg). 7063 */ 7064 success: 7065 if (!sctp_sk(sk)->bind_hash) { 7066 inet_sk(sk)->inet_num = snum; 7067 sk_add_bind_node(sk, &pp->owner); 7068 sctp_sk(sk)->bind_hash = pp; 7069 } 7070 ret = 0; 7071 7072 fail_unlock: 7073 spin_unlock(&head->lock); 7074 7075 fail: 7076 local_bh_enable(); 7077 return ret; 7078 } 7079 7080 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 7081 * port is requested. 7082 */ 7083 static int sctp_get_port(struct sock *sk, unsigned short snum) 7084 { 7085 union sctp_addr addr; 7086 struct sctp_af *af = sctp_sk(sk)->pf->af; 7087 7088 /* Set up a dummy address struct from the sk. */ 7089 af->from_sk(&addr, sk); 7090 addr.v4.sin_port = htons(snum); 7091 7092 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 7093 return !!sctp_get_port_local(sk, &addr); 7094 } 7095 7096 /* 7097 * Move a socket to LISTENING state. 7098 */ 7099 static int sctp_listen_start(struct sock *sk, int backlog) 7100 { 7101 struct sctp_sock *sp = sctp_sk(sk); 7102 struct sctp_endpoint *ep = sp->ep; 7103 struct crypto_shash *tfm = NULL; 7104 char alg[32]; 7105 7106 /* Allocate HMAC for generating cookie. */ 7107 if (!sp->hmac && sp->sctp_hmac_alg) { 7108 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 7109 tfm = crypto_alloc_shash(alg, 0, 0); 7110 if (IS_ERR(tfm)) { 7111 net_info_ratelimited("failed to load transform for %s: %ld\n", 7112 sp->sctp_hmac_alg, PTR_ERR(tfm)); 7113 return -ENOSYS; 7114 } 7115 sctp_sk(sk)->hmac = tfm; 7116 } 7117 7118 /* 7119 * If a bind() or sctp_bindx() is not called prior to a listen() 7120 * call that allows new associations to be accepted, the system 7121 * picks an ephemeral port and will choose an address set equivalent 7122 * to binding with a wildcard address. 7123 * 7124 * This is not currently spelled out in the SCTP sockets 7125 * extensions draft, but follows the practice as seen in TCP 7126 * sockets. 
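 *
 * Illustrative user-space sketch (not part of the kernel build): a
 * one-to-many socket can be marked listening without an explicit
 * bind(), in which case the ephemeral port and wildcard address set
 * described above are chosen:
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 *	if (listen(fd, 5) < 0)
 *		perror("listen");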
7127 * 7128 */ 7129 sk->sk_state = SCTP_SS_LISTENING; 7130 if (!ep->base.bind_addr.port) { 7131 if (sctp_autobind(sk)) 7132 return -EAGAIN; 7133 } else { 7134 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 7135 sk->sk_state = SCTP_SS_CLOSED; 7136 return -EADDRINUSE; 7137 } 7138 } 7139 7140 sk->sk_max_ack_backlog = backlog; 7141 sctp_hash_endpoint(ep); 7142 return 0; 7143 } 7144 7145 /* 7146 * 4.1.3 / 5.1.3 listen() 7147 * 7148 * By default, new associations are not accepted for UDP style sockets. 7149 * An application uses listen() to mark a socket as being able to 7150 * accept new associations. 7151 * 7152 * On TCP style sockets, applications use listen() to ready the SCTP 7153 * endpoint for accepting inbound associations. 7154 * 7155 * On both types of endpoints a backlog of '0' disables listening. 7156 * 7157 * Move a socket to LISTENING state. 7158 */ 7159 int sctp_inet_listen(struct socket *sock, int backlog) 7160 { 7161 struct sock *sk = sock->sk; 7162 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 7163 int err = -EINVAL; 7164 7165 if (unlikely(backlog < 0)) 7166 return err; 7167 7168 lock_sock(sk); 7169 7170 /* Peeled-off sockets are not allowed to listen(). */ 7171 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 7172 goto out; 7173 7174 if (sock->state != SS_UNCONNECTED) 7175 goto out; 7176 7177 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) 7178 goto out; 7179 7180 /* If backlog is zero, disable listening. */ 7181 if (!backlog) { 7182 if (sctp_sstate(sk, CLOSED)) 7183 goto out; 7184 7185 err = 0; 7186 sctp_unhash_endpoint(ep); 7187 sk->sk_state = SCTP_SS_CLOSED; 7188 if (sk->sk_reuse) 7189 sctp_sk(sk)->bind_hash->fastreuse = 1; 7190 goto out; 7191 } 7192 7193 /* If we are already listening, just update the backlog */ 7194 if (sctp_sstate(sk, LISTENING)) 7195 sk->sk_max_ack_backlog = backlog; 7196 else { 7197 err = sctp_listen_start(sk, backlog); 7198 if (err) 7199 goto out; 7200 } 7201 7202 err = 0; 7203 out: 7204 release_sock(sk); 7205 return err; 7206 } 7207 7208 /* 7209 * This function is done by modeling the current datagram_poll() and the 7210 * tcp_poll(). Note that, based on these implementations, we don't 7211 * lock the socket in this function, even though it seems that, 7212 * ideally, locking or some other mechanisms can be used to ensure 7213 * the integrity of the counters (sndbuf and wmem_alloc) used 7214 * in this place. We assume that we don't need locks either until proven 7215 * otherwise. 7216 * 7217 * Another thing to note is that we include the Async I/O support 7218 * here, again, by modeling the current TCP/UDP code. We don't have 7219 * a good way to test with it yet. 7220 */ 7221 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 7222 { 7223 struct sock *sk = sock->sk; 7224 struct sctp_sock *sp = sctp_sk(sk); 7225 unsigned int mask; 7226 7227 poll_wait(file, sk_sleep(sk), wait); 7228 7229 sock_rps_record_flow(sk); 7230 7231 /* A TCP-style listening socket becomes readable when the accept queue 7232 * is not empty. 7233 */ 7234 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 7235 return (!list_empty(&sp->ep->asocs)) ? 7236 (POLLIN | POLLRDNORM) : 0; 7237 7238 mask = 0; 7239 7240 /* Is there any exceptional events? */ 7241 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 7242 mask |= POLLERR | 7243 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
POLLPRI : 0); 7244 if (sk->sk_shutdown & RCV_SHUTDOWN) 7245 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 7246 if (sk->sk_shutdown == SHUTDOWN_MASK) 7247 mask |= POLLHUP; 7248 7249 /* Is it readable? Reconsider this code with TCP-style support. */ 7250 if (!skb_queue_empty(&sk->sk_receive_queue)) 7251 mask |= POLLIN | POLLRDNORM; 7252 7253 /* The association is either gone or not ready. */ 7254 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 7255 return mask; 7256 7257 /* Is it writable? */ 7258 if (sctp_writeable(sk)) { 7259 mask |= POLLOUT | POLLWRNORM; 7260 } else { 7261 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 7262 /* 7263 * Since the socket is not locked, the buffer 7264 * might be made available after the writeable check and 7265 * before the bit is set. This could cause a lost I/O 7266 * signal. tcp_poll() has a race breaker for this race 7267 * condition. Based on their implementation, we put 7268 * in the following code to cover it as well. 7269 */ 7270 if (sctp_writeable(sk)) 7271 mask |= POLLOUT | POLLWRNORM; 7272 } 7273 return mask; 7274 } 7275 7276 /******************************************************************** 7277 * 2nd Level Abstractions 7278 ********************************************************************/ 7279 7280 static struct sctp_bind_bucket *sctp_bucket_create( 7281 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 7282 { 7283 struct sctp_bind_bucket *pp; 7284 7285 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 7286 if (pp) { 7287 SCTP_DBG_OBJCNT_INC(bind_bucket); 7288 pp->port = snum; 7289 pp->fastreuse = 0; 7290 INIT_HLIST_HEAD(&pp->owner); 7291 pp->net = net; 7292 hlist_add_head(&pp->node, &head->chain); 7293 } 7294 return pp; 7295 } 7296 7297 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 7298 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 7299 { 7300 if (pp && hlist_empty(&pp->owner)) { 7301 __hlist_del(&pp->node); 7302 kmem_cache_free(sctp_bucket_cachep, pp); 7303 SCTP_DBG_OBJCNT_DEC(bind_bucket); 7304 } 7305 } 7306 7307 /* Release this socket's reference to a local port. */ 7308 static inline void __sctp_put_port(struct sock *sk) 7309 { 7310 struct sctp_bind_hashbucket *head = 7311 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 7312 inet_sk(sk)->inet_num)]; 7313 struct sctp_bind_bucket *pp; 7314 7315 spin_lock(&head->lock); 7316 pp = sctp_sk(sk)->bind_hash; 7317 __sk_del_bind_node(sk); 7318 sctp_sk(sk)->bind_hash = NULL; 7319 inet_sk(sk)->inet_num = 0; 7320 sctp_bucket_destroy(pp); 7321 spin_unlock(&head->lock); 7322 } 7323 7324 void sctp_put_port(struct sock *sk) 7325 { 7326 local_bh_disable(); 7327 __sctp_put_port(sk); 7328 local_bh_enable(); 7329 } 7330 7331 /* 7332 * The system picks an ephemeral port and choose an address set equivalent 7333 * to binding with a wildcard address. 7334 * One of those addresses will be the primary address for the association. 7335 * This automatically enables the multihoming capability of SCTP. 7336 */ 7337 static int sctp_autobind(struct sock *sk) 7338 { 7339 union sctp_addr autoaddr; 7340 struct sctp_af *af; 7341 __be16 port; 7342 7343 /* Initialize a local sockaddr structure to INADDR_ANY. */ 7344 af = sctp_sk(sk)->pf->af; 7345 7346 port = htons(inet_sk(sk)->inet_num); 7347 af->inaddr_any(&autoaddr, port); 7348 7349 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 7350 } 7351 7352 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 
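 *
 * Illustrative user-space sketch (not part of the kernel build) of the
 * kind of ancillary data parsed here, attaching an SCTP_SNDINFO object
 * to a sendmsg() call (the data iovec is omitted for brevity):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	struct sctp_sndinfo *si;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	memset(cbuf, 0, sizeof(cbuf));
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *	si = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *	si->snd_sid = 1;	(example stream number)
 *	sendmsg(fd, &msg, 0);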
7353 * 7354 * From RFC 2292 7355 * 4.2 The cmsghdr Structure * 7356 * 7357 * When ancillary data is sent or received, any number of ancillary data 7358 * objects can be specified by the msg_control and msg_controllen members of 7359 * the msghdr structure, because each object is preceded by 7360 * a cmsghdr structure defining the object's length (the cmsg_len member). 7361 * Historically Berkeley-derived implementations have passed only one object 7362 * at a time, but this API allows multiple objects to be 7363 * passed in a single call to sendmsg() or recvmsg(). The following example 7364 * shows two ancillary data objects in a control buffer. 7365 * 7366 * |<--------------------------- msg_controllen -------------------------->| 7367 * | | 7368 * 7369 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 7370 * 7371 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 7372 * | | | 7373 * 7374 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 7375 * 7376 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 7377 * | | | | | 7378 * 7379 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 7380 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 7381 * 7382 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 7383 * 7384 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 7385 * ^ 7386 * | 7387 * 7388 * msg_control 7389 * points here 7390 */ 7391 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 7392 { 7393 struct cmsghdr *cmsg; 7394 struct msghdr *my_msg = (struct msghdr *)msg; 7395 7396 for_each_cmsghdr(cmsg, my_msg) { 7397 if (!CMSG_OK(my_msg, cmsg)) 7398 return -EINVAL; 7399 7400 /* Should we parse this header or ignore? */ 7401 if (cmsg->cmsg_level != IPPROTO_SCTP) 7402 continue; 7403 7404 /* Strictly check lengths following example in SCM code. */ 7405 switch (cmsg->cmsg_type) { 7406 case SCTP_INIT: 7407 /* SCTP Socket API Extension 7408 * 5.3.1 SCTP Initiation Structure (SCTP_INIT) 7409 * 7410 * This cmsghdr structure provides information for 7411 * initializing new SCTP associations with sendmsg(). 7412 * The SCTP_INITMSG socket option uses this same data 7413 * structure. This structure is not used for 7414 * recvmsg(). 7415 * 7416 * cmsg_level cmsg_type cmsg_data[] 7417 * ------------ ------------ ---------------------- 7418 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 7419 */ 7420 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) 7421 return -EINVAL; 7422 7423 cmsgs->init = CMSG_DATA(cmsg); 7424 break; 7425 7426 case SCTP_SNDRCV: 7427 /* SCTP Socket API Extension 7428 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) 7429 * 7430 * This cmsghdr structure specifies SCTP options for 7431 * sendmsg() and describes SCTP header information 7432 * about a received message through recvmsg(). 
7433 * 7434 * cmsg_level cmsg_type cmsg_data[] 7435 * ------------ ------------ ---------------------- 7436 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 7437 */ 7438 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 7439 return -EINVAL; 7440 7441 cmsgs->srinfo = CMSG_DATA(cmsg); 7442 7443 if (cmsgs->srinfo->sinfo_flags & 7444 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 7445 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | 7446 SCTP_ABORT | SCTP_EOF)) 7447 return -EINVAL; 7448 break; 7449 7450 case SCTP_SNDINFO: 7451 /* SCTP Socket API Extension 7452 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) 7453 * 7454 * This cmsghdr structure specifies SCTP options for 7455 * sendmsg(). This structure and SCTP_RCVINFO replaces 7456 * SCTP_SNDRCV which has been deprecated. 7457 * 7458 * cmsg_level cmsg_type cmsg_data[] 7459 * ------------ ------------ --------------------- 7460 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo 7461 */ 7462 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) 7463 return -EINVAL; 7464 7465 cmsgs->sinfo = CMSG_DATA(cmsg); 7466 7467 if (cmsgs->sinfo->snd_flags & 7468 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 7469 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | 7470 SCTP_ABORT | SCTP_EOF)) 7471 return -EINVAL; 7472 break; 7473 default: 7474 return -EINVAL; 7475 } 7476 } 7477 7478 return 0; 7479 } 7480 7481 /* 7482 * Wait for a packet.. 7483 * Note: This function is the same function as in core/datagram.c 7484 * with a few modifications to make lksctp work. 7485 */ 7486 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) 7487 { 7488 int error; 7489 DEFINE_WAIT(wait); 7490 7491 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 7492 7493 /* Socket errors? */ 7494 error = sock_error(sk); 7495 if (error) 7496 goto out; 7497 7498 if (!skb_queue_empty(&sk->sk_receive_queue)) 7499 goto ready; 7500 7501 /* Socket shut down? */ 7502 if (sk->sk_shutdown & RCV_SHUTDOWN) 7503 goto out; 7504 7505 /* Sequenced packets can come disconnected. If so we report the 7506 * problem. 7507 */ 7508 error = -ENOTCONN; 7509 7510 /* Is there a good reason to think that we may receive some data? */ 7511 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 7512 goto out; 7513 7514 /* Handle signals. */ 7515 if (signal_pending(current)) 7516 goto interrupted; 7517 7518 /* Let another process have a go. Since we are going to sleep 7519 * anyway. Note: This may cause odd behaviors if the message 7520 * does not fit in the user's buffer, but this seems to be the 7521 * only way to honor MSG_DONTWAIT realistically. 7522 */ 7523 release_sock(sk); 7524 *timeo_p = schedule_timeout(*timeo_p); 7525 lock_sock(sk); 7526 7527 ready: 7528 finish_wait(sk_sleep(sk), &wait); 7529 return 0; 7530 7531 interrupted: 7532 error = sock_intr_errno(*timeo_p); 7533 7534 out: 7535 finish_wait(sk_sleep(sk), &wait); 7536 *err = error; 7537 return error; 7538 } 7539 7540 /* Receive a datagram. 7541 * Note: This is pretty much the same routine as in core/datagram.c 7542 * with a few changes to make lksctp work. 7543 */ 7544 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 7545 int noblock, int *err) 7546 { 7547 int error; 7548 struct sk_buff *skb; 7549 long timeo; 7550 7551 timeo = sock_rcvtimeo(sk, noblock); 7552 7553 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 7554 MAX_SCHEDULE_TIMEOUT); 7555 7556 do { 7557 /* Again only user level code calls this function, 7558 * so nothing interrupt level 7559 * will suddenly eat the receive_queue. 
		 */
		if (flags & MSG_PEEK) {
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				atomic_inc(&skb->users);
		} else {
			skb = __skb_dequeue(&sk->sk_receive_queue);
		}

		if (skb)
			return skb;

		/* Caller is allowed not to check sk->sk_err before calling. */
		error = sock_error(sk);
		if (error)
			goto no_packet;

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;

		if (sk_can_busy_loop(sk)) {
			sk_busy_loop(sk, noblock);

			if (!skb_queue_empty(&sk->sk_receive_queue))
				continue;
		}

		/* User doesn't want to wait. */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;
	} while (sctp_wait_for_packet(sk, err, &timeo) == 0);

	return NULL;

no_packet:
	*err = error;
	return NULL;
}

/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	if (sctp_wspace(asoc) <= 0)
		return;

	if (waitqueue_active(&asoc->wait))
		wake_up_interruptible(&asoc->wait);

	if (sctp_writeable(sk)) {
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (wq) {
			if (waitqueue_active(&wq->wait))
				wake_up_interruptible(&wq->wait);

			/* Note that async I/O support is included here,
			 * modeled on the current TCP/UDP code, but it has
			 * not been tested with SCTP yet.
			 */
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		}
		rcu_read_unlock();
	}
}

static void sctp_wake_up_waiters(struct sock *sk,
				 struct sctp_association *asoc)
{
	struct sctp_association *tmp = asoc;

	/* We do accounting for the sndbuf space per association,
	 * so we only need to wake our own association.
	 */
	if (asoc->ep->sndbuf_policy)
		return __sctp_write_space(asoc);

	/* If the association is going down and is just flushing its
	 * outq, just notify the others in the normal way.
	 */
	if (asoc->base.dead)
		return sctp_write_space(sk);

	/* Accounting for the sndbuf space is per socket, so we
	 * need to wake up the others; try to be fair and, in the case
	 * of other associations, let them have a go first instead
	 * of just doing a sctp_write_space() call.
	 *
	 * Note that we reach sctp_wake_up_waiters() only when
	 * associations free up queued chunks, thus we are under
	 * lock and the list of associations on a socket is
	 * guaranteed not to change.
	 */
	for (tmp = list_next_entry(tmp, asocs); 1;
	     tmp = list_next_entry(tmp, asocs)) {
		/* Manually skip the head element. */
		if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
			continue;
		/* Wake up association. */
		__sctp_write_space(tmp);
		/* We've reached the end. */
		if (tmp == asoc)
			break;
	}
}

/* Do accounting for the sndbuf space.
 * Decrement the used sndbuf space of the corresponding association by the
 * data size which was just transmitted (freed).
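 *
 * The amounts released here mirror what sctp_set_owner_w() charged:
 * the chunk's payload plus the sk_buff and struct sctp_chunk overhead.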
 */
static void sctp_wfree(struct sk_buff *skb)
{
	struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);

	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);

	/* This undoes what is done via sctp_set_owner_w and sk_mem_charge. */
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);

	sock_wfree(skb);
	sctp_wake_up_waiters(sk, asoc);

	sctp_association_put(asoc);
}

/* Do accounting for the receive space on the socket.
 * Accounting for the association is done in ulpevent.c.
 * We set this as a destructor for the cloned data skbs so that
 * accounting is done at the correct time.
 */
void sctp_sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sctp_ulpevent *event = sctp_skb2event(skb);

	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);

	/* Mimic the behavior of sock_rfree(). */
	sk_mem_uncharge(sk, event->rmem_len);
}

/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
		 *timeo_p, msg_len);

	/* Increment the association's refcnt. */
	sctp_association_hold(asoc);

	/* Wait on the association specific sndbuf space. */
	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
		    asoc->base.dead)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;
		if (msg_len <= sctp_wspace(asoc))
			break;

		/* Let another process have a go, since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt. */
	sctp_association_put(asoc);

	return err;

do_error:
	err = -EPIPE;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EAGAIN;
	goto out;
}

void sctp_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
	struct sctp_association *asoc;

	/* Wake up the tasks in each wait queue. */
	list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
		__sctp_write_space(asoc);
	}
}

/* Is there any sndbuf space available on the socket?
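 *
 * Returns the number of sndbuf bytes still available on the socket
 * (zero when the send buffer is full); callers treat a non-zero
 * return value as "writeable".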
 *
 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
 * associations on the same socket.  For a UDP-style socket with
 * multiple associations, it is possible for it to be "unwriteable"
 * prematurely.  I assume that this is acceptable because
 * a premature "unwriteable" is better than an accidental "writeable" which
 * would cause an unwanted block under certain circumstances.  For the 1-1
 * UDP-style sockets or TCP-style sockets, this code should work.
 *  - Daisy
 */
static int sctp_writeable(struct sock *sk)
{
	int amt = 0;

	amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
	if (amt < 0)
		amt = 0;
	return amt;
}

/* Wait for an association to go into ESTABLISHED state.  If timeout is 0,
 * returns immediately with EINPROGRESS.
 */
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);

	/* Increment the association's refcnt. */
	sctp_association_hold(asoc);

	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
		    asoc->base.dead)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		if (sctp_state(asoc, ESTABLISHED))
			break;

		/* Let another process have a go, since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt. */
	sctp_association_put(asoc);

	return err;

do_error:
	if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
		err = -ETIMEDOUT;
	else
		err = -ECONNREFUSED;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EINPROGRESS;
	goto out;
}

static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
	struct sctp_endpoint *ep;
	int err = 0;
	DEFINE_WAIT(wait);

	ep = sctp_sk(sk)->ep;

	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);

		if (list_empty(&ep->asocs)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}

		err = -EINVAL;
		if (!sctp_sstate(sk, LISTENING))
			break;

		err = 0;
		if (!list_empty(&ep->asocs))
			break;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = -EAGAIN;
		if (!timeo)
			break;
	}

	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void sctp_wait_for_close(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&sctp_sk(sk)->ep->asocs))
			break;
		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);
	} while (!signal_pending(current) && timeout);

	finish_wait(sk_sleep(sk), &wait);
}

static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
	struct sk_buff *frag;

	if (!skb->data_len)
		goto done;

	/* Don't forget the fragments. */
	skb_walk_frags(skb, frag)
		sctp_skb_set_owner_r_frag(frag, sk);

done:
	sctp_skb_set_owner_r(skb, sk);
}

void sctp_copy_sock(struct sock *newsk, struct sock *sk,
		    struct sctp_association *asoc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_sock *newinet;

	newsk->sk_type = sk->sk_type;
	newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	newsk->sk_flags = sk->sk_flags;
	newsk->sk_tsflags = sk->sk_tsflags;
	newsk->sk_no_check_tx = sk->sk_no_check_tx;
	newsk->sk_no_check_rx = sk->sk_no_check_rx;
	newsk->sk_reuse = sk->sk_reuse;

	newsk->sk_shutdown = sk->sk_shutdown;
	newsk->sk_destruct = sctp_destruct_sock;
	newsk->sk_family = sk->sk_family;
	newsk->sk_protocol = IPPROTO_SCTP;
	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	newsk->sk_sndbuf = sk->sk_sndbuf;
	newsk->sk_rcvbuf = sk->sk_rcvbuf;
	newsk->sk_lingertime = sk->sk_lingertime;
	newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
	newsk->sk_sndtimeo = sk->sk_sndtimeo;
	newsk->sk_rxhash = sk->sk_rxhash;

	newinet = inet_sk(newsk);

	/* Initialize sk's sport, dport, rcv_saddr and daddr for
	 * getsockname() and getpeername().
	 */
	newinet->inet_sport = inet->inet_sport;
	newinet->inet_saddr = inet->inet_saddr;
	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
	newinet->inet_dport = htons(asoc->peer.port);
	newinet->pmtudisc = inet->pmtudisc;
	newinet->inet_id = asoc->next_tsn ^ jiffies;

	newinet->uc_ttl = inet->uc_ttl;
	newinet->mc_loop = 1;
	newinet->mc_ttl = 1;
	newinet->mc_index = 0;
	newinet->mc_list = NULL;

	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
		net_enable_timestamp();
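
	/* Let the security module (LSM) clone its state from the original
	 * socket to the new one.
	 */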
	security_sk_clone(sk, newsk);
}

static inline void sctp_copy_descendant(struct sock *sk_to,
					const struct sock *sk_from)
{
	int ancestor_size = sizeof(struct inet_sock) +
			    sizeof(struct sctp_sock) -
			    offsetof(struct sctp_sock, auto_asconf_list);

	if (sk_from->sk_family == PF_INET6)
		ancestor_size += sizeof(struct ipv6_pinfo);

	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}

/* Populate the fields of the newsk from the oldsk and migrate the assoc
 * and its messages to the newsk.
 */
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			      struct sctp_association *assoc,
			      sctp_socket_type_t type)
{
	struct sctp_sock *oldsp = sctp_sk(oldsk);
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sctp_bind_bucket *pp; /* hash list port iterator */
	struct sctp_endpoint *newep = newsp->ep;
	struct sk_buff *skb, *tmp;
	struct sctp_ulpevent *event;
	struct sctp_bind_hashbucket *head;

	/* Migrate socket buffer sizes and all the socket level options to the
	 * new socket.
	 */
	newsk->sk_sndbuf = oldsk->sk_sndbuf;
	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
	/* Brute force copy old sctp opt. */
	sctp_copy_descendant(newsk, oldsk);

	/* Restore the ep value that was overwritten with the above structure
	 * copy.
	 */
	newsp->ep = newep;
	newsp->hmac = NULL;

	/* Hook this new socket in to the bind_hash list. */
	head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
						 inet_sk(oldsk)->inet_num)];
	spin_lock_bh(&head->lock);
	pp = sctp_sk(oldsk)->bind_hash;
	sk_add_bind_node(newsk, &pp->owner);
	sctp_sk(newsk)->bind_hash = pp;
	inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
	spin_unlock_bh(&head->lock);

	/* Copy the bind_addr list from the original endpoint to the new
	 * endpoint so that we can handle restarts properly.
	 */
	sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
			   &oldsp->ep->base.bind_addr, GFP_KERNEL);

	/* Move any messages in the old socket's receive queue that are for the
	 * peeled off association to the new socket's receive queue.
	 */
	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
		event = sctp_skb2event(skb);
		if (event->asoc == assoc) {
			__skb_unlink(skb, &oldsk->sk_receive_queue);
			__skb_queue_tail(&newsk->sk_receive_queue, skb);
			sctp_skb_set_owner_r_frag(skb, newsk);
		}
	}

	/* Clean up any messages pending delivery due to partial
	 * delivery.  Three cases:
	 * 1) No partial delivery; no work.
	 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
	 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
	 */
	skb_queue_head_init(&newsp->pd_lobby);
	atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);

	if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
		struct sk_buff_head *queue;

		/* Decide which queue to move pd_lobby skbs to. */
		if (assoc->ulpq.pd_mode)
			queue = &newsp->pd_lobby;
		else
			queue = &newsk->sk_receive_queue;

		/* Walk through the pd_lobby, looking for skbs that
		 * need to be moved to the new socket.
		 */
		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
			event = sctp_skb2event(skb);
			if (event->asoc == assoc) {
				__skb_unlink(skb, &oldsp->pd_lobby);
				__skb_queue_tail(queue, skb);
				sctp_skb_set_owner_r_frag(skb, newsk);
			}
		}

		/* Clear up any skbs waiting for the partial
		 * delivery to finish.
		 */
		if (assoc->ulpq.pd_mode)
			sctp_clear_pd(oldsk, NULL);
	}

	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
		sctp_skb_set_owner_r_frag(skb, newsk);

	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
		sctp_skb_set_owner_r_frag(skb, newsk);

	/* Set the type of socket to indicate that it is peeled off from the
	 * original UDP-style socket or created with the accept() call on a
	 * TCP-style socket.
	 */
	newsp->type = type;

	/* Mark the new socket "in-use" by the user so that any packets
	 * that may arrive on the association after we've moved it are
	 * queued to the backlog.  This prevents a potential race between
	 * backlog processing on the old socket and new-packet processing
	 * on the new socket.
	 *
	 * The caller has just allocated newsk so we can guarantee that other
	 * paths won't try to lock it and then oldsk.
	 */
	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
	sctp_assoc_migrate(assoc, newsk);

	/* If the association on the newsk is already closed before accept()
	 * is called, set RCV_SHUTDOWN flag.
	 */
	if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
		newsk->sk_state = SCTP_SS_CLOSED;
		newsk->sk_shutdown |= RCV_SHUTDOWN;
	} else {
		newsk->sk_state = SCTP_SS_ESTABLISHED;
	}

	release_sock(newsk);
}

/* This proto struct describes the ULP interface for SCTP.
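 * sctp_prot covers IPPROTO_SCTP sockets over IPv4; the IPv6 variant,
 * sctpv6_prot, is defined below under CONFIG_IPV6.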
 */
struct proto sctp_prot = {
	.name		= "SCTP",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.connect	= sctp_connect,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp_sock),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};

#if IS_ENABLED(CONFIG_IPV6)

#include <net/transp_v6.h>
static void sctp_v6_destroy_sock(struct sock *sk)
{
	sctp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

struct proto sctpv6_prot = {
	.name		= "SCTPv6",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.connect	= sctp_connect,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_v6_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp6_sock),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};
#endif /* IS_ENABLED(CONFIG_IPV6) */
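
/* Illustrative userspace sketch (not kernel code; kept in a comment so it
 * is not compiled here): a minimal example of how an application might
 * build the SCTP_SNDINFO ancillary data that sctp_msghdr_parse() above
 * accepts.  The helper name send_on_stream(), the chosen flag and the
 * assumption that <netinet/sctp.h> from lksctp-tools exposes struct
 * sctp_sndinfo are illustrative, not taken from this file.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static ssize_t send_on_stream(int fd, const struct sockaddr_in *dst,
 *				      const void *buf, size_t len,
 *				      uint16_t stream)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *		struct sctp_sndinfo *si;
 *		struct cmsghdr *cmsg;
 *		struct msghdr msg;
 *		struct iovec iov;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		memset(cbuf, 0, sizeof(cbuf));
 *
 *		iov.iov_base = (void *)buf;
 *		iov.iov_len = len;
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_name = (void *)dst;
 *		msg.msg_namelen = sizeof(*dst);
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = IPPROTO_SCTP;
 *		cmsg->cmsg_type = SCTP_SNDINFO;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *
 *		si = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *		si->snd_sid = stream;
 *		si->snd_flags = SCTP_UNORDERED;
 *
 *		return sendmsg(fd, &msg, 0);
 *	}
 *
 * The cmsg_len must be exactly CMSG_LEN(sizeof(struct sctp_sndinfo)), and
 * snd_flags may only contain the flags accepted by sctp_msghdr_parse();
 * otherwise the kernel rejects the message with EINVAL.
 */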