/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helper functions.
 */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
			      struct sctp_association *, sctp_socket_type_t);

static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}

/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr.
	 */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_transport *transport;
	union sctp_addr *laddr = (union sctp_addr *)addr;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd       - the socket descriptor returned by socket().
 *   addr     - the address structure (struct sockaddr_in or struct
 *              sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);

/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.
	 */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped addresses are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < PROT_SOCK &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				dst_release(trans->dst);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}

/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back. */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			dst_release(transport->dst);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller.
	 */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with, so that no newly accepted association will
 * be associated with those addresses. If the endpoint supports dynamic
 * addresses, a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause an
 * endpoint to send the appropriate message to the peer to change the
 * peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
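 *
 * For illustration only (not part of this file), a userspace caller that
 * adds one extra IPv4 address to an already bound one-to-many socket
 * might look roughly like the sketch below; the port and address values
 * are made up:
 *
 *	struct sockaddr_in extra = { 0 };
 *
 *	extra.sin_family = AF_INET;
 *	extra.sin_port = htons(5000);		// must match the bound port
 *	inet_pton(AF_INET, "192.0.2.1", &extra.sin_addr);
 *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *		       SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");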
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}

/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	sctp_scope_t scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port < PROT_SOCK &&
				    !ns_capable(net->user_ns,
						CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	err = sctp_wait_for_connect(asoc, &timeo);
	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
		*assoc_id = asoc->assoc_id;

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table; try to unhash it, just in case. It's a
		 * noop if it wasn't hashed, so we're safe.
		 */
		sctp_unhash_established(asoc);
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
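 *
 * As an illustration only (not part of this file), a userspace caller
 * might build the packed address array and call the library wrapper
 * roughly as follows; the peer addresses are made up:
 *
 *	struct sockaddr_in peers[2] = { ... };	// both with the same port
 *	sctp_assoc_t id;
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &id) < 0)
 *		perror("sctp_connectx");
 *	// on success, 'id' holds the new association id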
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke sctp_connectx(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations. The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member. That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
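 *
 * For illustration only (not part of this file), a TCP-style userspace
 * application that wants close() to behave like ABORT could do roughly:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(sd);	// aborts instead of performing a graceful SHUTDOWN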
 */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_unhash_established(asoc);
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			if (chunk)
				sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}

/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
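 *
 * For illustration only (not part of this file), a userspace sender that
 * selects a stream via SCTP_SNDRCV ancillary data might look roughly like
 * the sketch below (error handling omitted, values made up):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))] = { 0 };
 *	struct msghdr mh = { 0 };
 *	struct iovec iov = { .iov_base = data, .iov_len = len };
 *	struct cmsghdr *cmsg;
 *	struct sctp_sndrcvinfo *sri;
 *
 *	mh.msg_iov = &iov;
 *	mh.msg_iovlen = 1;
 *	mh.msg_control = cbuf;
 *	mh.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&mh);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDRCV;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *	sri = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	sri->sinfo_stream = 1;
 *	sendmsg(sd, &mh, 0);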
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);

static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	sctp_scope_t scope;
	bool fill_sinfo_ttl = false, wait_connect = false;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;
	__u16 sinfo_flags = 0;
	long timeo;
	int err;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs. */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinit = cmsgs.init;
	if (cmsgs.sinfo != NULL) {
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
		default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
		default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
		default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
		default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;

		sinfo = &default_sinfo;
		fill_sinfo_ttl = true;
	} else {
		sinfo = cmsgs.srinfo;
	}
	/* Did the user specify SNDINFO/SNDRCVINFO? */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (!asoc) {
			/* If we could not find a matching association on the
			 * endpoint, make sure that it is not a TCP-style
			 * socket that already has an association or there is
			 * no peeled-off association on another socket.
			 */
			if ((sctp_style(sk, TCP) &&
			     sctp_sstate(sk, ESTABLISHED)) ||
			    sctp_endpoint_is_peeled_off(ep, &to)) {
				err = -EADDRNOTAVAIL;
				goto out_unlock;
			}
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || !sinit->sinit_num_ostreams) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
1801 */ 1802 if (!ep->base.bind_addr.port) { 1803 if (sctp_autobind(sk)) { 1804 err = -EAGAIN; 1805 goto out_unlock; 1806 } 1807 } else { 1808 /* 1809 * If an unprivileged user inherits a one-to-many 1810 * style socket with open associations on a privileged 1811 * port, it MAY be permitted to accept new associations, 1812 * but it SHOULD NOT be permitted to open new 1813 * associations. 1814 */ 1815 if (ep->base.bind_addr.port < PROT_SOCK && 1816 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1817 err = -EACCES; 1818 goto out_unlock; 1819 } 1820 } 1821 1822 scope = sctp_scope(&to); 1823 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1824 if (!new_asoc) { 1825 err = -ENOMEM; 1826 goto out_unlock; 1827 } 1828 asoc = new_asoc; 1829 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1830 if (err < 0) { 1831 err = -ENOMEM; 1832 goto out_free; 1833 } 1834 1835 /* If the SCTP_INIT ancillary data is specified, set all 1836 * the association init values accordingly. 1837 */ 1838 if (sinit) { 1839 if (sinit->sinit_num_ostreams) { 1840 asoc->c.sinit_num_ostreams = 1841 sinit->sinit_num_ostreams; 1842 } 1843 if (sinit->sinit_max_instreams) { 1844 asoc->c.sinit_max_instreams = 1845 sinit->sinit_max_instreams; 1846 } 1847 if (sinit->sinit_max_attempts) { 1848 asoc->max_init_attempts 1849 = sinit->sinit_max_attempts; 1850 } 1851 if (sinit->sinit_max_init_timeo) { 1852 asoc->max_init_timeo = 1853 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1854 } 1855 } 1856 1857 /* Prime the peer's transport structures. */ 1858 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1859 if (!transport) { 1860 err = -ENOMEM; 1861 goto out_free; 1862 } 1863 } 1864 1865 /* ASSERT: we have a valid association at this point. */ 1866 pr_debug("%s: we have a valid association\n", __func__); 1867 1868 if (!sinfo) { 1869 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up 1870 * one with some defaults. 1871 */ 1872 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1873 default_sinfo.sinfo_stream = asoc->default_stream; 1874 default_sinfo.sinfo_flags = asoc->default_flags; 1875 default_sinfo.sinfo_ppid = asoc->default_ppid; 1876 default_sinfo.sinfo_context = asoc->default_context; 1877 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1878 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1879 1880 sinfo = &default_sinfo; 1881 } else if (fill_sinfo_ttl) { 1882 /* In case SNDINFO was specified, we still need to fill 1883 * it with a default ttl from the assoc here. 1884 */ 1885 sinfo->sinfo_timetolive = asoc->default_timetolive; 1886 } 1887 1888 /* API 7.1.7, the sndbuf size per association bounds the 1889 * maximum size of data that can be sent in a single send call. 1890 */ 1891 if (msg_len > sk->sk_sndbuf) { 1892 err = -EMSGSIZE; 1893 goto out_free; 1894 } 1895 1896 if (asoc->pmtu_pending) 1897 sctp_assoc_pending_pmtu(sk, asoc); 1898 1899 /* If fragmentation is disabled and the message length exceeds the 1900 * association fragmentation point, return EMSGSIZE. The I-D 1901 * does not specify what this error is, but this looks like 1902 * a great fit. 1903 */ 1904 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1905 err = -EMSGSIZE; 1906 goto out_free; 1907 } 1908 1909 /* Check for invalid stream. 
*/ 1910 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1911 err = -EINVAL; 1912 goto out_free; 1913 } 1914 1915 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1916 if (!sctp_wspace(asoc)) { 1917 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1918 if (err) 1919 goto out_free; 1920 } 1921 1922 /* If an address is passed with the sendto/sendmsg call, it is used 1923 * to override the primary destination address in the TCP model, or 1924 * when SCTP_ADDR_OVER flag is set in the UDP model. 1925 */ 1926 if ((sctp_style(sk, TCP) && msg_name) || 1927 (sinfo_flags & SCTP_ADDR_OVER)) { 1928 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1929 if (!chunk_tp) { 1930 err = -EINVAL; 1931 goto out_free; 1932 } 1933 } else 1934 chunk_tp = NULL; 1935 1936 /* Auto-connect, if we aren't connected already. */ 1937 if (sctp_state(asoc, CLOSED)) { 1938 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1939 if (err < 0) 1940 goto out_free; 1941 1942 wait_connect = true; 1943 pr_debug("%s: we associated primitively\n", __func__); 1944 } 1945 1946 /* Break the message into multiple chunks of maximum size. */ 1947 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); 1948 if (IS_ERR(datamsg)) { 1949 err = PTR_ERR(datamsg); 1950 goto out_free; 1951 } 1952 1953 /* Now send the (possibly) fragmented message. */ 1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1955 sctp_chunk_hold(chunk); 1956 1957 /* Do accounting for the write space. */ 1958 sctp_set_owner_w(chunk); 1959 1960 chunk->transport = chunk_tp; 1961 } 1962 1963 /* Send it to the lower layers. Note: all chunks 1964 * must either fail or succeed. The lower layer 1965 * works that way today. Keep it that way or this 1966 * breaks. 1967 */ 1968 err = sctp_primitive_SEND(net, asoc, datamsg); 1969 /* Did the lower layer accept the chunk? */ 1970 if (err) { 1971 sctp_datamsg_free(datamsg); 1972 goto out_free; 1973 } 1974 1975 pr_debug("%s: we sent primitively\n", __func__); 1976 1977 sctp_datamsg_put(datamsg); 1978 err = msg_len; 1979 1980 if (unlikely(wait_connect)) { 1981 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); 1982 sctp_wait_for_connect(asoc, &timeo); 1983 } 1984 1985 /* If we are already past ASSOCIATE, the lower 1986 * layers are responsible for association cleanup. 1987 */ 1988 goto out_unlock; 1989 1990 out_free: 1991 if (new_asoc) { 1992 sctp_unhash_established(asoc); 1993 sctp_association_free(asoc); 1994 } 1995 out_unlock: 1996 release_sock(sk); 1997 1998 out_nounlock: 1999 return sctp_error(sk, msg_flags, err); 2000 2001 #if 0 2002 do_sock_err: 2003 if (msg_len) 2004 err = msg_len; 2005 else 2006 err = sock_error(sk); 2007 goto out; 2008 2009 do_interrupted: 2010 if (msg_len) 2011 err = msg_len; 2012 goto out; 2013 #endif /* 0 */ 2014 } 2015 2016 /* This is an extended version of skb_pull() that removes the data from the 2017 * start of a skb even when data is spread across the list of skb's in the 2018 * frag_list. len specifies the total amount of data that needs to be removed. 2019 * when 'len' bytes could be removed from the skb, it returns 0. 2020 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2021 * could not be removed. 
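 *
 * For example, pulling len == 120 from an skb whose linear part holds
 * 100 bytes consumes the head, recurses into the frag_list for the
 * remaining 20 and returns 0 if they were found there; if the whole
 * chain only held 110 bytes, 10 is returned.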
2022 */ 2023 static int sctp_skb_pull(struct sk_buff *skb, int len) 2024 { 2025 struct sk_buff *list; 2026 int skb_len = skb_headlen(skb); 2027 int rlen; 2028 2029 if (len <= skb_len) { 2030 __skb_pull(skb, len); 2031 return 0; 2032 } 2033 len -= skb_len; 2034 __skb_pull(skb, skb_len); 2035 2036 skb_walk_frags(skb, list) { 2037 rlen = sctp_skb_pull(list, len); 2038 skb->len -= (len-rlen); 2039 skb->data_len -= (len-rlen); 2040 2041 if (!rlen) 2042 return 0; 2043 2044 len = rlen; 2045 } 2046 2047 return len; 2048 } 2049 2050 /* API 3.1.3 recvmsg() - UDP Style Syntax 2051 * 2052 * ssize_t recvmsg(int socket, struct msghdr *message, 2053 * int flags); 2054 * 2055 * socket - the socket descriptor of the endpoint. 2056 * message - pointer to the msghdr structure which contains a single 2057 * user message and possibly some ancillary data. 2058 * 2059 * See Section 5 for complete description of the data 2060 * structures. 2061 * 2062 * flags - flags sent or received with the user message, see Section 2063 * 5 for complete description of the flags. 2064 */ 2065 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2066 int noblock, int flags, int *addr_len) 2067 { 2068 struct sctp_ulpevent *event = NULL; 2069 struct sctp_sock *sp = sctp_sk(sk); 2070 struct sk_buff *skb; 2071 int copied; 2072 int err = 0; 2073 int skb_len; 2074 2075 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2076 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2077 addr_len); 2078 2079 lock_sock(sk); 2080 2081 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2082 err = -ENOTCONN; 2083 goto out; 2084 } 2085 2086 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2087 if (!skb) 2088 goto out; 2089 2090 /* Get the total length of the skb including any skb's in the 2091 * frag_list. 2092 */ 2093 skb_len = skb->len; 2094 2095 copied = skb_len; 2096 if (copied > len) 2097 copied = len; 2098 2099 err = skb_copy_datagram_msg(skb, 0, msg, copied); 2100 2101 event = sctp_skb2event(skb); 2102 2103 if (err) 2104 goto out_free; 2105 2106 sock_recv_ts_and_drops(msg, sk, skb); 2107 if (sctp_ulpevent_is_notification(event)) { 2108 msg->msg_flags |= MSG_NOTIFICATION; 2109 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2110 } else { 2111 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2112 } 2113 2114 /* Check if we allow SCTP_NXTINFO. */ 2115 if (sp->recvnxtinfo) 2116 sctp_ulpevent_read_nxtinfo(event, msg, sk); 2117 /* Check if we allow SCTP_RCVINFO. */ 2118 if (sp->recvrcvinfo) 2119 sctp_ulpevent_read_rcvinfo(event, msg); 2120 /* Check if we allow SCTP_SNDRCVINFO. */ 2121 if (sp->subscribe.sctp_data_io_event) 2122 sctp_ulpevent_read_sndrcvinfo(event, msg); 2123 2124 #if 0 2125 /* FIXME: we should be calling IP/IPv6 layers. */ 2126 if (sk->sk_protinfo.af_inet.cmsg_flags) 2127 ip_cmsg_recv(msg, skb); 2128 #endif 2129 2130 err = copied; 2131 2132 /* If skb's length exceeds the user's buffer, update the skb and 2133 * push it back to the receive_queue so that the next call to 2134 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2135 */ 2136 if (skb_len > copied) { 2137 msg->msg_flags &= ~MSG_EOR; 2138 if (flags & MSG_PEEK) 2139 goto out_free; 2140 sctp_skb_pull(skb, copied); 2141 skb_queue_head(&sk->sk_receive_queue, skb); 2142 2143 /* When only partial message is copied to the user, increase 2144 * rwnd by that amount. If all the data in the skb is read, 2145 * rwnd is updated when the event is freed. 
2146 */ 2147 if (!sctp_ulpevent_is_notification(event)) 2148 sctp_assoc_rwnd_increase(event->asoc, copied); 2149 goto out; 2150 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2151 (event->msg_flags & MSG_EOR)) 2152 msg->msg_flags |= MSG_EOR; 2153 else 2154 msg->msg_flags &= ~MSG_EOR; 2155 2156 out_free: 2157 if (flags & MSG_PEEK) { 2158 /* Release the skb reference acquired after peeking the skb in 2159 * sctp_skb_recv_datagram(). 2160 */ 2161 kfree_skb(skb); 2162 } else { 2163 /* Free the event which includes releasing the reference to 2164 * the owner of the skb, freeing the skb and updating the 2165 * rwnd. 2166 */ 2167 sctp_ulpevent_free(event); 2168 } 2169 out: 2170 release_sock(sk); 2171 return err; 2172 } 2173 2174 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2175 * 2176 * This option is a on/off flag. If enabled no SCTP message 2177 * fragmentation will be performed. Instead if a message being sent 2178 * exceeds the current PMTU size, the message will NOT be sent and 2179 * instead a error will be indicated to the user. 2180 */ 2181 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2182 char __user *optval, 2183 unsigned int optlen) 2184 { 2185 int val; 2186 2187 if (optlen < sizeof(int)) 2188 return -EINVAL; 2189 2190 if (get_user(val, (int __user *)optval)) 2191 return -EFAULT; 2192 2193 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2194 2195 return 0; 2196 } 2197 2198 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2199 unsigned int optlen) 2200 { 2201 struct sctp_association *asoc; 2202 struct sctp_ulpevent *event; 2203 2204 if (optlen > sizeof(struct sctp_event_subscribe)) 2205 return -EINVAL; 2206 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2207 return -EFAULT; 2208 2209 if (sctp_sk(sk)->subscribe.sctp_data_io_event) 2210 pr_warn_ratelimited(DEPRECATED "%s (pid %d) " 2211 "Requested SCTP_SNDRCVINFO event.\n" 2212 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n", 2213 current->comm, task_pid_nr(current)); 2214 2215 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2216 * if there is no data to be sent or retransmit, the stack will 2217 * immediately send up this notification. 2218 */ 2219 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2220 &sctp_sk(sk)->subscribe)) { 2221 asoc = sctp_id2assoc(sk, 0); 2222 2223 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2224 event = sctp_ulpevent_make_sender_dry_event(asoc, 2225 GFP_ATOMIC); 2226 if (!event) 2227 return -ENOMEM; 2228 2229 sctp_ulpq_tail_event(&asoc->ulpq, event); 2230 } 2231 } 2232 2233 return 0; 2234 } 2235 2236 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2237 * 2238 * This socket option is applicable to the UDP-style socket only. When 2239 * set it will cause associations that are idle for more than the 2240 * specified number of seconds to automatically close. An association 2241 * being idle is defined an association that has NOT sent or received 2242 * user data. The special value of '0' indicates that no automatic 2243 * close of any associations should be performed. The option expects an 2244 * integer defining the number of seconds of idle time before an 2245 * association is closed. 
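 *
 * A minimal userspace sketch (fd is assumed to be a one-to-many,
 * SOCK_SEQPACKET SCTP socket; the value is in seconds and is capped by
 * the net.sctp.max_autoclose sysctl):
 *
 *	int idle_secs = 30;
 *	setsockopt(fd, SOL_SCTP, SCTP_AUTOCLOSE,
 *		   &idle_secs, sizeof(idle_secs));
 *
 * On a TCP-style socket the call fails with EOPNOTSUPP.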
2246 */ 2247 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2248 unsigned int optlen) 2249 { 2250 struct sctp_sock *sp = sctp_sk(sk); 2251 struct net *net = sock_net(sk); 2252 2253 /* Applicable to UDP-style socket only */ 2254 if (sctp_style(sk, TCP)) 2255 return -EOPNOTSUPP; 2256 if (optlen != sizeof(int)) 2257 return -EINVAL; 2258 if (copy_from_user(&sp->autoclose, optval, optlen)) 2259 return -EFAULT; 2260 2261 if (sp->autoclose > net->sctp.max_autoclose) 2262 sp->autoclose = net->sctp.max_autoclose; 2263 2264 return 0; 2265 } 2266 2267 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2268 * 2269 * Applications can enable or disable heartbeats for any peer address of 2270 * an association, modify an address's heartbeat interval, force a 2271 * heartbeat to be sent immediately, and adjust the address's maximum 2272 * number of retransmissions sent before an address is considered 2273 * unreachable. The following structure is used to access and modify an 2274 * address's parameters: 2275 * 2276 * struct sctp_paddrparams { 2277 * sctp_assoc_t spp_assoc_id; 2278 * struct sockaddr_storage spp_address; 2279 * uint32_t spp_hbinterval; 2280 * uint16_t spp_pathmaxrxt; 2281 * uint32_t spp_pathmtu; 2282 * uint32_t spp_sackdelay; 2283 * uint32_t spp_flags; 2284 * }; 2285 * 2286 * spp_assoc_id - (one-to-many style socket) This is filled in by the 2287 * application, and identifies the association for 2288 * this query. 2289 * spp_address - This specifies which address is of interest. 2290 * spp_hbinterval - This contains the value of the heartbeat interval, 2291 * in milliseconds. If a value of zero 2292 * is present in this field then no changes are to 2293 * be made to this parameter. 2294 * spp_pathmaxrxt - This contains the maximum number of 2295 * retransmissions before this address shall be 2296 * considered unreachable. If a value of zero 2297 * is present in this field then no changes are to 2298 * be made to this parameter. 2299 * spp_pathmtu - When Path MTU discovery is disabled the value 2300 * specified here will be the "fixed" path mtu. 2301 * Note that if the spp_address field is empty 2302 * then all associations on this address will 2303 * have this fixed path mtu set upon them. 2304 * 2305 * spp_sackdelay - When delayed sack is enabled, this value specifies 2306 * the number of milliseconds that sacks will be delayed 2307 * for. This value will apply to all addresses of an 2308 * association if the spp_address field is empty. Note 2309 * also, that if delayed sack is enabled and this 2310 * value is set to 0, no change is made to the last 2311 * recorded delayed sack timer value. 2312 * 2313 * spp_flags - These flags are used to control various features 2314 * on an association. The flag field may contain 2315 * zero or more of the following options. 2316 * 2317 * SPP_HB_ENABLE - Enable heartbeats on the 2318 * specified address. Note that if the address 2319 * field is empty all addresses for the association 2320 * have heartbeats enabled upon them. 2321 * 2322 * SPP_HB_DISABLE - Disable heartbeats on the 2323 * specified address. Note that if the address 2324 * field is empty all addresses for the association 2325 * will have their heartbeats disabled. Note also 2326 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2327 * mutually exclusive, only one of these two should 2328 * be specified. Enabling both fields will have 2329 * undetermined results. 2330 * 2331 * SPP_HB_DEMAND - Request a user initiated heartbeat 2332 * to be made immediately.
2333 * 2334 * SPP_HB_TIME_IS_ZERO - Specifies that the time for 2335 * heartbeat delay is to be set to the value of 0 2336 * milliseconds. 2337 * 2338 * SPP_PMTUD_ENABLE - This field will enable PMTU 2339 * discovery upon the specified address. Note that 2340 * if the address field is empty then all addresses 2341 * on the association are affected. 2342 * 2343 * SPP_PMTUD_DISABLE - This field will disable PMTU 2344 * discovery upon the specified address. Note that 2345 * if the address field is empty then all addresses 2346 * on the association are affected. Note also that 2347 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2348 * exclusive. Enabling both will have undetermined 2349 * results. 2350 * 2351 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2352 * on delayed sack. The time specified in spp_sackdelay 2353 * is used to specify the sack delay for this address. Note 2354 * that if spp_address is empty then all addresses will 2355 * enable delayed sack and take on the sack delay 2356 * value specified in spp_sackdelay. 2357 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2358 * off delayed sack. If the spp_address field is blank then 2359 * delayed sack is disabled for the entire association. Note 2360 * also that this field is mutually exclusive to 2361 * SPP_SACKDELAY_ENABLE, setting both will have undefined 2362 * results. 2363 */ 2364 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2365 struct sctp_transport *trans, 2366 struct sctp_association *asoc, 2367 struct sctp_sock *sp, 2368 int hb_change, 2369 int pmtud_change, 2370 int sackdelay_change) 2371 { 2372 int error; 2373 2374 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2375 struct net *net = sock_net(trans->asoc->base.sk); 2376 2377 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2378 if (error) 2379 return error; 2380 } 2381 2382 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2383 * this field is ignored. Note also that a value of zero indicates 2384 * the current setting should be left unchanged. 2385 */ 2386 if (params->spp_flags & SPP_HB_ENABLE) { 2387 2388 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2389 * set. This lets us use 0 value when this flag 2390 * is set. 2391 */ 2392 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2393 params->spp_hbinterval = 0; 2394 2395 if (params->spp_hbinterval || 2396 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2397 if (trans) { 2398 trans->hbinterval = 2399 msecs_to_jiffies(params->spp_hbinterval); 2400 } else if (asoc) { 2401 asoc->hbinterval = 2402 msecs_to_jiffies(params->spp_hbinterval); 2403 } else { 2404 sp->hbinterval = params->spp_hbinterval; 2405 } 2406 } 2407 } 2408 2409 if (hb_change) { 2410 if (trans) { 2411 trans->param_flags = 2412 (trans->param_flags & ~SPP_HB) | hb_change; 2413 } else if (asoc) { 2414 asoc->param_flags = 2415 (asoc->param_flags & ~SPP_HB) | hb_change; 2416 } else { 2417 sp->param_flags = 2418 (sp->param_flags & ~SPP_HB) | hb_change; 2419 } 2420 } 2421 2422 /* When Path MTU discovery is disabled the value specified here will 2423 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2424 * include the flag SPP_PMTUD_DISABLE for this field to have any 2425 * effect).
2426 */ 2427 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2428 if (trans) { 2429 trans->pathmtu = params->spp_pathmtu; 2430 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2431 } else if (asoc) { 2432 asoc->pathmtu = params->spp_pathmtu; 2433 sctp_frag_point(asoc, params->spp_pathmtu); 2434 } else { 2435 sp->pathmtu = params->spp_pathmtu; 2436 } 2437 } 2438 2439 if (pmtud_change) { 2440 if (trans) { 2441 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2442 (params->spp_flags & SPP_PMTUD_ENABLE); 2443 trans->param_flags = 2444 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2445 if (update) { 2446 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2447 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2448 } 2449 } else if (asoc) { 2450 asoc->param_flags = 2451 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2452 } else { 2453 sp->param_flags = 2454 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2455 } 2456 } 2457 2458 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2459 * value of this field is ignored. Note also that a value of zero 2460 * indicates the current setting should be left unchanged. 2461 */ 2462 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2463 if (trans) { 2464 trans->sackdelay = 2465 msecs_to_jiffies(params->spp_sackdelay); 2466 } else if (asoc) { 2467 asoc->sackdelay = 2468 msecs_to_jiffies(params->spp_sackdelay); 2469 } else { 2470 sp->sackdelay = params->spp_sackdelay; 2471 } 2472 } 2473 2474 if (sackdelay_change) { 2475 if (trans) { 2476 trans->param_flags = 2477 (trans->param_flags & ~SPP_SACKDELAY) | 2478 sackdelay_change; 2479 } else if (asoc) { 2480 asoc->param_flags = 2481 (asoc->param_flags & ~SPP_SACKDELAY) | 2482 sackdelay_change; 2483 } else { 2484 sp->param_flags = 2485 (sp->param_flags & ~SPP_SACKDELAY) | 2486 sackdelay_change; 2487 } 2488 } 2489 2490 /* Note that a value of zero indicates the current setting should be 2491 left unchanged. 2492 */ 2493 if (params->spp_pathmaxrxt) { 2494 if (trans) { 2495 trans->pathmaxrxt = params->spp_pathmaxrxt; 2496 } else if (asoc) { 2497 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2498 } else { 2499 sp->pathmaxrxt = params->spp_pathmaxrxt; 2500 } 2501 } 2502 2503 return 0; 2504 } 2505 2506 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2507 char __user *optval, 2508 unsigned int optlen) 2509 { 2510 struct sctp_paddrparams params; 2511 struct sctp_transport *trans = NULL; 2512 struct sctp_association *asoc = NULL; 2513 struct sctp_sock *sp = sctp_sk(sk); 2514 int error; 2515 int hb_change, pmtud_change, sackdelay_change; 2516 2517 if (optlen != sizeof(struct sctp_paddrparams)) 2518 return -EINVAL; 2519 2520 if (copy_from_user(&params, optval, optlen)) 2521 return -EFAULT; 2522 2523 /* Validate flags and value parameters. */ 2524 hb_change = params.spp_flags & SPP_HB; 2525 pmtud_change = params.spp_flags & SPP_PMTUD; 2526 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2527 2528 if (hb_change == SPP_HB || 2529 pmtud_change == SPP_PMTUD || 2530 sackdelay_change == SPP_SACKDELAY || 2531 params.spp_sackdelay > 500 || 2532 (params.spp_pathmtu && 2533 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2534 return -EINVAL; 2535 2536 /* If an address other than INADDR_ANY is specified, and 2537 * no transport is found, then the request is invalid.
2538 */ 2539 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 2540 trans = sctp_addr_id2transport(sk, &params.spp_address, 2541 params.spp_assoc_id); 2542 if (!trans) 2543 return -EINVAL; 2544 } 2545 2546 /* Get association, if assoc_id != 0 and the socket is a one 2547 * to many style socket, and an association was not found, then 2548 * the id was invalid. 2549 */ 2550 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2551 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2552 return -EINVAL; 2553 2554 /* Heartbeat demand can only be sent on a transport or 2555 * association, but not a socket. 2556 */ 2557 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2558 return -EINVAL; 2559 2560 /* Process parameters. */ 2561 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2562 hb_change, pmtud_change, 2563 sackdelay_change); 2564 2565 if (error) 2566 return error; 2567 2568 /* If changes are for association, also apply parameters to each 2569 * transport. 2570 */ 2571 if (!trans && asoc) { 2572 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2573 transports) { 2574 sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2575 hb_change, pmtud_change, 2576 sackdelay_change); 2577 } 2578 } 2579 2580 return 0; 2581 } 2582 2583 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) 2584 { 2585 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; 2586 } 2587 2588 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) 2589 { 2590 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; 2591 } 2592 2593 /* 2594 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2595 * 2596 * This option will affect the way delayed acks are performed. This 2597 * option allows you to get or set the delayed ack time, in 2598 * milliseconds. It also allows changing the delayed ack frequency. 2599 * Changing the frequency to 1 disables the delayed sack algorithm. If 2600 * the assoc_id is 0, then this sets or gets the endpoints default 2601 * values. If the assoc_id field is non-zero, then the set or get 2602 * affects the specified association for the one to many model (the 2603 * assoc_id field is ignored by the one to one model). Note that if 2604 * sack_delay or sack_freq are 0 when setting this option, then the 2605 * current values will remain unchanged. 2606 * 2607 * struct sctp_sack_info { 2608 * sctp_assoc_t sack_assoc_id; 2609 * uint32_t sack_delay; 2610 * uint32_t sack_freq; 2611 * }; 2612 * 2613 * sack_assoc_id - This parameter indicates which association the user 2614 * is performing an action upon. Note that if this field's value is 2615 * zero then the endpoints default value is changed (affecting future 2616 * associations only). 2617 * 2618 * sack_delay - This parameter contains the number of milliseconds that 2619 * the user is requesting the delayed ACK timer be set to. Note that 2620 * this value is defined in the standard to be between 200 and 500 2621 * milliseconds. 2622 * 2623 * sack_freq - This parameter contains the number of packets that must 2624 * be received before a sack is sent without waiting for the delay 2625 * timer to expire. The default value for this is 2, setting this 2626 * value to 1 will disable the delayed sack algorithm.
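 *
 * A minimal userspace sketch (fd is an assumed SCTP socket; assoc id 0
 * updates the endpoint defaults, and sack_delay must not exceed 500 ms
 * per the validation below):
 *
 *	struct sctp_sack_info si = {
 *		.sack_assoc_id = 0,
 *		.sack_delay = 200,
 *		.sack_freq = 2,
 *	};
 *	setsockopt(fd, SOL_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 *
 * Leaving sack_delay or sack_freq at 0 keeps the current value for that
 * field.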
2627 */ 2628 2629 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2630 char __user *optval, unsigned int optlen) 2631 { 2632 struct sctp_sack_info params; 2633 struct sctp_transport *trans = NULL; 2634 struct sctp_association *asoc = NULL; 2635 struct sctp_sock *sp = sctp_sk(sk); 2636 2637 if (optlen == sizeof(struct sctp_sack_info)) { 2638 if (copy_from_user(&params, optval, optlen)) 2639 return -EFAULT; 2640 2641 if (params.sack_delay == 0 && params.sack_freq == 0) 2642 return 0; 2643 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2644 pr_warn_ratelimited(DEPRECATED 2645 "%s (pid %d) " 2646 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2647 "Use struct sctp_sack_info instead\n", 2648 current->comm, task_pid_nr(current)); 2649 if (copy_from_user(&params, optval, optlen)) 2650 return -EFAULT; 2651 2652 if (params.sack_delay == 0) 2653 params.sack_freq = 1; 2654 else 2655 params.sack_freq = 0; 2656 } else 2657 return -EINVAL; 2658 2659 /* Validate value parameter. */ 2660 if (params.sack_delay > 500) 2661 return -EINVAL; 2662 2663 /* Get association, if sack_assoc_id != 0 and the socket is a one 2664 * to many style socket, and an association was not found, then 2665 * the id was invalid. 2666 */ 2667 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2668 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2669 return -EINVAL; 2670 2671 if (params.sack_delay) { 2672 if (asoc) { 2673 asoc->sackdelay = 2674 msecs_to_jiffies(params.sack_delay); 2675 asoc->param_flags = 2676 sctp_spp_sackdelay_enable(asoc->param_flags); 2677 } else { 2678 sp->sackdelay = params.sack_delay; 2679 sp->param_flags = 2680 sctp_spp_sackdelay_enable(sp->param_flags); 2681 } 2682 } 2683 2684 if (params.sack_freq == 1) { 2685 if (asoc) { 2686 asoc->param_flags = 2687 sctp_spp_sackdelay_disable(asoc->param_flags); 2688 } else { 2689 sp->param_flags = 2690 sctp_spp_sackdelay_disable(sp->param_flags); 2691 } 2692 } else if (params.sack_freq > 1) { 2693 if (asoc) { 2694 asoc->sackfreq = params.sack_freq; 2695 asoc->param_flags = 2696 sctp_spp_sackdelay_enable(asoc->param_flags); 2697 } else { 2698 sp->sackfreq = params.sack_freq; 2699 sp->param_flags = 2700 sctp_spp_sackdelay_enable(sp->param_flags); 2701 } 2702 } 2703 2704 /* If change is for association, also apply to each transport. */ 2705 if (asoc) { 2706 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2707 transports) { 2708 if (params.sack_delay) { 2709 trans->sackdelay = 2710 msecs_to_jiffies(params.sack_delay); 2711 trans->param_flags = 2712 sctp_spp_sackdelay_enable(trans->param_flags); 2713 } 2714 if (params.sack_freq == 1) { 2715 trans->param_flags = 2716 sctp_spp_sackdelay_disable(trans->param_flags); 2717 } else if (params.sack_freq > 1) { 2718 trans->sackfreq = params.sack_freq; 2719 trans->param_flags = 2720 sctp_spp_sackdelay_enable(trans->param_flags); 2721 } 2722 } 2723 } 2724 2725 return 0; 2726 } 2727 2728 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2729 * 2730 * Applications can specify protocol parameters for the default association 2731 * initialization. The option name argument to setsockopt() and getsockopt() 2732 * is SCTP_INITMSG. 2733 * 2734 * Setting initialization parameters is effective only on an unconnected 2735 * socket (for UDP-style sockets only future associations are affected 2736 * by the change). With TCP-style sockets, this option is inherited by 2737 * sockets derived from a listener socket.
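 *
 * A minimal userspace sketch (fd is an assumed, not yet connected SCTP
 * socket; sinit_max_init_timeo is in milliseconds and any field left at
 * 0 keeps the current default):
 *
 *	struct sctp_initmsg im = {
 *		.sinit_num_ostreams = 5,
 *		.sinit_max_instreams = 5,
 *		.sinit_max_attempts = 4,
 *		.sinit_max_init_timeo = 3000,
 *	};
 *	setsockopt(fd, SOL_SCTP, SCTP_INITMSG, &im, sizeof(im));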
2738 */ 2739 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2740 { 2741 struct sctp_initmsg sinit; 2742 struct sctp_sock *sp = sctp_sk(sk); 2743 2744 if (optlen != sizeof(struct sctp_initmsg)) 2745 return -EINVAL; 2746 if (copy_from_user(&sinit, optval, optlen)) 2747 return -EFAULT; 2748 2749 if (sinit.sinit_num_ostreams) 2750 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2751 if (sinit.sinit_max_instreams) 2752 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2753 if (sinit.sinit_max_attempts) 2754 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2755 if (sinit.sinit_max_init_timeo) 2756 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2757 2758 return 0; 2759 } 2760 2761 /* 2762 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2763 * 2764 * Applications that wish to use the sendto() system call may wish to 2765 * specify a default set of parameters that would normally be supplied 2766 * through the inclusion of ancillary data. This socket option allows 2767 * such an application to set the default sctp_sndrcvinfo structure. 2768 * The application that wishes to use this socket option simply passes 2769 * in to this call the sctp_sndrcvinfo structure defined in Section 2770 * 5.2.2) The input parameters accepted by this call include 2771 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2772 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2773 * to this call if the caller is using the UDP model. 2774 */ 2775 static int sctp_setsockopt_default_send_param(struct sock *sk, 2776 char __user *optval, 2777 unsigned int optlen) 2778 { 2779 struct sctp_sock *sp = sctp_sk(sk); 2780 struct sctp_association *asoc; 2781 struct sctp_sndrcvinfo info; 2782 2783 if (optlen != sizeof(info)) 2784 return -EINVAL; 2785 if (copy_from_user(&info, optval, optlen)) 2786 return -EFAULT; 2787 if (info.sinfo_flags & 2788 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2789 SCTP_ABORT | SCTP_EOF)) 2790 return -EINVAL; 2791 2792 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2793 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2794 return -EINVAL; 2795 if (asoc) { 2796 asoc->default_stream = info.sinfo_stream; 2797 asoc->default_flags = info.sinfo_flags; 2798 asoc->default_ppid = info.sinfo_ppid; 2799 asoc->default_context = info.sinfo_context; 2800 asoc->default_timetolive = info.sinfo_timetolive; 2801 } else { 2802 sp->default_stream = info.sinfo_stream; 2803 sp->default_flags = info.sinfo_flags; 2804 sp->default_ppid = info.sinfo_ppid; 2805 sp->default_context = info.sinfo_context; 2806 sp->default_timetolive = info.sinfo_timetolive; 2807 } 2808 2809 return 0; 2810 } 2811 2812 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 2813 * (SCTP_DEFAULT_SNDINFO) 2814 */ 2815 static int sctp_setsockopt_default_sndinfo(struct sock *sk, 2816 char __user *optval, 2817 unsigned int optlen) 2818 { 2819 struct sctp_sock *sp = sctp_sk(sk); 2820 struct sctp_association *asoc; 2821 struct sctp_sndinfo info; 2822 2823 if (optlen != sizeof(info)) 2824 return -EINVAL; 2825 if (copy_from_user(&info, optval, optlen)) 2826 return -EFAULT; 2827 if (info.snd_flags & 2828 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2829 SCTP_ABORT | SCTP_EOF)) 2830 return -EINVAL; 2831 2832 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 2833 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 2834 return -EINVAL; 2835 if (asoc) { 2836 asoc->default_stream = info.snd_sid; 2837 asoc->default_flags = info.snd_flags; 2838 asoc->default_ppid = info.snd_ppid; 2839 asoc->default_context = info.snd_context; 2840 } else { 2841 sp->default_stream = info.snd_sid; 2842 sp->default_flags = info.snd_flags; 2843 sp->default_ppid = info.snd_ppid; 2844 sp->default_context = info.snd_context; 2845 } 2846 2847 return 0; 2848 } 2849 2850 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2851 * 2852 * Requests that the local SCTP stack use the enclosed peer address as 2853 * the association primary. The enclosed address must be one of the 2854 * association peer's addresses. 2855 */ 2856 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2857 unsigned int optlen) 2858 { 2859 struct sctp_prim prim; 2860 struct sctp_transport *trans; 2861 2862 if (optlen != sizeof(struct sctp_prim)) 2863 return -EINVAL; 2864 2865 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2866 return -EFAULT; 2867 2868 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2869 if (!trans) 2870 return -EINVAL; 2871 2872 sctp_assoc_set_primary(trans->asoc, trans); 2873 2874 return 0; 2875 } 2876 2877 /* 2878 * 7.1.5 SCTP_NODELAY 2879 * 2880 * Turn on/off any Nagle-like algorithm. This means that packets are 2881 * generally sent as soon as possible and no unnecessary delays are 2882 * introduced, at the cost of more packets in the network. Expects an 2883 * integer boolean flag. 2884 */ 2885 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2886 unsigned int optlen) 2887 { 2888 int val; 2889 2890 if (optlen < sizeof(int)) 2891 return -EINVAL; 2892 if (get_user(val, (int __user *)optval)) 2893 return -EFAULT; 2894 2895 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2896 return 0; 2897 } 2898 2899 /* 2900 * 2901 * 7.1.1 SCTP_RTOINFO 2902 * 2903 * The protocol parameters used to initialize and bound retransmission 2904 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2905 * and modify these parameters. 2906 * All parameters are time values, in milliseconds. A value of 0, when 2907 * modifying the parameters, indicates that the current value should not 2908 * be changed. 
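 *
 * A minimal userspace sketch (fd is an assumed SCTP socket; the values
 * below are the RFC 4960 defaults, srto_assoc_id 0 targets the endpoint
 * and srto_min must not exceed srto_max per the check below):
 *
 *	struct sctp_rtoinfo rto = {
 *		.srto_assoc_id = 0,
 *		.srto_initial = 3000,
 *		.srto_max = 60000,
 *		.srto_min = 1000,
 *	};
 *	setsockopt(fd, SOL_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));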
2909 * 2910 */ 2911 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2912 { 2913 struct sctp_rtoinfo rtoinfo; 2914 struct sctp_association *asoc; 2915 unsigned long rto_min, rto_max; 2916 struct sctp_sock *sp = sctp_sk(sk); 2917 2918 if (optlen != sizeof (struct sctp_rtoinfo)) 2919 return -EINVAL; 2920 2921 if (copy_from_user(&rtoinfo, optval, optlen)) 2922 return -EFAULT; 2923 2924 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2925 2926 /* Set the values to the specific association */ 2927 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2928 return -EINVAL; 2929 2930 rto_max = rtoinfo.srto_max; 2931 rto_min = rtoinfo.srto_min; 2932 2933 if (rto_max) 2934 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2935 else 2936 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2937 2938 if (rto_min) 2939 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2940 else 2941 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2942 2943 if (rto_min > rto_max) 2944 return -EINVAL; 2945 2946 if (asoc) { 2947 if (rtoinfo.srto_initial != 0) 2948 asoc->rto_initial = 2949 msecs_to_jiffies(rtoinfo.srto_initial); 2950 asoc->rto_max = rto_max; 2951 asoc->rto_min = rto_min; 2952 } else { 2953 /* If there is no association or the association-id = 0 2954 * set the values to the endpoint. 2955 */ 2956 if (rtoinfo.srto_initial != 0) 2957 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2958 sp->rtoinfo.srto_max = rto_max; 2959 sp->rtoinfo.srto_min = rto_min; 2960 } 2961 2962 return 0; 2963 } 2964 2965 /* 2966 * 2967 * 7.1.2 SCTP_ASSOCINFO 2968 * 2969 * This option is used to tune the maximum retransmission attempts 2970 * of the association. 2971 * Returns an error if the new association retransmission value is 2972 * greater than the sum of the retransmission value of the peer. 2973 * See [SCTP] for more information. 2974 * 2975 */ 2976 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2977 { 2978 2979 struct sctp_assocparams assocparams; 2980 struct sctp_association *asoc; 2981 2982 if (optlen != sizeof(struct sctp_assocparams)) 2983 return -EINVAL; 2984 if (copy_from_user(&assocparams, optval, optlen)) 2985 return -EFAULT; 2986 2987 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2988 2989 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2990 return -EINVAL; 2991 2992 /* Set the values to the specific association */ 2993 if (asoc) { 2994 if (assocparams.sasoc_asocmaxrxt != 0) { 2995 __u32 path_sum = 0; 2996 int paths = 0; 2997 struct sctp_transport *peer_addr; 2998 2999 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 3000 transports) { 3001 path_sum += peer_addr->pathmaxrxt; 3002 paths++; 3003 } 3004 3005 /* Only validate asocmaxrxt if we have more than 3006 * one path/transport. We do this because path 3007 * retransmissions are only counted when we have more 3008 * then one path. 
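 *
 * For example, with two reachable transports whose pathmaxrxt is 5
 * each, path_sum is 10, so a requested sasoc_asocmaxrxt of 12 is
 * rejected below while 8 is accepted.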
3009 */ 3010 if (paths > 1 && 3011 assocparams.sasoc_asocmaxrxt > path_sum) 3012 return -EINVAL; 3013 3014 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 3015 } 3016 3017 if (assocparams.sasoc_cookie_life != 0) 3018 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 3019 } else { 3020 /* Set the values to the endpoint */ 3021 struct sctp_sock *sp = sctp_sk(sk); 3022 3023 if (assocparams.sasoc_asocmaxrxt != 0) 3024 sp->assocparams.sasoc_asocmaxrxt = 3025 assocparams.sasoc_asocmaxrxt; 3026 if (assocparams.sasoc_cookie_life != 0) 3027 sp->assocparams.sasoc_cookie_life = 3028 assocparams.sasoc_cookie_life; 3029 } 3030 return 0; 3031 } 3032 3033 /* 3034 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 3035 * 3036 * This socket option is a boolean flag which turns on or off mapped V4 3037 * addresses. If this option is turned on and the socket is type 3038 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 3039 * If this option is turned off, then no mapping will be done of V4 3040 * addresses and a user will receive both PF_INET6 and PF_INET type 3041 * addresses on the socket. 3042 */ 3043 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 3044 { 3045 int val; 3046 struct sctp_sock *sp = sctp_sk(sk); 3047 3048 if (optlen < sizeof(int)) 3049 return -EINVAL; 3050 if (get_user(val, (int __user *)optval)) 3051 return -EFAULT; 3052 if (val) 3053 sp->v4mapped = 1; 3054 else 3055 sp->v4mapped = 0; 3056 3057 return 0; 3058 } 3059 3060 /* 3061 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 3062 * This option will get or set the maximum size to put in any outgoing 3063 * SCTP DATA chunk. If a message is larger than this size it will be 3064 * fragmented by SCTP into the specified size. Note that the underlying 3065 * SCTP implementation may fragment into smaller sized chunks when the 3066 * PMTU of the underlying association is smaller than the value set by 3067 * the user. The default value for this option is '0' which indicates 3068 * the user is NOT limiting fragmentation and only the PMTU will effect 3069 * SCTP's choice of DATA chunk size. Note also that values set larger 3070 * than the maximum size of an IP datagram will effectively let SCTP 3071 * control fragmentation (i.e. the same as setting this option to 0). 3072 * 3073 * The following structure is used to access and modify this parameter: 3074 * 3075 * struct sctp_assoc_value { 3076 * sctp_assoc_t assoc_id; 3077 * uint32_t assoc_value; 3078 * }; 3079 * 3080 * assoc_id: This parameter is ignored for one-to-one style sockets. 3081 * For one-to-many style sockets this parameter indicates which 3082 * association the user is performing an action upon. Note that if 3083 * this field's value is zero then the endpoints default value is 3084 * changed (effecting future associations only). 3085 * assoc_value: This parameter specifies the maximum size in bytes. 
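 *
 * A minimal userspace sketch (fd is an assumed SCTP socket; a non-zero
 * value must lie between 8 and SCTP_MAX_CHUNK_LEN per the check below,
 * and 0 restores purely PMTU-driven fragmentation):
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id = 0,
 *		.assoc_value = 1400,
 *	};
 *	setsockopt(fd, SOL_SCTP, SCTP_MAXSEG, &av, sizeof(av));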
3086 */ 3087 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3088 { 3089 struct sctp_assoc_value params; 3090 struct sctp_association *asoc; 3091 struct sctp_sock *sp = sctp_sk(sk); 3092 int val; 3093 3094 if (optlen == sizeof(int)) { 3095 pr_warn_ratelimited(DEPRECATED 3096 "%s (pid %d) " 3097 "Use of int in maxseg socket option.\n" 3098 "Use struct sctp_assoc_value instead\n", 3099 current->comm, task_pid_nr(current)); 3100 if (copy_from_user(&val, optval, optlen)) 3101 return -EFAULT; 3102 params.assoc_id = 0; 3103 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3104 if (copy_from_user(&params, optval, optlen)) 3105 return -EFAULT; 3106 val = params.assoc_value; 3107 } else 3108 return -EINVAL; 3109 3110 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3111 return -EINVAL; 3112 3113 asoc = sctp_id2assoc(sk, params.assoc_id); 3114 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3115 return -EINVAL; 3116 3117 if (asoc) { 3118 if (val == 0) { 3119 val = asoc->pathmtu; 3120 val -= sp->pf->af->net_header_len; 3121 val -= sizeof(struct sctphdr) + 3122 sizeof(struct sctp_data_chunk); 3123 } 3124 asoc->user_frag = val; 3125 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3126 } else { 3127 sp->user_frag = val; 3128 } 3129 3130 return 0; 3131 } 3132 3133 3134 /* 3135 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3136 * 3137 * Requests that the peer mark the enclosed address as the association 3138 * primary. The enclosed address must be one of the association's 3139 * locally bound addresses. The following structure is used to make a 3140 * set primary request: 3141 */ 3142 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3143 unsigned int optlen) 3144 { 3145 struct net *net = sock_net(sk); 3146 struct sctp_sock *sp; 3147 struct sctp_association *asoc = NULL; 3148 struct sctp_setpeerprim prim; 3149 struct sctp_chunk *chunk; 3150 struct sctp_af *af; 3151 int err; 3152 3153 sp = sctp_sk(sk); 3154 3155 if (!net->sctp.addip_enable) 3156 return -EPERM; 3157 3158 if (optlen != sizeof(struct sctp_setpeerprim)) 3159 return -EINVAL; 3160 3161 if (copy_from_user(&prim, optval, optlen)) 3162 return -EFAULT; 3163 3164 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3165 if (!asoc) 3166 return -EINVAL; 3167 3168 if (!asoc->peer.asconf_capable) 3169 return -EPERM; 3170 3171 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3172 return -EPERM; 3173 3174 if (!sctp_state(asoc, ESTABLISHED)) 3175 return -ENOTCONN; 3176 3177 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3178 if (!af) 3179 return -EINVAL; 3180 3181 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3182 return -EADDRNOTAVAIL; 3183 3184 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3185 return -EADDRNOTAVAIL; 3186 3187 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3188 chunk = sctp_make_asconf_set_prim(asoc, 3189 (union sctp_addr *)&prim.sspp_addr); 3190 if (!chunk) 3191 return -ENOMEM; 3192 3193 err = sctp_send_asconf(asoc, chunk); 3194 3195 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3196 3197 return err; 3198 } 3199 3200 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3201 unsigned int optlen) 3202 { 3203 struct sctp_setadaptation adaptation; 3204 3205 if (optlen != sizeof(struct sctp_setadaptation)) 3206 return -EINVAL; 3207 if (copy_from_user(&adaptation, optval, optlen)) 3208 return -EFAULT; 3209
3210 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3211 3212 return 0; 3213 } 3214 3215 /* 3216 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3217 * 3218 * The context field in the sctp_sndrcvinfo structure is normally only 3219 * used when a failed message is retrieved holding the value that was 3220 * sent down on the actual send call. This option allows the setting of 3221 * a default context on an association basis that will be received on 3222 * reading messages from the peer. This is especially helpful in the 3223 * one-to-many model for an application to keep some reference to an 3224 * internal state machine that is processing messages on the 3225 * association. Note that the setting of this value only affects 3226 * received messages from the peer and does not affect the value that is 3227 * saved with outbound messages. 3228 */ 3229 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3230 unsigned int optlen) 3231 { 3232 struct sctp_assoc_value params; 3233 struct sctp_sock *sp; 3234 struct sctp_association *asoc; 3235 3236 if (optlen != sizeof(struct sctp_assoc_value)) 3237 return -EINVAL; 3238 if (copy_from_user(&params, optval, optlen)) 3239 return -EFAULT; 3240 3241 sp = sctp_sk(sk); 3242 3243 if (params.assoc_id != 0) { 3244 asoc = sctp_id2assoc(sk, params.assoc_id); 3245 if (!asoc) 3246 return -EINVAL; 3247 asoc->default_rcv_context = params.assoc_value; 3248 } else { 3249 sp->default_rcv_context = params.assoc_value; 3250 } 3251 3252 return 0; 3253 } 3254 3255 /* 3256 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3257 * 3258 * This option will at a minimum specify if the implementation is doing 3259 * fragmented interleave. Fragmented interleave, for a one to many 3260 * socket, is when subsequent calls to receive a message may return 3261 * parts of messages from different associations. Some implementations 3262 * may allow you to turn this value on or off. If so, when turned off, 3263 * no fragment interleave will occur (which will cause a head of line 3264 * blocking amongst multiple associations sharing the same one to many 3265 * socket). When this option is turned on, then each receive call may 3266 * come from a different association (thus the user must receive data 3267 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3268 * association each receive belongs to). 3269 * 3270 * This option takes a boolean value. A non-zero value indicates that 3271 * fragmented interleave is on. A value of zero indicates that 3272 * fragmented interleave is off. 3273 * 3274 * Note that it is important that an implementation that allows this 3275 * option to be turned on, have it off by default. Otherwise an unaware 3276 * application using the one to many model may become confused and act 3277 * incorrectly. 3278 */ 3279 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3280 char __user *optval, 3281 unsigned int optlen) 3282 { 3283 int val; 3284 3285 if (optlen != sizeof(int)) 3286 return -EINVAL; 3287 if (get_user(val, (int __user *)optval)) 3288 return -EFAULT; 3289 3290 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3291 3292 return 0; 3293 } 3294 3295 /* 3296 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3297 * (SCTP_PARTIAL_DELIVERY_POINT) 3298 * 3299 * This option will set or get the SCTP partial delivery point. This 3300 * point is the size of a message where the partial delivery API will be 3301 * invoked to help free up rwnd space for the peer.
Setting this to a 3302 * lower value will cause partial deliveries to happen more often. The 3303 * call's argument is an integer that sets or gets the partial delivery 3304 * point. Note also that the call will fail if the user attempts to set 3305 * this value larger than the socket receive buffer size. 3306 * 3307 * Note that any single message having a length smaller than or equal to 3308 * the SCTP partial delivery point will be delivered in one single read 3309 * call as long as the user provided buffer is large enough to hold the 3310 * message. 3311 */ 3312 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3313 char __user *optval, 3314 unsigned int optlen) 3315 { 3316 u32 val; 3317 3318 if (optlen != sizeof(u32)) 3319 return -EINVAL; 3320 if (get_user(val, (int __user *)optval)) 3321 return -EFAULT; 3322 3323 /* Note: We double the receive buffer from what the user sets 3324 * it to be, also initial rwnd is based on rcvbuf/2. 3325 */ 3326 if (val > (sk->sk_rcvbuf >> 1)) 3327 return -EINVAL; 3328 3329 sctp_sk(sk)->pd_point = val; 3330 3331 return 0; /* is this the right error code? */ 3332 } 3333 3334 /* 3335 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3336 * 3337 * This option will allow a user to change the maximum burst of packets 3338 * that can be emitted by this association. Note that the default value 3339 * is 4, and some implementations may restrict this setting so that it 3340 * can only be lowered. 3341 * 3342 * NOTE: This text doesn't seem right. Do this on a socket basis with 3343 * future associations inheriting the socket value. 3344 */ 3345 static int sctp_setsockopt_maxburst(struct sock *sk, 3346 char __user *optval, 3347 unsigned int optlen) 3348 { 3349 struct sctp_assoc_value params; 3350 struct sctp_sock *sp; 3351 struct sctp_association *asoc; 3352 int val; 3353 int assoc_id = 0; 3354 3355 if (optlen == sizeof(int)) { 3356 pr_warn_ratelimited(DEPRECATED 3357 "%s (pid %d) " 3358 "Use of int in max_burst socket option deprecated.\n" 3359 "Use struct sctp_assoc_value instead\n", 3360 current->comm, task_pid_nr(current)); 3361 if (copy_from_user(&val, optval, optlen)) 3362 return -EFAULT; 3363 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3364 if (copy_from_user(&params, optval, optlen)) 3365 return -EFAULT; 3366 val = params.assoc_value; 3367 assoc_id = params.assoc_id; 3368 } else 3369 return -EINVAL; 3370 3371 sp = sctp_sk(sk); 3372 3373 if (assoc_id != 0) { 3374 asoc = sctp_id2assoc(sk, assoc_id); 3375 if (!asoc) 3376 return -EINVAL; 3377 asoc->max_burst = val; 3378 } else 3379 sp->max_burst = val; 3380 3381 return 0; 3382 } 3383 3384 /* 3385 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3386 * 3387 * This set option adds a chunk type that the user is requesting to be 3388 * received only in an authenticated way. Changes to the list of chunks 3389 * will only affect future associations on the socket.
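 *
 * A minimal userspace sketch (fd is an assumed SCTP socket with SCTP
 * authentication enabled; chunk type 0 is DATA, while INIT, INIT-ACK,
 * SHUTDOWN-COMPLETE and AUTH itself are rejected by the switch below):
 *
 *	struct sctp_authchunk ac = { .sauth_chunk = 0 };
 *	setsockopt(fd, SOL_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));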
3390 */ 3391 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3392 char __user *optval, 3393 unsigned int optlen) 3394 { 3395 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3396 struct sctp_authchunk val; 3397 3398 if (!ep->auth_enable) 3399 return -EACCES; 3400 3401 if (optlen != sizeof(struct sctp_authchunk)) 3402 return -EINVAL; 3403 if (copy_from_user(&val, optval, optlen)) 3404 return -EFAULT; 3405 3406 switch (val.sauth_chunk) { 3407 case SCTP_CID_INIT: 3408 case SCTP_CID_INIT_ACK: 3409 case SCTP_CID_SHUTDOWN_COMPLETE: 3410 case SCTP_CID_AUTH: 3411 return -EINVAL; 3412 } 3413 3414 /* add this chunk id to the endpoint */ 3415 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3416 } 3417 3418 /* 3419 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3420 * 3421 * This option gets or sets the list of HMAC algorithms that the local 3422 * endpoint requires the peer to use. 3423 */ 3424 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3425 char __user *optval, 3426 unsigned int optlen) 3427 { 3428 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3429 struct sctp_hmacalgo *hmacs; 3430 u32 idents; 3431 int err; 3432 3433 if (!ep->auth_enable) 3434 return -EACCES; 3435 3436 if (optlen < sizeof(struct sctp_hmacalgo)) 3437 return -EINVAL; 3438 3439 hmacs = memdup_user(optval, optlen); 3440 if (IS_ERR(hmacs)) 3441 return PTR_ERR(hmacs); 3442 3443 idents = hmacs->shmac_num_idents; 3444 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3445 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3446 err = -EINVAL; 3447 goto out; 3448 } 3449 3450 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3451 out: 3452 kfree(hmacs); 3453 return err; 3454 } 3455 3456 /* 3457 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3458 * 3459 * This option will set a shared secret key which is used to build an 3460 * association shared key. 3461 */ 3462 static int sctp_setsockopt_auth_key(struct sock *sk, 3463 char __user *optval, 3464 unsigned int optlen) 3465 { 3466 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3467 struct sctp_authkey *authkey; 3468 struct sctp_association *asoc; 3469 int ret; 3470 3471 if (!ep->auth_enable) 3472 return -EACCES; 3473 3474 if (optlen <= sizeof(struct sctp_authkey)) 3475 return -EINVAL; 3476 3477 authkey = memdup_user(optval, optlen); 3478 if (IS_ERR(authkey)) 3479 return PTR_ERR(authkey); 3480 3481 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3482 ret = -EINVAL; 3483 goto out; 3484 } 3485 3486 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3487 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3488 ret = -EINVAL; 3489 goto out; 3490 } 3491 3492 ret = sctp_auth_set_key(ep, asoc, authkey); 3493 out: 3494 kzfree(authkey); 3495 return ret; 3496 } 3497 3498 /* 3499 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3500 * 3501 * This option will get or set the active shared key to be used to build 3502 * the association shared key. 
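 *
 * A minimal userspace sketch (fd is an assumed SCTP socket and key
 * number 1 is assumed to have been installed earlier with the
 * SCTP_AUTH_KEY option; scact_assoc_id 0 selects the endpoint):
 *
 *	struct sctp_authkeyid ak = {
 *		.scact_assoc_id = 0,
 *		.scact_keynumber = 1,
 *	};
 *	setsockopt(fd, SOL_SCTP, SCTP_AUTH_ACTIVE_KEY, &ak, sizeof(ak));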
3503 */ 3504 static int sctp_setsockopt_active_key(struct sock *sk, 3505 char __user *optval, 3506 unsigned int optlen) 3507 { 3508 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3509 struct sctp_authkeyid val; 3510 struct sctp_association *asoc; 3511 3512 if (!ep->auth_enable) 3513 return -EACCES; 3514 3515 if (optlen != sizeof(struct sctp_authkeyid)) 3516 return -EINVAL; 3517 if (copy_from_user(&val, optval, optlen)) 3518 return -EFAULT; 3519 3520 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3521 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3522 return -EINVAL; 3523 3524 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3525 } 3526 3527 /* 3528 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3529 * 3530 * This set option will delete a shared secret key from use. 3531 */ 3532 static int sctp_setsockopt_del_key(struct sock *sk, 3533 char __user *optval, 3534 unsigned int optlen) 3535 { 3536 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3537 struct sctp_authkeyid val; 3538 struct sctp_association *asoc; 3539 3540 if (!ep->auth_enable) 3541 return -EACCES; 3542 3543 if (optlen != sizeof(struct sctp_authkeyid)) 3544 return -EINVAL; 3545 if (copy_from_user(&val, optval, optlen)) 3546 return -EFAULT; 3547 3548 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3549 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3550 return -EINVAL; 3551 3552 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3553 3554 } 3555 3556 /* 3557 * 8.1.23 SCTP_AUTO_ASCONF 3558 * 3559 * This option will enable or disable the use of the automatic generation of 3560 * ASCONF chunks to add and delete addresses to an existing association. Note 3561 * that this option has two caveats namely: a) it only affects sockets that 3562 * are bound to all addresses available to the SCTP stack, and b) the system 3563 * administrator may have an overriding control that turns the ASCONF feature 3564 * off no matter what setting the socket option may have. 3565 * This option expects an integer boolean flag, where a non-zero value turns on 3566 * the option, and a zero value turns off the option. 3567 * Note. In this implementation, socket operation overrides default parameter 3568 * being set by sysctl as well as FreeBSD implementation 3569 */ 3570 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3571 unsigned int optlen) 3572 { 3573 int val; 3574 struct sctp_sock *sp = sctp_sk(sk); 3575 3576 if (optlen < sizeof(int)) 3577 return -EINVAL; 3578 if (get_user(val, (int __user *)optval)) 3579 return -EFAULT; 3580 if (!sctp_is_ep_boundall(sk) && val) 3581 return -EINVAL; 3582 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3583 return 0; 3584 3585 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3586 if (val == 0 && sp->do_auto_asconf) { 3587 list_del(&sp->auto_asconf_list); 3588 sp->do_auto_asconf = 0; 3589 } else if (val && !sp->do_auto_asconf) { 3590 list_add_tail(&sp->auto_asconf_list, 3591 &sock_net(sk)->sctp.auto_asconf_splist); 3592 sp->do_auto_asconf = 1; 3593 } 3594 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3595 return 0; 3596 } 3597 3598 /* 3599 * SCTP_PEER_ADDR_THLDS 3600 * 3601 * This option allows us to alter the partially failed threshold for one or all 3602 * transports in an association. 
See Section 6.1 of: 3603 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3604 */ 3605 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3606 char __user *optval, 3607 unsigned int optlen) 3608 { 3609 struct sctp_paddrthlds val; 3610 struct sctp_transport *trans; 3611 struct sctp_association *asoc; 3612 3613 if (optlen < sizeof(struct sctp_paddrthlds)) 3614 return -EINVAL; 3615 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3616 sizeof(struct sctp_paddrthlds))) 3617 return -EFAULT; 3618 3619 3620 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3621 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3622 if (!asoc) 3623 return -ENOENT; 3624 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3625 transports) { 3626 if (val.spt_pathmaxrxt) 3627 trans->pathmaxrxt = val.spt_pathmaxrxt; 3628 trans->pf_retrans = val.spt_pathpfthld; 3629 } 3630 3631 if (val.spt_pathmaxrxt) 3632 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3633 asoc->pf_retrans = val.spt_pathpfthld; 3634 } else { 3635 trans = sctp_addr_id2transport(sk, &val.spt_address, 3636 val.spt_assoc_id); 3637 if (!trans) 3638 return -ENOENT; 3639 3640 if (val.spt_pathmaxrxt) 3641 trans->pathmaxrxt = val.spt_pathmaxrxt; 3642 trans->pf_retrans = val.spt_pathpfthld; 3643 } 3644 3645 return 0; 3646 } 3647 3648 static int sctp_setsockopt_recvrcvinfo(struct sock *sk, 3649 char __user *optval, 3650 unsigned int optlen) 3651 { 3652 int val; 3653 3654 if (optlen < sizeof(int)) 3655 return -EINVAL; 3656 if (get_user(val, (int __user *) optval)) 3657 return -EFAULT; 3658 3659 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; 3660 3661 return 0; 3662 } 3663 3664 static int sctp_setsockopt_recvnxtinfo(struct sock *sk, 3665 char __user *optval, 3666 unsigned int optlen) 3667 { 3668 int val; 3669 3670 if (optlen < sizeof(int)) 3671 return -EINVAL; 3672 if (get_user(val, (int __user *) optval)) 3673 return -EFAULT; 3674 3675 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; 3676 3677 return 0; 3678 } 3679 3680 /* API 6.2 setsockopt(), getsockopt() 3681 * 3682 * Applications use setsockopt() and getsockopt() to set or retrieve 3683 * socket options. Socket options are used to change the default 3684 * behavior of sockets calls. They are described in Section 7. 3685 * 3686 * The syntax is: 3687 * 3688 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3689 * int __user *optlen); 3690 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3691 * int optlen); 3692 * 3693 * sd - the socket descript. 3694 * level - set to IPPROTO_SCTP for all SCTP options. 3695 * optname - the option name. 3696 * optval - the buffer to store the value of the option. 3697 * optlen - the size of the buffer. 3698 */ 3699 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3700 char __user *optval, unsigned int optlen) 3701 { 3702 int retval = 0; 3703 3704 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3705 3706 /* I can hardly begin to describe how wrong this is. This is 3707 * so broken as to be worse than useless. The API draft 3708 * REALLY is NOT helpful here... I am not convinced that the 3709 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3710 * are at all well-founded. 
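 *
 * For orientation only: the options handled in this file are set from
 * user space with level IPPROTO_SCTP (== SOL_SCTP), e.g. an illustrative
 * sketch where 'sd' is assumed to be an SCTP socket descriptor:
 *
 *	int on = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 *
 * Any other level is passed straight through to the address family's own
 * setsockopt() below.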
3711 */ 3712 if (level != SOL_SCTP) { 3713 struct sctp_af *af = sctp_sk(sk)->pf->af; 3714 retval = af->setsockopt(sk, level, optname, optval, optlen); 3715 goto out_nounlock; 3716 } 3717 3718 lock_sock(sk); 3719 3720 switch (optname) { 3721 case SCTP_SOCKOPT_BINDX_ADD: 3722 /* 'optlen' is the size of the addresses buffer. */ 3723 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3724 optlen, SCTP_BINDX_ADD_ADDR); 3725 break; 3726 3727 case SCTP_SOCKOPT_BINDX_REM: 3728 /* 'optlen' is the size of the addresses buffer. */ 3729 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3730 optlen, SCTP_BINDX_REM_ADDR); 3731 break; 3732 3733 case SCTP_SOCKOPT_CONNECTX_OLD: 3734 /* 'optlen' is the size of the addresses buffer. */ 3735 retval = sctp_setsockopt_connectx_old(sk, 3736 (struct sockaddr __user *)optval, 3737 optlen); 3738 break; 3739 3740 case SCTP_SOCKOPT_CONNECTX: 3741 /* 'optlen' is the size of the addresses buffer. */ 3742 retval = sctp_setsockopt_connectx(sk, 3743 (struct sockaddr __user *)optval, 3744 optlen); 3745 break; 3746 3747 case SCTP_DISABLE_FRAGMENTS: 3748 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3749 break; 3750 3751 case SCTP_EVENTS: 3752 retval = sctp_setsockopt_events(sk, optval, optlen); 3753 break; 3754 3755 case SCTP_AUTOCLOSE: 3756 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3757 break; 3758 3759 case SCTP_PEER_ADDR_PARAMS: 3760 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3761 break; 3762 3763 case SCTP_DELAYED_SACK: 3764 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3765 break; 3766 case SCTP_PARTIAL_DELIVERY_POINT: 3767 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3768 break; 3769 3770 case SCTP_INITMSG: 3771 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3772 break; 3773 case SCTP_DEFAULT_SEND_PARAM: 3774 retval = sctp_setsockopt_default_send_param(sk, optval, 3775 optlen); 3776 break; 3777 case SCTP_DEFAULT_SNDINFO: 3778 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); 3779 break; 3780 case SCTP_PRIMARY_ADDR: 3781 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3782 break; 3783 case SCTP_SET_PEER_PRIMARY_ADDR: 3784 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3785 break; 3786 case SCTP_NODELAY: 3787 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3788 break; 3789 case SCTP_RTOINFO: 3790 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3791 break; 3792 case SCTP_ASSOCINFO: 3793 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3794 break; 3795 case SCTP_I_WANT_MAPPED_V4_ADDR: 3796 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3797 break; 3798 case SCTP_MAXSEG: 3799 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3800 break; 3801 case SCTP_ADAPTATION_LAYER: 3802 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3803 break; 3804 case SCTP_CONTEXT: 3805 retval = sctp_setsockopt_context(sk, optval, optlen); 3806 break; 3807 case SCTP_FRAGMENT_INTERLEAVE: 3808 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3809 break; 3810 case SCTP_MAX_BURST: 3811 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3812 break; 3813 case SCTP_AUTH_CHUNK: 3814 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3815 break; 3816 case SCTP_HMAC_IDENT: 3817 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3818 break; 3819 case SCTP_AUTH_KEY: 3820 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3821 break; 3822 case 
SCTP_AUTH_ACTIVE_KEY: 3823 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3824 break; 3825 case SCTP_AUTH_DELETE_KEY: 3826 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3827 break; 3828 case SCTP_AUTO_ASCONF: 3829 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3830 break; 3831 case SCTP_PEER_ADDR_THLDS: 3832 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3833 break; 3834 case SCTP_RECVRCVINFO: 3835 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); 3836 break; 3837 case SCTP_RECVNXTINFO: 3838 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); 3839 break; 3840 default: 3841 retval = -ENOPROTOOPT; 3842 break; 3843 } 3844 3845 release_sock(sk); 3846 3847 out_nounlock: 3848 return retval; 3849 } 3850 3851 /* API 3.1.6 connect() - UDP Style Syntax 3852 * 3853 * An application may use the connect() call in the UDP model to initiate an 3854 * association without sending data. 3855 * 3856 * The syntax is: 3857 * 3858 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3859 * 3860 * sd: the socket descriptor to have a new association added to. 3861 * 3862 * nam: the address structure (either struct sockaddr_in or struct 3863 * sockaddr_in6 defined in RFC2553 [7]). 3864 * 3865 * len: the size of the address. 3866 */ 3867 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3868 int addr_len) 3869 { 3870 int err = 0; 3871 struct sctp_af *af; 3872 3873 lock_sock(sk); 3874 3875 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3876 addr, addr_len); 3877 3878 /* Validate addr_len before calling common connect/connectx routine. */ 3879 af = sctp_get_af_specific(addr->sa_family); 3880 if (!af || addr_len < af->sockaddr_len) { 3881 err = -EINVAL; 3882 } else { 3883 /* Pass correct addr len to common routine (so it knows there 3884 * is only one address being passed. 3885 */ 3886 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3887 } 3888 3889 release_sock(sk); 3890 return err; 3891 } 3892 3893 /* FIXME: Write comments. */ 3894 static int sctp_disconnect(struct sock *sk, int flags) 3895 { 3896 return -EOPNOTSUPP; /* STUB */ 3897 } 3898 3899 /* 4.1.4 accept() - TCP Style Syntax 3900 * 3901 * Applications use accept() call to remove an established SCTP 3902 * association from the accept queue of the endpoint. A new socket 3903 * descriptor will be returned from accept() to represent the newly 3904 * formed association. 3905 */ 3906 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3907 { 3908 struct sctp_sock *sp; 3909 struct sctp_endpoint *ep; 3910 struct sock *newsk = NULL; 3911 struct sctp_association *asoc; 3912 long timeo; 3913 int error = 0; 3914 3915 lock_sock(sk); 3916 3917 sp = sctp_sk(sk); 3918 ep = sp->ep; 3919 3920 if (!sctp_style(sk, TCP)) { 3921 error = -EOPNOTSUPP; 3922 goto out; 3923 } 3924 3925 if (!sctp_sstate(sk, LISTENING)) { 3926 error = -EINVAL; 3927 goto out; 3928 } 3929 3930 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3931 3932 error = sctp_wait_for_accept(sk, timeo); 3933 if (error) 3934 goto out; 3935 3936 /* We treat the list of associations on the endpoint as the accept 3937 * queue and pick the first association on the list. 3938 */ 3939 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3940 3941 newsk = sp->pf->create_accept_sk(sk, asoc); 3942 if (!newsk) { 3943 error = -ENOMEM; 3944 goto out; 3945 } 3946 3947 /* Populate the fields of the newsk from the oldsk and migrate the 3948 * asoc to the newsk. 
3949 */ 3950 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3951 3952 out: 3953 release_sock(sk); 3954 *err = error; 3955 return newsk; 3956 } 3957 3958 /* The SCTP ioctl handler. */ 3959 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3960 { 3961 int rc = -ENOTCONN; 3962 3963 lock_sock(sk); 3964 3965 /* 3966 * SEQPACKET-style sockets in LISTENING state are valid, for 3967 * SCTP, so only discard TCP-style sockets in LISTENING state. 3968 */ 3969 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3970 goto out; 3971 3972 switch (cmd) { 3973 case SIOCINQ: { 3974 struct sk_buff *skb; 3975 unsigned int amount = 0; 3976 3977 skb = skb_peek(&sk->sk_receive_queue); 3978 if (skb != NULL) { 3979 /* 3980 * We will only return the amount of this packet since 3981 * that is all that will be read. 3982 */ 3983 amount = skb->len; 3984 } 3985 rc = put_user(amount, (int __user *)arg); 3986 break; 3987 } 3988 default: 3989 rc = -ENOIOCTLCMD; 3990 break; 3991 } 3992 out: 3993 release_sock(sk); 3994 return rc; 3995 } 3996 3997 /* This is the function which gets called during socket creation to 3998 * initialized the SCTP-specific portion of the sock. 3999 * The sock structure should already be zero-filled memory. 4000 */ 4001 static int sctp_init_sock(struct sock *sk) 4002 { 4003 struct net *net = sock_net(sk); 4004 struct sctp_sock *sp; 4005 4006 pr_debug("%s: sk:%p\n", __func__, sk); 4007 4008 sp = sctp_sk(sk); 4009 4010 /* Initialize the SCTP per socket area. */ 4011 switch (sk->sk_type) { 4012 case SOCK_SEQPACKET: 4013 sp->type = SCTP_SOCKET_UDP; 4014 break; 4015 case SOCK_STREAM: 4016 sp->type = SCTP_SOCKET_TCP; 4017 break; 4018 default: 4019 return -ESOCKTNOSUPPORT; 4020 } 4021 4022 /* Initialize default send parameters. These parameters can be 4023 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4024 */ 4025 sp->default_stream = 0; 4026 sp->default_ppid = 0; 4027 sp->default_flags = 0; 4028 sp->default_context = 0; 4029 sp->default_timetolive = 0; 4030 4031 sp->default_rcv_context = 0; 4032 sp->max_burst = net->sctp.max_burst; 4033 4034 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 4035 4036 /* Initialize default setup parameters. These parameters 4037 * can be modified with the SCTP_INITMSG socket option or 4038 * overridden by the SCTP_INIT CMSG. 4039 */ 4040 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 4041 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 4042 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 4043 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 4044 4045 /* Initialize default RTO related parameters. These parameters can 4046 * be modified for with the SCTP_RTOINFO socket option. 4047 */ 4048 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 4049 sp->rtoinfo.srto_max = net->sctp.rto_max; 4050 sp->rtoinfo.srto_min = net->sctp.rto_min; 4051 4052 /* Initialize default association related parameters. These parameters 4053 * can be modified with the SCTP_ASSOCINFO socket option. 4054 */ 4055 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 4056 sp->assocparams.sasoc_number_peer_destinations = 0; 4057 sp->assocparams.sasoc_peer_rwnd = 0; 4058 sp->assocparams.sasoc_local_rwnd = 0; 4059 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 4060 4061 /* Initialize default event subscriptions. By default, all the 4062 * options are off. 4063 */ 4064 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 4065 4066 /* Default Peer Address Parameters. 
These defaults can 4067 * be modified via SCTP_PEER_ADDR_PARAMS 4068 */ 4069 sp->hbinterval = net->sctp.hb_interval; 4070 sp->pathmaxrxt = net->sctp.max_retrans_path; 4071 sp->pathmtu = 0; /* allow default discovery */ 4072 sp->sackdelay = net->sctp.sack_timeout; 4073 sp->sackfreq = 2; 4074 sp->param_flags = SPP_HB_ENABLE | 4075 SPP_PMTUD_ENABLE | 4076 SPP_SACKDELAY_ENABLE; 4077 4078 /* If enabled no SCTP message fragmentation will be performed. 4079 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 4080 */ 4081 sp->disable_fragments = 0; 4082 4083 /* Enable Nagle algorithm by default. */ 4084 sp->nodelay = 0; 4085 4086 sp->recvrcvinfo = 0; 4087 sp->recvnxtinfo = 0; 4088 4089 /* Enable by default. */ 4090 sp->v4mapped = 1; 4091 4092 /* Auto-close idle associations after the configured 4093 * number of seconds. A value of 0 disables this 4094 * feature. Configure through the SCTP_AUTOCLOSE socket option, 4095 * for UDP-style sockets only. 4096 */ 4097 sp->autoclose = 0; 4098 4099 /* User specified fragmentation limit. */ 4100 sp->user_frag = 0; 4101 4102 sp->adaptation_ind = 0; 4103 4104 sp->pf = sctp_get_pf_specific(sk->sk_family); 4105 4106 /* Control variables for partial data delivery. */ 4107 atomic_set(&sp->pd_mode, 0); 4108 skb_queue_head_init(&sp->pd_lobby); 4109 sp->frag_interleave = 0; 4110 4111 /* Create a per socket endpoint structure. Even if we 4112 * change the data structure relationships, this may still 4113 * be useful for storing pre-connect address information. 4114 */ 4115 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4116 if (!sp->ep) 4117 return -ENOMEM; 4118 4119 sp->hmac = NULL; 4120 4121 sk->sk_destruct = sctp_destruct_sock; 4122 4123 SCTP_DBG_OBJCNT_INC(sock); 4124 4125 local_bh_disable(); 4126 percpu_counter_inc(&sctp_sockets_allocated); 4127 sock_prot_inuse_add(net, sk->sk_prot, 1); 4128 4129 /* Nothing can fail after this block, otherwise 4130 * sctp_destroy_sock() will be called without addr_wq_lock held 4131 */ 4132 if (net->sctp.default_auto_asconf) { 4133 spin_lock(&sock_net(sk)->sctp.addr_wq_lock); 4134 list_add_tail(&sp->auto_asconf_list, 4135 &net->sctp.auto_asconf_splist); 4136 sp->do_auto_asconf = 1; 4137 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); 4138 } else { 4139 sp->do_auto_asconf = 0; 4140 } 4141 4142 local_bh_enable(); 4143 4144 return 0; 4145 } 4146 4147 /* Cleanup any SCTP per socket resources. Must be called with 4148 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true 4149 */ 4150 static void sctp_destroy_sock(struct sock *sk) 4151 { 4152 struct sctp_sock *sp; 4153 4154 pr_debug("%s: sk:%p\n", __func__, sk); 4155 4156 /* Release our hold on the endpoint. */ 4157 sp = sctp_sk(sk); 4158 /* This could happen during socket init, thus we bail out 4159 * early, since the rest of the below is not setup either. 4160 */ 4161 if (sp->ep == NULL) 4162 return; 4163 4164 if (sp->do_auto_asconf) { 4165 sp->do_auto_asconf = 0; 4166 list_del(&sp->auto_asconf_list); 4167 } 4168 sctp_endpoint_free(sp->ep); 4169 local_bh_disable(); 4170 percpu_counter_dec(&sctp_sockets_allocated); 4171 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4172 local_bh_enable(); 4173 } 4174 4175 /* Triggered when there are no references on the socket anymore */ 4176 static void sctp_destruct_sock(struct sock *sk) 4177 { 4178 struct sctp_sock *sp = sctp_sk(sk); 4179 4180 /* Free up the HMAC transform. 
*/ 4181 crypto_free_hash(sp->hmac); 4182 4183 inet_sock_destruct(sk); 4184 } 4185 4186 /* API 4.1.7 shutdown() - TCP Style Syntax 4187 * int shutdown(int socket, int how); 4188 * 4189 * sd - the socket descriptor of the association to be closed. 4190 * how - Specifies the type of shutdown. The values are 4191 * as follows: 4192 * SHUT_RD 4193 * Disables further receive operations. No SCTP 4194 * protocol action is taken. 4195 * SHUT_WR 4196 * Disables further send operations, and initiates 4197 * the SCTP shutdown sequence. 4198 * SHUT_RDWR 4199 * Disables further send and receive operations 4200 * and initiates the SCTP shutdown sequence. 4201 */ 4202 static void sctp_shutdown(struct sock *sk, int how) 4203 { 4204 struct net *net = sock_net(sk); 4205 struct sctp_endpoint *ep; 4206 struct sctp_association *asoc; 4207 4208 if (!sctp_style(sk, TCP)) 4209 return; 4210 4211 if (how & SEND_SHUTDOWN) { 4212 ep = sctp_sk(sk)->ep; 4213 if (!list_empty(&ep->asocs)) { 4214 asoc = list_entry(ep->asocs.next, 4215 struct sctp_association, asocs); 4216 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4217 } 4218 } 4219 } 4220 4221 /* 7.2.1 Association Status (SCTP_STATUS) 4222 4223 * Applications can retrieve current status information about an 4224 * association, including association state, peer receiver window size, 4225 * number of unacked data chunks, and number of data chunks pending 4226 * receipt. This information is read-only. 4227 */ 4228 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4229 char __user *optval, 4230 int __user *optlen) 4231 { 4232 struct sctp_status status; 4233 struct sctp_association *asoc = NULL; 4234 struct sctp_transport *transport; 4235 sctp_assoc_t associd; 4236 int retval = 0; 4237 4238 if (len < sizeof(status)) { 4239 retval = -EINVAL; 4240 goto out; 4241 } 4242 4243 len = sizeof(status); 4244 if (copy_from_user(&status, optval, len)) { 4245 retval = -EFAULT; 4246 goto out; 4247 } 4248 4249 associd = status.sstat_assoc_id; 4250 asoc = sctp_id2assoc(sk, associd); 4251 if (!asoc) { 4252 retval = -EINVAL; 4253 goto out; 4254 } 4255 4256 transport = asoc->peer.primary_path; 4257 4258 status.sstat_assoc_id = sctp_assoc2id(asoc); 4259 status.sstat_state = sctp_assoc_to_state(asoc); 4260 status.sstat_rwnd = asoc->peer.rwnd; 4261 status.sstat_unackdata = asoc->unack_data; 4262 4263 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4264 status.sstat_instrms = asoc->c.sinit_max_instreams; 4265 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4266 status.sstat_fragmentation_point = asoc->frag_point; 4267 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4268 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4269 transport->af_specific->sockaddr_len); 4270 /* Map ipv4 address into v4-mapped-on-v6 address. 
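 * (This only applies to PF_INET6 sockets with v4-mapped addresses
 * enabled, see SCTP_I_WANT_MAPPED_V4_ADDR; otherwise the address is
 * reported unchanged.)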
*/ 4271 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 4272 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4273 status.sstat_primary.spinfo_state = transport->state; 4274 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4275 status.sstat_primary.spinfo_srtt = transport->srtt; 4276 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4277 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4278 4279 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4280 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4281 4282 if (put_user(len, optlen)) { 4283 retval = -EFAULT; 4284 goto out; 4285 } 4286 4287 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4288 __func__, len, status.sstat_state, status.sstat_rwnd, 4289 status.sstat_assoc_id); 4290 4291 if (copy_to_user(optval, &status, len)) { 4292 retval = -EFAULT; 4293 goto out; 4294 } 4295 4296 out: 4297 return retval; 4298 } 4299 4300 4301 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4302 * 4303 * Applications can retrieve information about a specific peer address 4304 * of an association, including its reachability state, congestion 4305 * window, and retransmission timer values. This information is 4306 * read-only. 4307 */ 4308 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4309 char __user *optval, 4310 int __user *optlen) 4311 { 4312 struct sctp_paddrinfo pinfo; 4313 struct sctp_transport *transport; 4314 int retval = 0; 4315 4316 if (len < sizeof(pinfo)) { 4317 retval = -EINVAL; 4318 goto out; 4319 } 4320 4321 len = sizeof(pinfo); 4322 if (copy_from_user(&pinfo, optval, len)) { 4323 retval = -EFAULT; 4324 goto out; 4325 } 4326 4327 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4328 pinfo.spinfo_assoc_id); 4329 if (!transport) 4330 return -EINVAL; 4331 4332 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4333 pinfo.spinfo_state = transport->state; 4334 pinfo.spinfo_cwnd = transport->cwnd; 4335 pinfo.spinfo_srtt = transport->srtt; 4336 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4337 pinfo.spinfo_mtu = transport->pathmtu; 4338 4339 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4340 pinfo.spinfo_state = SCTP_ACTIVE; 4341 4342 if (put_user(len, optlen)) { 4343 retval = -EFAULT; 4344 goto out; 4345 } 4346 4347 if (copy_to_user(optval, &pinfo, len)) { 4348 retval = -EFAULT; 4349 goto out; 4350 } 4351 4352 out: 4353 return retval; 4354 } 4355 4356 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4357 * 4358 * This option is a on/off flag. If enabled no SCTP message 4359 * fragmentation will be performed. Instead if a message being sent 4360 * exceeds the current PMTU size, the message will NOT be sent and 4361 * instead a error will be indicated to the user. 4362 */ 4363 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4364 char __user *optval, int __user *optlen) 4365 { 4366 int val; 4367 4368 if (len < sizeof(int)) 4369 return -EINVAL; 4370 4371 len = sizeof(int); 4372 val = (sctp_sk(sk)->disable_fragments == 1); 4373 if (put_user(len, optlen)) 4374 return -EFAULT; 4375 if (copy_to_user(optval, &val, len)) 4376 return -EFAULT; 4377 return 0; 4378 } 4379 4380 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4381 * 4382 * This socket option is used to specify various notifications and 4383 * ancillary data the user wishes to receive. 
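 *
 * A minimal, illustrative user-space sketch ('sd' is assumed) that
 * subscribes to data I/O events only and then reads the subscription
 * back:
 *
 *	struct sctp_event_subscribe ev;
 *	socklen_t len = sizeof(ev);
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sctp_data_io_event = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, &len);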
4384 */ 4385 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4386 int __user *optlen) 4387 { 4388 if (len <= 0) 4389 return -EINVAL; 4390 if (len > sizeof(struct sctp_event_subscribe)) 4391 len = sizeof(struct sctp_event_subscribe); 4392 if (put_user(len, optlen)) 4393 return -EFAULT; 4394 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4395 return -EFAULT; 4396 return 0; 4397 } 4398 4399 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4400 * 4401 * This socket option is applicable to the UDP-style socket only. When 4402 * set it will cause associations that are idle for more than the 4403 * specified number of seconds to automatically close. An association 4404 * being idle is defined an association that has NOT sent or received 4405 * user data. The special value of '0' indicates that no automatic 4406 * close of any associations should be performed. The option expects an 4407 * integer defining the number of seconds of idle time before an 4408 * association is closed. 4409 */ 4410 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4411 { 4412 /* Applicable to UDP-style socket only */ 4413 if (sctp_style(sk, TCP)) 4414 return -EOPNOTSUPP; 4415 if (len < sizeof(int)) 4416 return -EINVAL; 4417 len = sizeof(int); 4418 if (put_user(len, optlen)) 4419 return -EFAULT; 4420 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4421 return -EFAULT; 4422 return 0; 4423 } 4424 4425 /* Helper routine to branch off an association to a new socket. */ 4426 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4427 { 4428 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4429 struct sctp_sock *sp = sctp_sk(sk); 4430 struct socket *sock; 4431 int err = 0; 4432 4433 if (!asoc) 4434 return -EINVAL; 4435 4436 /* An association cannot be branched off from an already peeled-off 4437 * socket, nor is this supported for tcp style sockets. 4438 */ 4439 if (!sctp_style(sk, UDP)) 4440 return -EINVAL; 4441 4442 /* Create a new socket. */ 4443 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4444 if (err < 0) 4445 return err; 4446 4447 sctp_copy_sock(sock->sk, sk, asoc); 4448 4449 /* Make peeled-off sockets more like 1-1 accepted sockets. 4450 * Set the daddr and initialize id to something more random 4451 */ 4452 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); 4453 4454 /* Populate the fields of the newsk from the oldsk and migrate the 4455 * asoc to the newsk. 4456 */ 4457 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4458 4459 *sockp = sock; 4460 4461 return err; 4462 } 4463 EXPORT_SYMBOL(sctp_do_peeloff); 4464 4465 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4466 { 4467 sctp_peeloff_arg_t peeloff; 4468 struct socket *newsock; 4469 struct file *newfile; 4470 int retval = 0; 4471 4472 if (len < sizeof(sctp_peeloff_arg_t)) 4473 return -EINVAL; 4474 len = sizeof(sctp_peeloff_arg_t); 4475 if (copy_from_user(&peeloff, optval, len)) 4476 return -EFAULT; 4477 4478 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4479 if (retval < 0) 4480 goto out; 4481 4482 /* Map the socket to an unused fd that can be returned to the user. 
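 * The descriptor is reserved first and only installed with fd_install()
 * once the peeloff result has been copied back to user space, so any
 * intermediate failure can still release it with put_unused_fd().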
*/ 4483 retval = get_unused_fd_flags(0); 4484 if (retval < 0) { 4485 sock_release(newsock); 4486 goto out; 4487 } 4488 4489 newfile = sock_alloc_file(newsock, 0, NULL); 4490 if (unlikely(IS_ERR(newfile))) { 4491 put_unused_fd(retval); 4492 sock_release(newsock); 4493 return PTR_ERR(newfile); 4494 } 4495 4496 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4497 retval); 4498 4499 /* Return the fd mapped to the new socket. */ 4500 if (put_user(len, optlen)) { 4501 fput(newfile); 4502 put_unused_fd(retval); 4503 return -EFAULT; 4504 } 4505 peeloff.sd = retval; 4506 if (copy_to_user(optval, &peeloff, len)) { 4507 fput(newfile); 4508 put_unused_fd(retval); 4509 return -EFAULT; 4510 } 4511 fd_install(retval, newfile); 4512 out: 4513 return retval; 4514 } 4515 4516 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4517 * 4518 * Applications can enable or disable heartbeats for any peer address of 4519 * an association, modify an address's heartbeat interval, force a 4520 * heartbeat to be sent immediately, and adjust the address's maximum 4521 * number of retransmissions sent before an address is considered 4522 * unreachable. The following structure is used to access and modify an 4523 * address's parameters: 4524 * 4525 * struct sctp_paddrparams { 4526 * sctp_assoc_t spp_assoc_id; 4527 * struct sockaddr_storage spp_address; 4528 * uint32_t spp_hbinterval; 4529 * uint16_t spp_pathmaxrxt; 4530 * uint32_t spp_pathmtu; 4531 * uint32_t spp_sackdelay; 4532 * uint32_t spp_flags; 4533 * }; 4534 * 4535 * spp_assoc_id - (one-to-many style socket) This is filled in the 4536 * application, and identifies the association for 4537 * this query. 4538 * spp_address - This specifies which address is of interest. 4539 * spp_hbinterval - This contains the value of the heartbeat interval, 4540 * in milliseconds. If a value of zero 4541 * is present in this field then no changes are to 4542 * be made to this parameter. 4543 * spp_pathmaxrxt - This contains the maximum number of 4544 * retransmissions before this address shall be 4545 * considered unreachable. If a value of zero 4546 * is present in this field then no changes are to 4547 * be made to this parameter. 4548 * spp_pathmtu - When Path MTU discovery is disabled the value 4549 * specified here will be the "fixed" path mtu. 4550 * Note that if the spp_address field is empty 4551 * then all associations on this address will 4552 * have this fixed path mtu set upon them. 4553 * 4554 * spp_sackdelay - When delayed sack is enabled, this value specifies 4555 * the number of milliseconds that sacks will be delayed 4556 * for. This value will apply to all addresses of an 4557 * association if the spp_address field is empty. Note 4558 * also, that if delayed sack is enabled and this 4559 * value is set to 0, no change is made to the last 4560 * recorded delayed sack timer value. 4561 * 4562 * spp_flags - These flags are used to control various features 4563 * on an association. The flag field may contain 4564 * zero or more of the following options. 4565 * 4566 * SPP_HB_ENABLE - Enable heartbeats on the 4567 * specified address. Note that if the address 4568 * field is empty all addresses for the association 4569 * have heartbeats enabled upon them. 4570 * 4571 * SPP_HB_DISABLE - Disable heartbeats on the 4572 * speicifed address. Note that if the address 4573 * field is empty all addresses for the association 4574 * will have their heartbeats disabled. 
Note also 4575 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4576 * mutually exclusive; only one of these two should 4577 * be specified. Enabling both fields will have 4578 * undetermined results. 4579 * 4580 * SPP_HB_DEMAND - Request a user initiated heartbeat 4581 * to be made immediately. 4582 * 4583 * SPP_PMTUD_ENABLE - This field will enable PMTU 4584 * discovery upon the specified address. Note that 4585 * if the address field is empty then all addresses 4586 * on the association are affected. 4587 * 4588 * SPP_PMTUD_DISABLE - This field will disable PMTU 4589 * discovery upon the specified address. Note that 4590 * if the address field is empty then all addresses 4591 * on the association are affected. Note also that 4592 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4593 * exclusive. Enabling both will have undetermined 4594 * results. 4595 * 4596 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4597 * on delayed sack. The time specified in spp_sackdelay 4598 * is used to specify the sack delay for this address. Note 4599 * that if spp_address is empty then all addresses will 4600 * enable delayed sack and take on the sack delay 4601 * value specified in spp_sackdelay. 4602 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4603 * off delayed sack. If the spp_address field is blank then 4604 * delayed sack is disabled for the entire association. Note 4605 * also that this field is mutually exclusive to 4606 * SPP_SACKDELAY_ENABLE, setting both will have undefined 4607 * results. 4608 */ 4609 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4610 char __user *optval, int __user *optlen) 4611 { 4612 struct sctp_paddrparams params; 4613 struct sctp_transport *trans = NULL; 4614 struct sctp_association *asoc = NULL; 4615 struct sctp_sock *sp = sctp_sk(sk); 4616 4617 if (len < sizeof(struct sctp_paddrparams)) 4618 return -EINVAL; 4619 len = sizeof(struct sctp_paddrparams); 4620 if (copy_from_user(&params, optval, len)) 4621 return -EFAULT; 4622 4623 /* If an address other than INADDR_ANY is specified, and 4624 * no transport is found, then the request is invalid. 4625 */ 4626 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 4627 trans = sctp_addr_id2transport(sk, &params.spp_address, 4628 params.spp_assoc_id); 4629 if (!trans) { 4630 pr_debug("%s: failed no transport\n", __func__); 4631 return -EINVAL; 4632 } 4633 } 4634 4635 /* Get association, if assoc_id != 0 and the socket is a one 4636 * to many style socket, and an association was not found, then 4637 * the id was invalid. 4638 */ 4639 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4640 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4641 pr_debug("%s: failed no association\n", __func__); 4642 return -EINVAL; 4643 } 4644 4645 if (trans) { 4646 /* Fetch transport values. */ 4647 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4648 params.spp_pathmtu = trans->pathmtu; 4649 params.spp_pathmaxrxt = trans->pathmaxrxt; 4650 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4651 4652 /*draft-11 doesn't say what to return in spp_flags*/ 4653 params.spp_flags = trans->param_flags; 4654 } else if (asoc) { 4655 /* Fetch association values.
*/ 4656 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4657 params.spp_pathmtu = asoc->pathmtu; 4658 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4659 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4660 4661 /*draft-11 doesn't say what to return in spp_flags*/ 4662 params.spp_flags = asoc->param_flags; 4663 } else { 4664 /* Fetch socket values. */ 4665 params.spp_hbinterval = sp->hbinterval; 4666 params.spp_pathmtu = sp->pathmtu; 4667 params.spp_sackdelay = sp->sackdelay; 4668 params.spp_pathmaxrxt = sp->pathmaxrxt; 4669 4670 /*draft-11 doesn't say what to return in spp_flags*/ 4671 params.spp_flags = sp->param_flags; 4672 } 4673 4674 if (copy_to_user(optval, &params, len)) 4675 return -EFAULT; 4676 4677 if (put_user(len, optlen)) 4678 return -EFAULT; 4679 4680 return 0; 4681 } 4682 4683 /* 4684 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4685 * 4686 * This option will affect the way delayed acks are performed. This 4687 * option allows you to get or set the delayed ack time, in 4688 * milliseconds. It also allows changing the delayed ack frequency. 4689 * Changing the frequency to 1 disables the delayed sack algorithm. If 4690 * the assoc_id is 0, then this sets or gets the endpoint's default 4691 * values. If the assoc_id field is non-zero, then the set or get 4692 * affects the specified association for the one to many model (the 4693 * assoc_id field is ignored by the one to one model). Note that if 4694 * sack_delay or sack_freq are 0 when setting this option, then the 4695 * current values will remain unchanged. 4696 * 4697 * struct sctp_sack_info { 4698 * sctp_assoc_t sack_assoc_id; 4699 * uint32_t sack_delay; 4700 * uint32_t sack_freq; 4701 * }; 4702 * 4703 * sack_assoc_id - This parameter indicates which association the user 4704 * is performing an action upon. Note that if this field's value is 4705 * zero then the endpoint's default value is changed (affecting future 4706 * associations only). 4707 * 4708 * sack_delay - This parameter contains the number of milliseconds that 4709 * the user is requesting the delayed ACK timer be set to. Note that 4710 * this value is defined in the standard to be between 200 and 500 4711 * milliseconds. 4712 * 4713 * sack_freq - This parameter contains the number of packets that must 4714 * be received before a sack is sent without waiting for the delay 4715 * timer to expire. The default value for this is 2; setting this 4716 * value to 1 will disable the delayed sack algorithm. 4717 */ 4718 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 4719 char __user *optval, 4720 int __user *optlen) 4721 { 4722 struct sctp_sack_info params; 4723 struct sctp_association *asoc = NULL; 4724 struct sctp_sock *sp = sctp_sk(sk); 4725 4726 if (len >= sizeof(struct sctp_sack_info)) { 4727 len = sizeof(struct sctp_sack_info); 4728 4729 if (copy_from_user(&params, optval, len)) 4730 return -EFAULT; 4731 } else if (len == sizeof(struct sctp_assoc_value)) { 4732 pr_warn_ratelimited(DEPRECATED 4733 "%s (pid %d) " 4734 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 4735 "Use struct sctp_sack_info instead\n", 4736 current->comm, task_pid_nr(current)); 4737 if (copy_from_user(&params, optval, len)) 4738 return -EFAULT; 4739 } else 4740 return -EINVAL; 4741 4742 /* Get association, if sack_assoc_id != 0 and the socket is a one 4743 * to many style socket, and an association was not found, then 4744 * the id was invalid.
4745 */ 4746 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 4747 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 4748 return -EINVAL; 4749 4750 if (asoc) { 4751 /* Fetch association values. */ 4752 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 4753 params.sack_delay = jiffies_to_msecs( 4754 asoc->sackdelay); 4755 params.sack_freq = asoc->sackfreq; 4756 4757 } else { 4758 params.sack_delay = 0; 4759 params.sack_freq = 1; 4760 } 4761 } else { 4762 /* Fetch socket values. */ 4763 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 4764 params.sack_delay = sp->sackdelay; 4765 params.sack_freq = sp->sackfreq; 4766 } else { 4767 params.sack_delay = 0; 4768 params.sack_freq = 1; 4769 } 4770 } 4771 4772 if (copy_to_user(optval, &params, len)) 4773 return -EFAULT; 4774 4775 if (put_user(len, optlen)) 4776 return -EFAULT; 4777 4778 return 0; 4779 } 4780 4781 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 4782 * 4783 * Applications can specify protocol parameters for the default association 4784 * initialization. The option name argument to setsockopt() and getsockopt() 4785 * is SCTP_INITMSG. 4786 * 4787 * Setting initialization parameters is effective only on an unconnected 4788 * socket (for UDP-style sockets only future associations are affected 4789 * by the change). With TCP-style sockets, this option is inherited by 4790 * sockets derived from a listener socket. 4791 */ 4792 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4793 { 4794 if (len < sizeof(struct sctp_initmsg)) 4795 return -EINVAL; 4796 len = sizeof(struct sctp_initmsg); 4797 if (put_user(len, optlen)) 4798 return -EFAULT; 4799 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4800 return -EFAULT; 4801 return 0; 4802 } 4803 4804 4805 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4806 char __user *optval, int __user *optlen) 4807 { 4808 struct sctp_association *asoc; 4809 int cnt = 0; 4810 struct sctp_getaddrs getaddrs; 4811 struct sctp_transport *from; 4812 void __user *to; 4813 union sctp_addr temp; 4814 struct sctp_sock *sp = sctp_sk(sk); 4815 int addrlen; 4816 size_t space_left; 4817 int bytes_copied; 4818 4819 if (len < sizeof(struct sctp_getaddrs)) 4820 return -EINVAL; 4821 4822 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4823 return -EFAULT; 4824 4825 /* For UDP-style sockets, id specifies the association to query.
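 * (For one-to-one style sockets the id is ignored and the socket's single
 * association, if established, is used instead.)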
*/ 4826 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4827 if (!asoc) 4828 return -EINVAL; 4829 4830 to = optval + offsetof(struct sctp_getaddrs, addrs); 4831 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4832 4833 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4834 transports) { 4835 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4836 addrlen = sctp_get_pf_specific(sk->sk_family) 4837 ->addr_to_user(sp, &temp); 4838 if (space_left < addrlen) 4839 return -ENOMEM; 4840 if (copy_to_user(to, &temp, addrlen)) 4841 return -EFAULT; 4842 to += addrlen; 4843 cnt++; 4844 space_left -= addrlen; 4845 } 4846 4847 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4848 return -EFAULT; 4849 bytes_copied = ((char __user *)to) - optval; 4850 if (put_user(bytes_copied, optlen)) 4851 return -EFAULT; 4852 4853 return 0; 4854 } 4855 4856 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4857 size_t space_left, int *bytes_copied) 4858 { 4859 struct sctp_sockaddr_entry *addr; 4860 union sctp_addr temp; 4861 int cnt = 0; 4862 int addrlen; 4863 struct net *net = sock_net(sk); 4864 4865 rcu_read_lock(); 4866 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4867 if (!addr->valid) 4868 continue; 4869 4870 if ((PF_INET == sk->sk_family) && 4871 (AF_INET6 == addr->a.sa.sa_family)) 4872 continue; 4873 if ((PF_INET6 == sk->sk_family) && 4874 inet_v6_ipv6only(sk) && 4875 (AF_INET == addr->a.sa.sa_family)) 4876 continue; 4877 memcpy(&temp, &addr->a, sizeof(temp)); 4878 if (!temp.v4.sin_port) 4879 temp.v4.sin_port = htons(port); 4880 4881 addrlen = sctp_get_pf_specific(sk->sk_family) 4882 ->addr_to_user(sctp_sk(sk), &temp); 4883 4884 if (space_left < addrlen) { 4885 cnt = -ENOMEM; 4886 break; 4887 } 4888 memcpy(to, &temp, addrlen); 4889 4890 to += addrlen; 4891 cnt++; 4892 space_left -= addrlen; 4893 *bytes_copied += addrlen; 4894 } 4895 rcu_read_unlock(); 4896 4897 return cnt; 4898 } 4899 4900 4901 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4902 char __user *optval, int __user *optlen) 4903 { 4904 struct sctp_bind_addr *bp; 4905 struct sctp_association *asoc; 4906 int cnt = 0; 4907 struct sctp_getaddrs getaddrs; 4908 struct sctp_sockaddr_entry *addr; 4909 void __user *to; 4910 union sctp_addr temp; 4911 struct sctp_sock *sp = sctp_sk(sk); 4912 int addrlen; 4913 int err = 0; 4914 size_t space_left; 4915 int bytes_copied = 0; 4916 void *addrs; 4917 void *buf; 4918 4919 if (len < sizeof(struct sctp_getaddrs)) 4920 return -EINVAL; 4921 4922 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4923 return -EFAULT; 4924 4925 /* 4926 * For UDP-style sockets, id specifies the association to query. 4927 * If the id field is set to the value '0' then the locally bound 4928 * addresses are returned without regard to any particular 4929 * association. 4930 */ 4931 if (0 == getaddrs.assoc_id) { 4932 bp = &sctp_sk(sk)->ep->base.bind_addr; 4933 } else { 4934 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4935 if (!asoc) 4936 return -EINVAL; 4937 bp = &asoc->base.bind_addr; 4938 } 4939 4940 to = optval + offsetof(struct sctp_getaddrs, addrs); 4941 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4942 4943 addrs = kmalloc(space_left, GFP_KERNEL); 4944 if (!addrs) 4945 return -ENOMEM; 4946 4947 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4948 * addresses from the global local address list. 
4949 */ 4950 if (sctp_list_single_entry(&bp->address_list)) { 4951 addr = list_entry(bp->address_list.next, 4952 struct sctp_sockaddr_entry, list); 4953 if (sctp_is_any(sk, &addr->a)) { 4954 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4955 space_left, &bytes_copied); 4956 if (cnt < 0) { 4957 err = cnt; 4958 goto out; 4959 } 4960 goto copy_getaddrs; 4961 } 4962 } 4963 4964 buf = addrs; 4965 /* Protection on the bound address list is not needed since 4966 * in the socket option context we hold a socket lock and 4967 * thus the bound address list can't change. 4968 */ 4969 list_for_each_entry(addr, &bp->address_list, list) { 4970 memcpy(&temp, &addr->a, sizeof(temp)); 4971 addrlen = sctp_get_pf_specific(sk->sk_family) 4972 ->addr_to_user(sp, &temp); 4973 if (space_left < addrlen) { 4974 err = -ENOMEM; /*fixme: right error?*/ 4975 goto out; 4976 } 4977 memcpy(buf, &temp, addrlen); 4978 buf += addrlen; 4979 bytes_copied += addrlen; 4980 cnt++; 4981 space_left -= addrlen; 4982 } 4983 4984 copy_getaddrs: 4985 if (copy_to_user(to, addrs, bytes_copied)) { 4986 err = -EFAULT; 4987 goto out; 4988 } 4989 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4990 err = -EFAULT; 4991 goto out; 4992 } 4993 if (put_user(bytes_copied, optlen)) 4994 err = -EFAULT; 4995 out: 4996 kfree(addrs); 4997 return err; 4998 } 4999 5000 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 5001 * 5002 * Requests that the local SCTP stack use the enclosed peer address as 5003 * the association primary. The enclosed address must be one of the 5004 * association peer's addresses. 5005 */ 5006 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 5007 char __user *optval, int __user *optlen) 5008 { 5009 struct sctp_prim prim; 5010 struct sctp_association *asoc; 5011 struct sctp_sock *sp = sctp_sk(sk); 5012 5013 if (len < sizeof(struct sctp_prim)) 5014 return -EINVAL; 5015 5016 len = sizeof(struct sctp_prim); 5017 5018 if (copy_from_user(&prim, optval, len)) 5019 return -EFAULT; 5020 5021 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 5022 if (!asoc) 5023 return -EINVAL; 5024 5025 if (!asoc->peer.primary_path) 5026 return -ENOTCONN; 5027 5028 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 5029 asoc->peer.primary_path->af_specific->sockaddr_len); 5030 5031 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, 5032 (union sctp_addr *)&prim.ssp_addr); 5033 5034 if (put_user(len, optlen)) 5035 return -EFAULT; 5036 if (copy_to_user(optval, &prim, len)) 5037 return -EFAULT; 5038 5039 return 0; 5040 } 5041 5042 /* 5043 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 5044 * 5045 * Requests that the local endpoint set the specified Adaptation Layer 5046 * Indication parameter for all future INIT and INIT-ACK exchanges. 
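 *
 * An illustrative user-space read of the current value (sketch only; 'sd'
 * is assumed to be an SCTP socket descriptor):
 *
 *	struct sctp_setadaptation ad;
 *	socklen_t len = sizeof(ad);
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_ADAPTATION_LAYER, &ad, &len);
 *
 * after which ad.ssb_adaptation_ind holds the 32-bit indication.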
5047 */ 5048 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 5049 char __user *optval, int __user *optlen) 5050 { 5051 struct sctp_setadaptation adaptation; 5052 5053 if (len < sizeof(struct sctp_setadaptation)) 5054 return -EINVAL; 5055 5056 len = sizeof(struct sctp_setadaptation); 5057 5058 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 5059 5060 if (put_user(len, optlen)) 5061 return -EFAULT; 5062 if (copy_to_user(optval, &adaptation, len)) 5063 return -EFAULT; 5064 5065 return 0; 5066 } 5067 5068 /* 5069 * 5070 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 5071 * 5072 * Applications that wish to use the sendto() system call may wish to 5073 * specify a default set of parameters that would normally be supplied 5074 * through the inclusion of ancillary data. This socket option allows 5075 * such an application to set the default sctp_sndrcvinfo structure. 5076 5077 5078 * The application that wishes to use this socket option simply passes 5079 * in to this call the sctp_sndrcvinfo structure defined in Section 5080 * 5.2.2) The input parameters accepted by this call include 5081 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 5082 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 5083 * to this call if the caller is using the UDP model. 5084 * 5085 * For getsockopt, it get the default sctp_sndrcvinfo structure. 5086 */ 5087 static int sctp_getsockopt_default_send_param(struct sock *sk, 5088 int len, char __user *optval, 5089 int __user *optlen) 5090 { 5091 struct sctp_sock *sp = sctp_sk(sk); 5092 struct sctp_association *asoc; 5093 struct sctp_sndrcvinfo info; 5094 5095 if (len < sizeof(info)) 5096 return -EINVAL; 5097 5098 len = sizeof(info); 5099 5100 if (copy_from_user(&info, optval, len)) 5101 return -EFAULT; 5102 5103 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 5104 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 5105 return -EINVAL; 5106 if (asoc) { 5107 info.sinfo_stream = asoc->default_stream; 5108 info.sinfo_flags = asoc->default_flags; 5109 info.sinfo_ppid = asoc->default_ppid; 5110 info.sinfo_context = asoc->default_context; 5111 info.sinfo_timetolive = asoc->default_timetolive; 5112 } else { 5113 info.sinfo_stream = sp->default_stream; 5114 info.sinfo_flags = sp->default_flags; 5115 info.sinfo_ppid = sp->default_ppid; 5116 info.sinfo_context = sp->default_context; 5117 info.sinfo_timetolive = sp->default_timetolive; 5118 } 5119 5120 if (put_user(len, optlen)) 5121 return -EFAULT; 5122 if (copy_to_user(optval, &info, len)) 5123 return -EFAULT; 5124 5125 return 0; 5126 } 5127 5128 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 5129 * (SCTP_DEFAULT_SNDINFO) 5130 */ 5131 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, 5132 char __user *optval, 5133 int __user *optlen) 5134 { 5135 struct sctp_sock *sp = sctp_sk(sk); 5136 struct sctp_association *asoc; 5137 struct sctp_sndinfo info; 5138 5139 if (len < sizeof(info)) 5140 return -EINVAL; 5141 5142 len = sizeof(info); 5143 5144 if (copy_from_user(&info, optval, len)) 5145 return -EFAULT; 5146 5147 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 5148 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 5149 return -EINVAL; 5150 if (asoc) { 5151 info.snd_sid = asoc->default_stream; 5152 info.snd_flags = asoc->default_flags; 5153 info.snd_ppid = asoc->default_ppid; 5154 info.snd_context = asoc->default_context; 5155 } else { 5156 info.snd_sid = sp->default_stream; 5157 info.snd_flags = sp->default_flags; 5158 info.snd_ppid = sp->default_ppid; 5159 info.snd_context = sp->default_context; 5160 } 5161 5162 if (put_user(len, optlen)) 5163 return -EFAULT; 5164 if (copy_to_user(optval, &info, len)) 5165 return -EFAULT; 5166 5167 return 0; 5168 } 5169 5170 /* 5171 * 5172 * 7.1.5 SCTP_NODELAY 5173 * 5174 * Turn on/off any Nagle-like algorithm. This means that packets are 5175 * generally sent as soon as possible and no unnecessary delays are 5176 * introduced, at the cost of more packets in the network. Expects an 5177 * integer boolean flag. 5178 */ 5179 5180 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5181 char __user *optval, int __user *optlen) 5182 { 5183 int val; 5184 5185 if (len < sizeof(int)) 5186 return -EINVAL; 5187 5188 len = sizeof(int); 5189 val = (sctp_sk(sk)->nodelay == 1); 5190 if (put_user(len, optlen)) 5191 return -EFAULT; 5192 if (copy_to_user(optval, &val, len)) 5193 return -EFAULT; 5194 return 0; 5195 } 5196 5197 /* 5198 * 5199 * 7.1.1 SCTP_RTOINFO 5200 * 5201 * The protocol parameters used to initialize and bound retransmission 5202 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5203 * and modify these parameters. 5204 * All parameters are time values, in milliseconds. A value of 0, when 5205 * modifying the parameters, indicates that the current value should not 5206 * be changed. 5207 * 5208 */ 5209 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5210 char __user *optval, 5211 int __user *optlen) { 5212 struct sctp_rtoinfo rtoinfo; 5213 struct sctp_association *asoc; 5214 5215 if (len < sizeof (struct sctp_rtoinfo)) 5216 return -EINVAL; 5217 5218 len = sizeof(struct sctp_rtoinfo); 5219 5220 if (copy_from_user(&rtoinfo, optval, len)) 5221 return -EFAULT; 5222 5223 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5224 5225 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5226 return -EINVAL; 5227 5228 /* Values corresponding to the specific association. */ 5229 if (asoc) { 5230 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5231 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5232 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5233 } else { 5234 /* Values corresponding to the endpoint. 
*/ 5235 struct sctp_sock *sp = sctp_sk(sk); 5236 5237 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5238 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5239 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5240 } 5241 5242 if (put_user(len, optlen)) 5243 return -EFAULT; 5244 5245 if (copy_to_user(optval, &rtoinfo, len)) 5246 return -EFAULT; 5247 5248 return 0; 5249 } 5250 5251 /* 5252 * 5253 * 7.1.2 SCTP_ASSOCINFO 5254 * 5255 * This option is used to tune the maximum retransmission attempts 5256 * of the association. 5257 * Returns an error if the new association retransmission value is 5258 * greater than the sum of the retransmission value of the peer. 5259 * See [SCTP] for more information. 5260 * 5261 */ 5262 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5263 char __user *optval, 5264 int __user *optlen) 5265 { 5266 5267 struct sctp_assocparams assocparams; 5268 struct sctp_association *asoc; 5269 struct list_head *pos; 5270 int cnt = 0; 5271 5272 if (len < sizeof (struct sctp_assocparams)) 5273 return -EINVAL; 5274 5275 len = sizeof(struct sctp_assocparams); 5276 5277 if (copy_from_user(&assocparams, optval, len)) 5278 return -EFAULT; 5279 5280 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5281 5282 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5283 return -EINVAL; 5284 5285 /* Values correspoinding to the specific association */ 5286 if (asoc) { 5287 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5288 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5289 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5290 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5291 5292 list_for_each(pos, &asoc->peer.transport_addr_list) { 5293 cnt++; 5294 } 5295 5296 assocparams.sasoc_number_peer_destinations = cnt; 5297 } else { 5298 /* Values corresponding to the endpoint */ 5299 struct sctp_sock *sp = sctp_sk(sk); 5300 5301 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5302 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5303 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5304 assocparams.sasoc_cookie_life = 5305 sp->assocparams.sasoc_cookie_life; 5306 assocparams.sasoc_number_peer_destinations = 5307 sp->assocparams. 5308 sasoc_number_peer_destinations; 5309 } 5310 5311 if (put_user(len, optlen)) 5312 return -EFAULT; 5313 5314 if (copy_to_user(optval, &assocparams, len)) 5315 return -EFAULT; 5316 5317 return 0; 5318 } 5319 5320 /* 5321 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5322 * 5323 * This socket option is a boolean flag which turns on or off mapped V4 5324 * addresses. If this option is turned on and the socket is type 5325 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5326 * If this option is turned off, then no mapping will be done of V4 5327 * addresses and a user will receive both PF_INET6 and PF_INET type 5328 * addresses on the socket. 5329 */ 5330 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5331 char __user *optval, int __user *optlen) 5332 { 5333 int val; 5334 struct sctp_sock *sp = sctp_sk(sk); 5335 5336 if (len < sizeof(int)) 5337 return -EINVAL; 5338 5339 len = sizeof(int); 5340 val = sp->v4mapped; 5341 if (put_user(len, optlen)) 5342 return -EFAULT; 5343 if (copy_to_user(optval, &val, len)) 5344 return -EFAULT; 5345 5346 return 0; 5347 } 5348 5349 /* 5350 * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) 5351 * (chapter and verse is quoted at sctp_setsockopt_context()) 5352 */ 5353 static int sctp_getsockopt_context(struct sock *sk, int len, 5354 char __user *optval, int __user *optlen) 5355 { 5356 struct sctp_assoc_value params; 5357 struct sctp_sock *sp; 5358 struct sctp_association *asoc; 5359 5360 if (len < sizeof(struct sctp_assoc_value)) 5361 return -EINVAL; 5362 5363 len = sizeof(struct sctp_assoc_value); 5364 5365 if (copy_from_user(&params, optval, len)) 5366 return -EFAULT; 5367 5368 sp = sctp_sk(sk); 5369 5370 if (params.assoc_id != 0) { 5371 asoc = sctp_id2assoc(sk, params.assoc_id); 5372 if (!asoc) 5373 return -EINVAL; 5374 params.assoc_value = asoc->default_rcv_context; 5375 } else { 5376 params.assoc_value = sp->default_rcv_context; 5377 } 5378 5379 if (put_user(len, optlen)) 5380 return -EFAULT; 5381 if (copy_to_user(optval, &params, len)) 5382 return -EFAULT; 5383 5384 return 0; 5385 } 5386 5387 /* 5388 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5389 * This option will get or set the maximum size to put in any outgoing 5390 * SCTP DATA chunk. If a message is larger than this size it will be 5391 * fragmented by SCTP into the specified size. Note that the underlying 5392 * SCTP implementation may fragment into smaller sized chunks when the 5393 * PMTU of the underlying association is smaller than the value set by 5394 * the user. The default value for this option is '0' which indicates 5395 * the user is NOT limiting fragmentation and only the PMTU will affect 5396 * SCTP's choice of DATA chunk size. Note also that values set larger 5397 * than the maximum size of an IP datagram will effectively let SCTP 5398 * control fragmentation (i.e. the same as setting this option to 0). 5399 * 5400 * The following structure is used to access and modify this parameter: 5401 * 5402 * struct sctp_assoc_value { 5403 * sctp_assoc_t assoc_id; 5404 * uint32_t assoc_value; 5405 * }; 5406 * 5407 * assoc_id: This parameter is ignored for one-to-one style sockets. 5408 * For one-to-many style sockets this parameter indicates which 5409 * association the user is performing an action upon. Note that if 5410 * this field's value is zero then the endpoint's default value is 5411 * changed (affecting future associations only). 5412 * assoc_value: This parameter specifies the maximum size in bytes.
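 *
 * An illustrative user-space sketch (assumes 'sd' and an association id
 * 'aid'); it caps outgoing DATA chunks at 1200 bytes:
 *
 *	struct sctp_assoc_value av;
 *	av.assoc_id = aid;
 *	av.assoc_value = 1200;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));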
 */
static int sctp_getsockopt_maxseg(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (len == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in maxseg socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		params.assoc_id = 0;
	} else if (len >= sizeof(struct sctp_assoc_value)) {
		len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, sizeof(params)))
			return -EFAULT;
	} else
		return -EINVAL;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		params.assoc_value = asoc->frag_point;
	else
		params.assoc_value = sctp_sk(sk)->user_frag;

	if (put_user(len, optlen))
		return -EFAULT;
	if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
			return -EFAULT;
	} else {
		if (copy_to_user(optval, &params, len))
			return -EFAULT;
	}

	return 0;
}

/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
 */
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
					       char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);

	val = sctp_sk(sk)->frag_interleave;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.25.  Set or Get the sctp partial delivery point
 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
 */
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
						  char __user *optval,
						  int __user *optlen)
{
	u32 val;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	val = sctp_sk(sk)->pd_point;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
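
/*
 * Illustrative userspace sketch (not part of this file): both options
 * above are read back as plain integers, e.g.
 *
 *	uint32_t pd_point;
 *	socklen_t len = sizeof(pd_point);
 *
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
 *		   &pd_point, &len);
 *
 * with 'fd' assumed to be an existing SCTP socket descriptor.
 */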
/*
 * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
 */
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
				    char __user *optval,
				    int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		params.assoc_id = 0;
	} else if (len >= sizeof(struct sctp_assoc_value)) {
		len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->max_burst;
	} else
		params.assoc_value = sp->max_burst;

	if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
			return -EFAULT;
	} else {
		if (copy_to_user(optval, &params, len))
			return -EFAULT;
	}

	return 0;

}

static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_hmacalgo __user *p = (void __user *)optval;
	struct sctp_hmac_algo_param *hmacs;
	__u16 data_len = 0;
	u32 num_idents;

	if (!ep->auth_enable)
		return -EACCES;

	hmacs = ep->auth_hmacs_list;
	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);

	if (len < sizeof(struct sctp_hmacalgo) + data_len)
		return -EINVAL;

	len = sizeof(struct sctp_hmacalgo) + data_len;
	num_idents = data_len / sizeof(u16);

	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_idents, &p->shmac_num_idents))
		return -EFAULT;
	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
		return -EFAULT;
	return 0;
}

static int sctp_getsockopt_active_key(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		val.scact_keynumber = asoc->active_key_id;
	else
		val.scact_keynumber = ep->active_key_id;

	len = sizeof(struct sctp_authkeyid);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
					    char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32 num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

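	/* The user must pass in at least a struct sctp_authchunks header;
	 * the chunk types negotiated by the peer, if any, are copied back
	 * immediately after it and gauth_number_of_chunks is filled in.
	 */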
5628 if (len < sizeof(struct sctp_authchunks)) 5629 return -EINVAL; 5630 5631 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5632 return -EFAULT; 5633 5634 to = p->gauth_chunks; 5635 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5636 if (!asoc) 5637 return -EINVAL; 5638 5639 ch = asoc->peer.peer_chunks; 5640 if (!ch) 5641 goto num; 5642 5643 /* See if the user provided enough room for all the data */ 5644 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5645 if (len < num_chunks) 5646 return -EINVAL; 5647 5648 if (copy_to_user(to, ch->chunks, num_chunks)) 5649 return -EFAULT; 5650 num: 5651 len = sizeof(struct sctp_authchunks) + num_chunks; 5652 if (put_user(len, optlen)) 5653 return -EFAULT; 5654 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5655 return -EFAULT; 5656 return 0; 5657 } 5658 5659 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5660 char __user *optval, int __user *optlen) 5661 { 5662 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5663 struct sctp_authchunks __user *p = (void __user *)optval; 5664 struct sctp_authchunks val; 5665 struct sctp_association *asoc; 5666 struct sctp_chunks_param *ch; 5667 u32 num_chunks = 0; 5668 char __user *to; 5669 5670 if (!ep->auth_enable) 5671 return -EACCES; 5672 5673 if (len < sizeof(struct sctp_authchunks)) 5674 return -EINVAL; 5675 5676 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5677 return -EFAULT; 5678 5679 to = p->gauth_chunks; 5680 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5681 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5682 return -EINVAL; 5683 5684 if (asoc) 5685 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 5686 else 5687 ch = ep->auth_chunk_list; 5688 5689 if (!ch) 5690 goto num; 5691 5692 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5693 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5694 return -EINVAL; 5695 5696 if (copy_to_user(to, ch->chunks, num_chunks)) 5697 return -EFAULT; 5698 num: 5699 len = sizeof(struct sctp_authchunks) + num_chunks; 5700 if (put_user(len, optlen)) 5701 return -EFAULT; 5702 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5703 return -EFAULT; 5704 5705 return 0; 5706 } 5707 5708 /* 5709 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5710 * This option gets the current number of associations that are attached 5711 * to a one-to-many style socket. The option value is an uint32_t. 
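 *
 * Illustrative userspace sketch (not part of this file), with 'fd'
 * assumed to be a one-to-many (SOCK_SEQPACKET) SCTP socket:
 *
 *	uint32_t assoc_cnt;
 *	socklen_t len = sizeof(assoc_cnt);
 *
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &assoc_cnt, &len);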
5712 */ 5713 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5714 char __user *optval, int __user *optlen) 5715 { 5716 struct sctp_sock *sp = sctp_sk(sk); 5717 struct sctp_association *asoc; 5718 u32 val = 0; 5719 5720 if (sctp_style(sk, TCP)) 5721 return -EOPNOTSUPP; 5722 5723 if (len < sizeof(u32)) 5724 return -EINVAL; 5725 5726 len = sizeof(u32); 5727 5728 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5729 val++; 5730 } 5731 5732 if (put_user(len, optlen)) 5733 return -EFAULT; 5734 if (copy_to_user(optval, &val, len)) 5735 return -EFAULT; 5736 5737 return 0; 5738 } 5739 5740 /* 5741 * 8.1.23 SCTP_AUTO_ASCONF 5742 * See the corresponding setsockopt entry as description 5743 */ 5744 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5745 char __user *optval, int __user *optlen) 5746 { 5747 int val = 0; 5748 5749 if (len < sizeof(int)) 5750 return -EINVAL; 5751 5752 len = sizeof(int); 5753 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5754 val = 1; 5755 if (put_user(len, optlen)) 5756 return -EFAULT; 5757 if (copy_to_user(optval, &val, len)) 5758 return -EFAULT; 5759 return 0; 5760 } 5761 5762 /* 5763 * 8.2.6. Get the Current Identifiers of Associations 5764 * (SCTP_GET_ASSOC_ID_LIST) 5765 * 5766 * This option gets the current list of SCTP association identifiers of 5767 * the SCTP associations handled by a one-to-many style socket. 5768 */ 5769 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5770 char __user *optval, int __user *optlen) 5771 { 5772 struct sctp_sock *sp = sctp_sk(sk); 5773 struct sctp_association *asoc; 5774 struct sctp_assoc_ids *ids; 5775 u32 num = 0; 5776 5777 if (sctp_style(sk, TCP)) 5778 return -EOPNOTSUPP; 5779 5780 if (len < sizeof(struct sctp_assoc_ids)) 5781 return -EINVAL; 5782 5783 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5784 num++; 5785 } 5786 5787 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5788 return -EINVAL; 5789 5790 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5791 5792 ids = kmalloc(len, GFP_KERNEL); 5793 if (unlikely(!ids)) 5794 return -ENOMEM; 5795 5796 ids->gaids_number_of_ids = num; 5797 num = 0; 5798 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5799 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5800 } 5801 5802 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5803 kfree(ids); 5804 return -EFAULT; 5805 } 5806 5807 kfree(ids); 5808 return 0; 5809 } 5810 5811 /* 5812 * SCTP_PEER_ADDR_THLDS 5813 * 5814 * This option allows us to fetch the partially failed threshold for one or all 5815 * transports in an association. 
See Section 6.1 of: 5816 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5817 */ 5818 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5819 char __user *optval, 5820 int len, 5821 int __user *optlen) 5822 { 5823 struct sctp_paddrthlds val; 5824 struct sctp_transport *trans; 5825 struct sctp_association *asoc; 5826 5827 if (len < sizeof(struct sctp_paddrthlds)) 5828 return -EINVAL; 5829 len = sizeof(struct sctp_paddrthlds); 5830 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5831 return -EFAULT; 5832 5833 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5834 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5835 if (!asoc) 5836 return -ENOENT; 5837 5838 val.spt_pathpfthld = asoc->pf_retrans; 5839 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5840 } else { 5841 trans = sctp_addr_id2transport(sk, &val.spt_address, 5842 val.spt_assoc_id); 5843 if (!trans) 5844 return -ENOENT; 5845 5846 val.spt_pathmaxrxt = trans->pathmaxrxt; 5847 val.spt_pathpfthld = trans->pf_retrans; 5848 } 5849 5850 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5851 return -EFAULT; 5852 5853 return 0; 5854 } 5855 5856 /* 5857 * SCTP_GET_ASSOC_STATS 5858 * 5859 * This option retrieves local per endpoint statistics. It is modeled 5860 * after OpenSolaris' implementation 5861 */ 5862 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5863 char __user *optval, 5864 int __user *optlen) 5865 { 5866 struct sctp_assoc_stats sas; 5867 struct sctp_association *asoc = NULL; 5868 5869 /* User must provide at least the assoc id */ 5870 if (len < sizeof(sctp_assoc_t)) 5871 return -EINVAL; 5872 5873 /* Allow the struct to grow and fill in as much as possible */ 5874 len = min_t(size_t, len, sizeof(sas)); 5875 5876 if (copy_from_user(&sas, optval, len)) 5877 return -EFAULT; 5878 5879 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5880 if (!asoc) 5881 return -EINVAL; 5882 5883 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5884 sas.sas_gapcnt = asoc->stats.gapcnt; 5885 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5886 sas.sas_osacks = asoc->stats.osacks; 5887 sas.sas_isacks = asoc->stats.isacks; 5888 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5889 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5890 sas.sas_oodchunks = asoc->stats.oodchunks; 5891 sas.sas_iodchunks = asoc->stats.iodchunks; 5892 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5893 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5894 sas.sas_idupchunks = asoc->stats.idupchunks; 5895 sas.sas_opackets = asoc->stats.opackets; 5896 sas.sas_ipackets = asoc->stats.ipackets; 5897 5898 /* New high max rto observed, will return 0 if not a single 5899 * RTO update took place. 
obs_rto_ipaddr will be bogus 5900 * in such a case 5901 */ 5902 sas.sas_maxrto = asoc->stats.max_obs_rto; 5903 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5904 sizeof(struct sockaddr_storage)); 5905 5906 /* Mark beginning of a new observation period */ 5907 asoc->stats.max_obs_rto = asoc->rto_min; 5908 5909 if (put_user(len, optlen)) 5910 return -EFAULT; 5911 5912 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5913 5914 if (copy_to_user(optval, &sas, len)) 5915 return -EFAULT; 5916 5917 return 0; 5918 } 5919 5920 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, 5921 char __user *optval, 5922 int __user *optlen) 5923 { 5924 int val = 0; 5925 5926 if (len < sizeof(int)) 5927 return -EINVAL; 5928 5929 len = sizeof(int); 5930 if (sctp_sk(sk)->recvrcvinfo) 5931 val = 1; 5932 if (put_user(len, optlen)) 5933 return -EFAULT; 5934 if (copy_to_user(optval, &val, len)) 5935 return -EFAULT; 5936 5937 return 0; 5938 } 5939 5940 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, 5941 char __user *optval, 5942 int __user *optlen) 5943 { 5944 int val = 0; 5945 5946 if (len < sizeof(int)) 5947 return -EINVAL; 5948 5949 len = sizeof(int); 5950 if (sctp_sk(sk)->recvnxtinfo) 5951 val = 1; 5952 if (put_user(len, optlen)) 5953 return -EFAULT; 5954 if (copy_to_user(optval, &val, len)) 5955 return -EFAULT; 5956 5957 return 0; 5958 } 5959 5960 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5961 char __user *optval, int __user *optlen) 5962 { 5963 int retval = 0; 5964 int len; 5965 5966 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5967 5968 /* I can hardly begin to describe how wrong this is. This is 5969 * so broken as to be worse than useless. The API draft 5970 * REALLY is NOT helpful here... I am not convinced that the 5971 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5972 * are at all well-founded. 
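 *
 * (For the SCTP-level options handled below, userspace passes
 * IPPROTO_SCTP as the level; SOL_SCTP is defined to the same value.)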
5973 */ 5974 if (level != SOL_SCTP) { 5975 struct sctp_af *af = sctp_sk(sk)->pf->af; 5976 5977 retval = af->getsockopt(sk, level, optname, optval, optlen); 5978 return retval; 5979 } 5980 5981 if (get_user(len, optlen)) 5982 return -EFAULT; 5983 5984 lock_sock(sk); 5985 5986 switch (optname) { 5987 case SCTP_STATUS: 5988 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5989 break; 5990 case SCTP_DISABLE_FRAGMENTS: 5991 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5992 optlen); 5993 break; 5994 case SCTP_EVENTS: 5995 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5996 break; 5997 case SCTP_AUTOCLOSE: 5998 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5999 break; 6000 case SCTP_SOCKOPT_PEELOFF: 6001 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 6002 break; 6003 case SCTP_PEER_ADDR_PARAMS: 6004 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 6005 optlen); 6006 break; 6007 case SCTP_DELAYED_SACK: 6008 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 6009 optlen); 6010 break; 6011 case SCTP_INITMSG: 6012 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 6013 break; 6014 case SCTP_GET_PEER_ADDRS: 6015 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 6016 optlen); 6017 break; 6018 case SCTP_GET_LOCAL_ADDRS: 6019 retval = sctp_getsockopt_local_addrs(sk, len, optval, 6020 optlen); 6021 break; 6022 case SCTP_SOCKOPT_CONNECTX3: 6023 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 6024 break; 6025 case SCTP_DEFAULT_SEND_PARAM: 6026 retval = sctp_getsockopt_default_send_param(sk, len, 6027 optval, optlen); 6028 break; 6029 case SCTP_DEFAULT_SNDINFO: 6030 retval = sctp_getsockopt_default_sndinfo(sk, len, 6031 optval, optlen); 6032 break; 6033 case SCTP_PRIMARY_ADDR: 6034 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 6035 break; 6036 case SCTP_NODELAY: 6037 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 6038 break; 6039 case SCTP_RTOINFO: 6040 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 6041 break; 6042 case SCTP_ASSOCINFO: 6043 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 6044 break; 6045 case SCTP_I_WANT_MAPPED_V4_ADDR: 6046 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 6047 break; 6048 case SCTP_MAXSEG: 6049 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 6050 break; 6051 case SCTP_GET_PEER_ADDR_INFO: 6052 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 6053 optlen); 6054 break; 6055 case SCTP_ADAPTATION_LAYER: 6056 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, 6057 optlen); 6058 break; 6059 case SCTP_CONTEXT: 6060 retval = sctp_getsockopt_context(sk, len, optval, optlen); 6061 break; 6062 case SCTP_FRAGMENT_INTERLEAVE: 6063 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 6064 optlen); 6065 break; 6066 case SCTP_PARTIAL_DELIVERY_POINT: 6067 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 6068 optlen); 6069 break; 6070 case SCTP_MAX_BURST: 6071 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 6072 break; 6073 case SCTP_AUTH_KEY: 6074 case SCTP_AUTH_CHUNK: 6075 case SCTP_AUTH_DELETE_KEY: 6076 retval = -EOPNOTSUPP; 6077 break; 6078 case SCTP_HMAC_IDENT: 6079 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 6080 break; 6081 case SCTP_AUTH_ACTIVE_KEY: 6082 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 6083 break; 6084 case SCTP_PEER_AUTH_CHUNKS: 6085 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 6086 optlen); 
6087 break; 6088 case SCTP_LOCAL_AUTH_CHUNKS: 6089 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 6090 optlen); 6091 break; 6092 case SCTP_GET_ASSOC_NUMBER: 6093 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 6094 break; 6095 case SCTP_GET_ASSOC_ID_LIST: 6096 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 6097 break; 6098 case SCTP_AUTO_ASCONF: 6099 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 6100 break; 6101 case SCTP_PEER_ADDR_THLDS: 6102 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 6103 break; 6104 case SCTP_GET_ASSOC_STATS: 6105 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 6106 break; 6107 case SCTP_RECVRCVINFO: 6108 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); 6109 break; 6110 case SCTP_RECVNXTINFO: 6111 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); 6112 break; 6113 default: 6114 retval = -ENOPROTOOPT; 6115 break; 6116 } 6117 6118 release_sock(sk); 6119 return retval; 6120 } 6121 6122 static void sctp_hash(struct sock *sk) 6123 { 6124 /* STUB */ 6125 } 6126 6127 static void sctp_unhash(struct sock *sk) 6128 { 6129 /* STUB */ 6130 } 6131 6132 /* Check if port is acceptable. Possibly find first available port. 6133 * 6134 * The port hash table (contained in the 'global' SCTP protocol storage 6135 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 6136 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 6137 * list (the list number is the port number hashed out, so as you 6138 * would expect from a hash function, all the ports in a given list have 6139 * such a number that hashes out to the same list number; you were 6140 * expecting that, right?); so each list has a set of ports, with a 6141 * link to the socket (struct sock) that uses it, the port number and 6142 * a fastreuse flag (FIXME: NPI ipg). 6143 */ 6144 static struct sctp_bind_bucket *sctp_bucket_create( 6145 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 6146 6147 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 6148 { 6149 struct sctp_bind_hashbucket *head; /* hash list */ 6150 struct sctp_bind_bucket *pp; 6151 unsigned short snum; 6152 int ret; 6153 6154 snum = ntohs(addr->v4.sin_port); 6155 6156 pr_debug("%s: begins, snum:%d\n", __func__, snum); 6157 6158 local_bh_disable(); 6159 6160 if (snum == 0) { 6161 /* Search for an available port. */ 6162 int low, high, remaining, index; 6163 unsigned int rover; 6164 struct net *net = sock_net(sk); 6165 6166 inet_get_local_port_range(net, &low, &high); 6167 remaining = (high - low) + 1; 6168 rover = prandom_u32() % remaining + low; 6169 6170 do { 6171 rover++; 6172 if ((rover < low) || (rover > high)) 6173 rover = low; 6174 if (inet_is_local_reserved_port(net, rover)) 6175 continue; 6176 index = sctp_phashfn(sock_net(sk), rover); 6177 head = &sctp_port_hashtable[index]; 6178 spin_lock(&head->lock); 6179 sctp_for_each_hentry(pp, &head->chain) 6180 if ((pp->port == rover) && 6181 net_eq(sock_net(sk), pp->net)) 6182 goto next; 6183 break; 6184 next: 6185 spin_unlock(&head->lock); 6186 } while (--remaining > 0); 6187 6188 /* Exhausted local port range during search? */ 6189 ret = 1; 6190 if (remaining <= 0) 6191 goto fail; 6192 6193 /* OK, here is the one we will use. HEAD (the port 6194 * hash table list entry) is non-NULL and we hold it's 6195 * mutex. 
6196 */ 6197 snum = rover; 6198 } else { 6199 /* We are given an specific port number; we verify 6200 * that it is not being used. If it is used, we will 6201 * exahust the search in the hash list corresponding 6202 * to the port number (snum) - we detect that with the 6203 * port iterator, pp being NULL. 6204 */ 6205 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 6206 spin_lock(&head->lock); 6207 sctp_for_each_hentry(pp, &head->chain) { 6208 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 6209 goto pp_found; 6210 } 6211 } 6212 pp = NULL; 6213 goto pp_not_found; 6214 pp_found: 6215 if (!hlist_empty(&pp->owner)) { 6216 /* We had a port hash table hit - there is an 6217 * available port (pp != NULL) and it is being 6218 * used by other socket (pp->owner not empty); that other 6219 * socket is going to be sk2. 6220 */ 6221 int reuse = sk->sk_reuse; 6222 struct sock *sk2; 6223 6224 pr_debug("%s: found a possible match\n", __func__); 6225 6226 if (pp->fastreuse && sk->sk_reuse && 6227 sk->sk_state != SCTP_SS_LISTENING) 6228 goto success; 6229 6230 /* Run through the list of sockets bound to the port 6231 * (pp->port) [via the pointers bind_next and 6232 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 6233 * we get the endpoint they describe and run through 6234 * the endpoint's list of IP (v4 or v6) addresses, 6235 * comparing each of the addresses with the address of 6236 * the socket sk. If we find a match, then that means 6237 * that this port/socket (sk) combination are already 6238 * in an endpoint. 6239 */ 6240 sk_for_each_bound(sk2, &pp->owner) { 6241 struct sctp_endpoint *ep2; 6242 ep2 = sctp_sk(sk2)->ep; 6243 6244 if (sk == sk2 || 6245 (reuse && sk2->sk_reuse && 6246 sk2->sk_state != SCTP_SS_LISTENING)) 6247 continue; 6248 6249 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 6250 sctp_sk(sk2), sctp_sk(sk))) { 6251 ret = (long)sk2; 6252 goto fail_unlock; 6253 } 6254 } 6255 6256 pr_debug("%s: found a match\n", __func__); 6257 } 6258 pp_not_found: 6259 /* If there was a hash table miss, create a new port. */ 6260 ret = 1; 6261 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 6262 goto fail_unlock; 6263 6264 /* In either case (hit or miss), make sure fastreuse is 1 only 6265 * if sk->sk_reuse is too (that is, if the caller requested 6266 * SO_REUSEADDR on this socket -sk-). 6267 */ 6268 if (hlist_empty(&pp->owner)) { 6269 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6270 pp->fastreuse = 1; 6271 else 6272 pp->fastreuse = 0; 6273 } else if (pp->fastreuse && 6274 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6275 pp->fastreuse = 0; 6276 6277 /* We are set, so fill up all the data in the hash table 6278 * entry, tie the socket list information with the rest of the 6279 * sockets FIXME: Blurry, NPI (ipg). 6280 */ 6281 success: 6282 if (!sctp_sk(sk)->bind_hash) { 6283 inet_sk(sk)->inet_num = snum; 6284 sk_add_bind_node(sk, &pp->owner); 6285 sctp_sk(sk)->bind_hash = pp; 6286 } 6287 ret = 0; 6288 6289 fail_unlock: 6290 spin_unlock(&head->lock); 6291 6292 fail: 6293 local_bh_enable(); 6294 return ret; 6295 } 6296 6297 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6298 * port is requested. 6299 */ 6300 static int sctp_get_port(struct sock *sk, unsigned short snum) 6301 { 6302 union sctp_addr addr; 6303 struct sctp_af *af = sctp_sk(sk)->pf->af; 6304 6305 /* Set up a dummy address struct from the sk. 
*/ 6306 af->from_sk(&addr, sk); 6307 addr.v4.sin_port = htons(snum); 6308 6309 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6310 return !!sctp_get_port_local(sk, &addr); 6311 } 6312 6313 /* 6314 * Move a socket to LISTENING state. 6315 */ 6316 static int sctp_listen_start(struct sock *sk, int backlog) 6317 { 6318 struct sctp_sock *sp = sctp_sk(sk); 6319 struct sctp_endpoint *ep = sp->ep; 6320 struct crypto_hash *tfm = NULL; 6321 char alg[32]; 6322 6323 /* Allocate HMAC for generating cookie. */ 6324 if (!sp->hmac && sp->sctp_hmac_alg) { 6325 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6326 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6327 if (IS_ERR(tfm)) { 6328 net_info_ratelimited("failed to load transform for %s: %ld\n", 6329 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6330 return -ENOSYS; 6331 } 6332 sctp_sk(sk)->hmac = tfm; 6333 } 6334 6335 /* 6336 * If a bind() or sctp_bindx() is not called prior to a listen() 6337 * call that allows new associations to be accepted, the system 6338 * picks an ephemeral port and will choose an address set equivalent 6339 * to binding with a wildcard address. 6340 * 6341 * This is not currently spelled out in the SCTP sockets 6342 * extensions draft, but follows the practice as seen in TCP 6343 * sockets. 6344 * 6345 */ 6346 sk->sk_state = SCTP_SS_LISTENING; 6347 if (!ep->base.bind_addr.port) { 6348 if (sctp_autobind(sk)) 6349 return -EAGAIN; 6350 } else { 6351 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6352 sk->sk_state = SCTP_SS_CLOSED; 6353 return -EADDRINUSE; 6354 } 6355 } 6356 6357 sk->sk_max_ack_backlog = backlog; 6358 sctp_hash_endpoint(ep); 6359 return 0; 6360 } 6361 6362 /* 6363 * 4.1.3 / 5.1.3 listen() 6364 * 6365 * By default, new associations are not accepted for UDP style sockets. 6366 * An application uses listen() to mark a socket as being able to 6367 * accept new associations. 6368 * 6369 * On TCP style sockets, applications use listen() to ready the SCTP 6370 * endpoint for accepting inbound associations. 6371 * 6372 * On both types of endpoints a backlog of '0' disables listening. 6373 * 6374 * Move a socket to LISTENING state. 6375 */ 6376 int sctp_inet_listen(struct socket *sock, int backlog) 6377 { 6378 struct sock *sk = sock->sk; 6379 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6380 int err = -EINVAL; 6381 6382 if (unlikely(backlog < 0)) 6383 return err; 6384 6385 lock_sock(sk); 6386 6387 /* Peeled-off sockets are not allowed to listen(). */ 6388 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6389 goto out; 6390 6391 if (sock->state != SS_UNCONNECTED) 6392 goto out; 6393 6394 /* If backlog is zero, disable listening. */ 6395 if (!backlog) { 6396 if (sctp_sstate(sk, CLOSED)) 6397 goto out; 6398 6399 err = 0; 6400 sctp_unhash_endpoint(ep); 6401 sk->sk_state = SCTP_SS_CLOSED; 6402 if (sk->sk_reuse) 6403 sctp_sk(sk)->bind_hash->fastreuse = 1; 6404 goto out; 6405 } 6406 6407 /* If we are already listening, just update the backlog */ 6408 if (sctp_sstate(sk, LISTENING)) 6409 sk->sk_max_ack_backlog = backlog; 6410 else { 6411 err = sctp_listen_start(sk, backlog); 6412 if (err) 6413 goto out; 6414 } 6415 6416 err = 0; 6417 out: 6418 release_sock(sk); 6419 return err; 6420 } 6421 6422 /* 6423 * This function is done by modeling the current datagram_poll() and the 6424 * tcp_poll(). 
Note that, based on these implementations, we don't 6425 * lock the socket in this function, even though it seems that, 6426 * ideally, locking or some other mechanisms can be used to ensure 6427 * the integrity of the counters (sndbuf and wmem_alloc) used 6428 * in this place. We assume that we don't need locks either until proven 6429 * otherwise. 6430 * 6431 * Another thing to note is that we include the Async I/O support 6432 * here, again, by modeling the current TCP/UDP code. We don't have 6433 * a good way to test with it yet. 6434 */ 6435 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6436 { 6437 struct sock *sk = sock->sk; 6438 struct sctp_sock *sp = sctp_sk(sk); 6439 unsigned int mask; 6440 6441 poll_wait(file, sk_sleep(sk), wait); 6442 6443 /* A TCP-style listening socket becomes readable when the accept queue 6444 * is not empty. 6445 */ 6446 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6447 return (!list_empty(&sp->ep->asocs)) ? 6448 (POLLIN | POLLRDNORM) : 0; 6449 6450 mask = 0; 6451 6452 /* Is there any exceptional events? */ 6453 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6454 mask |= POLLERR | 6455 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 6456 if (sk->sk_shutdown & RCV_SHUTDOWN) 6457 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6458 if (sk->sk_shutdown == SHUTDOWN_MASK) 6459 mask |= POLLHUP; 6460 6461 /* Is it readable? Reconsider this code with TCP-style support. */ 6462 if (!skb_queue_empty(&sk->sk_receive_queue)) 6463 mask |= POLLIN | POLLRDNORM; 6464 6465 /* The association is either gone or not ready. */ 6466 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6467 return mask; 6468 6469 /* Is it writable? */ 6470 if (sctp_writeable(sk)) { 6471 mask |= POLLOUT | POLLWRNORM; 6472 } else { 6473 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6474 /* 6475 * Since the socket is not locked, the buffer 6476 * might be made available after the writeable check and 6477 * before the bit is set. This could cause a lost I/O 6478 * signal. tcp_poll() has a race breaker for this race 6479 * condition. Based on their implementation, we put 6480 * in the following code to cover it as well. 6481 */ 6482 if (sctp_writeable(sk)) 6483 mask |= POLLOUT | POLLWRNORM; 6484 } 6485 return mask; 6486 } 6487 6488 /******************************************************************** 6489 * 2nd Level Abstractions 6490 ********************************************************************/ 6491 6492 static struct sctp_bind_bucket *sctp_bucket_create( 6493 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6494 { 6495 struct sctp_bind_bucket *pp; 6496 6497 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6498 if (pp) { 6499 SCTP_DBG_OBJCNT_INC(bind_bucket); 6500 pp->port = snum; 6501 pp->fastreuse = 0; 6502 INIT_HLIST_HEAD(&pp->owner); 6503 pp->net = net; 6504 hlist_add_head(&pp->node, &head->chain); 6505 } 6506 return pp; 6507 } 6508 6509 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6510 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6511 { 6512 if (pp && hlist_empty(&pp->owner)) { 6513 __hlist_del(&pp->node); 6514 kmem_cache_free(sctp_bucket_cachep, pp); 6515 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6516 } 6517 } 6518 6519 /* Release this socket's reference to a local port. 
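 * The bucket itself is freed by sctp_bucket_destroy() above once no
 * socket remains bound to it.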
*/ 6520 static inline void __sctp_put_port(struct sock *sk) 6521 { 6522 struct sctp_bind_hashbucket *head = 6523 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6524 inet_sk(sk)->inet_num)]; 6525 struct sctp_bind_bucket *pp; 6526 6527 spin_lock(&head->lock); 6528 pp = sctp_sk(sk)->bind_hash; 6529 __sk_del_bind_node(sk); 6530 sctp_sk(sk)->bind_hash = NULL; 6531 inet_sk(sk)->inet_num = 0; 6532 sctp_bucket_destroy(pp); 6533 spin_unlock(&head->lock); 6534 } 6535 6536 void sctp_put_port(struct sock *sk) 6537 { 6538 local_bh_disable(); 6539 __sctp_put_port(sk); 6540 local_bh_enable(); 6541 } 6542 6543 /* 6544 * The system picks an ephemeral port and choose an address set equivalent 6545 * to binding with a wildcard address. 6546 * One of those addresses will be the primary address for the association. 6547 * This automatically enables the multihoming capability of SCTP. 6548 */ 6549 static int sctp_autobind(struct sock *sk) 6550 { 6551 union sctp_addr autoaddr; 6552 struct sctp_af *af; 6553 __be16 port; 6554 6555 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6556 af = sctp_sk(sk)->pf->af; 6557 6558 port = htons(inet_sk(sk)->inet_num); 6559 af->inaddr_any(&autoaddr, port); 6560 6561 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6562 } 6563 6564 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6565 * 6566 * From RFC 2292 6567 * 4.2 The cmsghdr Structure * 6568 * 6569 * When ancillary data is sent or received, any number of ancillary data 6570 * objects can be specified by the msg_control and msg_controllen members of 6571 * the msghdr structure, because each object is preceded by 6572 * a cmsghdr structure defining the object's length (the cmsg_len member). 6573 * Historically Berkeley-derived implementations have passed only one object 6574 * at a time, but this API allows multiple objects to be 6575 * passed in a single call to sendmsg() or recvmsg(). The following example 6576 * shows two ancillary data objects in a control buffer. 6577 * 6578 * |<--------------------------- msg_controllen -------------------------->| 6579 * | | 6580 * 6581 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6582 * 6583 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6584 * | | | 6585 * 6586 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6587 * 6588 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6589 * | | | | | 6590 * 6591 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6592 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6593 * 6594 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6595 * 6596 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6597 * ^ 6598 * | 6599 * 6600 * msg_control 6601 * points here 6602 */ 6603 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6604 { 6605 struct cmsghdr *cmsg; 6606 struct msghdr *my_msg = (struct msghdr *)msg; 6607 6608 for_each_cmsghdr(cmsg, my_msg) { 6609 if (!CMSG_OK(my_msg, cmsg)) 6610 return -EINVAL; 6611 6612 /* Should we parse this header or ignore? */ 6613 if (cmsg->cmsg_level != IPPROTO_SCTP) 6614 continue; 6615 6616 /* Strictly check lengths following example in SCM code. 
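		 *
		 * For reference, an illustrative userspace sketch (not part
		 * of this file) that builds one such ancillary object; error
		 * handling is elided and 'sinfo' is an already filled
		 * struct sctp_sndinfo:
		 *
		 *	char buf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
		 *	struct msghdr msg = { .msg_control = buf,
		 *			      .msg_controllen = sizeof(buf) };
		 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
		 *
		 *	c->cmsg_level = IPPROTO_SCTP;
		 *	c->cmsg_type  = SCTP_SNDINFO;
		 *	c->cmsg_len   = CMSG_LEN(sizeof(struct sctp_sndinfo));
		 *	memcpy(CMSG_DATA(c), &sinfo, sizeof(sinfo));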
*/ 6617 switch (cmsg->cmsg_type) { 6618 case SCTP_INIT: 6619 /* SCTP Socket API Extension 6620 * 5.3.1 SCTP Initiation Structure (SCTP_INIT) 6621 * 6622 * This cmsghdr structure provides information for 6623 * initializing new SCTP associations with sendmsg(). 6624 * The SCTP_INITMSG socket option uses this same data 6625 * structure. This structure is not used for 6626 * recvmsg(). 6627 * 6628 * cmsg_level cmsg_type cmsg_data[] 6629 * ------------ ------------ ---------------------- 6630 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6631 */ 6632 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) 6633 return -EINVAL; 6634 6635 cmsgs->init = CMSG_DATA(cmsg); 6636 break; 6637 6638 case SCTP_SNDRCV: 6639 /* SCTP Socket API Extension 6640 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) 6641 * 6642 * This cmsghdr structure specifies SCTP options for 6643 * sendmsg() and describes SCTP header information 6644 * about a received message through recvmsg(). 6645 * 6646 * cmsg_level cmsg_type cmsg_data[] 6647 * ------------ ------------ ---------------------- 6648 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6649 */ 6650 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6651 return -EINVAL; 6652 6653 cmsgs->srinfo = CMSG_DATA(cmsg); 6654 6655 if (cmsgs->srinfo->sinfo_flags & 6656 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6657 SCTP_ABORT | SCTP_EOF)) 6658 return -EINVAL; 6659 break; 6660 6661 case SCTP_SNDINFO: 6662 /* SCTP Socket API Extension 6663 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) 6664 * 6665 * This cmsghdr structure specifies SCTP options for 6666 * sendmsg(). This structure and SCTP_RCVINFO replaces 6667 * SCTP_SNDRCV which has been deprecated. 6668 * 6669 * cmsg_level cmsg_type cmsg_data[] 6670 * ------------ ------------ --------------------- 6671 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo 6672 */ 6673 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) 6674 return -EINVAL; 6675 6676 cmsgs->sinfo = CMSG_DATA(cmsg); 6677 6678 if (cmsgs->sinfo->snd_flags & 6679 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6680 SCTP_ABORT | SCTP_EOF)) 6681 return -EINVAL; 6682 break; 6683 default: 6684 return -EINVAL; 6685 } 6686 } 6687 6688 return 0; 6689 } 6690 6691 /* 6692 * Wait for a packet.. 6693 * Note: This function is the same function as in core/datagram.c 6694 * with a few modifications to make lksctp work. 6695 */ 6696 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) 6697 { 6698 int error; 6699 DEFINE_WAIT(wait); 6700 6701 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6702 6703 /* Socket errors? */ 6704 error = sock_error(sk); 6705 if (error) 6706 goto out; 6707 6708 if (!skb_queue_empty(&sk->sk_receive_queue)) 6709 goto ready; 6710 6711 /* Socket shut down? */ 6712 if (sk->sk_shutdown & RCV_SHUTDOWN) 6713 goto out; 6714 6715 /* Sequenced packets can come disconnected. If so we report the 6716 * problem. 6717 */ 6718 error = -ENOTCONN; 6719 6720 /* Is there a good reason to think that we may receive some data? */ 6721 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6722 goto out; 6723 6724 /* Handle signals. */ 6725 if (signal_pending(current)) 6726 goto interrupted; 6727 6728 /* Let another process have a go. Since we are going to sleep 6729 * anyway. Note: This may cause odd behaviors if the message 6730 * does not fit in the user's buffer, but this seems to be the 6731 * only way to honor MSG_DONTWAIT realistically. 
6732 */ 6733 release_sock(sk); 6734 *timeo_p = schedule_timeout(*timeo_p); 6735 lock_sock(sk); 6736 6737 ready: 6738 finish_wait(sk_sleep(sk), &wait); 6739 return 0; 6740 6741 interrupted: 6742 error = sock_intr_errno(*timeo_p); 6743 6744 out: 6745 finish_wait(sk_sleep(sk), &wait); 6746 *err = error; 6747 return error; 6748 } 6749 6750 /* Receive a datagram. 6751 * Note: This is pretty much the same routine as in core/datagram.c 6752 * with a few changes to make lksctp work. 6753 */ 6754 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6755 int noblock, int *err) 6756 { 6757 int error; 6758 struct sk_buff *skb; 6759 long timeo; 6760 6761 timeo = sock_rcvtimeo(sk, noblock); 6762 6763 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6764 MAX_SCHEDULE_TIMEOUT); 6765 6766 do { 6767 /* Again only user level code calls this function, 6768 * so nothing interrupt level 6769 * will suddenly eat the receive_queue. 6770 * 6771 * Look at current nfs client by the way... 6772 * However, this function was correct in any case. 8) 6773 */ 6774 if (flags & MSG_PEEK) { 6775 spin_lock_bh(&sk->sk_receive_queue.lock); 6776 skb = skb_peek(&sk->sk_receive_queue); 6777 if (skb) 6778 atomic_inc(&skb->users); 6779 spin_unlock_bh(&sk->sk_receive_queue.lock); 6780 } else { 6781 skb = skb_dequeue(&sk->sk_receive_queue); 6782 } 6783 6784 if (skb) 6785 return skb; 6786 6787 /* Caller is allowed not to check sk->sk_err before calling. */ 6788 error = sock_error(sk); 6789 if (error) 6790 goto no_packet; 6791 6792 if (sk->sk_shutdown & RCV_SHUTDOWN) 6793 break; 6794 6795 if (sk_can_busy_loop(sk) && 6796 sk_busy_loop(sk, noblock)) 6797 continue; 6798 6799 /* User doesn't want to wait. */ 6800 error = -EAGAIN; 6801 if (!timeo) 6802 goto no_packet; 6803 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6804 6805 return NULL; 6806 6807 no_packet: 6808 *err = error; 6809 return NULL; 6810 } 6811 6812 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6813 static void __sctp_write_space(struct sctp_association *asoc) 6814 { 6815 struct sock *sk = asoc->base.sk; 6816 struct socket *sock = sk->sk_socket; 6817 6818 if ((sctp_wspace(asoc) > 0) && sock) { 6819 if (waitqueue_active(&asoc->wait)) 6820 wake_up_interruptible(&asoc->wait); 6821 6822 if (sctp_writeable(sk)) { 6823 wait_queue_head_t *wq = sk_sleep(sk); 6824 6825 if (wq && waitqueue_active(wq)) 6826 wake_up_interruptible(wq); 6827 6828 /* Note that we try to include the Async I/O support 6829 * here by modeling from the current TCP/UDP code. 6830 * We have not tested with it yet. 6831 */ 6832 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6833 sock_wake_async(sock, 6834 SOCK_WAKE_SPACE, POLL_OUT); 6835 } 6836 } 6837 } 6838 6839 static void sctp_wake_up_waiters(struct sock *sk, 6840 struct sctp_association *asoc) 6841 { 6842 struct sctp_association *tmp = asoc; 6843 6844 /* We do accounting for the sndbuf space per association, 6845 * so we only need to wake our own association. 6846 */ 6847 if (asoc->ep->sndbuf_policy) 6848 return __sctp_write_space(asoc); 6849 6850 /* If association goes down and is just flushing its 6851 * outq, then just normally notify others. 6852 */ 6853 if (asoc->base.dead) 6854 return sctp_write_space(sk); 6855 6856 /* Accounting for the sndbuf space is per socket, so we 6857 * need to wake up others, try to be fair and in case of 6858 * other associations, let them have a go first instead 6859 * of just doing a sctp_write_space() call. 
6860 * 6861 * Note that we reach sctp_wake_up_waiters() only when 6862 * associations free up queued chunks, thus we are under 6863 * lock and the list of associations on a socket is 6864 * guaranteed not to change. 6865 */ 6866 for (tmp = list_next_entry(tmp, asocs); 1; 6867 tmp = list_next_entry(tmp, asocs)) { 6868 /* Manually skip the head element. */ 6869 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) 6870 continue; 6871 /* Wake up association. */ 6872 __sctp_write_space(tmp); 6873 /* We've reached the end. */ 6874 if (tmp == asoc) 6875 break; 6876 } 6877 } 6878 6879 /* Do accounting for the sndbuf space. 6880 * Decrement the used sndbuf space of the corresponding association by the 6881 * data size which was just transmitted(freed). 6882 */ 6883 static void sctp_wfree(struct sk_buff *skb) 6884 { 6885 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; 6886 struct sctp_association *asoc = chunk->asoc; 6887 struct sock *sk = asoc->base.sk; 6888 6889 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6890 sizeof(struct sk_buff) + 6891 sizeof(struct sctp_chunk); 6892 6893 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6894 6895 /* 6896 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6897 */ 6898 sk->sk_wmem_queued -= skb->truesize; 6899 sk_mem_uncharge(sk, skb->truesize); 6900 6901 sock_wfree(skb); 6902 sctp_wake_up_waiters(sk, asoc); 6903 6904 sctp_association_put(asoc); 6905 } 6906 6907 /* Do accounting for the receive space on the socket. 6908 * Accounting for the association is done in ulpevent.c 6909 * We set this as a destructor for the cloned data skbs so that 6910 * accounting is done at the correct time. 6911 */ 6912 void sctp_sock_rfree(struct sk_buff *skb) 6913 { 6914 struct sock *sk = skb->sk; 6915 struct sctp_ulpevent *event = sctp_skb2event(skb); 6916 6917 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6918 6919 /* 6920 * Mimic the behavior of sock_rfree 6921 */ 6922 sk_mem_uncharge(sk, event->rmem_len); 6923 } 6924 6925 6926 /* Helper function to wait for space in the sndbuf. */ 6927 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6928 size_t msg_len) 6929 { 6930 struct sock *sk = asoc->base.sk; 6931 int err = 0; 6932 long current_timeo = *timeo_p; 6933 DEFINE_WAIT(wait); 6934 6935 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6936 *timeo_p, msg_len); 6937 6938 /* Increment the association's refcnt. */ 6939 sctp_association_hold(asoc); 6940 6941 /* Wait on the association specific sndbuf space. */ 6942 for (;;) { 6943 prepare_to_wait_exclusive(&asoc->wait, &wait, 6944 TASK_INTERRUPTIBLE); 6945 if (!*timeo_p) 6946 goto do_nonblock; 6947 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6948 asoc->base.dead) 6949 goto do_error; 6950 if (signal_pending(current)) 6951 goto do_interrupted; 6952 if (msg_len <= sctp_wspace(asoc)) 6953 break; 6954 6955 /* Let another process have a go. Since we are going 6956 * to sleep anyway. 6957 */ 6958 release_sock(sk); 6959 current_timeo = schedule_timeout(current_timeo); 6960 BUG_ON(sk != asoc->base.sk); 6961 lock_sock(sk); 6962 6963 *timeo_p = current_timeo; 6964 } 6965 6966 out: 6967 finish_wait(&asoc->wait, &wait); 6968 6969 /* Release the association's refcnt. 
*/ 6970 sctp_association_put(asoc); 6971 6972 return err; 6973 6974 do_error: 6975 err = -EPIPE; 6976 goto out; 6977 6978 do_interrupted: 6979 err = sock_intr_errno(*timeo_p); 6980 goto out; 6981 6982 do_nonblock: 6983 err = -EAGAIN; 6984 goto out; 6985 } 6986 6987 void sctp_data_ready(struct sock *sk) 6988 { 6989 struct socket_wq *wq; 6990 6991 rcu_read_lock(); 6992 wq = rcu_dereference(sk->sk_wq); 6993 if (wq_has_sleeper(wq)) 6994 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6995 POLLRDNORM | POLLRDBAND); 6996 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6997 rcu_read_unlock(); 6998 } 6999 7000 /* If socket sndbuf has changed, wake up all per association waiters. */ 7001 void sctp_write_space(struct sock *sk) 7002 { 7003 struct sctp_association *asoc; 7004 7005 /* Wake up the tasks in each wait queue. */ 7006 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 7007 __sctp_write_space(asoc); 7008 } 7009 } 7010 7011 /* Is there any sndbuf space available on the socket? 7012 * 7013 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 7014 * associations on the same socket. For a UDP-style socket with 7015 * multiple associations, it is possible for it to be "unwriteable" 7016 * prematurely. I assume that this is acceptable because 7017 * a premature "unwriteable" is better than an accidental "writeable" which 7018 * would cause an unwanted block under certain circumstances. For the 1-1 7019 * UDP-style sockets or TCP-style sockets, this code should work. 7020 * - Daisy 7021 */ 7022 static int sctp_writeable(struct sock *sk) 7023 { 7024 int amt = 0; 7025 7026 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 7027 if (amt < 0) 7028 amt = 0; 7029 return amt; 7030 } 7031 7032 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 7033 * returns immediately with EINPROGRESS. 7034 */ 7035 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 7036 { 7037 struct sock *sk = asoc->base.sk; 7038 int err = 0; 7039 long current_timeo = *timeo_p; 7040 DEFINE_WAIT(wait); 7041 7042 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 7043 7044 /* Increment the association's refcnt. */ 7045 sctp_association_hold(asoc); 7046 7047 for (;;) { 7048 prepare_to_wait_exclusive(&asoc->wait, &wait, 7049 TASK_INTERRUPTIBLE); 7050 if (!*timeo_p) 7051 goto do_nonblock; 7052 if (sk->sk_shutdown & RCV_SHUTDOWN) 7053 break; 7054 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 7055 asoc->base.dead) 7056 goto do_error; 7057 if (signal_pending(current)) 7058 goto do_interrupted; 7059 7060 if (sctp_state(asoc, ESTABLISHED)) 7061 break; 7062 7063 /* Let another process have a go. Since we are going 7064 * to sleep anyway. 7065 */ 7066 release_sock(sk); 7067 current_timeo = schedule_timeout(current_timeo); 7068 lock_sock(sk); 7069 7070 *timeo_p = current_timeo; 7071 } 7072 7073 out: 7074 finish_wait(&asoc->wait, &wait); 7075 7076 /* Release the association's refcnt. 
*/ 7077 sctp_association_put(asoc); 7078 7079 return err; 7080 7081 do_error: 7082 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 7083 err = -ETIMEDOUT; 7084 else 7085 err = -ECONNREFUSED; 7086 goto out; 7087 7088 do_interrupted: 7089 err = sock_intr_errno(*timeo_p); 7090 goto out; 7091 7092 do_nonblock: 7093 err = -EINPROGRESS; 7094 goto out; 7095 } 7096 7097 static int sctp_wait_for_accept(struct sock *sk, long timeo) 7098 { 7099 struct sctp_endpoint *ep; 7100 int err = 0; 7101 DEFINE_WAIT(wait); 7102 7103 ep = sctp_sk(sk)->ep; 7104 7105 7106 for (;;) { 7107 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 7108 TASK_INTERRUPTIBLE); 7109 7110 if (list_empty(&ep->asocs)) { 7111 release_sock(sk); 7112 timeo = schedule_timeout(timeo); 7113 lock_sock(sk); 7114 } 7115 7116 err = -EINVAL; 7117 if (!sctp_sstate(sk, LISTENING)) 7118 break; 7119 7120 err = 0; 7121 if (!list_empty(&ep->asocs)) 7122 break; 7123 7124 err = sock_intr_errno(timeo); 7125 if (signal_pending(current)) 7126 break; 7127 7128 err = -EAGAIN; 7129 if (!timeo) 7130 break; 7131 } 7132 7133 finish_wait(sk_sleep(sk), &wait); 7134 7135 return err; 7136 } 7137 7138 static void sctp_wait_for_close(struct sock *sk, long timeout) 7139 { 7140 DEFINE_WAIT(wait); 7141 7142 do { 7143 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 7144 if (list_empty(&sctp_sk(sk)->ep->asocs)) 7145 break; 7146 release_sock(sk); 7147 timeout = schedule_timeout(timeout); 7148 lock_sock(sk); 7149 } while (!signal_pending(current) && timeout); 7150 7151 finish_wait(sk_sleep(sk), &wait); 7152 } 7153 7154 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 7155 { 7156 struct sk_buff *frag; 7157 7158 if (!skb->data_len) 7159 goto done; 7160 7161 /* Don't forget the fragments. */ 7162 skb_walk_frags(skb, frag) 7163 sctp_skb_set_owner_r_frag(frag, sk); 7164 7165 done: 7166 sctp_skb_set_owner_r(skb, sk); 7167 } 7168 7169 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 7170 struct sctp_association *asoc) 7171 { 7172 struct inet_sock *inet = inet_sk(sk); 7173 struct inet_sock *newinet; 7174 7175 newsk->sk_type = sk->sk_type; 7176 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 7177 newsk->sk_flags = sk->sk_flags; 7178 newsk->sk_no_check_tx = sk->sk_no_check_tx; 7179 newsk->sk_no_check_rx = sk->sk_no_check_rx; 7180 newsk->sk_reuse = sk->sk_reuse; 7181 7182 newsk->sk_shutdown = sk->sk_shutdown; 7183 newsk->sk_destruct = sctp_destruct_sock; 7184 newsk->sk_family = sk->sk_family; 7185 newsk->sk_protocol = IPPROTO_SCTP; 7186 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 7187 newsk->sk_sndbuf = sk->sk_sndbuf; 7188 newsk->sk_rcvbuf = sk->sk_rcvbuf; 7189 newsk->sk_lingertime = sk->sk_lingertime; 7190 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 7191 newsk->sk_sndtimeo = sk->sk_sndtimeo; 7192 7193 newinet = inet_sk(newsk); 7194 7195 /* Initialize sk's sport, dport, rcv_saddr and daddr for 7196 * getsockname() and getpeername() 7197 */ 7198 newinet->inet_sport = inet->inet_sport; 7199 newinet->inet_saddr = inet->inet_saddr; 7200 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 7201 newinet->inet_dport = htons(asoc->peer.port); 7202 newinet->pmtudisc = inet->pmtudisc; 7203 newinet->inet_id = asoc->next_tsn ^ jiffies; 7204 7205 newinet->uc_ttl = inet->uc_ttl; 7206 newinet->mc_loop = 1; 7207 newinet->mc_ttl = 1; 7208 newinet->mc_index = 0; 7209 newinet->mc_list = NULL; 7210 } 7211 7212 static inline void sctp_copy_descendant(struct sock *sk_to, 7213 const struct sock *sk_from) 7214 { 7215 int ancestor_size = sizeof(struct inet_sock) + 
7216 sizeof(struct sctp_sock) - 7217 offsetof(struct sctp_sock, auto_asconf_list); 7218 7219 if (sk_from->sk_family == PF_INET6) 7220 ancestor_size += sizeof(struct ipv6_pinfo); 7221 7222 __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); 7223 } 7224 7225 /* Populate the fields of the newsk from the oldsk and migrate the assoc 7226 * and its messages to the newsk. 7227 */ 7228 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 7229 struct sctp_association *assoc, 7230 sctp_socket_type_t type) 7231 { 7232 struct sctp_sock *oldsp = sctp_sk(oldsk); 7233 struct sctp_sock *newsp = sctp_sk(newsk); 7234 struct sctp_bind_bucket *pp; /* hash list port iterator */ 7235 struct sctp_endpoint *newep = newsp->ep; 7236 struct sk_buff *skb, *tmp; 7237 struct sctp_ulpevent *event; 7238 struct sctp_bind_hashbucket *head; 7239 7240 /* Migrate socket buffer sizes and all the socket level options to the 7241 * new socket. 7242 */ 7243 newsk->sk_sndbuf = oldsk->sk_sndbuf; 7244 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 7245 /* Brute force copy old sctp opt. */ 7246 sctp_copy_descendant(newsk, oldsk); 7247 7248 /* Restore the ep value that was overwritten with the above structure 7249 * copy. 7250 */ 7251 newsp->ep = newep; 7252 newsp->hmac = NULL; 7253 7254 /* Hook this new socket in to the bind_hash list. */ 7255 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 7256 inet_sk(oldsk)->inet_num)]; 7257 local_bh_disable(); 7258 spin_lock(&head->lock); 7259 pp = sctp_sk(oldsk)->bind_hash; 7260 sk_add_bind_node(newsk, &pp->owner); 7261 sctp_sk(newsk)->bind_hash = pp; 7262 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 7263 spin_unlock(&head->lock); 7264 local_bh_enable(); 7265 7266 /* Copy the bind_addr list from the original endpoint to the new 7267 * endpoint so that we can handle restarts properly 7268 */ 7269 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 7270 &oldsp->ep->base.bind_addr, GFP_KERNEL); 7271 7272 /* Move any messages in the old socket's receive queue that are for the 7273 * peeled off association to the new socket's receive queue. 7274 */ 7275 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 7276 event = sctp_skb2event(skb); 7277 if (event->asoc == assoc) { 7278 __skb_unlink(skb, &oldsk->sk_receive_queue); 7279 __skb_queue_tail(&newsk->sk_receive_queue, skb); 7280 sctp_skb_set_owner_r_frag(skb, newsk); 7281 } 7282 } 7283 7284 /* Clean up any messages pending delivery due to partial 7285 * delivery. Three cases: 7286 * 1) No partial deliver; no work. 7287 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 7288 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 7289 */ 7290 skb_queue_head_init(&newsp->pd_lobby); 7291 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 7292 7293 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 7294 struct sk_buff_head *queue; 7295 7296 /* Decide which queue to move pd_lobby skbs to. */ 7297 if (assoc->ulpq.pd_mode) { 7298 queue = &newsp->pd_lobby; 7299 } else 7300 queue = &newsk->sk_receive_queue; 7301 7302 /* Walk through the pd_lobby, looking for skbs that 7303 * need moved to the new socket. 7304 */ 7305 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 7306 event = sctp_skb2event(skb); 7307 if (event->asoc == assoc) { 7308 __skb_unlink(skb, &oldsp->pd_lobby); 7309 __skb_queue_tail(queue, skb); 7310 sctp_skb_set_owner_r_frag(skb, newsk); 7311 } 7312 } 7313 7314 /* Clear up any skbs waiting for the partial 7315 * delivery to finish. 
7316 */ 7317 if (assoc->ulpq.pd_mode) 7318 sctp_clear_pd(oldsk, NULL); 7319 7320 } 7321 7322 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 7323 sctp_skb_set_owner_r_frag(skb, newsk); 7324 7325 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 7326 sctp_skb_set_owner_r_frag(skb, newsk); 7327 7328 /* Set the type of socket to indicate that it is peeled off from the 7329 * original UDP-style socket or created with the accept() call on a 7330 * TCP-style socket.. 7331 */ 7332 newsp->type = type; 7333 7334 /* Mark the new socket "in-use" by the user so that any packets 7335 * that may arrive on the association after we've moved it are 7336 * queued to the backlog. This prevents a potential race between 7337 * backlog processing on the old socket and new-packet processing 7338 * on the new socket. 7339 * 7340 * The caller has just allocated newsk so we can guarantee that other 7341 * paths won't try to lock it and then oldsk. 7342 */ 7343 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7344 sctp_assoc_migrate(assoc, newsk); 7345 7346 /* If the association on the newsk is already closed before accept() 7347 * is called, set RCV_SHUTDOWN flag. 7348 */ 7349 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7350 newsk->sk_shutdown |= RCV_SHUTDOWN; 7351 7352 newsk->sk_state = SCTP_SS_ESTABLISHED; 7353 release_sock(newsk); 7354 } 7355 7356 7357 /* This proto struct describes the ULP interface for SCTP. */ 7358 struct proto sctp_prot = { 7359 .name = "SCTP", 7360 .owner = THIS_MODULE, 7361 .close = sctp_close, 7362 .connect = sctp_connect, 7363 .disconnect = sctp_disconnect, 7364 .accept = sctp_accept, 7365 .ioctl = sctp_ioctl, 7366 .init = sctp_init_sock, 7367 .destroy = sctp_destroy_sock, 7368 .shutdown = sctp_shutdown, 7369 .setsockopt = sctp_setsockopt, 7370 .getsockopt = sctp_getsockopt, 7371 .sendmsg = sctp_sendmsg, 7372 .recvmsg = sctp_recvmsg, 7373 .bind = sctp_bind, 7374 .backlog_rcv = sctp_backlog_rcv, 7375 .hash = sctp_hash, 7376 .unhash = sctp_unhash, 7377 .get_port = sctp_get_port, 7378 .obj_size = sizeof(struct sctp_sock), 7379 .sysctl_mem = sysctl_sctp_mem, 7380 .sysctl_rmem = sysctl_sctp_rmem, 7381 .sysctl_wmem = sysctl_sctp_wmem, 7382 .memory_pressure = &sctp_memory_pressure, 7383 .enter_memory_pressure = sctp_enter_memory_pressure, 7384 .memory_allocated = &sctp_memory_allocated, 7385 .sockets_allocated = &sctp_sockets_allocated, 7386 }; 7387 7388 #if IS_ENABLED(CONFIG_IPV6) 7389 7390 struct proto sctpv6_prot = { 7391 .name = "SCTPv6", 7392 .owner = THIS_MODULE, 7393 .close = sctp_close, 7394 .connect = sctp_connect, 7395 .disconnect = sctp_disconnect, 7396 .accept = sctp_accept, 7397 .ioctl = sctp_ioctl, 7398 .init = sctp_init_sock, 7399 .destroy = sctp_destroy_sock, 7400 .shutdown = sctp_shutdown, 7401 .setsockopt = sctp_setsockopt, 7402 .getsockopt = sctp_getsockopt, 7403 .sendmsg = sctp_sendmsg, 7404 .recvmsg = sctp_recvmsg, 7405 .bind = sctp_bind, 7406 .backlog_rcv = sctp_backlog_rcv, 7407 .hash = sctp_hash, 7408 .unhash = sctp_unhash, 7409 .get_port = sctp_get_port, 7410 .obj_size = sizeof(struct sctp6_sock), 7411 .sysctl_mem = sysctl_sctp_mem, 7412 .sysctl_rmem = sysctl_sctp_rmem, 7413 .sysctl_wmem = sysctl_sctp_wmem, 7414 .memory_pressure = &sctp_memory_pressure, 7415 .enter_memory_pressure = sctp_enter_memory_pressure, 7416 .memory_allocated = &sctp_memory_allocated, 7417 .sockets_allocated = &sctp_sockets_allocated, 7418 }; 7419 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7420