1 /* SCTP kernel implementation 2 * (C) Copyright IBM Corp. 2001, 2004 3 * Copyright (c) 1999-2000 Cisco, Inc. 4 * Copyright (c) 1999-2001 Motorola, Inc. 5 * Copyright (c) 2001-2003 Intel Corp. 6 * Copyright (c) 2001-2002 Nokia, Inc. 7 * Copyright (c) 2001 La Monte H.P. Yarroll 8 * 9 * This file is part of the SCTP kernel implementation 10 * 11 * These functions interface with the sockets layer to implement the 12 * SCTP Extensions for the Sockets API. 13 * 14 * Note that the descriptions from the specification are USER level 15 * functions--this file is the functions which populate the struct proto 16 * for SCTP which is the BOTTOM of the sockets interface. 17 * 18 * This SCTP implementation is free software; 19 * you can redistribute it and/or modify it under the terms of 20 * the GNU General Public License as published by 21 * the Free Software Foundation; either version 2, or (at your option) 22 * any later version. 23 * 24 * This SCTP implementation is distributed in the hope that it 25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied 26 * ************************ 27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 28 * See the GNU General Public License for more details. 29 * 30 * You should have received a copy of the GNU General Public License 31 * along with GNU CC; see the file COPYING. If not, see 32 * <http://www.gnu.org/licenses/>. 33 * 34 * Please send any bug reports or fixes you make to the 35 * email address(es): 36 * lksctp developers <linux-sctp@vger.kernel.org> 37 * 38 * Written or modified by: 39 * La Monte H.P. Yarroll <piggy@acm.org> 40 * Narasimha Budihal <narsi@refcode.org> 41 * Karl Knutson <karl@athena.chicago.il.us> 42 * Jon Grimm <jgrimm@us.ibm.com> 43 * Xingang Guo <xingang.guo@intel.com> 44 * Daisy Chang <daisyc@us.ibm.com> 45 * Sridhar Samudrala <samudrala@us.ibm.com> 46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> 47 * Ardelle Fan <ardelle.fan@intel.com> 48 * Ryan Layer <rmlayer@us.ibm.com> 49 * Anup Pemmaiah <pemmaiah@cc.usu.edu> 50 * Kevin Gao <kevin.gao@intel.com> 51 */ 52 53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 54 55 #include <linux/types.h> 56 #include <linux/kernel.h> 57 #include <linux/wait.h> 58 #include <linux/time.h> 59 #include <linux/ip.h> 60 #include <linux/capability.h> 61 #include <linux/fcntl.h> 62 #include <linux/poll.h> 63 #include <linux/init.h> 64 #include <linux/crypto.h> 65 #include <linux/slab.h> 66 #include <linux/file.h> 67 #include <linux/compat.h> 68 69 #include <net/ip.h> 70 #include <net/icmp.h> 71 #include <net/route.h> 72 #include <net/ipv6.h> 73 #include <net/inet_common.h> 74 #include <net/busy_poll.h> 75 76 #include <linux/socket.h> /* for sa_family_t */ 77 #include <linux/export.h> 78 #include <net/sock.h> 79 #include <net/sctp/sctp.h> 80 #include <net/sctp/sm.h> 81 82 /* Forward declarations for internal helper functions. 
*/ 83 static int sctp_writeable(struct sock *sk); 84 static void sctp_wfree(struct sk_buff *skb); 85 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, 86 size_t msg_len); 87 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); 88 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); 89 static int sctp_wait_for_accept(struct sock *sk, long timeo); 90 static void sctp_wait_for_close(struct sock *sk, long timeo); 91 static void sctp_destruct_sock(struct sock *sk); 92 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 93 union sctp_addr *addr, int len); 94 static int sctp_bindx_add(struct sock *, struct sockaddr *, int); 95 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); 96 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); 97 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); 98 static int sctp_send_asconf(struct sctp_association *asoc, 99 struct sctp_chunk *chunk); 100 static int sctp_do_bind(struct sock *, union sctp_addr *, int); 101 static int sctp_autobind(struct sock *sk); 102 static void sctp_sock_migrate(struct sock *, struct sock *, 103 struct sctp_association *, sctp_socket_type_t); 104 105 static int sctp_memory_pressure; 106 static atomic_long_t sctp_memory_allocated; 107 struct percpu_counter sctp_sockets_allocated; 108 109 static void sctp_enter_memory_pressure(struct sock *sk) 110 { 111 sctp_memory_pressure = 1; 112 } 113 114 115 /* Get the sndbuf space available at the time on the association. */ 116 static inline int sctp_wspace(struct sctp_association *asoc) 117 { 118 int amt; 119 120 if (asoc->ep->sndbuf_policy) 121 amt = asoc->sndbuf_used; 122 else 123 amt = sk_wmem_alloc_get(asoc->base.sk); 124 125 if (amt >= asoc->base.sk->sk_sndbuf) { 126 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) 127 amt = 0; 128 else { 129 amt = sk_stream_wspace(asoc->base.sk); 130 if (amt < 0) 131 amt = 0; 132 } 133 } else { 134 amt = asoc->base.sk->sk_sndbuf - amt; 135 } 136 return amt; 137 } 138 139 /* Increment the used sndbuf space count of the corresponding association by 140 * the size of the outgoing data chunk. 141 * Also, set the skb destructor for sndbuf accounting later. 142 * 143 * Since it is always 1-1 between chunk and skb, and also a new skb is always 144 * allocated for chunk bundling in sctp_packet_transmit(), we can use the 145 * destructor in the data chunk skb for the purpose of the sndbuf space 146 * tracking. 147 */ 148 static inline void sctp_set_owner_w(struct sctp_chunk *chunk) 149 { 150 struct sctp_association *asoc = chunk->asoc; 151 struct sock *sk = asoc->base.sk; 152 153 /* The sndbuf space is tracked per association. */ 154 sctp_association_hold(asoc); 155 156 skb_set_owner_w(chunk->skb, sk); 157 158 chunk->skb->destructor = sctp_wfree; 159 /* Save the chunk pointer in skb for sctp_wfree to use later. */ 160 skb_shinfo(chunk->skb)->destructor_arg = chunk; 161 162 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + 163 sizeof(struct sk_buff) + 164 sizeof(struct sctp_chunk); 165 166 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 167 sk->sk_wmem_queued += chunk->skb->truesize; 168 sk_mem_charge(sk, chunk->skb->truesize); 169 } 170 171 /* Verify that this is a valid address. */ 172 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, 173 int len) 174 { 175 struct sctp_af *af; 176 177 /* Verify basic sockaddr. 
*/ 178 af = sctp_sockaddr_af(sctp_sk(sk), addr, len); 179 if (!af) 180 return -EINVAL; 181 182 /* Is this a valid SCTP address? */ 183 if (!af->addr_valid(addr, sctp_sk(sk), NULL)) 184 return -EINVAL; 185 186 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) 187 return -EINVAL; 188 189 return 0; 190 } 191 192 /* Look up the association by its id. If this is not a UDP-style 193 * socket, the ID field is always ignored. 194 */ 195 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) 196 { 197 struct sctp_association *asoc = NULL; 198 199 /* If this is not a UDP-style socket, assoc id should be ignored. */ 200 if (!sctp_style(sk, UDP)) { 201 /* Return NULL if the socket state is not ESTABLISHED. It 202 * could be a TCP-style listening socket or a socket which 203 * hasn't yet called connect() to establish an association. 204 */ 205 if (!sctp_sstate(sk, ESTABLISHED)) 206 return NULL; 207 208 /* Get the first and the only association from the list. */ 209 if (!list_empty(&sctp_sk(sk)->ep->asocs)) 210 asoc = list_entry(sctp_sk(sk)->ep->asocs.next, 211 struct sctp_association, asocs); 212 return asoc; 213 } 214 215 /* Otherwise this is a UDP-style socket. */ 216 if (!id || (id == (sctp_assoc_t)-1)) 217 return NULL; 218 219 spin_lock_bh(&sctp_assocs_id_lock); 220 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); 221 spin_unlock_bh(&sctp_assocs_id_lock); 222 223 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) 224 return NULL; 225 226 return asoc; 227 } 228 229 /* Look up the transport from an address and an assoc id. If both address and 230 * id are specified, the associations matching the address and the id should be 231 * the same. 232 */ 233 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, 234 struct sockaddr_storage *addr, 235 sctp_assoc_t id) 236 { 237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 238 struct sctp_transport *transport; 239 union sctp_addr *laddr = (union sctp_addr *)addr; 240 241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 242 laddr, 243 &transport); 244 245 if (!addr_asoc) 246 return NULL; 247 248 id_asoc = sctp_id2assoc(sk, id); 249 if (id_asoc && (id_asoc != addr_asoc)) 250 return NULL; 251 252 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 253 (union sctp_addr *)addr); 254 255 return transport; 256 } 257 258 /* API 3.1.2 bind() - UDP Style Syntax 259 * The syntax of bind() is, 260 * 261 * ret = bind(int sd, struct sockaddr *addr, int addrlen); 262 * 263 * sd - the socket descriptor returned by socket(). 264 * addr - the address structure (struct sockaddr_in or struct 265 * sockaddr_in6 [RFC 2553]), 266 * addr_len - the size of the address structure. 267 */ 268 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) 269 { 270 int retval = 0; 271 272 lock_sock(sk); 273 274 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, 275 addr, addr_len); 276 277 /* Disallow binding twice. */ 278 if (!sctp_sk(sk)->ep->base.bind_addr.port) 279 retval = sctp_do_bind(sk, (union sctp_addr *)addr, 280 addr_len); 281 else 282 retval = -EINVAL; 283 284 release_sock(sk); 285 286 return retval; 287 } 288 289 static long sctp_get_port_local(struct sock *, union sctp_addr *); 290 291 /* Verify this is a valid sockaddr. */ 292 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 293 union sctp_addr *addr, int len) 294 { 295 struct sctp_af *af; 296 297 /* Check minimum size. 
 */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped addresses are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association. */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < PROT_SOCK &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port. */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
409 */ 410 static int sctp_send_asconf(struct sctp_association *asoc, 411 struct sctp_chunk *chunk) 412 { 413 struct net *net = sock_net(asoc->base.sk); 414 int retval = 0; 415 416 /* If there is an outstanding ASCONF chunk, queue it for later 417 * transmission. 418 */ 419 if (asoc->addip_last_asconf) { 420 list_add_tail(&chunk->list, &asoc->addip_chunk_list); 421 goto out; 422 } 423 424 /* Hold the chunk until an ASCONF_ACK is received. */ 425 sctp_chunk_hold(chunk); 426 retval = sctp_primitive_ASCONF(net, asoc, chunk); 427 if (retval) 428 sctp_chunk_free(chunk); 429 else 430 asoc->addip_last_asconf = chunk; 431 432 out: 433 return retval; 434 } 435 436 /* Add a list of addresses as bind addresses to local endpoint or 437 * association. 438 * 439 * Basically run through each address specified in the addrs/addrcnt 440 * array/length pair, determine if it is IPv6 or IPv4 and call 441 * sctp_do_bind() on it. 442 * 443 * If any of them fails, then the operation will be reversed and the 444 * ones that were added will be removed. 445 * 446 * Only sctp_setsockopt_bindx() is supposed to call this function. 447 */ 448 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) 449 { 450 int cnt; 451 int retval = 0; 452 void *addr_buf; 453 struct sockaddr *sa_addr; 454 struct sctp_af *af; 455 456 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, 457 addrs, addrcnt); 458 459 addr_buf = addrs; 460 for (cnt = 0; cnt < addrcnt; cnt++) { 461 /* The list may contain either IPv4 or IPv6 address; 462 * determine the address length for walking thru the list. 463 */ 464 sa_addr = addr_buf; 465 af = sctp_get_af_specific(sa_addr->sa_family); 466 if (!af) { 467 retval = -EINVAL; 468 goto err_bindx_add; 469 } 470 471 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, 472 af->sockaddr_len); 473 474 addr_buf += af->sockaddr_len; 475 476 err_bindx_add: 477 if (retval < 0) { 478 /* Failed. Cleanup the ones that have been added */ 479 if (cnt > 0) 480 sctp_bindx_rem(sk, addrs, cnt); 481 return retval; 482 } 483 } 484 485 return retval; 486 } 487 488 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the 489 * associations that are part of the endpoint indicating that a list of local 490 * addresses are added to the endpoint. 491 * 492 * If any of the addresses is already in the bind address list of the 493 * association, we do not send the chunk for that association. But it will not 494 * affect other associations. 495 * 496 * Only sctp_setsockopt_bindx() is supposed to call this function. 
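 *
 * For reference, this path is normally reached from user space via
 * sctp_bindx() with SCTP_BINDX_ADD_ADDR.  A minimal sketch (illustrative
 * only; it assumes the lksctp-tools wrapper, a one-to-many socket sd
 * already bound to port 5000, and the net.sctp.addip_enable sysctl
 * turned on so that an ASCONF is actually sent to established peers):
 *
 *	struct sockaddr_in extra;
 *
 *	memset(&extra, 0, sizeof(extra));
 *	extra.sin_family = AF_INET;
 *	extra.sin_port = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.10", &extra.sin_addr);
 *
 *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *		       SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");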
497 */ 498 static int sctp_send_asconf_add_ip(struct sock *sk, 499 struct sockaddr *addrs, 500 int addrcnt) 501 { 502 struct net *net = sock_net(sk); 503 struct sctp_sock *sp; 504 struct sctp_endpoint *ep; 505 struct sctp_association *asoc; 506 struct sctp_bind_addr *bp; 507 struct sctp_chunk *chunk; 508 struct sctp_sockaddr_entry *laddr; 509 union sctp_addr *addr; 510 union sctp_addr saveaddr; 511 void *addr_buf; 512 struct sctp_af *af; 513 struct list_head *p; 514 int i; 515 int retval = 0; 516 517 if (!net->sctp.addip_enable) 518 return retval; 519 520 sp = sctp_sk(sk); 521 ep = sp->ep; 522 523 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 524 __func__, sk, addrs, addrcnt); 525 526 list_for_each_entry(asoc, &ep->asocs, asocs) { 527 if (!asoc->peer.asconf_capable) 528 continue; 529 530 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) 531 continue; 532 533 if (!sctp_state(asoc, ESTABLISHED)) 534 continue; 535 536 /* Check if any address in the packed array of addresses is 537 * in the bind address list of the association. If so, 538 * do not send the asconf chunk to its peer, but continue with 539 * other associations. 540 */ 541 addr_buf = addrs; 542 for (i = 0; i < addrcnt; i++) { 543 addr = addr_buf; 544 af = sctp_get_af_specific(addr->v4.sin_family); 545 if (!af) { 546 retval = -EINVAL; 547 goto out; 548 } 549 550 if (sctp_assoc_lookup_laddr(asoc, addr)) 551 break; 552 553 addr_buf += af->sockaddr_len; 554 } 555 if (i < addrcnt) 556 continue; 557 558 /* Use the first valid address in bind addr list of 559 * association as Address Parameter of ASCONF CHUNK. 560 */ 561 bp = &asoc->base.bind_addr; 562 p = bp->address_list.next; 563 laddr = list_entry(p, struct sctp_sockaddr_entry, list); 564 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, 565 addrcnt, SCTP_PARAM_ADD_IP); 566 if (!chunk) { 567 retval = -ENOMEM; 568 goto out; 569 } 570 571 /* Add the new addresses to the bind address list with 572 * use_as_src set to 0. 573 */ 574 addr_buf = addrs; 575 for (i = 0; i < addrcnt; i++) { 576 addr = addr_buf; 577 af = sctp_get_af_specific(addr->v4.sin_family); 578 memcpy(&saveaddr, addr, af->sockaddr_len); 579 retval = sctp_add_bind_addr(bp, &saveaddr, 580 SCTP_ADDR_NEW, GFP_ATOMIC); 581 addr_buf += af->sockaddr_len; 582 } 583 if (asoc->src_out_of_asoc_ok) { 584 struct sctp_transport *trans; 585 586 list_for_each_entry(trans, 587 &asoc->peer.transport_addr_list, transports) { 588 /* Clear the source and route cache */ 589 dst_release(trans->dst); 590 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 591 2*asoc->pathmtu, 4380)); 592 trans->ssthresh = asoc->peer.i.a_rwnd; 593 trans->rto = asoc->rto_initial; 594 sctp_max_rto(asoc, trans); 595 trans->rtt = trans->srtt = trans->rttvar = 0; 596 sctp_transport_route(trans, NULL, 597 sctp_sk(asoc->base.sk)); 598 } 599 } 600 retval = sctp_send_asconf(asoc, chunk); 601 } 602 603 out: 604 return retval; 605 } 606 607 /* Remove a list of addresses from bind addresses list. Do not remove the 608 * last address. 609 * 610 * Basically run through each address specified in the addrs/addrcnt 611 * array/length pair, determine if it is IPv6 or IPv4 and call 612 * sctp_del_bind() on it. 613 * 614 * If any of them fails, then the operation will be reversed and the 615 * ones that were removed will be added back. 616 * 617 * At least one address has to be left; if only one address is 618 * available, the operation will return -EBUSY. 619 * 620 * Only sctp_setsockopt_bindx() is supposed to call this function. 
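 *
 * Seen from user space this is sctp_bindx() with SCTP_BINDX_REM_ADDR
 * (illustrative sketch only; lksctp-tools wrapper assumed, sd a bound
 * SCTP socket and unwanted a previously bound sockaddr):
 *
 *	err = sctp_bindx(sd, (struct sockaddr *)&unwanted, 1,
 *			 SCTP_BINDX_REM_ADDR);
 *
 * If "unwanted" is the only address left in the bind address list, the
 * call fails with errno set to EBUSY, matching the rule above that at
 * least one address must remain bound.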
621 */ 622 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) 623 { 624 struct sctp_sock *sp = sctp_sk(sk); 625 struct sctp_endpoint *ep = sp->ep; 626 int cnt; 627 struct sctp_bind_addr *bp = &ep->base.bind_addr; 628 int retval = 0; 629 void *addr_buf; 630 union sctp_addr *sa_addr; 631 struct sctp_af *af; 632 633 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 634 __func__, sk, addrs, addrcnt); 635 636 addr_buf = addrs; 637 for (cnt = 0; cnt < addrcnt; cnt++) { 638 /* If the bind address list is empty or if there is only one 639 * bind address, there is nothing more to be removed (we need 640 * at least one address here). 641 */ 642 if (list_empty(&bp->address_list) || 643 (sctp_list_single_entry(&bp->address_list))) { 644 retval = -EBUSY; 645 goto err_bindx_rem; 646 } 647 648 sa_addr = addr_buf; 649 af = sctp_get_af_specific(sa_addr->sa.sa_family); 650 if (!af) { 651 retval = -EINVAL; 652 goto err_bindx_rem; 653 } 654 655 if (!af->addr_valid(sa_addr, sp, NULL)) { 656 retval = -EADDRNOTAVAIL; 657 goto err_bindx_rem; 658 } 659 660 if (sa_addr->v4.sin_port && 661 sa_addr->v4.sin_port != htons(bp->port)) { 662 retval = -EINVAL; 663 goto err_bindx_rem; 664 } 665 666 if (!sa_addr->v4.sin_port) 667 sa_addr->v4.sin_port = htons(bp->port); 668 669 /* FIXME - There is probably a need to check if sk->sk_saddr and 670 * sk->sk_rcv_addr are currently set to one of the addresses to 671 * be removed. This is something which needs to be looked into 672 * when we are fixing the outstanding issues with multi-homing 673 * socket routing and failover schemes. Refer to comments in 674 * sctp_do_bind(). -daisy 675 */ 676 retval = sctp_del_bind_addr(bp, sa_addr); 677 678 addr_buf += af->sockaddr_len; 679 err_bindx_rem: 680 if (retval < 0) { 681 /* Failed. Add the ones that has been removed back */ 682 if (cnt > 0) 683 sctp_bindx_add(sk, addrs, cnt); 684 return retval; 685 } 686 } 687 688 return retval; 689 } 690 691 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of 692 * the associations that are part of the endpoint indicating that a list of 693 * local addresses are removed from the endpoint. 694 * 695 * If any of the addresses is already in the bind address list of the 696 * association, we do not send the chunk for that association. But it will not 697 * affect other associations. 698 * 699 * Only sctp_setsockopt_bindx() is supposed to call this function. 700 */ 701 static int sctp_send_asconf_del_ip(struct sock *sk, 702 struct sockaddr *addrs, 703 int addrcnt) 704 { 705 struct net *net = sock_net(sk); 706 struct sctp_sock *sp; 707 struct sctp_endpoint *ep; 708 struct sctp_association *asoc; 709 struct sctp_transport *transport; 710 struct sctp_bind_addr *bp; 711 struct sctp_chunk *chunk; 712 union sctp_addr *laddr; 713 void *addr_buf; 714 struct sctp_af *af; 715 struct sctp_sockaddr_entry *saddr; 716 int i; 717 int retval = 0; 718 int stored = 0; 719 720 chunk = NULL; 721 if (!net->sctp.addip_enable) 722 return retval; 723 724 sp = sctp_sk(sk); 725 ep = sp->ep; 726 727 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 728 __func__, sk, addrs, addrcnt); 729 730 list_for_each_entry(asoc, &ep->asocs, asocs) { 731 732 if (!asoc->peer.asconf_capable) 733 continue; 734 735 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) 736 continue; 737 738 if (!sctp_state(asoc, ESTABLISHED)) 739 continue; 740 741 /* Check if any address in the packed array of addresses is 742 * not present in the bind address list of the association. 
743 * If so, do not send the asconf chunk to its peer, but 744 * continue with other associations. 745 */ 746 addr_buf = addrs; 747 for (i = 0; i < addrcnt; i++) { 748 laddr = addr_buf; 749 af = sctp_get_af_specific(laddr->v4.sin_family); 750 if (!af) { 751 retval = -EINVAL; 752 goto out; 753 } 754 755 if (!sctp_assoc_lookup_laddr(asoc, laddr)) 756 break; 757 758 addr_buf += af->sockaddr_len; 759 } 760 if (i < addrcnt) 761 continue; 762 763 /* Find one address in the association's bind address list 764 * that is not in the packed array of addresses. This is to 765 * make sure that we do not delete all the addresses in the 766 * association. 767 */ 768 bp = &asoc->base.bind_addr; 769 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, 770 addrcnt, sp); 771 if ((laddr == NULL) && (addrcnt == 1)) { 772 if (asoc->asconf_addr_del_pending) 773 continue; 774 asoc->asconf_addr_del_pending = 775 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); 776 if (asoc->asconf_addr_del_pending == NULL) { 777 retval = -ENOMEM; 778 goto out; 779 } 780 asoc->asconf_addr_del_pending->sa.sa_family = 781 addrs->sa_family; 782 asoc->asconf_addr_del_pending->v4.sin_port = 783 htons(bp->port); 784 if (addrs->sa_family == AF_INET) { 785 struct sockaddr_in *sin; 786 787 sin = (struct sockaddr_in *)addrs; 788 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; 789 } else if (addrs->sa_family == AF_INET6) { 790 struct sockaddr_in6 *sin6; 791 792 sin6 = (struct sockaddr_in6 *)addrs; 793 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; 794 } 795 796 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", 797 __func__, asoc, &asoc->asconf_addr_del_pending->sa, 798 asoc->asconf_addr_del_pending); 799 800 asoc->src_out_of_asoc_ok = 1; 801 stored = 1; 802 goto skip_mkasconf; 803 } 804 805 if (laddr == NULL) 806 return -EINVAL; 807 808 /* We do not need RCU protection throughout this loop 809 * because this is done under a socket lock from the 810 * setsockopt call. 811 */ 812 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, 813 SCTP_PARAM_DEL_IP); 814 if (!chunk) { 815 retval = -ENOMEM; 816 goto out; 817 } 818 819 skip_mkasconf: 820 /* Reset use_as_src flag for the addresses in the bind address 821 * list that are to be deleted. 822 */ 823 addr_buf = addrs; 824 for (i = 0; i < addrcnt; i++) { 825 laddr = addr_buf; 826 af = sctp_get_af_specific(laddr->v4.sin_family); 827 list_for_each_entry(saddr, &bp->address_list, list) { 828 if (sctp_cmp_addr_exact(&saddr->a, laddr)) 829 saddr->state = SCTP_ADDR_DEL; 830 } 831 addr_buf += af->sockaddr_len; 832 } 833 834 /* Update the route and saddr entries for all the transports 835 * as some of the addresses in the bind address list are 836 * about to be deleted and cannot be used as source addresses. 837 */ 838 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 839 transports) { 840 dst_release(transport->dst); 841 sctp_transport_route(transport, NULL, 842 sctp_sk(asoc->base.sk)); 843 } 844 845 if (stored) 846 /* We don't need to transmit ASCONF */ 847 continue; 848 retval = sctp_send_asconf(asoc, chunk); 849 } 850 out: 851 return retval; 852 } 853 854 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ 855 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) 856 { 857 struct sock *sk = sctp_opt2sk(sp); 858 union sctp_addr *addr; 859 struct sctp_af *af; 860 861 /* It is safe to write port space in caller. 
 */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind(). Or it can
 * use sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a
 * listening socket is associated with, so that no new association
 * accepted will be associated with those addresses. If the endpoint
 * supports dynamic address reconfiguration, an SCTP_BINDX_REM_ADDR or
 * SCTP_BINDX_ADD_ADDR may cause the endpoint to send the appropriate
 * message to its peers to change the peers' address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copy the addresses from user to kernel land
 * and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel.
Then we do 938 * the copying without checking the user space area 939 * (__copy_from_user()). 940 * 941 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 942 * it. 943 * 944 * sk The sk of the socket 945 * addrs The pointer to the addresses in user land 946 * addrssize Size of the addrs buffer 947 * op Operation to perform (add or remove, see the flags of 948 * sctp_bindx) 949 * 950 * Returns 0 if ok, <0 errno code on error. 951 */ 952 static int sctp_setsockopt_bindx(struct sock *sk, 953 struct sockaddr __user *addrs, 954 int addrs_size, int op) 955 { 956 struct sockaddr *kaddrs; 957 int err; 958 int addrcnt = 0; 959 int walk_size = 0; 960 struct sockaddr *sa_addr; 961 void *addr_buf; 962 struct sctp_af *af; 963 964 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", 965 __func__, sk, addrs, addrs_size, op); 966 967 if (unlikely(addrs_size <= 0)) 968 return -EINVAL; 969 970 /* Check the user passed a healthy pointer. */ 971 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 972 return -EFAULT; 973 974 /* Alloc space for the address array in kernel memory. */ 975 kaddrs = kmalloc(addrs_size, GFP_KERNEL); 976 if (unlikely(!kaddrs)) 977 return -ENOMEM; 978 979 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 980 kfree(kaddrs); 981 return -EFAULT; 982 } 983 984 /* Walk through the addrs buffer and count the number of addresses. */ 985 addr_buf = kaddrs; 986 while (walk_size < addrs_size) { 987 if (walk_size + sizeof(sa_family_t) > addrs_size) { 988 kfree(kaddrs); 989 return -EINVAL; 990 } 991 992 sa_addr = addr_buf; 993 af = sctp_get_af_specific(sa_addr->sa_family); 994 995 /* If the address family is not supported or if this address 996 * causes the address buffer to overflow return EINVAL. 997 */ 998 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 999 kfree(kaddrs); 1000 return -EINVAL; 1001 } 1002 addrcnt++; 1003 addr_buf += af->sockaddr_len; 1004 walk_size += af->sockaddr_len; 1005 } 1006 1007 /* Do the work. */ 1008 switch (op) { 1009 case SCTP_BINDX_ADD_ADDR: 1010 err = sctp_bindx_add(sk, kaddrs, addrcnt); 1011 if (err) 1012 goto out; 1013 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); 1014 break; 1015 1016 case SCTP_BINDX_REM_ADDR: 1017 err = sctp_bindx_rem(sk, kaddrs, addrcnt); 1018 if (err) 1019 goto out; 1020 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); 1021 break; 1022 1023 default: 1024 err = -EINVAL; 1025 break; 1026 } 1027 1028 out: 1029 kfree(kaddrs); 1030 1031 return err; 1032 } 1033 1034 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) 1035 * 1036 * Common routine for handling connect() and sctp_connectx(). 1037 * Connect will come in with just a single address. 1038 */ 1039 static int __sctp_connect(struct sock *sk, 1040 struct sockaddr *kaddrs, 1041 int addrs_size, 1042 sctp_assoc_t *assoc_id) 1043 { 1044 struct net *net = sock_net(sk); 1045 struct sctp_sock *sp; 1046 struct sctp_endpoint *ep; 1047 struct sctp_association *asoc = NULL; 1048 struct sctp_association *asoc2; 1049 struct sctp_transport *transport; 1050 union sctp_addr to; 1051 sctp_scope_t scope; 1052 long timeo; 1053 int err = 0; 1054 int addrcnt = 0; 1055 int walk_size = 0; 1056 union sctp_addr *sa_addr = NULL; 1057 void *addr_buf; 1058 unsigned short port; 1059 unsigned int f_flags = 0; 1060 1061 sp = sctp_sk(sk); 1062 ep = sp->ep; 1063 1064 /* connect() cannot be done on a socket that is already in ESTABLISHED 1065 * state - UDP-style peeled off socket or a TCP-style socket that 1066 * is already connected. 
1067 * It cannot be done even on a TCP-style listening socket. 1068 */ 1069 if (sctp_sstate(sk, ESTABLISHED) || 1070 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { 1071 err = -EISCONN; 1072 goto out_free; 1073 } 1074 1075 /* Walk through the addrs buffer and count the number of addresses. */ 1076 addr_buf = kaddrs; 1077 while (walk_size < addrs_size) { 1078 struct sctp_af *af; 1079 1080 if (walk_size + sizeof(sa_family_t) > addrs_size) { 1081 err = -EINVAL; 1082 goto out_free; 1083 } 1084 1085 sa_addr = addr_buf; 1086 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1087 1088 /* If the address family is not supported or if this address 1089 * causes the address buffer to overflow return EINVAL. 1090 */ 1091 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1092 err = -EINVAL; 1093 goto out_free; 1094 } 1095 1096 port = ntohs(sa_addr->v4.sin_port); 1097 1098 /* Save current address so we can work with it */ 1099 memcpy(&to, sa_addr, af->sockaddr_len); 1100 1101 err = sctp_verify_addr(sk, &to, af->sockaddr_len); 1102 if (err) 1103 goto out_free; 1104 1105 /* Make sure the destination port is correctly set 1106 * in all addresses. 1107 */ 1108 if (asoc && asoc->peer.port && asoc->peer.port != port) { 1109 err = -EINVAL; 1110 goto out_free; 1111 } 1112 1113 /* Check if there already is a matching association on the 1114 * endpoint (other than the one created here). 1115 */ 1116 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1117 if (asoc2 && asoc2 != asoc) { 1118 if (asoc2->state >= SCTP_STATE_ESTABLISHED) 1119 err = -EISCONN; 1120 else 1121 err = -EALREADY; 1122 goto out_free; 1123 } 1124 1125 /* If we could not find a matching association on the endpoint, 1126 * make sure that there is no peeled-off association matching 1127 * the peer address even on another socket. 1128 */ 1129 if (sctp_endpoint_is_peeled_off(ep, &to)) { 1130 err = -EADDRNOTAVAIL; 1131 goto out_free; 1132 } 1133 1134 if (!asoc) { 1135 /* If a bind() or sctp_bindx() is not called prior to 1136 * an sctp_connectx() call, the system picks an 1137 * ephemeral port and will choose an address set 1138 * equivalent to binding with a wildcard address. 1139 */ 1140 if (!ep->base.bind_addr.port) { 1141 if (sctp_autobind(sk)) { 1142 err = -EAGAIN; 1143 goto out_free; 1144 } 1145 } else { 1146 /* 1147 * If an unprivileged user inherits a 1-many 1148 * style socket with open associations on a 1149 * privileged port, it MAY be permitted to 1150 * accept new associations, but it SHOULD NOT 1151 * be permitted to open new associations. 1152 */ 1153 if (ep->base.bind_addr.port < PROT_SOCK && 1154 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1155 err = -EACCES; 1156 goto out_free; 1157 } 1158 } 1159 1160 scope = sctp_scope(&to); 1161 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1162 if (!asoc) { 1163 err = -ENOMEM; 1164 goto out_free; 1165 } 1166 1167 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, 1168 GFP_KERNEL); 1169 if (err < 0) { 1170 goto out_free; 1171 } 1172 1173 } 1174 1175 /* Prime the peer's transport structures. */ 1176 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, 1177 SCTP_UNKNOWN); 1178 if (!transport) { 1179 err = -ENOMEM; 1180 goto out_free; 1181 } 1182 1183 addrcnt++; 1184 addr_buf += af->sockaddr_len; 1185 walk_size += af->sockaddr_len; 1186 } 1187 1188 /* In case the user of sctp_connectx() wants an association 1189 * id back, assign one now. 
 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	err = sctp_wait_for_connect(asoc, &timeo);
	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
		*assoc_id = asoc->assoc_id;

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table.  Try to unhash it; it is a no-op if it
		 * wasn't hashed, so we're safe either way.
		 */
		sctp_unhash_established(asoc);
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *                   sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association. On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code. The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed. Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached. The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent. This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association. It does not necessarily equal the set of addresses
 * the peer uses for the resulting association. If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user to kernel land
 * and invoke sctp_connectx(). This is used for tunneling the
 * sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk		The sk of the socket
 * addrs	The pointer to the addresses in user land
 * addrssize	Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory. */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface. It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API. Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call. Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations. The only difference is that we
 * store the actual length of the address buffer into the addr_num
 * structure member. That way we can re-use the existing code.
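 *
 * For reference, all of the connectx variants above are reached from
 * user space through sctp_connectx().  A minimal sketch (illustrative
 * only; it assumes the lksctp-tools wrapper, which packs the address
 * array and issues the setsockopt()/getsockopt() handled here, and sd
 * an already created SCTP socket):
 *
 *	struct sockaddr_in peers[2];
 *	sctp_assoc_t asoc_id;
 *
 *	memset(peers, 0, sizeof(peers));
 *	peers[0].sin_family = AF_INET;
 *	peers[0].sin_port = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.1", &peers[0].sin_addr);
 *	peers[1] = peers[0];
 *	inet_pton(AF_INET, "198.51.100.1", &peers[1].sin_addr);
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &asoc_id) < 0)
 *		perror("sctp_connectx");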
1372 */ 1373 #ifdef CONFIG_COMPAT 1374 struct compat_sctp_getaddrs_old { 1375 sctp_assoc_t assoc_id; 1376 s32 addr_num; 1377 compat_uptr_t addrs; /* struct sockaddr * */ 1378 }; 1379 #endif 1380 1381 static int sctp_getsockopt_connectx3(struct sock *sk, int len, 1382 char __user *optval, 1383 int __user *optlen) 1384 { 1385 struct sctp_getaddrs_old param; 1386 sctp_assoc_t assoc_id = 0; 1387 int err = 0; 1388 1389 #ifdef CONFIG_COMPAT 1390 if (is_compat_task()) { 1391 struct compat_sctp_getaddrs_old param32; 1392 1393 if (len < sizeof(param32)) 1394 return -EINVAL; 1395 if (copy_from_user(¶m32, optval, sizeof(param32))) 1396 return -EFAULT; 1397 1398 param.assoc_id = param32.assoc_id; 1399 param.addr_num = param32.addr_num; 1400 param.addrs = compat_ptr(param32.addrs); 1401 } else 1402 #endif 1403 { 1404 if (len < sizeof(param)) 1405 return -EINVAL; 1406 if (copy_from_user(¶m, optval, sizeof(param))) 1407 return -EFAULT; 1408 } 1409 1410 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) 1411 param.addrs, param.addr_num, 1412 &assoc_id); 1413 if (err == 0 || err == -EINPROGRESS) { 1414 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1415 return -EFAULT; 1416 if (put_user(sizeof(assoc_id), optlen)) 1417 return -EFAULT; 1418 } 1419 1420 return err; 1421 } 1422 1423 /* API 3.1.4 close() - UDP Style Syntax 1424 * Applications use close() to perform graceful shutdown (as described in 1425 * Section 10.1 of [SCTP]) on ALL the associations currently represented 1426 * by a UDP-style socket. 1427 * 1428 * The syntax is 1429 * 1430 * ret = close(int sd); 1431 * 1432 * sd - the socket descriptor of the associations to be closed. 1433 * 1434 * To gracefully shutdown a specific association represented by the 1435 * UDP-style socket, an application should use the sendmsg() call, 1436 * passing no user data, but including the appropriate flag in the 1437 * ancillary data (see Section xxxx). 1438 * 1439 * If sd in the close() call is a branched-off socket representing only 1440 * one association, the shutdown is performed on that association only. 1441 * 1442 * 4.1.6 close() - TCP Style Syntax 1443 * 1444 * Applications use close() to gracefully close down an association. 1445 * 1446 * The syntax is: 1447 * 1448 * int close(int sd); 1449 * 1450 * sd - the socket descriptor of the association to be closed. 1451 * 1452 * After an application calls close() on a socket descriptor, no further 1453 * socket operations will succeed on that descriptor. 1454 * 1455 * API 7.1.4 SO_LINGER 1456 * 1457 * An application using the TCP-style socket can use this option to 1458 * perform the SCTP ABORT primitive. The linger option structure is: 1459 * 1460 * struct linger { 1461 * int l_onoff; // option on/off 1462 * int l_linger; // linger time 1463 * }; 1464 * 1465 * To enable the option, set l_onoff to 1. If the l_linger value is set 1466 * to 0, calling close() is the same as the ABORT primitive. If the 1467 * value is set to a negative value, the setsockopt() call will return 1468 * an error. If the value is set to a positive value linger_time, the 1469 * close() can be blocked for at most linger_time ms. If the graceful 1470 * shutdown phase does not finish during this period, close() will 1471 * return but the graceful shutdown phase continues in the system. 
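 *
 * A minimal user-space sketch of the ABORT-on-close behaviour described
 * above, for a TCP-style socket sd (illustrative only):
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(sd);
 *
 * With l_onoff set and l_linger zero, the close() above is treated as
 * the ABORT primitive; with a positive l_linger, close() may block for
 * at most the configured linger time while the graceful shutdown runs.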
1472 */ 1473 static void sctp_close(struct sock *sk, long timeout) 1474 { 1475 struct net *net = sock_net(sk); 1476 struct sctp_endpoint *ep; 1477 struct sctp_association *asoc; 1478 struct list_head *pos, *temp; 1479 unsigned int data_was_unread; 1480 1481 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); 1482 1483 lock_sock(sk); 1484 sk->sk_shutdown = SHUTDOWN_MASK; 1485 sk->sk_state = SCTP_SS_CLOSING; 1486 1487 ep = sctp_sk(sk)->ep; 1488 1489 /* Clean up any skbs sitting on the receive queue. */ 1490 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); 1491 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); 1492 1493 /* Walk all associations on an endpoint. */ 1494 list_for_each_safe(pos, temp, &ep->asocs) { 1495 asoc = list_entry(pos, struct sctp_association, asocs); 1496 1497 if (sctp_style(sk, TCP)) { 1498 /* A closed association can still be in the list if 1499 * it belongs to a TCP-style listening socket that is 1500 * not yet accepted. If so, free it. If not, send an 1501 * ABORT or SHUTDOWN based on the linger options. 1502 */ 1503 if (sctp_state(asoc, CLOSED)) { 1504 sctp_unhash_established(asoc); 1505 sctp_association_free(asoc); 1506 continue; 1507 } 1508 } 1509 1510 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || 1511 !skb_queue_empty(&asoc->ulpq.reasm) || 1512 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { 1513 struct sctp_chunk *chunk; 1514 1515 chunk = sctp_make_abort_user(asoc, NULL, 0); 1516 if (chunk) 1517 sctp_primitive_ABORT(net, asoc, chunk); 1518 } else 1519 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1520 } 1521 1522 /* On a TCP-style socket, block for at most linger_time if set. */ 1523 if (sctp_style(sk, TCP) && timeout) 1524 sctp_wait_for_close(sk, timeout); 1525 1526 /* This will run the backlog queue. */ 1527 release_sock(sk); 1528 1529 /* Supposedly, no process has access to the socket, but 1530 * the net layers still may. 1531 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock 1532 * held and that should be grabbed before socket lock. 1533 */ 1534 spin_lock_bh(&net->sctp.addr_wq_lock); 1535 bh_lock_sock(sk); 1536 1537 /* Hold the sock, since sk_common_release() will put sock_put() 1538 * and we have just a little more cleanup. 1539 */ 1540 sock_hold(sk); 1541 sk_common_release(sk); 1542 1543 bh_unlock_sock(sk); 1544 spin_unlock_bh(&net->sctp.addr_wq_lock); 1545 1546 sock_put(sk); 1547 1548 SCTP_DBG_OBJCNT_DEC(sock); 1549 } 1550 1551 /* Handle EPIPE error. */ 1552 static int sctp_error(struct sock *sk, int flags, int err) 1553 { 1554 if (err == -EPIPE) 1555 err = sock_error(sk) ? : -EPIPE; 1556 if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) 1557 send_sig(SIGPIPE, current, 0); 1558 return err; 1559 } 1560 1561 /* API 3.1.3 sendmsg() - UDP Style Syntax 1562 * 1563 * An application uses sendmsg() and recvmsg() calls to transmit data to 1564 * and receive data from its peer. 1565 * 1566 * ssize_t sendmsg(int socket, const struct msghdr *message, 1567 * int flags); 1568 * 1569 * socket - the socket descriptor of the endpoint. 1570 * message - pointer to the msghdr structure which contains a single 1571 * user message and possibly some ancillary data. 1572 * 1573 * See Section 5 for complete description of the data 1574 * structures. 1575 * 1576 * flags - flags sent or received with the user message, see Section 1577 * 5 for complete description of the flags. 1578 * 1579 * Note: This function could use a rewrite especially when explicit 1580 * connect support comes in. 
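 *
 * A minimal user-space sketch of such a call (illustrative only; sd is
 * assumed to be a one-to-many SCTP socket and peer a filled-in
 * sockaddr_in): one message is sent on stream 1 with the SCTP_SNDRCV
 * ancillary data that sctp_msghdr_parse() extracts below:
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))] = { 0 };
 *	struct iovec iov = { .iov_base = "hello", .iov_len = 5 };
 *	struct msghdr mh = {
 *		.msg_name = &peer, .msg_namelen = sizeof(peer),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
 *	struct sctp_sndrcvinfo *sinfo;
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDRCV;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(*sinfo));
 *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	memset(sinfo, 0, sizeof(*sinfo));
 *	sinfo->sinfo_stream = 1;
 *	sendmsg(sd, &mh, 0);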
1581 */ 1582 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ 1583 1584 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); 1585 1586 static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) 1587 { 1588 struct net *net = sock_net(sk); 1589 struct sctp_sock *sp; 1590 struct sctp_endpoint *ep; 1591 struct sctp_association *new_asoc = NULL, *asoc = NULL; 1592 struct sctp_transport *transport, *chunk_tp; 1593 struct sctp_chunk *chunk; 1594 union sctp_addr to; 1595 struct sockaddr *msg_name = NULL; 1596 struct sctp_sndrcvinfo default_sinfo; 1597 struct sctp_sndrcvinfo *sinfo; 1598 struct sctp_initmsg *sinit; 1599 sctp_assoc_t associd = 0; 1600 sctp_cmsgs_t cmsgs = { NULL }; 1601 sctp_scope_t scope; 1602 bool fill_sinfo_ttl = false, wait_connect = false; 1603 struct sctp_datamsg *datamsg; 1604 int msg_flags = msg->msg_flags; 1605 __u16 sinfo_flags = 0; 1606 long timeo; 1607 int err; 1608 1609 err = 0; 1610 sp = sctp_sk(sk); 1611 ep = sp->ep; 1612 1613 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, 1614 msg, msg_len, ep); 1615 1616 /* We cannot send a message over a TCP-style listening socket. */ 1617 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { 1618 err = -EPIPE; 1619 goto out_nounlock; 1620 } 1621 1622 /* Parse out the SCTP CMSGs. */ 1623 err = sctp_msghdr_parse(msg, &cmsgs); 1624 if (err) { 1625 pr_debug("%s: msghdr parse err:%x\n", __func__, err); 1626 goto out_nounlock; 1627 } 1628 1629 /* Fetch the destination address for this packet. This 1630 * address only selects the association--it is not necessarily 1631 * the address we will send to. 1632 * For a peeled-off socket, msg_name is ignored. 1633 */ 1634 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { 1635 int msg_namelen = msg->msg_namelen; 1636 1637 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, 1638 msg_namelen); 1639 if (err) 1640 return err; 1641 1642 if (msg_namelen > sizeof(to)) 1643 msg_namelen = sizeof(to); 1644 memcpy(&to, msg->msg_name, msg_namelen); 1645 msg_name = msg->msg_name; 1646 } 1647 1648 sinit = cmsgs.init; 1649 if (cmsgs.sinfo != NULL) { 1650 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1651 default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; 1652 default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; 1653 default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; 1654 default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; 1655 default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; 1656 1657 sinfo = &default_sinfo; 1658 fill_sinfo_ttl = true; 1659 } else { 1660 sinfo = cmsgs.srinfo; 1661 } 1662 /* Did the user specify SNDINFO/SNDRCVINFO? */ 1663 if (sinfo) { 1664 sinfo_flags = sinfo->sinfo_flags; 1665 associd = sinfo->sinfo_assoc_id; 1666 } 1667 1668 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, 1669 msg_len, sinfo_flags); 1670 1671 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ 1672 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { 1673 err = -EINVAL; 1674 goto out_nounlock; 1675 } 1676 1677 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero 1678 * length messages when SCTP_EOF|SCTP_ABORT is not set. 1679 * If SCTP_ABORT is set, the message length could be non zero with 1680 * the msg_iov set to the user abort reason. 
1681 */ 1682 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || 1683 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { 1684 err = -EINVAL; 1685 goto out_nounlock; 1686 } 1687 1688 /* If SCTP_ADDR_OVER is set, there must be an address 1689 * specified in msg_name. 1690 */ 1691 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { 1692 err = -EINVAL; 1693 goto out_nounlock; 1694 } 1695 1696 transport = NULL; 1697 1698 pr_debug("%s: about to look up association\n", __func__); 1699 1700 lock_sock(sk); 1701 1702 /* If a msg_name has been specified, assume this is to be used. */ 1703 if (msg_name) { 1704 /* Look for a matching association on the endpoint. */ 1705 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1706 if (!asoc) { 1707 /* If we could not find a matching association on the 1708 * endpoint, make sure that it is not a TCP-style 1709 * socket that already has an association or there is 1710 * no peeled-off association on another socket. 1711 */ 1712 if ((sctp_style(sk, TCP) && 1713 sctp_sstate(sk, ESTABLISHED)) || 1714 sctp_endpoint_is_peeled_off(ep, &to)) { 1715 err = -EADDRNOTAVAIL; 1716 goto out_unlock; 1717 } 1718 } 1719 } else { 1720 asoc = sctp_id2assoc(sk, associd); 1721 if (!asoc) { 1722 err = -EPIPE; 1723 goto out_unlock; 1724 } 1725 } 1726 1727 if (asoc) { 1728 pr_debug("%s: just looked up association:%p\n", __func__, asoc); 1729 1730 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED 1731 * socket that has an association in CLOSED state. This can 1732 * happen when an accepted socket has an association that is 1733 * already CLOSED. 1734 */ 1735 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { 1736 err = -EPIPE; 1737 goto out_unlock; 1738 } 1739 1740 if (sinfo_flags & SCTP_EOF) { 1741 pr_debug("%s: shutting down association:%p\n", 1742 __func__, asoc); 1743 1744 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1745 err = 0; 1746 goto out_unlock; 1747 } 1748 if (sinfo_flags & SCTP_ABORT) { 1749 1750 chunk = sctp_make_abort_user(asoc, msg, msg_len); 1751 if (!chunk) { 1752 err = -ENOMEM; 1753 goto out_unlock; 1754 } 1755 1756 pr_debug("%s: aborting association:%p\n", 1757 __func__, asoc); 1758 1759 sctp_primitive_ABORT(net, asoc, chunk); 1760 err = 0; 1761 goto out_unlock; 1762 } 1763 } 1764 1765 /* Do we need to create the association? */ 1766 if (!asoc) { 1767 pr_debug("%s: there is no association yet\n", __func__); 1768 1769 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { 1770 err = -EINVAL; 1771 goto out_unlock; 1772 } 1773 1774 /* Check for invalid stream against the stream counts, 1775 * either the default or the user specified stream counts. 1776 */ 1777 if (sinfo) { 1778 if (!sinit || !sinit->sinit_num_ostreams) { 1779 /* Check against the defaults. */ 1780 if (sinfo->sinfo_stream >= 1781 sp->initmsg.sinit_num_ostreams) { 1782 err = -EINVAL; 1783 goto out_unlock; 1784 } 1785 } else { 1786 /* Check against the requested. */ 1787 if (sinfo->sinfo_stream >= 1788 sinit->sinit_num_ostreams) { 1789 err = -EINVAL; 1790 goto out_unlock; 1791 } 1792 } 1793 } 1794 1795 /* 1796 * API 3.1.2 bind() - UDP Style Syntax 1797 * If a bind() or sctp_bindx() is not called prior to a 1798 * sendmsg() call that initiates a new association, the 1799 * system picks an ephemeral port and will choose an address 1800 * set equivalent to binding with a wildcard address. 
1801 */ 1802 if (!ep->base.bind_addr.port) { 1803 if (sctp_autobind(sk)) { 1804 err = -EAGAIN; 1805 goto out_unlock; 1806 } 1807 } else { 1808 /* 1809 * If an unprivileged user inherits a one-to-many 1810 * style socket with open associations on a privileged 1811 * port, it MAY be permitted to accept new associations, 1812 * but it SHOULD NOT be permitted to open new 1813 * associations. 1814 */ 1815 if (ep->base.bind_addr.port < PROT_SOCK && 1816 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1817 err = -EACCES; 1818 goto out_unlock; 1819 } 1820 } 1821 1822 scope = sctp_scope(&to); 1823 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1824 if (!new_asoc) { 1825 err = -ENOMEM; 1826 goto out_unlock; 1827 } 1828 asoc = new_asoc; 1829 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1830 if (err < 0) { 1831 err = -ENOMEM; 1832 goto out_free; 1833 } 1834 1835 /* If the SCTP_INIT ancillary data is specified, set all 1836 * the association init values accordingly. 1837 */ 1838 if (sinit) { 1839 if (sinit->sinit_num_ostreams) { 1840 asoc->c.sinit_num_ostreams = 1841 sinit->sinit_num_ostreams; 1842 } 1843 if (sinit->sinit_max_instreams) { 1844 asoc->c.sinit_max_instreams = 1845 sinit->sinit_max_instreams; 1846 } 1847 if (sinit->sinit_max_attempts) { 1848 asoc->max_init_attempts 1849 = sinit->sinit_max_attempts; 1850 } 1851 if (sinit->sinit_max_init_timeo) { 1852 asoc->max_init_timeo = 1853 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1854 } 1855 } 1856 1857 /* Prime the peer's transport structures. */ 1858 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1859 if (!transport) { 1860 err = -ENOMEM; 1861 goto out_free; 1862 } 1863 } 1864 1865 /* ASSERT: we have a valid association at this point. */ 1866 pr_debug("%s: we have a valid association\n", __func__); 1867 1868 if (!sinfo) { 1869 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up 1870 * one with some defaults. 1871 */ 1872 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1873 default_sinfo.sinfo_stream = asoc->default_stream; 1874 default_sinfo.sinfo_flags = asoc->default_flags; 1875 default_sinfo.sinfo_ppid = asoc->default_ppid; 1876 default_sinfo.sinfo_context = asoc->default_context; 1877 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1878 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1879 1880 sinfo = &default_sinfo; 1881 } else if (fill_sinfo_ttl) { 1882 /* In case SNDINFO was specified, we still need to fill 1883 * it with a default ttl from the assoc here. 1884 */ 1885 sinfo->sinfo_timetolive = asoc->default_timetolive; 1886 } 1887 1888 /* API 7.1.7, the sndbuf size per association bounds the 1889 * maximum size of data that can be sent in a single send call. 1890 */ 1891 if (msg_len > sk->sk_sndbuf) { 1892 err = -EMSGSIZE; 1893 goto out_free; 1894 } 1895 1896 if (asoc->pmtu_pending) 1897 sctp_assoc_pending_pmtu(sk, asoc); 1898 1899 /* If fragmentation is disabled and the message length exceeds the 1900 * association fragmentation point, return EMSGSIZE. The I-D 1901 * does not specify what this error is, but this looks like 1902 * a great fit. 1903 */ 1904 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1905 err = -EMSGSIZE; 1906 goto out_free; 1907 } 1908 1909 /* Check for invalid stream. 
*/ 1910 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1911 err = -EINVAL; 1912 goto out_free; 1913 } 1914 1915 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1916 if (!sctp_wspace(asoc)) { 1917 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1918 if (err) 1919 goto out_free; 1920 } 1921 1922 /* If an address is passed with the sendto/sendmsg call, it is used 1923 * to override the primary destination address in the TCP model, or 1924 * when SCTP_ADDR_OVER flag is set in the UDP model. 1925 */ 1926 if ((sctp_style(sk, TCP) && msg_name) || 1927 (sinfo_flags & SCTP_ADDR_OVER)) { 1928 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1929 if (!chunk_tp) { 1930 err = -EINVAL; 1931 goto out_free; 1932 } 1933 } else 1934 chunk_tp = NULL; 1935 1936 /* Auto-connect, if we aren't connected already. */ 1937 if (sctp_state(asoc, CLOSED)) { 1938 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1939 if (err < 0) 1940 goto out_free; 1941 1942 wait_connect = true; 1943 pr_debug("%s: we associated primitively\n", __func__); 1944 } 1945 1946 /* Break the message into multiple chunks of maximum size. */ 1947 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); 1948 if (IS_ERR(datamsg)) { 1949 err = PTR_ERR(datamsg); 1950 goto out_free; 1951 } 1952 1953 /* Now send the (possibly) fragmented message. */ 1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1955 sctp_chunk_hold(chunk); 1956 1957 /* Do accounting for the write space. */ 1958 sctp_set_owner_w(chunk); 1959 1960 chunk->transport = chunk_tp; 1961 } 1962 1963 /* Send it to the lower layers. Note: all chunks 1964 * must either fail or succeed. The lower layer 1965 * works that way today. Keep it that way or this 1966 * breaks. 1967 */ 1968 err = sctp_primitive_SEND(net, asoc, datamsg); 1969 /* Did the lower layer accept the chunk? */ 1970 if (err) { 1971 sctp_datamsg_free(datamsg); 1972 goto out_free; 1973 } 1974 1975 pr_debug("%s: we sent primitively\n", __func__); 1976 1977 sctp_datamsg_put(datamsg); 1978 err = msg_len; 1979 1980 if (unlikely(wait_connect)) { 1981 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); 1982 sctp_wait_for_connect(asoc, &timeo); 1983 } 1984 1985 /* If we are already past ASSOCIATE, the lower 1986 * layers are responsible for association cleanup. 1987 */ 1988 goto out_unlock; 1989 1990 out_free: 1991 if (new_asoc) { 1992 sctp_unhash_established(asoc); 1993 sctp_association_free(asoc); 1994 } 1995 out_unlock: 1996 release_sock(sk); 1997 1998 out_nounlock: 1999 return sctp_error(sk, msg_flags, err); 2000 2001 #if 0 2002 do_sock_err: 2003 if (msg_len) 2004 err = msg_len; 2005 else 2006 err = sock_error(sk); 2007 goto out; 2008 2009 do_interrupted: 2010 if (msg_len) 2011 err = msg_len; 2012 goto out; 2013 #endif /* 0 */ 2014 } 2015 2016 /* This is an extended version of skb_pull() that removes the data from the 2017 * start of a skb even when data is spread across the list of skb's in the 2018 * frag_list. len specifies the total amount of data that needs to be removed. 2019 * when 'len' bytes could be removed from the skb, it returns 0. 2020 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2021 * could not be removed. 
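 *
 * A small worked example (the sizes are illustrative only): for a
 * 150-byte skb built from a 100-byte head and a 50-byte fragment,
 * sctp_skb_pull(skb, 120) strips the whole head plus 20 bytes of the
 * fragment and returns 0, while sctp_skb_pull(skb, 200) strips all 150
 * bytes and returns the 50 bytes that could not be removed.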
2022 */ 2023 static int sctp_skb_pull(struct sk_buff *skb, int len) 2024 { 2025 struct sk_buff *list; 2026 int skb_len = skb_headlen(skb); 2027 int rlen; 2028 2029 if (len <= skb_len) { 2030 __skb_pull(skb, len); 2031 return 0; 2032 } 2033 len -= skb_len; 2034 __skb_pull(skb, skb_len); 2035 2036 skb_walk_frags(skb, list) { 2037 rlen = sctp_skb_pull(list, len); 2038 skb->len -= (len-rlen); 2039 skb->data_len -= (len-rlen); 2040 2041 if (!rlen) 2042 return 0; 2043 2044 len = rlen; 2045 } 2046 2047 return len; 2048 } 2049 2050 /* API 3.1.3 recvmsg() - UDP Style Syntax 2051 * 2052 * ssize_t recvmsg(int socket, struct msghdr *message, 2053 * int flags); 2054 * 2055 * socket - the socket descriptor of the endpoint. 2056 * message - pointer to the msghdr structure which contains a single 2057 * user message and possibly some ancillary data. 2058 * 2059 * See Section 5 for complete description of the data 2060 * structures. 2061 * 2062 * flags - flags sent or received with the user message, see Section 2063 * 5 for complete description of the flags. 2064 */ 2065 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2066 int noblock, int flags, int *addr_len) 2067 { 2068 struct sctp_ulpevent *event = NULL; 2069 struct sctp_sock *sp = sctp_sk(sk); 2070 struct sk_buff *skb; 2071 int copied; 2072 int err = 0; 2073 int skb_len; 2074 2075 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2076 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2077 addr_len); 2078 2079 lock_sock(sk); 2080 2081 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2082 err = -ENOTCONN; 2083 goto out; 2084 } 2085 2086 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2087 if (!skb) 2088 goto out; 2089 2090 /* Get the total length of the skb including any skb's in the 2091 * frag_list. 2092 */ 2093 skb_len = skb->len; 2094 2095 copied = skb_len; 2096 if (copied > len) 2097 copied = len; 2098 2099 err = skb_copy_datagram_msg(skb, 0, msg, copied); 2100 2101 event = sctp_skb2event(skb); 2102 2103 if (err) 2104 goto out_free; 2105 2106 sock_recv_ts_and_drops(msg, sk, skb); 2107 if (sctp_ulpevent_is_notification(event)) { 2108 msg->msg_flags |= MSG_NOTIFICATION; 2109 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2110 } else { 2111 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2112 } 2113 2114 /* Check if we allow SCTP_NXTINFO. */ 2115 if (sp->recvnxtinfo) 2116 sctp_ulpevent_read_nxtinfo(event, msg, sk); 2117 /* Check if we allow SCTP_RCVINFO. */ 2118 if (sp->recvrcvinfo) 2119 sctp_ulpevent_read_rcvinfo(event, msg); 2120 /* Check if we allow SCTP_SNDRCVINFO. */ 2121 if (sp->subscribe.sctp_data_io_event) 2122 sctp_ulpevent_read_sndrcvinfo(event, msg); 2123 2124 err = copied; 2125 2126 /* If skb's length exceeds the user's buffer, update the skb and 2127 * push it back to the receive_queue so that the next call to 2128 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2129 */ 2130 if (skb_len > copied) { 2131 msg->msg_flags &= ~MSG_EOR; 2132 if (flags & MSG_PEEK) 2133 goto out_free; 2134 sctp_skb_pull(skb, copied); 2135 skb_queue_head(&sk->sk_receive_queue, skb); 2136 2137 /* When only partial message is copied to the user, increase 2138 * rwnd by that amount. If all the data in the skb is read, 2139 * rwnd is updated when the event is freed. 
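 *
 * Seen from user space this is the usual MSG_EOR contract; a hedged
 * sketch of the reassembly loop (sd is a placeholder, and appending the
 * pieces to an application buffer is left out):
 *
 *	char buf[8192];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr mhdr = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n;
 *
 *	do {
 *		n = recvmsg(sd, &mhdr, 0);
 *	} while (n > 0 && !(mhdr.msg_flags & MSG_EOR));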
2140 */ 2141 if (!sctp_ulpevent_is_notification(event)) 2142 sctp_assoc_rwnd_increase(event->asoc, copied); 2143 goto out; 2144 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2145 (event->msg_flags & MSG_EOR)) 2146 msg->msg_flags |= MSG_EOR; 2147 else 2148 msg->msg_flags &= ~MSG_EOR; 2149 2150 out_free: 2151 if (flags & MSG_PEEK) { 2152 /* Release the skb reference acquired after peeking the skb in 2153 * sctp_skb_recv_datagram(). 2154 */ 2155 kfree_skb(skb); 2156 } else { 2157 /* Free the event which includes releasing the reference to 2158 * the owner of the skb, freeing the skb and updating the 2159 * rwnd. 2160 */ 2161 sctp_ulpevent_free(event); 2162 } 2163 out: 2164 release_sock(sk); 2165 return err; 2166 } 2167 2168 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2169 * 2170 * This option is a on/off flag. If enabled no SCTP message 2171 * fragmentation will be performed. Instead if a message being sent 2172 * exceeds the current PMTU size, the message will NOT be sent and 2173 * instead a error will be indicated to the user. 2174 */ 2175 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2176 char __user *optval, 2177 unsigned int optlen) 2178 { 2179 int val; 2180 2181 if (optlen < sizeof(int)) 2182 return -EINVAL; 2183 2184 if (get_user(val, (int __user *)optval)) 2185 return -EFAULT; 2186 2187 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2188 2189 return 0; 2190 } 2191 2192 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2193 unsigned int optlen) 2194 { 2195 struct sctp_association *asoc; 2196 struct sctp_ulpevent *event; 2197 2198 if (optlen > sizeof(struct sctp_event_subscribe)) 2199 return -EINVAL; 2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2201 return -EFAULT; 2202 2203 if (sctp_sk(sk)->subscribe.sctp_data_io_event) 2204 pr_warn_ratelimited(DEPRECATED "%s (pid %d) " 2205 "Requested SCTP_SNDRCVINFO event.\n" 2206 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n", 2207 current->comm, task_pid_nr(current)); 2208 2209 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2210 * if there is no data to be sent or retransmit, the stack will 2211 * immediately send up this notification. 2212 */ 2213 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2214 &sctp_sk(sk)->subscribe)) { 2215 asoc = sctp_id2assoc(sk, 0); 2216 2217 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2218 event = sctp_ulpevent_make_sender_dry_event(asoc, 2219 GFP_ATOMIC); 2220 if (!event) 2221 return -ENOMEM; 2222 2223 sctp_ulpq_tail_event(&asoc->ulpq, event); 2224 } 2225 } 2226 2227 return 0; 2228 } 2229 2230 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2231 * 2232 * This socket option is applicable to the UDP-style socket only. When 2233 * set it will cause associations that are idle for more than the 2234 * specified number of seconds to automatically close. An association 2235 * being idle is defined an association that has NOT sent or received 2236 * user data. The special value of '0' indicates that no automatic 2237 * close of any associations should be performed. The option expects an 2238 * integer defining the number of seconds of idle time before an 2239 * association is closed. 
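 *
 * A minimal, non-normative user-space sketch (sd is assumed to be a
 * one-to-many SCTP socket): close associations that stay idle for two
 * minutes:
 *
 *	int secs = 120;
 *	setsockopt(sd, SOL_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));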
2240 */ 2241 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2242 unsigned int optlen) 2243 { 2244 struct sctp_sock *sp = sctp_sk(sk); 2245 struct net *net = sock_net(sk); 2246 2247 /* Applicable to UDP-style socket only */ 2248 if (sctp_style(sk, TCP)) 2249 return -EOPNOTSUPP; 2250 if (optlen != sizeof(int)) 2251 return -EINVAL; 2252 if (copy_from_user(&sp->autoclose, optval, optlen)) 2253 return -EFAULT; 2254 2255 if (sp->autoclose > net->sctp.max_autoclose) 2256 sp->autoclose = net->sctp.max_autoclose; 2257 2258 return 0; 2259 } 2260 2261 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2262 * 2263 * Applications can enable or disable heartbeats for any peer address of 2264 * an association, modify an address's heartbeat interval, force a 2265 * heartbeat to be sent immediately, and adjust the address's maximum 2266 * number of retransmissions sent before an address is considered 2267 * unreachable. The following structure is used to access and modify an 2268 * address's parameters: 2269 * 2270 * struct sctp_paddrparams { 2271 * sctp_assoc_t spp_assoc_id; 2272 * struct sockaddr_storage spp_address; 2273 * uint32_t spp_hbinterval; 2274 * uint16_t spp_pathmaxrxt; 2275 * uint32_t spp_pathmtu; 2276 * uint32_t spp_sackdelay; 2277 * uint32_t spp_flags; 2278 * }; 2279 * 2280 * spp_assoc_id - (one-to-many style socket) This is filled in the 2281 * application, and identifies the association for 2282 * this query. 2283 * spp_address - This specifies which address is of interest. 2284 * spp_hbinterval - This contains the value of the heartbeat interval, 2285 * in milliseconds. If a value of zero 2286 * is present in this field then no changes are to 2287 * be made to this parameter. 2288 * spp_pathmaxrxt - This contains the maximum number of 2289 * retransmissions before this address shall be 2290 * considered unreachable. If a value of zero 2291 * is present in this field then no changes are to 2292 * be made to this parameter. 2293 * spp_pathmtu - When Path MTU discovery is disabled the value 2294 * specified here will be the "fixed" path mtu. 2295 * Note that if the spp_address field is empty 2296 * then all associations on this address will 2297 * have this fixed path mtu set upon them. 2298 * 2299 * spp_sackdelay - When delayed sack is enabled, this value specifies 2300 * the number of milliseconds that sacks will be delayed 2301 * for. This value will apply to all addresses of an 2302 * association if the spp_address field is empty. Note 2303 * also, that if delayed sack is enabled and this 2304 * value is set to 0, no change is made to the last 2305 * recorded delayed sack timer value. 2306 * 2307 * spp_flags - These flags are used to control various features 2308 * on an association. The flag field may contain 2309 * zero or more of the following options. 2310 * 2311 * SPP_HB_ENABLE - Enable heartbeats on the 2312 * specified address. Note that if the address 2313 * field is empty all addresses for the association 2314 * have heartbeats enabled upon them. 2315 * 2316 * SPP_HB_DISABLE - Disable heartbeats on the 2317 * speicifed address. Note that if the address 2318 * field is empty all addresses for the association 2319 * will have their heartbeats disabled. Note also 2320 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2321 * mutually exclusive, only one of these two should 2322 * be specified. Enabling both fields will have 2323 * undetermined results. 2324 * 2325 * SPP_HB_DEMAND - Request a user initiated heartbeat 2326 * to be made immediately. 
2327 * 2328 * SPP_HB_TIME_IS_ZERO - Specifies that the time for 2329 * heartbeat delay is to be set to the value of 0 2330 * milliseconds. 2331 * 2332 * SPP_PMTUD_ENABLE - This field will enable PMTU 2333 * discovery upon the specified address. Note that 2334 * if the address field is empty then all addresses 2335 * on the association are affected. 2336 * 2337 * SPP_PMTUD_DISABLE - This field will disable PMTU 2338 * discovery upon the specified address. Note that 2339 * if the address field is empty then all addresses 2340 * on the association are affected. Note also that 2341 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2342 * exclusive. Enabling both will have undetermined 2343 * results. 2344 * 2345 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2346 * on delayed sack. The time specified in spp_sackdelay 2347 * is used to specify the sack delay for this address. Note 2348 * that if spp_address is empty then all addresses will 2349 * enable delayed sack and take on the sack delay 2350 * value specified in spp_sackdelay. 2351 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2352 * off delayed sack. If the spp_address field is blank then 2353 * delayed sack is disabled for the entire association. Note 2354 * also that this field is mutually exclusive to 2355 * SPP_SACKDELAY_ENABLE, setting both will have undefined 2356 * results. 2357 */ 2358 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2359 struct sctp_transport *trans, 2360 struct sctp_association *asoc, 2361 struct sctp_sock *sp, 2362 int hb_change, 2363 int pmtud_change, 2364 int sackdelay_change) 2365 { 2366 int error; 2367 2368 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2369 struct net *net = sock_net(trans->asoc->base.sk); 2370 2371 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2372 if (error) 2373 return error; 2374 } 2375 2376 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2377 * this field is ignored. Note also that a value of zero indicates 2378 * the current setting should be left unchanged. 2379 */ 2380 if (params->spp_flags & SPP_HB_ENABLE) { 2381 2382 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2383 * set. This lets us use 0 value when this flag 2384 * is set. 2385 */ 2386 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2387 params->spp_hbinterval = 0; 2388 2389 if (params->spp_hbinterval || 2390 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2391 if (trans) { 2392 trans->hbinterval = 2393 msecs_to_jiffies(params->spp_hbinterval); 2394 } else if (asoc) { 2395 asoc->hbinterval = 2396 msecs_to_jiffies(params->spp_hbinterval); 2397 } else { 2398 sp->hbinterval = params->spp_hbinterval; 2399 } 2400 } 2401 } 2402 2403 if (hb_change) { 2404 if (trans) { 2405 trans->param_flags = 2406 (trans->param_flags & ~SPP_HB) | hb_change; 2407 } else if (asoc) { 2408 asoc->param_flags = 2409 (asoc->param_flags & ~SPP_HB) | hb_change; 2410 } else { 2411 sp->param_flags = 2412 (sp->param_flags & ~SPP_HB) | hb_change; 2413 } 2414 } 2415 2416 /* When Path MTU discovery is disabled the value specified here will 2417 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2418 * include the flag SPP_PMTUD_DISABLE for this field to have any 2419 * effect).
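 *
 * Illustrative user-space usage of that rule (sd and peer_addr are
 * placeholders): pinning the path MTU of one peer address to 1200
 * bytes only works if SPP_PMTUD_DISABLE accompanies spp_pathmtu:
 *
 *	struct sctp_paddrparams pp = { 0 };
 *	memcpy(&pp.spp_address, &peer_addr, sizeof(peer_addr));
 *	pp.spp_flags = SPP_PMTUD_DISABLE;
 *	pp.spp_pathmtu = 1200;
 *	setsockopt(sd, SOL_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));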
2420 */ 2421 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2422 if (trans) { 2423 trans->pathmtu = params->spp_pathmtu; 2424 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2425 } else if (asoc) { 2426 asoc->pathmtu = params->spp_pathmtu; 2427 sctp_frag_point(asoc, params->spp_pathmtu); 2428 } else { 2429 sp->pathmtu = params->spp_pathmtu; 2430 } 2431 } 2432 2433 if (pmtud_change) { 2434 if (trans) { 2435 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2436 (params->spp_flags & SPP_PMTUD_ENABLE); 2437 trans->param_flags = 2438 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2439 if (update) { 2440 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2441 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2442 } 2443 } else if (asoc) { 2444 asoc->param_flags = 2445 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2446 } else { 2447 sp->param_flags = 2448 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2449 } 2450 } 2451 2452 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2453 * value of this field is ignored. Note also that a value of zero 2454 * indicates the current setting should be left unchanged. 2455 */ 2456 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2457 if (trans) { 2458 trans->sackdelay = 2459 msecs_to_jiffies(params->spp_sackdelay); 2460 } else if (asoc) { 2461 asoc->sackdelay = 2462 msecs_to_jiffies(params->spp_sackdelay); 2463 } else { 2464 sp->sackdelay = params->spp_sackdelay; 2465 } 2466 } 2467 2468 if (sackdelay_change) { 2469 if (trans) { 2470 trans->param_flags = 2471 (trans->param_flags & ~SPP_SACKDELAY) | 2472 sackdelay_change; 2473 } else if (asoc) { 2474 asoc->param_flags = 2475 (asoc->param_flags & ~SPP_SACKDELAY) | 2476 sackdelay_change; 2477 } else { 2478 sp->param_flags = 2479 (sp->param_flags & ~SPP_SACKDELAY) | 2480 sackdelay_change; 2481 } 2482 } 2483 2484 /* Note that a value of zero indicates the current setting should be 2485 left unchanged. 2486 */ 2487 if (params->spp_pathmaxrxt) { 2488 if (trans) { 2489 trans->pathmaxrxt = params->spp_pathmaxrxt; 2490 } else if (asoc) { 2491 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2492 } else { 2493 sp->pathmaxrxt = params->spp_pathmaxrxt; 2494 } 2495 } 2496 2497 return 0; 2498 } 2499 2500 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2501 char __user *optval, 2502 unsigned int optlen) 2503 { 2504 struct sctp_paddrparams params; 2505 struct sctp_transport *trans = NULL; 2506 struct sctp_association *asoc = NULL; 2507 struct sctp_sock *sp = sctp_sk(sk); 2508 int error; 2509 int hb_change, pmtud_change, sackdelay_change; 2510 2511 if (optlen != sizeof(struct sctp_paddrparams)) 2512 return -EINVAL; 2513 2514 if (copy_from_user(&params, optval, optlen)) 2515 return -EFAULT; 2516 2517 /* Validate flags and value parameters. */ 2518 hb_change = params.spp_flags & SPP_HB; 2519 pmtud_change = params.spp_flags & SPP_PMTUD; 2520 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2521 2522 if (hb_change == SPP_HB || 2523 pmtud_change == SPP_PMTUD || 2524 sackdelay_change == SPP_SACKDELAY || 2525 params.spp_sackdelay > 500 || 2526 (params.spp_pathmtu && 2527 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2528 return -EINVAL; 2529 2530 /* If an address other than INADDR_ANY is specified, and 2531 * no transport is found, then the request is invalid.
2532 */ 2533 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 2534 trans = sctp_addr_id2transport(sk, &params.spp_address, 2535 params.spp_assoc_id); 2536 if (!trans) 2537 return -EINVAL; 2538 } 2539 2540 /* Get association, if assoc_id != 0 and the socket is a one 2541 * to many style socket, and an association was not found, then 2542 * the id was invalid. 2543 */ 2544 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2545 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2546 return -EINVAL; 2547 2548 /* Heartbeat demand can only be sent on a transport or 2549 * association, but not a socket. 2550 */ 2551 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2552 return -EINVAL; 2553 2554 /* Process parameters. */ 2555 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2556 hb_change, pmtud_change, 2557 sackdelay_change); 2558 2559 if (error) 2560 return error; 2561 2562 /* If changes are for association, also apply parameters to each 2563 * transport. 2564 */ 2565 if (!trans && asoc) { 2566 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2567 transports) { 2568 sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2569 hb_change, pmtud_change, 2570 sackdelay_change); 2571 } 2572 } 2573 2574 return 0; 2575 } 2576 2577 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) 2578 { 2579 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; 2580 } 2581 2582 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) 2583 { 2584 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; 2585 } 2586 2587 /* 2588 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2589 * 2590 * This option will affect the way delayed acks are performed. This 2591 * option allows you to get or set the delayed ack time, in 2592 * milliseconds. It also allows changing the delayed ack frequency. 2593 * Changing the frequency to 1 disables the delayed sack algorithm. If 2594 * the assoc_id is 0, then this sets or gets the endpoint's default 2595 * values. If the assoc_id field is non-zero, then the set or get 2596 * affects the specified association for the one to many model (the 2597 * assoc_id field is ignored by the one to one model). Note that if 2598 * sack_delay or sack_freq are 0 when setting this option, then the 2599 * current values will remain unchanged. 2600 * 2601 * struct sctp_sack_info { 2602 * sctp_assoc_t sack_assoc_id; 2603 * uint32_t sack_delay; 2604 * uint32_t sack_freq; 2605 * }; 2606 * 2607 * sack_assoc_id - This parameter indicates which association the user 2608 * is performing an action upon. Note that if this field's value is 2609 * zero then the endpoint's default value is changed (affecting future 2610 * associations only). 2611 * 2612 * sack_delay - This parameter contains the number of milliseconds that 2613 * the user is requesting the delayed ACK timer be set to. Note that 2614 * this value is defined in the standard to be between 200 and 500 2615 * milliseconds. 2616 * 2617 * sack_freq - This parameter contains the number of packets that must 2618 * be received before a sack is sent without waiting for the delay 2619 * timer to expire. The default value for this is 2, setting this 2620 * value to 1 will disable the delayed sack algorithm.
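 *
 * A hedged user-space sketch of the structure above (sd, assoc_id and
 * the values are only examples): request a 200 ms delayed SACK and a
 * SACK after every second packet on one association:
 *
 *	struct sctp_sack_info si = { 0 };
 *	si.sack_assoc_id = assoc_id;
 *	si.sack_delay = 200;
 *	si.sack_freq = 2;
 *	setsockopt(sd, SOL_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));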
2621 */ 2622 2623 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2624 char __user *optval, unsigned int optlen) 2625 { 2626 struct sctp_sack_info params; 2627 struct sctp_transport *trans = NULL; 2628 struct sctp_association *asoc = NULL; 2629 struct sctp_sock *sp = sctp_sk(sk); 2630 2631 if (optlen == sizeof(struct sctp_sack_info)) { 2632 if (copy_from_user(&params, optval, optlen)) 2633 return -EFAULT; 2634 2635 if (params.sack_delay == 0 && params.sack_freq == 0) 2636 return 0; 2637 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2638 pr_warn_ratelimited(DEPRECATED 2639 "%s (pid %d) " 2640 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2641 "Use struct sctp_sack_info instead\n", 2642 current->comm, task_pid_nr(current)); 2643 if (copy_from_user(&params, optval, optlen)) 2644 return -EFAULT; 2645 2646 if (params.sack_delay == 0) 2647 params.sack_freq = 1; 2648 else 2649 params.sack_freq = 0; 2650 } else 2651 return -EINVAL; 2652 2653 /* Validate value parameter. */ 2654 if (params.sack_delay > 500) 2655 return -EINVAL; 2656 2657 /* Get association, if sack_assoc_id != 0 and the socket is a one 2658 * to many style socket, and an association was not found, then 2659 * the id was invalid. 2660 */ 2661 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2662 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2663 return -EINVAL; 2664 2665 if (params.sack_delay) { 2666 if (asoc) { 2667 asoc->sackdelay = 2668 msecs_to_jiffies(params.sack_delay); 2669 asoc->param_flags = 2670 sctp_spp_sackdelay_enable(asoc->param_flags); 2671 } else { 2672 sp->sackdelay = params.sack_delay; 2673 sp->param_flags = 2674 sctp_spp_sackdelay_enable(sp->param_flags); 2675 } 2676 } 2677 2678 if (params.sack_freq == 1) { 2679 if (asoc) { 2680 asoc->param_flags = 2681 sctp_spp_sackdelay_disable(asoc->param_flags); 2682 } else { 2683 sp->param_flags = 2684 sctp_spp_sackdelay_disable(sp->param_flags); 2685 } 2686 } else if (params.sack_freq > 1) { 2687 if (asoc) { 2688 asoc->sackfreq = params.sack_freq; 2689 asoc->param_flags = 2690 sctp_spp_sackdelay_enable(asoc->param_flags); 2691 } else { 2692 sp->sackfreq = params.sack_freq; 2693 sp->param_flags = 2694 sctp_spp_sackdelay_enable(sp->param_flags); 2695 } 2696 } 2697 2698 /* If change is for association, also apply to each transport. */ 2699 if (asoc) { 2700 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2701 transports) { 2702 if (params.sack_delay) { 2703 trans->sackdelay = 2704 msecs_to_jiffies(params.sack_delay); 2705 trans->param_flags = 2706 sctp_spp_sackdelay_enable(trans->param_flags); 2707 } 2708 if (params.sack_freq == 1) { 2709 trans->param_flags = 2710 sctp_spp_sackdelay_disable(trans->param_flags); 2711 } else if (params.sack_freq > 1) { 2712 trans->sackfreq = params.sack_freq; 2713 trans->param_flags = 2714 sctp_spp_sackdelay_enable(trans->param_flags); 2715 } 2716 } 2717 } 2718 2719 return 0; 2720 } 2721 2722 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2723 * 2724 * Applications can specify protocol parameters for the default association 2725 * initialization. The option name argument to setsockopt() and getsockopt() 2726 * is SCTP_INITMSG. 2727 * 2728 * Setting initialization parameters is effective only on an unconnected 2729 * socket (for UDP-style sockets only future associations are affected 2730 * by the change). With TCP-style sockets, this option is inherited by 2731 * sockets derived from a listener socket.
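 *
 * A minimal sketch of setting these defaults from user space (sd is a
 * placeholder and the numbers are only examples): ask for 10 outbound
 * streams, accept up to 10 inbound streams and retry the INIT at most
 * 5 times:
 *
 *	struct sctp_initmsg im = { 0 };
 *	im.sinit_num_ostreams = 10;
 *	im.sinit_max_instreams = 10;
 *	im.sinit_max_attempts = 5;
 *	setsockopt(sd, SOL_SCTP, SCTP_INITMSG, &im, sizeof(im));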
2732 */ 2733 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2734 { 2735 struct sctp_initmsg sinit; 2736 struct sctp_sock *sp = sctp_sk(sk); 2737 2738 if (optlen != sizeof(struct sctp_initmsg)) 2739 return -EINVAL; 2740 if (copy_from_user(&sinit, optval, optlen)) 2741 return -EFAULT; 2742 2743 if (sinit.sinit_num_ostreams) 2744 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2745 if (sinit.sinit_max_instreams) 2746 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2747 if (sinit.sinit_max_attempts) 2748 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2749 if (sinit.sinit_max_init_timeo) 2750 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2751 2752 return 0; 2753 } 2754 2755 /* 2756 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2757 * 2758 * Applications that wish to use the sendto() system call may wish to 2759 * specify a default set of parameters that would normally be supplied 2760 * through the inclusion of ancillary data. This socket option allows 2761 * such an application to set the default sctp_sndrcvinfo structure. 2762 * The application that wishes to use this socket option simply passes 2763 * in to this call the sctp_sndrcvinfo structure defined in Section 2764 * 5.2.2) The input parameters accepted by this call include 2765 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2766 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2767 * to this call if the caller is using the UDP model. 2768 */ 2769 static int sctp_setsockopt_default_send_param(struct sock *sk, 2770 char __user *optval, 2771 unsigned int optlen) 2772 { 2773 struct sctp_sock *sp = sctp_sk(sk); 2774 struct sctp_association *asoc; 2775 struct sctp_sndrcvinfo info; 2776 2777 if (optlen != sizeof(info)) 2778 return -EINVAL; 2779 if (copy_from_user(&info, optval, optlen)) 2780 return -EFAULT; 2781 if (info.sinfo_flags & 2782 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2783 SCTP_ABORT | SCTP_EOF)) 2784 return -EINVAL; 2785 2786 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2787 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2788 return -EINVAL; 2789 if (asoc) { 2790 asoc->default_stream = info.sinfo_stream; 2791 asoc->default_flags = info.sinfo_flags; 2792 asoc->default_ppid = info.sinfo_ppid; 2793 asoc->default_context = info.sinfo_context; 2794 asoc->default_timetolive = info.sinfo_timetolive; 2795 } else { 2796 sp->default_stream = info.sinfo_stream; 2797 sp->default_flags = info.sinfo_flags; 2798 sp->default_ppid = info.sinfo_ppid; 2799 sp->default_context = info.sinfo_context; 2800 sp->default_timetolive = info.sinfo_timetolive; 2801 } 2802 2803 return 0; 2804 } 2805 2806 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 2807 * (SCTP_DEFAULT_SNDINFO) 2808 */ 2809 static int sctp_setsockopt_default_sndinfo(struct sock *sk, 2810 char __user *optval, 2811 unsigned int optlen) 2812 { 2813 struct sctp_sock *sp = sctp_sk(sk); 2814 struct sctp_association *asoc; 2815 struct sctp_sndinfo info; 2816 2817 if (optlen != sizeof(info)) 2818 return -EINVAL; 2819 if (copy_from_user(&info, optval, optlen)) 2820 return -EFAULT; 2821 if (info.snd_flags & 2822 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2823 SCTP_ABORT | SCTP_EOF)) 2824 return -EINVAL; 2825 2826 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 2827 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 2828 return -EINVAL; 2829 if (asoc) { 2830 asoc->default_stream = info.snd_sid; 2831 asoc->default_flags = info.snd_flags; 2832 asoc->default_ppid = info.snd_ppid; 2833 asoc->default_context = info.snd_context; 2834 } else { 2835 sp->default_stream = info.snd_sid; 2836 sp->default_flags = info.snd_flags; 2837 sp->default_ppid = info.snd_ppid; 2838 sp->default_context = info.snd_context; 2839 } 2840 2841 return 0; 2842 } 2843 2844 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2845 * 2846 * Requests that the local SCTP stack use the enclosed peer address as 2847 * the association primary. The enclosed address must be one of the 2848 * association peer's addresses. 2849 */ 2850 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2851 unsigned int optlen) 2852 { 2853 struct sctp_prim prim; 2854 struct sctp_transport *trans; 2855 2856 if (optlen != sizeof(struct sctp_prim)) 2857 return -EINVAL; 2858 2859 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2860 return -EFAULT; 2861 2862 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2863 if (!trans) 2864 return -EINVAL; 2865 2866 sctp_assoc_set_primary(trans->asoc, trans); 2867 2868 return 0; 2869 } 2870 2871 /* 2872 * 7.1.5 SCTP_NODELAY 2873 * 2874 * Turn on/off any Nagle-like algorithm. This means that packets are 2875 * generally sent as soon as possible and no unnecessary delays are 2876 * introduced, at the cost of more packets in the network. Expects an 2877 * integer boolean flag. 2878 */ 2879 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2880 unsigned int optlen) 2881 { 2882 int val; 2883 2884 if (optlen < sizeof(int)) 2885 return -EINVAL; 2886 if (get_user(val, (int __user *)optval)) 2887 return -EFAULT; 2888 2889 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2890 return 0; 2891 } 2892 2893 /* 2894 * 2895 * 7.1.1 SCTP_RTOINFO 2896 * 2897 * The protocol parameters used to initialize and bound retransmission 2898 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2899 * and modify these parameters. 2900 * All parameters are time values, in milliseconds. A value of 0, when 2901 * modifying the parameters, indicates that the current value should not 2902 * be changed. 
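 *
 * For illustration only (sd and assoc_id are assumed): the millisecond
 * values map directly onto struct sctp_rtoinfo, and a field left at 0
 * keeps the current setting:
 *
 *	struct sctp_rtoinfo rto = { 0 };
 *	rto.srto_assoc_id = assoc_id;
 *	rto.srto_initial = 300;
 *	rto.srto_min = 200;
 *	rto.srto_max = 2000;
 *	setsockopt(sd, SOL_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));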
2903 * 2904 */ 2905 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2906 { 2907 struct sctp_rtoinfo rtoinfo; 2908 struct sctp_association *asoc; 2909 unsigned long rto_min, rto_max; 2910 struct sctp_sock *sp = sctp_sk(sk); 2911 2912 if (optlen != sizeof (struct sctp_rtoinfo)) 2913 return -EINVAL; 2914 2915 if (copy_from_user(&rtoinfo, optval, optlen)) 2916 return -EFAULT; 2917 2918 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2919 2920 /* Set the values to the specific association */ 2921 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2922 return -EINVAL; 2923 2924 rto_max = rtoinfo.srto_max; 2925 rto_min = rtoinfo.srto_min; 2926 2927 if (rto_max) 2928 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2929 else 2930 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2931 2932 if (rto_min) 2933 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2934 else 2935 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2936 2937 if (rto_min > rto_max) 2938 return -EINVAL; 2939 2940 if (asoc) { 2941 if (rtoinfo.srto_initial != 0) 2942 asoc->rto_initial = 2943 msecs_to_jiffies(rtoinfo.srto_initial); 2944 asoc->rto_max = rto_max; 2945 asoc->rto_min = rto_min; 2946 } else { 2947 /* If there is no association or the association-id = 0 2948 * set the values to the endpoint. 2949 */ 2950 if (rtoinfo.srto_initial != 0) 2951 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2952 sp->rtoinfo.srto_max = rto_max; 2953 sp->rtoinfo.srto_min = rto_min; 2954 } 2955 2956 return 0; 2957 } 2958 2959 /* 2960 * 2961 * 7.1.2 SCTP_ASSOCINFO 2962 * 2963 * This option is used to tune the maximum retransmission attempts 2964 * of the association. 2965 * Returns an error if the new association retransmission value is 2966 * greater than the sum of the retransmission value of the peer. 2967 * See [SCTP] for more information. 2968 * 2969 */ 2970 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2971 { 2972 2973 struct sctp_assocparams assocparams; 2974 struct sctp_association *asoc; 2975 2976 if (optlen != sizeof(struct sctp_assocparams)) 2977 return -EINVAL; 2978 if (copy_from_user(&assocparams, optval, optlen)) 2979 return -EFAULT; 2980 2981 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2982 2983 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2984 return -EINVAL; 2985 2986 /* Set the values to the specific association */ 2987 if (asoc) { 2988 if (assocparams.sasoc_asocmaxrxt != 0) { 2989 __u32 path_sum = 0; 2990 int paths = 0; 2991 struct sctp_transport *peer_addr; 2992 2993 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 2994 transports) { 2995 path_sum += peer_addr->pathmaxrxt; 2996 paths++; 2997 } 2998 2999 /* Only validate asocmaxrxt if we have more than 3000 * one path/transport. We do this because path 3001 * retransmissions are only counted when we have more 3002 * then one path. 
3003 */ 3004 if (paths > 1 && 3005 assocparams.sasoc_asocmaxrxt > path_sum) 3006 return -EINVAL; 3007 3008 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 3009 } 3010 3011 if (assocparams.sasoc_cookie_life != 0) 3012 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 3013 } else { 3014 /* Set the values to the endpoint */ 3015 struct sctp_sock *sp = sctp_sk(sk); 3016 3017 if (assocparams.sasoc_asocmaxrxt != 0) 3018 sp->assocparams.sasoc_asocmaxrxt = 3019 assocparams.sasoc_asocmaxrxt; 3020 if (assocparams.sasoc_cookie_life != 0) 3021 sp->assocparams.sasoc_cookie_life = 3022 assocparams.sasoc_cookie_life; 3023 } 3024 return 0; 3025 } 3026 3027 /* 3028 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 3029 * 3030 * This socket option is a boolean flag which turns on or off mapped V4 3031 * addresses. If this option is turned on and the socket is type 3032 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 3033 * If this option is turned off, then no mapping will be done of V4 3034 * addresses and a user will receive both PF_INET6 and PF_INET type 3035 * addresses on the socket. 3036 */ 3037 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 3038 { 3039 int val; 3040 struct sctp_sock *sp = sctp_sk(sk); 3041 3042 if (optlen < sizeof(int)) 3043 return -EINVAL; 3044 if (get_user(val, (int __user *)optval)) 3045 return -EFAULT; 3046 if (val) 3047 sp->v4mapped = 1; 3048 else 3049 sp->v4mapped = 0; 3050 3051 return 0; 3052 } 3053 3054 /* 3055 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 3056 * This option will get or set the maximum size to put in any outgoing 3057 * SCTP DATA chunk. If a message is larger than this size it will be 3058 * fragmented by SCTP into the specified size. Note that the underlying 3059 * SCTP implementation may fragment into smaller sized chunks when the 3060 * PMTU of the underlying association is smaller than the value set by 3061 * the user. The default value for this option is '0' which indicates 3062 * the user is NOT limiting fragmentation and only the PMTU will effect 3063 * SCTP's choice of DATA chunk size. Note also that values set larger 3064 * than the maximum size of an IP datagram will effectively let SCTP 3065 * control fragmentation (i.e. the same as setting this option to 0). 3066 * 3067 * The following structure is used to access and modify this parameter: 3068 * 3069 * struct sctp_assoc_value { 3070 * sctp_assoc_t assoc_id; 3071 * uint32_t assoc_value; 3072 * }; 3073 * 3074 * assoc_id: This parameter is ignored for one-to-one style sockets. 3075 * For one-to-many style sockets this parameter indicates which 3076 * association the user is performing an action upon. Note that if 3077 * this field's value is zero then the endpoints default value is 3078 * changed (effecting future associations only). 3079 * assoc_value: This parameter specifies the maximum size in bytes. 
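 *
 * A non-normative user-space sketch (sd is assumed; 1400 is only an
 * example and must lie between 8 and the maximum chunk length): cap
 * the DATA chunk payload for future associations on the endpoint:
 *
 *	struct sctp_assoc_value av = { 0 };
 *	av.assoc_id = 0;
 *	av.assoc_value = 1400;
 *	setsockopt(sd, SOL_SCTP, SCTP_MAXSEG, &av, sizeof(av));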
3080 */ 3081 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3082 { 3083 struct sctp_assoc_value params; 3084 struct sctp_association *asoc; 3085 struct sctp_sock *sp = sctp_sk(sk); 3086 int val; 3087 3088 if (optlen == sizeof(int)) { 3089 pr_warn_ratelimited(DEPRECATED 3090 "%s (pid %d) " 3091 "Use of int in maxseg socket option.\n" 3092 "Use struct sctp_assoc_value instead\n", 3093 current->comm, task_pid_nr(current)); 3094 if (copy_from_user(&val, optval, optlen)) 3095 return -EFAULT; 3096 params.assoc_id = 0; 3097 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3098 if (copy_from_user(&params, optval, optlen)) 3099 return -EFAULT; 3100 val = params.assoc_value; 3101 } else 3102 return -EINVAL; 3103 3104 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3105 return -EINVAL; 3106 3107 asoc = sctp_id2assoc(sk, params.assoc_id); 3108 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3109 return -EINVAL; 3110 3111 if (asoc) { 3112 if (val == 0) { 3113 val = asoc->pathmtu; 3114 val -= sp->pf->af->net_header_len; 3115 val -= sizeof(struct sctphdr) + 3116 sizeof(struct sctp_data_chunk); 3117 } 3118 asoc->user_frag = val; 3119 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3120 } else { 3121 sp->user_frag = val; 3122 } 3123 3124 return 0; 3125 } 3126 3127 3128 /* 3129 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3130 * 3131 * Requests that the peer mark the enclosed address as the association 3132 * primary. The enclosed address must be one of the association's 3133 * locally bound addresses. The following structure is used to make a 3134 * set primary request: 3135 */ 3136 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3137 unsigned int optlen) 3138 { 3139 struct net *net = sock_net(sk); 3140 struct sctp_sock *sp; 3141 struct sctp_association *asoc = NULL; 3142 struct sctp_setpeerprim prim; 3143 struct sctp_chunk *chunk; 3144 struct sctp_af *af; 3145 int err; 3146 3147 sp = sctp_sk(sk); 3148 3149 if (!net->sctp.addip_enable) 3150 return -EPERM; 3151 3152 if (optlen != sizeof(struct sctp_setpeerprim)) 3153 return -EINVAL; 3154 3155 if (copy_from_user(&prim, optval, optlen)) 3156 return -EFAULT; 3157 3158 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3159 if (!asoc) 3160 return -EINVAL; 3161 3162 if (!asoc->peer.asconf_capable) 3163 return -EPERM; 3164 3165 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3166 return -EPERM; 3167 3168 if (!sctp_state(asoc, ESTABLISHED)) 3169 return -ENOTCONN; 3170 3171 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3172 if (!af) 3173 return -EINVAL; 3174 3175 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3176 return -EADDRNOTAVAIL; 3177 3178 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3179 return -EADDRNOTAVAIL; 3180 3181 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3182 chunk = sctp_make_asconf_set_prim(asoc, 3183 (union sctp_addr *)&prim.sspp_addr); 3184 if (!chunk) 3185 return -ENOMEM; 3186 3187 err = sctp_send_asconf(asoc, chunk); 3188 3189 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3190 3191 return err; 3192 } 3193 3194 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3195 unsigned int optlen) 3196 { 3197 struct sctp_setadaptation adaptation; 3198 3199 if (optlen != sizeof(struct sctp_setadaptation)) 3200 return -EINVAL; 3201 if (copy_from_user(&adaptation, optval, optlen)) 3202 return -EFAULT; 3203
3204 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3205 3206 return 0; 3207 } 3208 3209 /* 3210 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3211 * 3212 * The context field in the sctp_sndrcvinfo structure is normally only 3213 * used when a failed message is retrieved holding the value that was 3214 * sent down on the actual send call. This option allows the setting of 3215 * a default context on an association basis that will be received on 3216 * reading messages from the peer. This is especially helpful in the 3217 * one-to-many model for an application to keep some reference to an 3218 * internal state machine that is processing messages on the 3219 * association. Note that the setting of this value only affects 3220 * received messages from the peer and does not affect the value that is 3221 * saved with outbound messages. 3222 */ 3223 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3224 unsigned int optlen) 3225 { 3226 struct sctp_assoc_value params; 3227 struct sctp_sock *sp; 3228 struct sctp_association *asoc; 3229 3230 if (optlen != sizeof(struct sctp_assoc_value)) 3231 return -EINVAL; 3232 if (copy_from_user(&params, optval, optlen)) 3233 return -EFAULT; 3234 3235 sp = sctp_sk(sk); 3236 3237 if (params.assoc_id != 0) { 3238 asoc = sctp_id2assoc(sk, params.assoc_id); 3239 if (!asoc) 3240 return -EINVAL; 3241 asoc->default_rcv_context = params.assoc_value; 3242 } else { 3243 sp->default_rcv_context = params.assoc_value; 3244 } 3245 3246 return 0; 3247 } 3248 3249 /* 3250 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3251 * 3252 * This option will at a minimum specify if the implementation is doing 3253 * fragmented interleave. Fragmented interleave, for a one to many 3254 * socket, is when subsequent calls to receive a message may return 3255 * parts of messages from different associations. Some implementations 3256 * may allow you to turn this value on or off. If so, when turned off, 3257 * no fragment interleave will occur (which will cause a head of line 3258 * blocking amongst multiple associations sharing the same one to many 3259 * socket). When this option is turned on, then each receive call may 3260 * come from a different association (thus the user must receive data 3261 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3262 * association each receive belongs to). 3263 * 3264 * This option takes a boolean value. A non-zero value indicates that 3265 * fragmented interleave is on. A value of zero indicates that 3266 * fragmented interleave is off. 3267 * 3268 * Note that it is important that an implementation that allows this 3269 * option to be turned on, have it off by default. Otherwise an unaware 3270 * application using the one to many model may become confused and act 3271 * incorrectly. 3272 */ 3273 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3274 char __user *optval, 3275 unsigned int optlen) 3276 { 3277 int val; 3278 3279 if (optlen != sizeof(int)) 3280 return -EINVAL; 3281 if (get_user(val, (int __user *)optval)) 3282 return -EFAULT; 3283 3284 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3285 3286 return 0; 3287 } 3288 3289 /* 3290 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3291 * (SCTP_PARTIAL_DELIVERY_POINT) 3292 * 3293 * This option will set or get the SCTP partial delivery point. This 3294 * point is the size of a message where the partial delivery API will be 3295 * invoked to help free up rwnd space for the peer.
Setting this to a 3296 * lower value will cause partial deliveries to happen more often. The 3297 * call's argument is an integer that sets or gets the partial delivery 3298 * point. Note also that the call will fail if the user attempts to set 3299 * this value larger than the socket receive buffer size. 3300 * 3301 * Note that any single message having a length smaller than or equal to 3302 * the SCTP partial delivery point will be delivered in one single read 3303 * call as long as the user provided buffer is large enough to hold the 3304 * message. 3305 */ 3306 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3307 char __user *optval, 3308 unsigned int optlen) 3309 { 3310 u32 val; 3311 3312 if (optlen != sizeof(u32)) 3313 return -EINVAL; 3314 if (get_user(val, (int __user *)optval)) 3315 return -EFAULT; 3316 3317 /* Note: We double the receive buffer from what the user sets 3318 * it to be, also initial rwnd is based on rcvbuf/2. 3319 */ 3320 if (val > (sk->sk_rcvbuf >> 1)) 3321 return -EINVAL; 3322 3323 sctp_sk(sk)->pd_point = val; 3324 3325 return 0; /* is this the right error code? */ 3326 } 3327 3328 /* 3329 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3330 * 3331 * This option will allow a user to change the maximum burst of packets 3332 * that can be emitted by this association. Note that the default value 3333 * is 4, and some implementations may restrict this setting so that it 3334 * can only be lowered. 3335 * 3336 * NOTE: This text doesn't seem right. Do this on a socket basis with 3337 * future associations inheriting the socket value. 3338 */ 3339 static int sctp_setsockopt_maxburst(struct sock *sk, 3340 char __user *optval, 3341 unsigned int optlen) 3342 { 3343 struct sctp_assoc_value params; 3344 struct sctp_sock *sp; 3345 struct sctp_association *asoc; 3346 int val; 3347 int assoc_id = 0; 3348 3349 if (optlen == sizeof(int)) { 3350 pr_warn_ratelimited(DEPRECATED 3351 "%s (pid %d) " 3352 "Use of int in max_burst socket option deprecated.\n" 3353 "Use struct sctp_assoc_value instead\n", 3354 current->comm, task_pid_nr(current)); 3355 if (copy_from_user(&val, optval, optlen)) 3356 return -EFAULT; 3357 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3358 if (copy_from_user(&params, optval, optlen)) 3359 return -EFAULT; 3360 val = params.assoc_value; 3361 assoc_id = params.assoc_id; 3362 } else 3363 return -EINVAL; 3364 3365 sp = sctp_sk(sk); 3366 3367 if (assoc_id != 0) { 3368 asoc = sctp_id2assoc(sk, assoc_id); 3369 if (!asoc) 3370 return -EINVAL; 3371 asoc->max_burst = val; 3372 } else 3373 sp->max_burst = val; 3374 3375 return 0; 3376 } 3377 3378 /* 3379 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3380 * 3381 * This set option adds a chunk type that the user is requesting to be 3382 * received only in an authenticated way. Changes to the list of chunks 3383 * will only affect future associations on the socket.
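 *
 * A hedged example of requesting authentication for one chunk type
 * from user space (sd is a placeholder; DATA, chunk type 0, is only an
 * illustrative choice, and the call fails with EACCES unless SCTP AUTH
 * is enabled on the system):
 *
 *	struct sctp_authchunk ac = { .sauth_chunk = 0 };
 *	setsockopt(sd, SOL_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));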
3384 */ 3385 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3386 char __user *optval, 3387 unsigned int optlen) 3388 { 3389 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3390 struct sctp_authchunk val; 3391 3392 if (!ep->auth_enable) 3393 return -EACCES; 3394 3395 if (optlen != sizeof(struct sctp_authchunk)) 3396 return -EINVAL; 3397 if (copy_from_user(&val, optval, optlen)) 3398 return -EFAULT; 3399 3400 switch (val.sauth_chunk) { 3401 case SCTP_CID_INIT: 3402 case SCTP_CID_INIT_ACK: 3403 case SCTP_CID_SHUTDOWN_COMPLETE: 3404 case SCTP_CID_AUTH: 3405 return -EINVAL; 3406 } 3407 3408 /* add this chunk id to the endpoint */ 3409 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3410 } 3411 3412 /* 3413 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3414 * 3415 * This option gets or sets the list of HMAC algorithms that the local 3416 * endpoint requires the peer to use. 3417 */ 3418 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3419 char __user *optval, 3420 unsigned int optlen) 3421 { 3422 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3423 struct sctp_hmacalgo *hmacs; 3424 u32 idents; 3425 int err; 3426 3427 if (!ep->auth_enable) 3428 return -EACCES; 3429 3430 if (optlen < sizeof(struct sctp_hmacalgo)) 3431 return -EINVAL; 3432 3433 hmacs = memdup_user(optval, optlen); 3434 if (IS_ERR(hmacs)) 3435 return PTR_ERR(hmacs); 3436 3437 idents = hmacs->shmac_num_idents; 3438 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3439 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3440 err = -EINVAL; 3441 goto out; 3442 } 3443 3444 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3445 out: 3446 kfree(hmacs); 3447 return err; 3448 } 3449 3450 /* 3451 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3452 * 3453 * This option will set a shared secret key which is used to build an 3454 * association shared key. 3455 */ 3456 static int sctp_setsockopt_auth_key(struct sock *sk, 3457 char __user *optval, 3458 unsigned int optlen) 3459 { 3460 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3461 struct sctp_authkey *authkey; 3462 struct sctp_association *asoc; 3463 int ret; 3464 3465 if (!ep->auth_enable) 3466 return -EACCES; 3467 3468 if (optlen <= sizeof(struct sctp_authkey)) 3469 return -EINVAL; 3470 3471 authkey = memdup_user(optval, optlen); 3472 if (IS_ERR(authkey)) 3473 return PTR_ERR(authkey); 3474 3475 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3476 ret = -EINVAL; 3477 goto out; 3478 } 3479 3480 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3481 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3482 ret = -EINVAL; 3483 goto out; 3484 } 3485 3486 ret = sctp_auth_set_key(ep, asoc, authkey); 3487 out: 3488 kzfree(authkey); 3489 return ret; 3490 } 3491 3492 /* 3493 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3494 * 3495 * This option will get or set the active shared key to be used to build 3496 * the association shared key. 
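 *
 * Illustrative only (sd, assoc_id and key number 1 are assumed, and the
 * key must already have been installed with SCTP_AUTH_KEY):
 *
 *	struct sctp_authkeyid kid = { 0 };
 *	kid.scact_assoc_id = assoc_id;
 *	kid.scact_keynumber = 1;
 *	setsockopt(sd, SOL_SCTP, SCTP_AUTH_ACTIVE_KEY, &kid, sizeof(kid));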
3497 */ 3498 static int sctp_setsockopt_active_key(struct sock *sk, 3499 char __user *optval, 3500 unsigned int optlen) 3501 { 3502 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3503 struct sctp_authkeyid val; 3504 struct sctp_association *asoc; 3505 3506 if (!ep->auth_enable) 3507 return -EACCES; 3508 3509 if (optlen != sizeof(struct sctp_authkeyid)) 3510 return -EINVAL; 3511 if (copy_from_user(&val, optval, optlen)) 3512 return -EFAULT; 3513 3514 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3515 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3516 return -EINVAL; 3517 3518 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3519 } 3520 3521 /* 3522 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3523 * 3524 * This set option will delete a shared secret key from use. 3525 */ 3526 static int sctp_setsockopt_del_key(struct sock *sk, 3527 char __user *optval, 3528 unsigned int optlen) 3529 { 3530 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3531 struct sctp_authkeyid val; 3532 struct sctp_association *asoc; 3533 3534 if (!ep->auth_enable) 3535 return -EACCES; 3536 3537 if (optlen != sizeof(struct sctp_authkeyid)) 3538 return -EINVAL; 3539 if (copy_from_user(&val, optval, optlen)) 3540 return -EFAULT; 3541 3542 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3543 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3544 return -EINVAL; 3545 3546 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3547 3548 } 3549 3550 /* 3551 * 8.1.23 SCTP_AUTO_ASCONF 3552 * 3553 * This option will enable or disable the use of the automatic generation of 3554 * ASCONF chunks to add and delete addresses to an existing association. Note 3555 * that this option has two caveats namely: a) it only affects sockets that 3556 * are bound to all addresses available to the SCTP stack, and b) the system 3557 * administrator may have an overriding control that turns the ASCONF feature 3558 * off no matter what setting the socket option may have. 3559 * This option expects an integer boolean flag, where a non-zero value turns on 3560 * the option, and a zero value turns off the option. 3561 * Note. In this implementation, socket operation overrides default parameter 3562 * being set by sysctl as well as FreeBSD implementation 3563 */ 3564 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3565 unsigned int optlen) 3566 { 3567 int val; 3568 struct sctp_sock *sp = sctp_sk(sk); 3569 3570 if (optlen < sizeof(int)) 3571 return -EINVAL; 3572 if (get_user(val, (int __user *)optval)) 3573 return -EFAULT; 3574 if (!sctp_is_ep_boundall(sk) && val) 3575 return -EINVAL; 3576 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3577 return 0; 3578 3579 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3580 if (val == 0 && sp->do_auto_asconf) { 3581 list_del(&sp->auto_asconf_list); 3582 sp->do_auto_asconf = 0; 3583 } else if (val && !sp->do_auto_asconf) { 3584 list_add_tail(&sp->auto_asconf_list, 3585 &sock_net(sk)->sctp.auto_asconf_splist); 3586 sp->do_auto_asconf = 1; 3587 } 3588 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3589 return 0; 3590 } 3591 3592 /* 3593 * SCTP_PEER_ADDR_THLDS 3594 * 3595 * This option allows us to alter the partially failed threshold for one or all 3596 * transports in an association. 
See Section 6.1 of: 3597 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3598 */ 3599 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3600 char __user *optval, 3601 unsigned int optlen) 3602 { 3603 struct sctp_paddrthlds val; 3604 struct sctp_transport *trans; 3605 struct sctp_association *asoc; 3606 3607 if (optlen < sizeof(struct sctp_paddrthlds)) 3608 return -EINVAL; 3609 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3610 sizeof(struct sctp_paddrthlds))) 3611 return -EFAULT; 3612 3613 3614 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3615 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3616 if (!asoc) 3617 return -ENOENT; 3618 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3619 transports) { 3620 if (val.spt_pathmaxrxt) 3621 trans->pathmaxrxt = val.spt_pathmaxrxt; 3622 trans->pf_retrans = val.spt_pathpfthld; 3623 } 3624 3625 if (val.spt_pathmaxrxt) 3626 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3627 asoc->pf_retrans = val.spt_pathpfthld; 3628 } else { 3629 trans = sctp_addr_id2transport(sk, &val.spt_address, 3630 val.spt_assoc_id); 3631 if (!trans) 3632 return -ENOENT; 3633 3634 if (val.spt_pathmaxrxt) 3635 trans->pathmaxrxt = val.spt_pathmaxrxt; 3636 trans->pf_retrans = val.spt_pathpfthld; 3637 } 3638 3639 return 0; 3640 } 3641 3642 static int sctp_setsockopt_recvrcvinfo(struct sock *sk, 3643 char __user *optval, 3644 unsigned int optlen) 3645 { 3646 int val; 3647 3648 if (optlen < sizeof(int)) 3649 return -EINVAL; 3650 if (get_user(val, (int __user *) optval)) 3651 return -EFAULT; 3652 3653 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; 3654 3655 return 0; 3656 } 3657 3658 static int sctp_setsockopt_recvnxtinfo(struct sock *sk, 3659 char __user *optval, 3660 unsigned int optlen) 3661 { 3662 int val; 3663 3664 if (optlen < sizeof(int)) 3665 return -EINVAL; 3666 if (get_user(val, (int __user *) optval)) 3667 return -EFAULT; 3668 3669 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; 3670 3671 return 0; 3672 } 3673 3674 /* API 6.2 setsockopt(), getsockopt() 3675 * 3676 * Applications use setsockopt() and getsockopt() to set or retrieve 3677 * socket options. Socket options are used to change the default 3678 * behavior of sockets calls. They are described in Section 7. 3679 * 3680 * The syntax is: 3681 * 3682 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3683 * int __user *optlen); 3684 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3685 * int optlen); 3686 * 3687 * sd - the socket descript. 3688 * level - set to IPPROTO_SCTP for all SCTP options. 3689 * optname - the option name. 3690 * optval - the buffer to store the value of the option. 3691 * optlen - the size of the buffer. 3692 */ 3693 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3694 char __user *optval, unsigned int optlen) 3695 { 3696 int retval = 0; 3697 3698 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3699 3700 /* I can hardly begin to describe how wrong this is. This is 3701 * so broken as to be worse than useless. The API draft 3702 * REALLY is NOT helpful here... I am not convinced that the 3703 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3704 * are at all well-founded. 
3705 */ 3706 if (level != SOL_SCTP) { 3707 struct sctp_af *af = sctp_sk(sk)->pf->af; 3708 retval = af->setsockopt(sk, level, optname, optval, optlen); 3709 goto out_nounlock; 3710 } 3711 3712 lock_sock(sk); 3713 3714 switch (optname) { 3715 case SCTP_SOCKOPT_BINDX_ADD: 3716 /* 'optlen' is the size of the addresses buffer. */ 3717 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3718 optlen, SCTP_BINDX_ADD_ADDR); 3719 break; 3720 3721 case SCTP_SOCKOPT_BINDX_REM: 3722 /* 'optlen' is the size of the addresses buffer. */ 3723 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3724 optlen, SCTP_BINDX_REM_ADDR); 3725 break; 3726 3727 case SCTP_SOCKOPT_CONNECTX_OLD: 3728 /* 'optlen' is the size of the addresses buffer. */ 3729 retval = sctp_setsockopt_connectx_old(sk, 3730 (struct sockaddr __user *)optval, 3731 optlen); 3732 break; 3733 3734 case SCTP_SOCKOPT_CONNECTX: 3735 /* 'optlen' is the size of the addresses buffer. */ 3736 retval = sctp_setsockopt_connectx(sk, 3737 (struct sockaddr __user *)optval, 3738 optlen); 3739 break; 3740 3741 case SCTP_DISABLE_FRAGMENTS: 3742 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3743 break; 3744 3745 case SCTP_EVENTS: 3746 retval = sctp_setsockopt_events(sk, optval, optlen); 3747 break; 3748 3749 case SCTP_AUTOCLOSE: 3750 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3751 break; 3752 3753 case SCTP_PEER_ADDR_PARAMS: 3754 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3755 break; 3756 3757 case SCTP_DELAYED_SACK: 3758 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3759 break; 3760 case SCTP_PARTIAL_DELIVERY_POINT: 3761 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3762 break; 3763 3764 case SCTP_INITMSG: 3765 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3766 break; 3767 case SCTP_DEFAULT_SEND_PARAM: 3768 retval = sctp_setsockopt_default_send_param(sk, optval, 3769 optlen); 3770 break; 3771 case SCTP_DEFAULT_SNDINFO: 3772 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); 3773 break; 3774 case SCTP_PRIMARY_ADDR: 3775 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3776 break; 3777 case SCTP_SET_PEER_PRIMARY_ADDR: 3778 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3779 break; 3780 case SCTP_NODELAY: 3781 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3782 break; 3783 case SCTP_RTOINFO: 3784 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3785 break; 3786 case SCTP_ASSOCINFO: 3787 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3788 break; 3789 case SCTP_I_WANT_MAPPED_V4_ADDR: 3790 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3791 break; 3792 case SCTP_MAXSEG: 3793 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3794 break; 3795 case SCTP_ADAPTATION_LAYER: 3796 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3797 break; 3798 case SCTP_CONTEXT: 3799 retval = sctp_setsockopt_context(sk, optval, optlen); 3800 break; 3801 case SCTP_FRAGMENT_INTERLEAVE: 3802 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3803 break; 3804 case SCTP_MAX_BURST: 3805 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3806 break; 3807 case SCTP_AUTH_CHUNK: 3808 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3809 break; 3810 case SCTP_HMAC_IDENT: 3811 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3812 break; 3813 case SCTP_AUTH_KEY: 3814 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3815 break; 3816 case 
SCTP_AUTH_ACTIVE_KEY: 3817 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3818 break; 3819 case SCTP_AUTH_DELETE_KEY: 3820 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3821 break; 3822 case SCTP_AUTO_ASCONF: 3823 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3824 break; 3825 case SCTP_PEER_ADDR_THLDS: 3826 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3827 break; 3828 case SCTP_RECVRCVINFO: 3829 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); 3830 break; 3831 case SCTP_RECVNXTINFO: 3832 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); 3833 break; 3834 default: 3835 retval = -ENOPROTOOPT; 3836 break; 3837 } 3838 3839 release_sock(sk); 3840 3841 out_nounlock: 3842 return retval; 3843 } 3844 3845 /* API 3.1.6 connect() - UDP Style Syntax 3846 * 3847 * An application may use the connect() call in the UDP model to initiate an 3848 * association without sending data. 3849 * 3850 * The syntax is: 3851 * 3852 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3853 * 3854 * sd: the socket descriptor to have a new association added to. 3855 * 3856 * nam: the address structure (either struct sockaddr_in or struct 3857 * sockaddr_in6 defined in RFC2553 [7]). 3858 * 3859 * len: the size of the address. 3860 */ 3861 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3862 int addr_len) 3863 { 3864 int err = 0; 3865 struct sctp_af *af; 3866 3867 lock_sock(sk); 3868 3869 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3870 addr, addr_len); 3871 3872 /* Validate addr_len before calling common connect/connectx routine. */ 3873 af = sctp_get_af_specific(addr->sa_family); 3874 if (!af || addr_len < af->sockaddr_len) { 3875 err = -EINVAL; 3876 } else { 3877 /* Pass correct addr len to common routine (so it knows there 3878 * is only one address being passed. 3879 */ 3880 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3881 } 3882 3883 release_sock(sk); 3884 return err; 3885 } 3886 3887 /* FIXME: Write comments. */ 3888 static int sctp_disconnect(struct sock *sk, int flags) 3889 { 3890 return -EOPNOTSUPP; /* STUB */ 3891 } 3892 3893 /* 4.1.4 accept() - TCP Style Syntax 3894 * 3895 * Applications use accept() call to remove an established SCTP 3896 * association from the accept queue of the endpoint. A new socket 3897 * descriptor will be returned from accept() to represent the newly 3898 * formed association. 3899 */ 3900 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3901 { 3902 struct sctp_sock *sp; 3903 struct sctp_endpoint *ep; 3904 struct sock *newsk = NULL; 3905 struct sctp_association *asoc; 3906 long timeo; 3907 int error = 0; 3908 3909 lock_sock(sk); 3910 3911 sp = sctp_sk(sk); 3912 ep = sp->ep; 3913 3914 if (!sctp_style(sk, TCP)) { 3915 error = -EOPNOTSUPP; 3916 goto out; 3917 } 3918 3919 if (!sctp_sstate(sk, LISTENING)) { 3920 error = -EINVAL; 3921 goto out; 3922 } 3923 3924 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3925 3926 error = sctp_wait_for_accept(sk, timeo); 3927 if (error) 3928 goto out; 3929 3930 /* We treat the list of associations on the endpoint as the accept 3931 * queue and pick the first association on the list. 3932 */ 3933 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3934 3935 newsk = sp->pf->create_accept_sk(sk, asoc); 3936 if (!newsk) { 3937 error = -ENOMEM; 3938 goto out; 3939 } 3940 3941 /* Populate the fields of the newsk from the oldsk and migrate the 3942 * asoc to the newsk. 
3943 */ 3944 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3945 3946 out: 3947 release_sock(sk); 3948 *err = error; 3949 return newsk; 3950 } 3951 3952 /* The SCTP ioctl handler. */ 3953 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3954 { 3955 int rc = -ENOTCONN; 3956 3957 lock_sock(sk); 3958 3959 /* 3960 * SEQPACKET-style sockets in LISTENING state are valid, for 3961 * SCTP, so only discard TCP-style sockets in LISTENING state. 3962 */ 3963 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3964 goto out; 3965 3966 switch (cmd) { 3967 case SIOCINQ: { 3968 struct sk_buff *skb; 3969 unsigned int amount = 0; 3970 3971 skb = skb_peek(&sk->sk_receive_queue); 3972 if (skb != NULL) { 3973 /* 3974 * We will only return the amount of this packet since 3975 * that is all that will be read. 3976 */ 3977 amount = skb->len; 3978 } 3979 rc = put_user(amount, (int __user *)arg); 3980 break; 3981 } 3982 default: 3983 rc = -ENOIOCTLCMD; 3984 break; 3985 } 3986 out: 3987 release_sock(sk); 3988 return rc; 3989 } 3990 3991 /* This is the function which gets called during socket creation to 3992 * initialized the SCTP-specific portion of the sock. 3993 * The sock structure should already be zero-filled memory. 3994 */ 3995 static int sctp_init_sock(struct sock *sk) 3996 { 3997 struct net *net = sock_net(sk); 3998 struct sctp_sock *sp; 3999 4000 pr_debug("%s: sk:%p\n", __func__, sk); 4001 4002 sp = sctp_sk(sk); 4003 4004 /* Initialize the SCTP per socket area. */ 4005 switch (sk->sk_type) { 4006 case SOCK_SEQPACKET: 4007 sp->type = SCTP_SOCKET_UDP; 4008 break; 4009 case SOCK_STREAM: 4010 sp->type = SCTP_SOCKET_TCP; 4011 break; 4012 default: 4013 return -ESOCKTNOSUPPORT; 4014 } 4015 4016 /* Initialize default send parameters. These parameters can be 4017 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4018 */ 4019 sp->default_stream = 0; 4020 sp->default_ppid = 0; 4021 sp->default_flags = 0; 4022 sp->default_context = 0; 4023 sp->default_timetolive = 0; 4024 4025 sp->default_rcv_context = 0; 4026 sp->max_burst = net->sctp.max_burst; 4027 4028 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 4029 4030 /* Initialize default setup parameters. These parameters 4031 * can be modified with the SCTP_INITMSG socket option or 4032 * overridden by the SCTP_INIT CMSG. 4033 */ 4034 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 4035 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 4036 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 4037 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 4038 4039 /* Initialize default RTO related parameters. These parameters can 4040 * be modified for with the SCTP_RTOINFO socket option. 4041 */ 4042 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 4043 sp->rtoinfo.srto_max = net->sctp.rto_max; 4044 sp->rtoinfo.srto_min = net->sctp.rto_min; 4045 4046 /* Initialize default association related parameters. These parameters 4047 * can be modified with the SCTP_ASSOCINFO socket option. 4048 */ 4049 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 4050 sp->assocparams.sasoc_number_peer_destinations = 0; 4051 sp->assocparams.sasoc_peer_rwnd = 0; 4052 sp->assocparams.sasoc_local_rwnd = 0; 4053 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 4054 4055 /* Initialize default event subscriptions. By default, all the 4056 * options are off. 4057 */ 4058 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 4059 4060 /* Default Peer Address Parameters. 
These defaults can 4061 * be modified via SCTP_PEER_ADDR_PARAMS 4062 */ 4063 sp->hbinterval = net->sctp.hb_interval; 4064 sp->pathmaxrxt = net->sctp.max_retrans_path; 4065 sp->pathmtu = 0; /* allow default discovery */ 4066 sp->sackdelay = net->sctp.sack_timeout; 4067 sp->sackfreq = 2; 4068 sp->param_flags = SPP_HB_ENABLE | 4069 SPP_PMTUD_ENABLE | 4070 SPP_SACKDELAY_ENABLE; 4071 4072 /* If enabled no SCTP message fragmentation will be performed. 4073 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 4074 */ 4075 sp->disable_fragments = 0; 4076 4077 /* Enable Nagle algorithm by default. */ 4078 sp->nodelay = 0; 4079 4080 sp->recvrcvinfo = 0; 4081 sp->recvnxtinfo = 0; 4082 4083 /* Enable by default. */ 4084 sp->v4mapped = 1; 4085 4086 /* Auto-close idle associations after the configured 4087 * number of seconds. A value of 0 disables this 4088 * feature. Configure through the SCTP_AUTOCLOSE socket option, 4089 * for UDP-style sockets only. 4090 */ 4091 sp->autoclose = 0; 4092 4093 /* User specified fragmentation limit. */ 4094 sp->user_frag = 0; 4095 4096 sp->adaptation_ind = 0; 4097 4098 sp->pf = sctp_get_pf_specific(sk->sk_family); 4099 4100 /* Control variables for partial data delivery. */ 4101 atomic_set(&sp->pd_mode, 0); 4102 skb_queue_head_init(&sp->pd_lobby); 4103 sp->frag_interleave = 0; 4104 4105 /* Create a per socket endpoint structure. Even if we 4106 * change the data structure relationships, this may still 4107 * be useful for storing pre-connect address information. 4108 */ 4109 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4110 if (!sp->ep) 4111 return -ENOMEM; 4112 4113 sp->hmac = NULL; 4114 4115 sk->sk_destruct = sctp_destruct_sock; 4116 4117 SCTP_DBG_OBJCNT_INC(sock); 4118 4119 local_bh_disable(); 4120 percpu_counter_inc(&sctp_sockets_allocated); 4121 sock_prot_inuse_add(net, sk->sk_prot, 1); 4122 4123 /* Nothing can fail after this block, otherwise 4124 * sctp_destroy_sock() will be called without addr_wq_lock held 4125 */ 4126 if (net->sctp.default_auto_asconf) { 4127 spin_lock(&sock_net(sk)->sctp.addr_wq_lock); 4128 list_add_tail(&sp->auto_asconf_list, 4129 &net->sctp.auto_asconf_splist); 4130 sp->do_auto_asconf = 1; 4131 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); 4132 } else { 4133 sp->do_auto_asconf = 0; 4134 } 4135 4136 local_bh_enable(); 4137 4138 return 0; 4139 } 4140 4141 /* Cleanup any SCTP per socket resources. Must be called with 4142 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true 4143 */ 4144 static void sctp_destroy_sock(struct sock *sk) 4145 { 4146 struct sctp_sock *sp; 4147 4148 pr_debug("%s: sk:%p\n", __func__, sk); 4149 4150 /* Release our hold on the endpoint. */ 4151 sp = sctp_sk(sk); 4152 /* This could happen during socket init, thus we bail out 4153 * early, since the rest of the below is not setup either. 4154 */ 4155 if (sp->ep == NULL) 4156 return; 4157 4158 if (sp->do_auto_asconf) { 4159 sp->do_auto_asconf = 0; 4160 list_del(&sp->auto_asconf_list); 4161 } 4162 sctp_endpoint_free(sp->ep); 4163 local_bh_disable(); 4164 percpu_counter_dec(&sctp_sockets_allocated); 4165 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4166 local_bh_enable(); 4167 } 4168 4169 /* Triggered when there are no references on the socket anymore */ 4170 static void sctp_destruct_sock(struct sock *sk) 4171 { 4172 struct sctp_sock *sp = sctp_sk(sk); 4173 4174 /* Free up the HMAC transform. 
*/ 4175 crypto_free_hash(sp->hmac); 4176 4177 inet_sock_destruct(sk); 4178 } 4179 4180 /* API 4.1.7 shutdown() - TCP Style Syntax 4181 * int shutdown(int socket, int how); 4182 * 4183 * sd - the socket descriptor of the association to be closed. 4184 * how - Specifies the type of shutdown. The values are 4185 * as follows: 4186 * SHUT_RD 4187 * Disables further receive operations. No SCTP 4188 * protocol action is taken. 4189 * SHUT_WR 4190 * Disables further send operations, and initiates 4191 * the SCTP shutdown sequence. 4192 * SHUT_RDWR 4193 * Disables further send and receive operations 4194 * and initiates the SCTP shutdown sequence. 4195 */ 4196 static void sctp_shutdown(struct sock *sk, int how) 4197 { 4198 struct net *net = sock_net(sk); 4199 struct sctp_endpoint *ep; 4200 struct sctp_association *asoc; 4201 4202 if (!sctp_style(sk, TCP)) 4203 return; 4204 4205 if (how & SEND_SHUTDOWN) { 4206 ep = sctp_sk(sk)->ep; 4207 if (!list_empty(&ep->asocs)) { 4208 asoc = list_entry(ep->asocs.next, 4209 struct sctp_association, asocs); 4210 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4211 } 4212 } 4213 } 4214 4215 /* 7.2.1 Association Status (SCTP_STATUS) 4216 4217 * Applications can retrieve current status information about an 4218 * association, including association state, peer receiver window size, 4219 * number of unacked data chunks, and number of data chunks pending 4220 * receipt. This information is read-only. 4221 */ 4222 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4223 char __user *optval, 4224 int __user *optlen) 4225 { 4226 struct sctp_status status; 4227 struct sctp_association *asoc = NULL; 4228 struct sctp_transport *transport; 4229 sctp_assoc_t associd; 4230 int retval = 0; 4231 4232 if (len < sizeof(status)) { 4233 retval = -EINVAL; 4234 goto out; 4235 } 4236 4237 len = sizeof(status); 4238 if (copy_from_user(&status, optval, len)) { 4239 retval = -EFAULT; 4240 goto out; 4241 } 4242 4243 associd = status.sstat_assoc_id; 4244 asoc = sctp_id2assoc(sk, associd); 4245 if (!asoc) { 4246 retval = -EINVAL; 4247 goto out; 4248 } 4249 4250 transport = asoc->peer.primary_path; 4251 4252 status.sstat_assoc_id = sctp_assoc2id(asoc); 4253 status.sstat_state = sctp_assoc_to_state(asoc); 4254 status.sstat_rwnd = asoc->peer.rwnd; 4255 status.sstat_unackdata = asoc->unack_data; 4256 4257 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4258 status.sstat_instrms = asoc->c.sinit_max_instreams; 4259 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4260 status.sstat_fragmentation_point = asoc->frag_point; 4261 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4262 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4263 transport->af_specific->sockaddr_len); 4264 /* Map ipv4 address into v4-mapped-on-v6 address. 
*/ 4265 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 4266 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4267 status.sstat_primary.spinfo_state = transport->state; 4268 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4269 status.sstat_primary.spinfo_srtt = transport->srtt; 4270 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4271 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4272 4273 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4274 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4275 4276 if (put_user(len, optlen)) { 4277 retval = -EFAULT; 4278 goto out; 4279 } 4280 4281 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4282 __func__, len, status.sstat_state, status.sstat_rwnd, 4283 status.sstat_assoc_id); 4284 4285 if (copy_to_user(optval, &status, len)) { 4286 retval = -EFAULT; 4287 goto out; 4288 } 4289 4290 out: 4291 return retval; 4292 } 4293 4294 4295 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4296 * 4297 * Applications can retrieve information about a specific peer address 4298 * of an association, including its reachability state, congestion 4299 * window, and retransmission timer values. This information is 4300 * read-only. 4301 */ 4302 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4303 char __user *optval, 4304 int __user *optlen) 4305 { 4306 struct sctp_paddrinfo pinfo; 4307 struct sctp_transport *transport; 4308 int retval = 0; 4309 4310 if (len < sizeof(pinfo)) { 4311 retval = -EINVAL; 4312 goto out; 4313 } 4314 4315 len = sizeof(pinfo); 4316 if (copy_from_user(&pinfo, optval, len)) { 4317 retval = -EFAULT; 4318 goto out; 4319 } 4320 4321 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4322 pinfo.spinfo_assoc_id); 4323 if (!transport) 4324 return -EINVAL; 4325 4326 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4327 pinfo.spinfo_state = transport->state; 4328 pinfo.spinfo_cwnd = transport->cwnd; 4329 pinfo.spinfo_srtt = transport->srtt; 4330 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4331 pinfo.spinfo_mtu = transport->pathmtu; 4332 4333 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4334 pinfo.spinfo_state = SCTP_ACTIVE; 4335 4336 if (put_user(len, optlen)) { 4337 retval = -EFAULT; 4338 goto out; 4339 } 4340 4341 if (copy_to_user(optval, &pinfo, len)) { 4342 retval = -EFAULT; 4343 goto out; 4344 } 4345 4346 out: 4347 return retval; 4348 } 4349 4350 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4351 * 4352 * This option is a on/off flag. If enabled no SCTP message 4353 * fragmentation will be performed. Instead if a message being sent 4354 * exceeds the current PMTU size, the message will NOT be sent and 4355 * instead a error will be indicated to the user. 4356 */ 4357 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4358 char __user *optval, int __user *optlen) 4359 { 4360 int val; 4361 4362 if (len < sizeof(int)) 4363 return -EINVAL; 4364 4365 len = sizeof(int); 4366 val = (sctp_sk(sk)->disable_fragments == 1); 4367 if (put_user(len, optlen)) 4368 return -EFAULT; 4369 if (copy_to_user(optval, &val, len)) 4370 return -EFAULT; 4371 return 0; 4372 } 4373 4374 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4375 * 4376 * This socket option is used to specify various notifications and 4377 * ancillary data the user wishes to receive. 
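 *
 * A user-space sketch (illustrative only) that reads the current
 * subscriptions back with getsockopt():
 *
 *   struct sctp_event_subscribe events;
 *   socklen_t len = sizeof(events);
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &events, &len);
 *
 * The individual notification flags in the structure can then be
 * inspected, modified and written back with setsockopt().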
4378 */ 4379 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4380 int __user *optlen) 4381 { 4382 if (len <= 0) 4383 return -EINVAL; 4384 if (len > sizeof(struct sctp_event_subscribe)) 4385 len = sizeof(struct sctp_event_subscribe); 4386 if (put_user(len, optlen)) 4387 return -EFAULT; 4388 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4389 return -EFAULT; 4390 return 0; 4391 } 4392 4393 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4394 * 4395 * This socket option is applicable to the UDP-style socket only. When 4396 * set it will cause associations that are idle for more than the 4397 * specified number of seconds to automatically close. An association 4398 * being idle is defined an association that has NOT sent or received 4399 * user data. The special value of '0' indicates that no automatic 4400 * close of any associations should be performed. The option expects an 4401 * integer defining the number of seconds of idle time before an 4402 * association is closed. 4403 */ 4404 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4405 { 4406 /* Applicable to UDP-style socket only */ 4407 if (sctp_style(sk, TCP)) 4408 return -EOPNOTSUPP; 4409 if (len < sizeof(int)) 4410 return -EINVAL; 4411 len = sizeof(int); 4412 if (put_user(len, optlen)) 4413 return -EFAULT; 4414 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4415 return -EFAULT; 4416 return 0; 4417 } 4418 4419 /* Helper routine to branch off an association to a new socket. */ 4420 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4421 { 4422 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4423 struct sctp_sock *sp = sctp_sk(sk); 4424 struct socket *sock; 4425 int err = 0; 4426 4427 if (!asoc) 4428 return -EINVAL; 4429 4430 /* An association cannot be branched off from an already peeled-off 4431 * socket, nor is this supported for tcp style sockets. 4432 */ 4433 if (!sctp_style(sk, UDP)) 4434 return -EINVAL; 4435 4436 /* Create a new socket. */ 4437 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4438 if (err < 0) 4439 return err; 4440 4441 sctp_copy_sock(sock->sk, sk, asoc); 4442 4443 /* Make peeled-off sockets more like 1-1 accepted sockets. 4444 * Set the daddr and initialize id to something more random 4445 */ 4446 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); 4447 4448 /* Populate the fields of the newsk from the oldsk and migrate the 4449 * asoc to the newsk. 4450 */ 4451 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4452 4453 *sockp = sock; 4454 4455 return err; 4456 } 4457 EXPORT_SYMBOL(sctp_do_peeloff); 4458 4459 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4460 { 4461 sctp_peeloff_arg_t peeloff; 4462 struct socket *newsock; 4463 struct file *newfile; 4464 int retval = 0; 4465 4466 if (len < sizeof(sctp_peeloff_arg_t)) 4467 return -EINVAL; 4468 len = sizeof(sctp_peeloff_arg_t); 4469 if (copy_from_user(&peeloff, optval, len)) 4470 return -EFAULT; 4471 4472 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4473 if (retval < 0) 4474 goto out; 4475 4476 /* Map the socket to an unused fd that can be returned to the user. 
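 * From user space this path is normally reached with
 * getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF, ...), passing a
 * sctp_peeloff_arg_t whose associd names the association and whose
 * sd member receives the new descriptor (descriptive note only; see
 * the surrounding code for the exact semantics).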
*/ 4477 retval = get_unused_fd_flags(0); 4478 if (retval < 0) { 4479 sock_release(newsock); 4480 goto out; 4481 } 4482 4483 newfile = sock_alloc_file(newsock, 0, NULL); 4484 if (unlikely(IS_ERR(newfile))) { 4485 put_unused_fd(retval); 4486 sock_release(newsock); 4487 return PTR_ERR(newfile); 4488 } 4489 4490 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4491 retval); 4492 4493 /* Return the fd mapped to the new socket. */ 4494 if (put_user(len, optlen)) { 4495 fput(newfile); 4496 put_unused_fd(retval); 4497 return -EFAULT; 4498 } 4499 peeloff.sd = retval; 4500 if (copy_to_user(optval, &peeloff, len)) { 4501 fput(newfile); 4502 put_unused_fd(retval); 4503 return -EFAULT; 4504 } 4505 fd_install(retval, newfile); 4506 out: 4507 return retval; 4508 } 4509 4510 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4511 * 4512 * Applications can enable or disable heartbeats for any peer address of 4513 * an association, modify an address's heartbeat interval, force a 4514 * heartbeat to be sent immediately, and adjust the address's maximum 4515 * number of retransmissions sent before an address is considered 4516 * unreachable. The following structure is used to access and modify an 4517 * address's parameters: 4518 * 4519 * struct sctp_paddrparams { 4520 * sctp_assoc_t spp_assoc_id; 4521 * struct sockaddr_storage spp_address; 4522 * uint32_t spp_hbinterval; 4523 * uint16_t spp_pathmaxrxt; 4524 * uint32_t spp_pathmtu; 4525 * uint32_t spp_sackdelay; 4526 * uint32_t spp_flags; 4527 * }; 4528 * 4529 * spp_assoc_id - (one-to-many style socket) This is filled in the 4530 * application, and identifies the association for 4531 * this query. 4532 * spp_address - This specifies which address is of interest. 4533 * spp_hbinterval - This contains the value of the heartbeat interval, 4534 * in milliseconds. If a value of zero 4535 * is present in this field then no changes are to 4536 * be made to this parameter. 4537 * spp_pathmaxrxt - This contains the maximum number of 4538 * retransmissions before this address shall be 4539 * considered unreachable. If a value of zero 4540 * is present in this field then no changes are to 4541 * be made to this parameter. 4542 * spp_pathmtu - When Path MTU discovery is disabled the value 4543 * specified here will be the "fixed" path mtu. 4544 * Note that if the spp_address field is empty 4545 * then all associations on this address will 4546 * have this fixed path mtu set upon them. 4547 * 4548 * spp_sackdelay - When delayed sack is enabled, this value specifies 4549 * the number of milliseconds that sacks will be delayed 4550 * for. This value will apply to all addresses of an 4551 * association if the spp_address field is empty. Note 4552 * also, that if delayed sack is enabled and this 4553 * value is set to 0, no change is made to the last 4554 * recorded delayed sack timer value. 4555 * 4556 * spp_flags - These flags are used to control various features 4557 * on an association. The flag field may contain 4558 * zero or more of the following options. 4559 * 4560 * SPP_HB_ENABLE - Enable heartbeats on the 4561 * specified address. Note that if the address 4562 * field is empty all addresses for the association 4563 * have heartbeats enabled upon them. 4564 * 4565 * SPP_HB_DISABLE - Disable heartbeats on the 4566 * speicifed address. Note that if the address 4567 * field is empty all addresses for the association 4568 * will have their heartbeats disabled. 
Note also 4569 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4570 * mutually exclusive, only one of these two should 4571 * be specified. Enabling both flags will have 4572 * undetermined results. 4573 * 4574 * SPP_HB_DEMAND - Request a user-initiated heartbeat 4575 * to be made immediately. 4576 * 4577 * SPP_PMTUD_ENABLE - This field will enable PMTU 4578 * discovery upon the specified address. Note that 4579 * if the address field is empty then all addresses 4580 * on the association are affected. 4581 * 4582 * SPP_PMTUD_DISABLE - This field will disable PMTU 4583 * discovery upon the specified address. Note that 4584 * if the address field is empty then all addresses 4585 * on the association are affected. Note also that 4586 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4587 * exclusive. Enabling both will have undetermined 4588 * results. 4589 * 4590 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4591 * on delayed sack. The time specified in spp_sackdelay 4592 * is used to specify the sack delay for this address. Note 4593 * that if spp_address is empty then all addresses will 4594 * enable delayed sack and take on the sack delay 4595 * value specified in spp_sackdelay. 4596 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4597 * off delayed sack. If the spp_address field is blank then 4598 * delayed sack is disabled for the entire association. Note 4599 * also that this flag is mutually exclusive with 4600 * SPP_SACKDELAY_ENABLE; setting both will have undefined 4601 * results. 4602 */ 4603 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4604 char __user *optval, int __user *optlen) 4605 { 4606 struct sctp_paddrparams params; 4607 struct sctp_transport *trans = NULL; 4608 struct sctp_association *asoc = NULL; 4609 struct sctp_sock *sp = sctp_sk(sk); 4610 4611 if (len < sizeof(struct sctp_paddrparams)) 4612 return -EINVAL; 4613 len = sizeof(struct sctp_paddrparams); 4614 if (copy_from_user(&params, optval, len)) 4615 return -EFAULT; 4616 4617 /* If an address other than INADDR_ANY is specified, and 4618 * no transport is found, then the request is invalid. 4619 */ 4620 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 4621 trans = sctp_addr_id2transport(sk, &params.spp_address, 4622 params.spp_assoc_id); 4623 if (!trans) { 4624 pr_debug("%s: failed no transport\n", __func__); 4625 return -EINVAL; 4626 } 4627 } 4628 4629 /* Get association, if assoc_id != 0 and the socket is a one 4630 * to many style socket, and an association was not found, then 4631 * the id was invalid. 4632 */ 4633 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4634 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4635 pr_debug("%s: failed no association\n", __func__); 4636 return -EINVAL; 4637 } 4638 4639 if (trans) { 4640 /* Fetch transport values. */ 4641 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4642 params.spp_pathmtu = trans->pathmtu; 4643 params.spp_pathmaxrxt = trans->pathmaxrxt; 4644 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4645 4646 /*draft-11 doesn't say what to return in spp_flags*/ 4647 params.spp_flags = trans->param_flags; 4648 } else if (asoc) { 4649 /* Fetch association values.
*/ 4650 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4651 params.spp_pathmtu = asoc->pathmtu; 4652 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4653 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4654 4655 /*draft-11 doesn't say what to return in spp_flags*/ 4656 params.spp_flags = asoc->param_flags; 4657 } else { 4658 /* Fetch socket values. */ 4659 params.spp_hbinterval = sp->hbinterval; 4660 params.spp_pathmtu = sp->pathmtu; 4661 params.spp_sackdelay = sp->sackdelay; 4662 params.spp_pathmaxrxt = sp->pathmaxrxt; 4663 4664 /*draft-11 doesn't say what to return in spp_flags*/ 4665 params.spp_flags = sp->param_flags; 4666 } 4667 4668 if (copy_to_user(optval, ¶ms, len)) 4669 return -EFAULT; 4670 4671 if (put_user(len, optlen)) 4672 return -EFAULT; 4673 4674 return 0; 4675 } 4676 4677 /* 4678 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4679 * 4680 * This option will effect the way delayed acks are performed. This 4681 * option allows you to get or set the delayed ack time, in 4682 * milliseconds. It also allows changing the delayed ack frequency. 4683 * Changing the frequency to 1 disables the delayed sack algorithm. If 4684 * the assoc_id is 0, then this sets or gets the endpoints default 4685 * values. If the assoc_id field is non-zero, then the set or get 4686 * effects the specified association for the one to many model (the 4687 * assoc_id field is ignored by the one to one model). Note that if 4688 * sack_delay or sack_freq are 0 when setting this option, then the 4689 * current values will remain unchanged. 4690 * 4691 * struct sctp_sack_info { 4692 * sctp_assoc_t sack_assoc_id; 4693 * uint32_t sack_delay; 4694 * uint32_t sack_freq; 4695 * }; 4696 * 4697 * sack_assoc_id - This parameter, indicates which association the user 4698 * is performing an action upon. Note that if this field's value is 4699 * zero then the endpoints default value is changed (effecting future 4700 * associations only). 4701 * 4702 * sack_delay - This parameter contains the number of milliseconds that 4703 * the user is requesting the delayed ACK timer be set to. Note that 4704 * this value is defined in the standard to be between 200 and 500 4705 * milliseconds. 4706 * 4707 * sack_freq - This parameter contains the number of packets that must 4708 * be received before a sack is sent without waiting for the delay 4709 * timer to expire. The default value for this is 2, setting this 4710 * value to 1 will disable the delayed sack algorithm. 4711 */ 4712 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 4713 char __user *optval, 4714 int __user *optlen) 4715 { 4716 struct sctp_sack_info params; 4717 struct sctp_association *asoc = NULL; 4718 struct sctp_sock *sp = sctp_sk(sk); 4719 4720 if (len >= sizeof(struct sctp_sack_info)) { 4721 len = sizeof(struct sctp_sack_info); 4722 4723 if (copy_from_user(¶ms, optval, len)) 4724 return -EFAULT; 4725 } else if (len == sizeof(struct sctp_assoc_value)) { 4726 pr_warn_ratelimited(DEPRECATED 4727 "%s (pid %d) " 4728 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 4729 "Use struct sctp_sack_info instead\n", 4730 current->comm, task_pid_nr(current)); 4731 if (copy_from_user(¶ms, optval, len)) 4732 return -EFAULT; 4733 } else 4734 return -EINVAL; 4735 4736 /* Get association, if sack_assoc_id != 0 and the socket is a one 4737 * to many style socket, and an association was not found, then 4738 * the id was invalid. 
4739 */ 4740 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 4741 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 4742 return -EINVAL; 4743 4744 if (asoc) { 4745 /* Fetch association values. */ 4746 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 4747 params.sack_delay = jiffies_to_msecs( 4748 asoc->sackdelay); 4749 params.sack_freq = asoc->sackfreq; 4750 4751 } else { 4752 params.sack_delay = 0; 4753 params.sack_freq = 1; 4754 } 4755 } else { 4756 /* Fetch socket values. */ 4757 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 4758 params.sack_delay = sp->sackdelay; 4759 params.sack_freq = sp->sackfreq; 4760 } else { 4761 params.sack_delay = 0; 4762 params.sack_freq = 1; 4763 } 4764 } 4765 4766 if (copy_to_user(optval, ¶ms, len)) 4767 return -EFAULT; 4768 4769 if (put_user(len, optlen)) 4770 return -EFAULT; 4771 4772 return 0; 4773 } 4774 4775 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 4776 * 4777 * Applications can specify protocol parameters for the default association 4778 * initialization. The option name argument to setsockopt() and getsockopt() 4779 * is SCTP_INITMSG. 4780 * 4781 * Setting initialization parameters is effective only on an unconnected 4782 * socket (for UDP-style sockets only future associations are effected 4783 * by the change). With TCP-style sockets, this option is inherited by 4784 * sockets derived from a listener socket. 4785 */ 4786 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4787 { 4788 if (len < sizeof(struct sctp_initmsg)) 4789 return -EINVAL; 4790 len = sizeof(struct sctp_initmsg); 4791 if (put_user(len, optlen)) 4792 return -EFAULT; 4793 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4794 return -EFAULT; 4795 return 0; 4796 } 4797 4798 4799 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4800 char __user *optval, int __user *optlen) 4801 { 4802 struct sctp_association *asoc; 4803 int cnt = 0; 4804 struct sctp_getaddrs getaddrs; 4805 struct sctp_transport *from; 4806 void __user *to; 4807 union sctp_addr temp; 4808 struct sctp_sock *sp = sctp_sk(sk); 4809 int addrlen; 4810 size_t space_left; 4811 int bytes_copied; 4812 4813 if (len < sizeof(struct sctp_getaddrs)) 4814 return -EINVAL; 4815 4816 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4817 return -EFAULT; 4818 4819 /* For UDP-style sockets, id specifies the association to query. 
*/ 4820 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4821 if (!asoc) 4822 return -EINVAL; 4823 4824 to = optval + offsetof(struct sctp_getaddrs, addrs); 4825 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4826 4827 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4828 transports) { 4829 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4830 addrlen = sctp_get_pf_specific(sk->sk_family) 4831 ->addr_to_user(sp, &temp); 4832 if (space_left < addrlen) 4833 return -ENOMEM; 4834 if (copy_to_user(to, &temp, addrlen)) 4835 return -EFAULT; 4836 to += addrlen; 4837 cnt++; 4838 space_left -= addrlen; 4839 } 4840 4841 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4842 return -EFAULT; 4843 bytes_copied = ((char __user *)to) - optval; 4844 if (put_user(bytes_copied, optlen)) 4845 return -EFAULT; 4846 4847 return 0; 4848 } 4849 4850 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4851 size_t space_left, int *bytes_copied) 4852 { 4853 struct sctp_sockaddr_entry *addr; 4854 union sctp_addr temp; 4855 int cnt = 0; 4856 int addrlen; 4857 struct net *net = sock_net(sk); 4858 4859 rcu_read_lock(); 4860 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4861 if (!addr->valid) 4862 continue; 4863 4864 if ((PF_INET == sk->sk_family) && 4865 (AF_INET6 == addr->a.sa.sa_family)) 4866 continue; 4867 if ((PF_INET6 == sk->sk_family) && 4868 inet_v6_ipv6only(sk) && 4869 (AF_INET == addr->a.sa.sa_family)) 4870 continue; 4871 memcpy(&temp, &addr->a, sizeof(temp)); 4872 if (!temp.v4.sin_port) 4873 temp.v4.sin_port = htons(port); 4874 4875 addrlen = sctp_get_pf_specific(sk->sk_family) 4876 ->addr_to_user(sctp_sk(sk), &temp); 4877 4878 if (space_left < addrlen) { 4879 cnt = -ENOMEM; 4880 break; 4881 } 4882 memcpy(to, &temp, addrlen); 4883 4884 to += addrlen; 4885 cnt++; 4886 space_left -= addrlen; 4887 *bytes_copied += addrlen; 4888 } 4889 rcu_read_unlock(); 4890 4891 return cnt; 4892 } 4893 4894 4895 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4896 char __user *optval, int __user *optlen) 4897 { 4898 struct sctp_bind_addr *bp; 4899 struct sctp_association *asoc; 4900 int cnt = 0; 4901 struct sctp_getaddrs getaddrs; 4902 struct sctp_sockaddr_entry *addr; 4903 void __user *to; 4904 union sctp_addr temp; 4905 struct sctp_sock *sp = sctp_sk(sk); 4906 int addrlen; 4907 int err = 0; 4908 size_t space_left; 4909 int bytes_copied = 0; 4910 void *addrs; 4911 void *buf; 4912 4913 if (len < sizeof(struct sctp_getaddrs)) 4914 return -EINVAL; 4915 4916 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4917 return -EFAULT; 4918 4919 /* 4920 * For UDP-style sockets, id specifies the association to query. 4921 * If the id field is set to the value '0' then the locally bound 4922 * addresses are returned without regard to any particular 4923 * association. 4924 */ 4925 if (0 == getaddrs.assoc_id) { 4926 bp = &sctp_sk(sk)->ep->base.bind_addr; 4927 } else { 4928 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4929 if (!asoc) 4930 return -EINVAL; 4931 bp = &asoc->base.bind_addr; 4932 } 4933 4934 to = optval + offsetof(struct sctp_getaddrs, addrs); 4935 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4936 4937 addrs = kmalloc(space_left, GFP_KERNEL); 4938 if (!addrs) 4939 return -ENOMEM; 4940 4941 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4942 * addresses from the global local address list. 
4943 */ 4944 if (sctp_list_single_entry(&bp->address_list)) { 4945 addr = list_entry(bp->address_list.next, 4946 struct sctp_sockaddr_entry, list); 4947 if (sctp_is_any(sk, &addr->a)) { 4948 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4949 space_left, &bytes_copied); 4950 if (cnt < 0) { 4951 err = cnt; 4952 goto out; 4953 } 4954 goto copy_getaddrs; 4955 } 4956 } 4957 4958 buf = addrs; 4959 /* Protection on the bound address list is not needed since 4960 * in the socket option context we hold a socket lock and 4961 * thus the bound address list can't change. 4962 */ 4963 list_for_each_entry(addr, &bp->address_list, list) { 4964 memcpy(&temp, &addr->a, sizeof(temp)); 4965 addrlen = sctp_get_pf_specific(sk->sk_family) 4966 ->addr_to_user(sp, &temp); 4967 if (space_left < addrlen) { 4968 err = -ENOMEM; /*fixme: right error?*/ 4969 goto out; 4970 } 4971 memcpy(buf, &temp, addrlen); 4972 buf += addrlen; 4973 bytes_copied += addrlen; 4974 cnt++; 4975 space_left -= addrlen; 4976 } 4977 4978 copy_getaddrs: 4979 if (copy_to_user(to, addrs, bytes_copied)) { 4980 err = -EFAULT; 4981 goto out; 4982 } 4983 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4984 err = -EFAULT; 4985 goto out; 4986 } 4987 if (put_user(bytes_copied, optlen)) 4988 err = -EFAULT; 4989 out: 4990 kfree(addrs); 4991 return err; 4992 } 4993 4994 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 4995 * 4996 * Requests that the local SCTP stack use the enclosed peer address as 4997 * the association primary. The enclosed address must be one of the 4998 * association peer's addresses. 4999 */ 5000 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 5001 char __user *optval, int __user *optlen) 5002 { 5003 struct sctp_prim prim; 5004 struct sctp_association *asoc; 5005 struct sctp_sock *sp = sctp_sk(sk); 5006 5007 if (len < sizeof(struct sctp_prim)) 5008 return -EINVAL; 5009 5010 len = sizeof(struct sctp_prim); 5011 5012 if (copy_from_user(&prim, optval, len)) 5013 return -EFAULT; 5014 5015 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 5016 if (!asoc) 5017 return -EINVAL; 5018 5019 if (!asoc->peer.primary_path) 5020 return -ENOTCONN; 5021 5022 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 5023 asoc->peer.primary_path->af_specific->sockaddr_len); 5024 5025 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, 5026 (union sctp_addr *)&prim.ssp_addr); 5027 5028 if (put_user(len, optlen)) 5029 return -EFAULT; 5030 if (copy_to_user(optval, &prim, len)) 5031 return -EFAULT; 5032 5033 return 0; 5034 } 5035 5036 /* 5037 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 5038 * 5039 * Requests that the local endpoint set the specified Adaptation Layer 5040 * Indication parameter for all future INIT and INIT-ACK exchanges. 
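 *
 * An illustrative user-space sketch only (the indication value is an
 * assumption):
 *
 *   struct sctp_setadaptation adapt = { .ssb_adaptation_ind = 0x01 };
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_ADAPTATION_LAYER,
 *              &adapt, sizeof(adapt));
 *
 * The currently configured indication can be read back with
 * getsockopt() using the same structure.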
5041 */ 5042 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 5043 char __user *optval, int __user *optlen) 5044 { 5045 struct sctp_setadaptation adaptation; 5046 5047 if (len < sizeof(struct sctp_setadaptation)) 5048 return -EINVAL; 5049 5050 len = sizeof(struct sctp_setadaptation); 5051 5052 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 5053 5054 if (put_user(len, optlen)) 5055 return -EFAULT; 5056 if (copy_to_user(optval, &adaptation, len)) 5057 return -EFAULT; 5058 5059 return 0; 5060 } 5061 5062 /* 5063 * 5064 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 5065 * 5066 * Applications that wish to use the sendto() system call may wish to 5067 * specify a default set of parameters that would normally be supplied 5068 * through the inclusion of ancillary data. This socket option allows 5069 * such an application to set the default sctp_sndrcvinfo structure. 5070 5071 5072 * The application that wishes to use this socket option simply passes 5073 * in to this call the sctp_sndrcvinfo structure defined in Section 5074 * 5.2.2) The input parameters accepted by this call include 5075 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 5076 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 5077 * to this call if the caller is using the UDP model. 5078 * 5079 * For getsockopt, it get the default sctp_sndrcvinfo structure. 5080 */ 5081 static int sctp_getsockopt_default_send_param(struct sock *sk, 5082 int len, char __user *optval, 5083 int __user *optlen) 5084 { 5085 struct sctp_sock *sp = sctp_sk(sk); 5086 struct sctp_association *asoc; 5087 struct sctp_sndrcvinfo info; 5088 5089 if (len < sizeof(info)) 5090 return -EINVAL; 5091 5092 len = sizeof(info); 5093 5094 if (copy_from_user(&info, optval, len)) 5095 return -EFAULT; 5096 5097 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 5098 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 5099 return -EINVAL; 5100 if (asoc) { 5101 info.sinfo_stream = asoc->default_stream; 5102 info.sinfo_flags = asoc->default_flags; 5103 info.sinfo_ppid = asoc->default_ppid; 5104 info.sinfo_context = asoc->default_context; 5105 info.sinfo_timetolive = asoc->default_timetolive; 5106 } else { 5107 info.sinfo_stream = sp->default_stream; 5108 info.sinfo_flags = sp->default_flags; 5109 info.sinfo_ppid = sp->default_ppid; 5110 info.sinfo_context = sp->default_context; 5111 info.sinfo_timetolive = sp->default_timetolive; 5112 } 5113 5114 if (put_user(len, optlen)) 5115 return -EFAULT; 5116 if (copy_to_user(optval, &info, len)) 5117 return -EFAULT; 5118 5119 return 0; 5120 } 5121 5122 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 5123 * (SCTP_DEFAULT_SNDINFO) 5124 */ 5125 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, 5126 char __user *optval, 5127 int __user *optlen) 5128 { 5129 struct sctp_sock *sp = sctp_sk(sk); 5130 struct sctp_association *asoc; 5131 struct sctp_sndinfo info; 5132 5133 if (len < sizeof(info)) 5134 return -EINVAL; 5135 5136 len = sizeof(info); 5137 5138 if (copy_from_user(&info, optval, len)) 5139 return -EFAULT; 5140 5141 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 5142 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 5143 return -EINVAL; 5144 if (asoc) { 5145 info.snd_sid = asoc->default_stream; 5146 info.snd_flags = asoc->default_flags; 5147 info.snd_ppid = asoc->default_ppid; 5148 info.snd_context = asoc->default_context; 5149 } else { 5150 info.snd_sid = sp->default_stream; 5151 info.snd_flags = sp->default_flags; 5152 info.snd_ppid = sp->default_ppid; 5153 info.snd_context = sp->default_context; 5154 } 5155 5156 if (put_user(len, optlen)) 5157 return -EFAULT; 5158 if (copy_to_user(optval, &info, len)) 5159 return -EFAULT; 5160 5161 return 0; 5162 } 5163 5164 /* 5165 * 5166 * 7.1.5 SCTP_NODELAY 5167 * 5168 * Turn on/off any Nagle-like algorithm. This means that packets are 5169 * generally sent as soon as possible and no unnecessary delays are 5170 * introduced, at the cost of more packets in the network. Expects an 5171 * integer boolean flag. 5172 */ 5173 5174 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5175 char __user *optval, int __user *optlen) 5176 { 5177 int val; 5178 5179 if (len < sizeof(int)) 5180 return -EINVAL; 5181 5182 len = sizeof(int); 5183 val = (sctp_sk(sk)->nodelay == 1); 5184 if (put_user(len, optlen)) 5185 return -EFAULT; 5186 if (copy_to_user(optval, &val, len)) 5187 return -EFAULT; 5188 return 0; 5189 } 5190 5191 /* 5192 * 5193 * 7.1.1 SCTP_RTOINFO 5194 * 5195 * The protocol parameters used to initialize and bound retransmission 5196 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5197 * and modify these parameters. 5198 * All parameters are time values, in milliseconds. A value of 0, when 5199 * modifying the parameters, indicates that the current value should not 5200 * be changed. 5201 * 5202 */ 5203 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5204 char __user *optval, 5205 int __user *optlen) { 5206 struct sctp_rtoinfo rtoinfo; 5207 struct sctp_association *asoc; 5208 5209 if (len < sizeof (struct sctp_rtoinfo)) 5210 return -EINVAL; 5211 5212 len = sizeof(struct sctp_rtoinfo); 5213 5214 if (copy_from_user(&rtoinfo, optval, len)) 5215 return -EFAULT; 5216 5217 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5218 5219 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5220 return -EINVAL; 5221 5222 /* Values corresponding to the specific association. */ 5223 if (asoc) { 5224 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5225 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5226 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5227 } else { 5228 /* Values corresponding to the endpoint. 
*/ 5229 struct sctp_sock *sp = sctp_sk(sk); 5230 5231 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5232 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5233 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5234 } 5235 5236 if (put_user(len, optlen)) 5237 return -EFAULT; 5238 5239 if (copy_to_user(optval, &rtoinfo, len)) 5240 return -EFAULT; 5241 5242 return 0; 5243 } 5244 5245 /* 5246 * 5247 * 7.1.2 SCTP_ASSOCINFO 5248 * 5249 * This option is used to tune the maximum retransmission attempts 5250 * of the association. 5251 * Returns an error if the new association retransmission value is 5252 * greater than the sum of the retransmission value of the peer. 5253 * See [SCTP] for more information. 5254 * 5255 */ 5256 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5257 char __user *optval, 5258 int __user *optlen) 5259 { 5260 5261 struct sctp_assocparams assocparams; 5262 struct sctp_association *asoc; 5263 struct list_head *pos; 5264 int cnt = 0; 5265 5266 if (len < sizeof (struct sctp_assocparams)) 5267 return -EINVAL; 5268 5269 len = sizeof(struct sctp_assocparams); 5270 5271 if (copy_from_user(&assocparams, optval, len)) 5272 return -EFAULT; 5273 5274 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5275 5276 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5277 return -EINVAL; 5278 5279 /* Values correspoinding to the specific association */ 5280 if (asoc) { 5281 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5282 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5283 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5284 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5285 5286 list_for_each(pos, &asoc->peer.transport_addr_list) { 5287 cnt++; 5288 } 5289 5290 assocparams.sasoc_number_peer_destinations = cnt; 5291 } else { 5292 /* Values corresponding to the endpoint */ 5293 struct sctp_sock *sp = sctp_sk(sk); 5294 5295 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5296 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5297 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5298 assocparams.sasoc_cookie_life = 5299 sp->assocparams.sasoc_cookie_life; 5300 assocparams.sasoc_number_peer_destinations = 5301 sp->assocparams. 5302 sasoc_number_peer_destinations; 5303 } 5304 5305 if (put_user(len, optlen)) 5306 return -EFAULT; 5307 5308 if (copy_to_user(optval, &assocparams, len)) 5309 return -EFAULT; 5310 5311 return 0; 5312 } 5313 5314 /* 5315 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5316 * 5317 * This socket option is a boolean flag which turns on or off mapped V4 5318 * addresses. If this option is turned on and the socket is type 5319 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5320 * If this option is turned off, then no mapping will be done of V4 5321 * addresses and a user will receive both PF_INET6 and PF_INET type 5322 * addresses on the socket. 5323 */ 5324 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5325 char __user *optval, int __user *optlen) 5326 { 5327 int val; 5328 struct sctp_sock *sp = sctp_sk(sk); 5329 5330 if (len < sizeof(int)) 5331 return -EINVAL; 5332 5333 len = sizeof(int); 5334 val = sp->v4mapped; 5335 if (put_user(len, optlen)) 5336 return -EFAULT; 5337 if (copy_to_user(optval, &val, len)) 5338 return -EFAULT; 5339 5340 return 0; 5341 } 5342 5343 /* 5344 * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) 5345 * (chapter and verse is quoted at sctp_setsockopt_context()) 5346 */ 5347 static int sctp_getsockopt_context(struct sock *sk, int len, 5348 char __user *optval, int __user *optlen) 5349 { 5350 struct sctp_assoc_value params; 5351 struct sctp_sock *sp; 5352 struct sctp_association *asoc; 5353 5354 if (len < sizeof(struct sctp_assoc_value)) 5355 return -EINVAL; 5356 5357 len = sizeof(struct sctp_assoc_value); 5358 5359 if (copy_from_user(¶ms, optval, len)) 5360 return -EFAULT; 5361 5362 sp = sctp_sk(sk); 5363 5364 if (params.assoc_id != 0) { 5365 asoc = sctp_id2assoc(sk, params.assoc_id); 5366 if (!asoc) 5367 return -EINVAL; 5368 params.assoc_value = asoc->default_rcv_context; 5369 } else { 5370 params.assoc_value = sp->default_rcv_context; 5371 } 5372 5373 if (put_user(len, optlen)) 5374 return -EFAULT; 5375 if (copy_to_user(optval, ¶ms, len)) 5376 return -EFAULT; 5377 5378 return 0; 5379 } 5380 5381 /* 5382 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5383 * This option will get or set the maximum size to put in any outgoing 5384 * SCTP DATA chunk. If a message is larger than this size it will be 5385 * fragmented by SCTP into the specified size. Note that the underlying 5386 * SCTP implementation may fragment into smaller sized chunks when the 5387 * PMTU of the underlying association is smaller than the value set by 5388 * the user. The default value for this option is '0' which indicates 5389 * the user is NOT limiting fragmentation and only the PMTU will effect 5390 * SCTP's choice of DATA chunk size. Note also that values set larger 5391 * than the maximum size of an IP datagram will effectively let SCTP 5392 * control fragmentation (i.e. the same as setting this option to 0). 5393 * 5394 * The following structure is used to access and modify this parameter: 5395 * 5396 * struct sctp_assoc_value { 5397 * sctp_assoc_t assoc_id; 5398 * uint32_t assoc_value; 5399 * }; 5400 * 5401 * assoc_id: This parameter is ignored for one-to-one style sockets. 5402 * For one-to-many style sockets this parameter indicates which 5403 * association the user is performing an action upon. Note that if 5404 * this field's value is zero then the endpoints default value is 5405 * changed (effecting future associations only). 5406 * assoc_value: This parameter specifies the maximum size in bytes. 
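 *
 * As an illustrative sketch (user-space side; the 1400 byte limit is
 * an assumed value), capping DATA chunks for the endpoint default:
 *
 *   struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1400 };
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *
 * The current setting can be read back with getsockopt() using the
 * same structure.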
5407 */ 5408 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5409 char __user *optval, int __user *optlen) 5410 { 5411 struct sctp_assoc_value params; 5412 struct sctp_association *asoc; 5413 5414 if (len == sizeof(int)) { 5415 pr_warn_ratelimited(DEPRECATED 5416 "%s (pid %d) " 5417 "Use of int in maxseg socket option.\n" 5418 "Use struct sctp_assoc_value instead\n", 5419 current->comm, task_pid_nr(current)); 5420 params.assoc_id = 0; 5421 } else if (len >= sizeof(struct sctp_assoc_value)) { 5422 len = sizeof(struct sctp_assoc_value); 5423 if (copy_from_user(&params, optval, sizeof(params))) 5424 return -EFAULT; 5425 } else 5426 return -EINVAL; 5427 5428 asoc = sctp_id2assoc(sk, params.assoc_id); 5429 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5430 return -EINVAL; 5431 5432 if (asoc) 5433 params.assoc_value = asoc->frag_point; 5434 else 5435 params.assoc_value = sctp_sk(sk)->user_frag; 5436 5437 if (put_user(len, optlen)) 5438 return -EFAULT; 5439 if (len == sizeof(int)) { 5440 if (copy_to_user(optval, &params.assoc_value, len)) 5441 return -EFAULT; 5442 } else { 5443 if (copy_to_user(optval, &params, len)) 5444 return -EFAULT; 5445 } 5446 5447 return 0; 5448 } 5449 5450 /* 5451 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5452 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5453 */ 5454 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5455 char __user *optval, int __user *optlen) 5456 { 5457 int val; 5458 5459 if (len < sizeof(int)) 5460 return -EINVAL; 5461 5462 len = sizeof(int); 5463 5464 val = sctp_sk(sk)->frag_interleave; 5465 if (put_user(len, optlen)) 5466 return -EFAULT; 5467 if (copy_to_user(optval, &val, len)) 5468 return -EFAULT; 5469 5470 return 0; 5471 } 5472 5473 /* 5474 * 7.1.25. Set or Get the sctp partial delivery point 5475 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5476 */ 5477 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5478 char __user *optval, 5479 int __user *optlen) 5480 { 5481 u32 val; 5482 5483 if (len < sizeof(u32)) 5484 return -EINVAL; 5485 5486 len = sizeof(u32); 5487 5488 val = sctp_sk(sk)->pd_point; 5489 if (put_user(len, optlen)) 5490 return -EFAULT; 5491 if (copy_to_user(optval, &val, len)) 5492 return -EFAULT; 5493 5494 return 0; 5495 } 5496 5497 /* 5498 * 7.1.28.
Set or Get the maximum burst (SCTP_MAX_BURST) 5499 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5500 */ 5501 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5502 char __user *optval, 5503 int __user *optlen) 5504 { 5505 struct sctp_assoc_value params; 5506 struct sctp_sock *sp; 5507 struct sctp_association *asoc; 5508 5509 if (len == sizeof(int)) { 5510 pr_warn_ratelimited(DEPRECATED 5511 "%s (pid %d) " 5512 "Use of int in max_burst socket option.\n" 5513 "Use struct sctp_assoc_value instead\n", 5514 current->comm, task_pid_nr(current)); 5515 params.assoc_id = 0; 5516 } else if (len >= sizeof(struct sctp_assoc_value)) { 5517 len = sizeof(struct sctp_assoc_value); 5518 if (copy_from_user(&params, optval, len)) 5519 return -EFAULT; 5520 } else 5521 return -EINVAL; 5522 5523 sp = sctp_sk(sk); 5524 5525 if (params.assoc_id != 0) { 5526 asoc = sctp_id2assoc(sk, params.assoc_id); 5527 if (!asoc) 5528 return -EINVAL; 5529 params.assoc_value = asoc->max_burst; 5530 } else 5531 params.assoc_value = sp->max_burst; 5532 5533 if (len == sizeof(int)) { 5534 if (copy_to_user(optval, &params.assoc_value, len)) 5535 return -EFAULT; 5536 } else { 5537 if (copy_to_user(optval, &params, len)) 5538 return -EFAULT; 5539 } 5540 5541 return 0; 5542 5543 } 5544 5545 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5546 char __user *optval, int __user *optlen) 5547 { 5548 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5549 struct sctp_hmacalgo __user *p = (void __user *)optval; 5550 struct sctp_hmac_algo_param *hmacs; 5551 __u16 data_len = 0; 5552 u32 num_idents; 5553 5554 if (!ep->auth_enable) 5555 return -EACCES; 5556 5557 hmacs = ep->auth_hmacs_list; 5558 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5559 5560 if (len < sizeof(struct sctp_hmacalgo) + data_len) 5561 return -EINVAL; 5562 5563 len = sizeof(struct sctp_hmacalgo) + data_len; 5564 num_idents = data_len / sizeof(u16); 5565 5566 if (put_user(len, optlen)) 5567 return -EFAULT; 5568 if (put_user(num_idents, &p->shmac_num_idents)) 5569 return -EFAULT; 5570 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5571 return -EFAULT; 5572 return 0; 5573 } 5574 5575 static int sctp_getsockopt_active_key(struct sock *sk, int len, 5576 char __user *optval, int __user *optlen) 5577 { 5578 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5579 struct sctp_authkeyid val; 5580 struct sctp_association *asoc; 5581 5582 if (!ep->auth_enable) 5583 return -EACCES; 5584 5585 if (len < sizeof(struct sctp_authkeyid)) 5586 return -EINVAL; 5587 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5588 return -EFAULT; 5589 5590 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5591 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5592 return -EINVAL; 5593 5594 if (asoc) 5595 val.scact_keynumber = asoc->active_key_id; 5596 else 5597 val.scact_keynumber = ep->active_key_id; 5598 5599 len = sizeof(struct sctp_authkeyid); 5600 if (put_user(len, optlen)) 5601 return -EFAULT; 5602 if (copy_to_user(optval, &val, len)) 5603 return -EFAULT; 5604 5605 return 0; 5606 } 5607 5608 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5609 char __user *optval, int __user *optlen) 5610 { 5611 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5612 struct sctp_authchunks __user *p = (void __user *)optval; 5613 struct sctp_authchunks val; 5614 struct sctp_association *asoc; 5615 struct sctp_chunks_param *ch; 5616 u32 num_chunks = 0; 5617 char __user *to; 5618 5619 if (!ep->auth_enable) 5620 return -EACCES; 5621
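	/* The caller must pass in at least a struct sctp_authchunks header;
	 * the peer's chunk list, when present, is copied back immediately
	 * after that header and the returned optlen covers header plus list.
	 */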
5622 if (len < sizeof(struct sctp_authchunks)) 5623 return -EINVAL; 5624 5625 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5626 return -EFAULT; 5627 5628 to = p->gauth_chunks; 5629 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5630 if (!asoc) 5631 return -EINVAL; 5632 5633 ch = asoc->peer.peer_chunks; 5634 if (!ch) 5635 goto num; 5636 5637 /* See if the user provided enough room for all the data */ 5638 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5639 if (len < num_chunks) 5640 return -EINVAL; 5641 5642 if (copy_to_user(to, ch->chunks, num_chunks)) 5643 return -EFAULT; 5644 num: 5645 len = sizeof(struct sctp_authchunks) + num_chunks; 5646 if (put_user(len, optlen)) 5647 return -EFAULT; 5648 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5649 return -EFAULT; 5650 return 0; 5651 } 5652 5653 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5654 char __user *optval, int __user *optlen) 5655 { 5656 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5657 struct sctp_authchunks __user *p = (void __user *)optval; 5658 struct sctp_authchunks val; 5659 struct sctp_association *asoc; 5660 struct sctp_chunks_param *ch; 5661 u32 num_chunks = 0; 5662 char __user *to; 5663 5664 if (!ep->auth_enable) 5665 return -EACCES; 5666 5667 if (len < sizeof(struct sctp_authchunks)) 5668 return -EINVAL; 5669 5670 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5671 return -EFAULT; 5672 5673 to = p->gauth_chunks; 5674 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5675 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5676 return -EINVAL; 5677 5678 if (asoc) 5679 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 5680 else 5681 ch = ep->auth_chunk_list; 5682 5683 if (!ch) 5684 goto num; 5685 5686 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5687 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5688 return -EINVAL; 5689 5690 if (copy_to_user(to, ch->chunks, num_chunks)) 5691 return -EFAULT; 5692 num: 5693 len = sizeof(struct sctp_authchunks) + num_chunks; 5694 if (put_user(len, optlen)) 5695 return -EFAULT; 5696 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5697 return -EFAULT; 5698 5699 return 0; 5700 } 5701 5702 /* 5703 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5704 * This option gets the current number of associations that are attached 5705 * to a one-to-many style socket. The option value is an uint32_t. 
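 *
 * (Editorial illustration, not part of the text quoted above: assuming "fd"
 * is a one-to-many SOCK_SEQPACKET SCTP socket, the count could be read as
 * below; on a TCP-style socket the call fails with EOPNOTSUPP.)
 *
 *	uint32_t assocs = 0;
 *	socklen_t optlen = sizeof(assocs);
 *
 *	getsockopt(fd, SOL_SCTP, SCTP_GET_ASSOC_NUMBER, &assocs, &optlen);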
5706 */ 5707 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5708 char __user *optval, int __user *optlen) 5709 { 5710 struct sctp_sock *sp = sctp_sk(sk); 5711 struct sctp_association *asoc; 5712 u32 val = 0; 5713 5714 if (sctp_style(sk, TCP)) 5715 return -EOPNOTSUPP; 5716 5717 if (len < sizeof(u32)) 5718 return -EINVAL; 5719 5720 len = sizeof(u32); 5721 5722 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5723 val++; 5724 } 5725 5726 if (put_user(len, optlen)) 5727 return -EFAULT; 5728 if (copy_to_user(optval, &val, len)) 5729 return -EFAULT; 5730 5731 return 0; 5732 } 5733 5734 /* 5735 * 8.1.23 SCTP_AUTO_ASCONF 5736 * See the corresponding setsockopt entry as description 5737 */ 5738 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5739 char __user *optval, int __user *optlen) 5740 { 5741 int val = 0; 5742 5743 if (len < sizeof(int)) 5744 return -EINVAL; 5745 5746 len = sizeof(int); 5747 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5748 val = 1; 5749 if (put_user(len, optlen)) 5750 return -EFAULT; 5751 if (copy_to_user(optval, &val, len)) 5752 return -EFAULT; 5753 return 0; 5754 } 5755 5756 /* 5757 * 8.2.6. Get the Current Identifiers of Associations 5758 * (SCTP_GET_ASSOC_ID_LIST) 5759 * 5760 * This option gets the current list of SCTP association identifiers of 5761 * the SCTP associations handled by a one-to-many style socket. 5762 */ 5763 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5764 char __user *optval, int __user *optlen) 5765 { 5766 struct sctp_sock *sp = sctp_sk(sk); 5767 struct sctp_association *asoc; 5768 struct sctp_assoc_ids *ids; 5769 u32 num = 0; 5770 5771 if (sctp_style(sk, TCP)) 5772 return -EOPNOTSUPP; 5773 5774 if (len < sizeof(struct sctp_assoc_ids)) 5775 return -EINVAL; 5776 5777 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5778 num++; 5779 } 5780 5781 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5782 return -EINVAL; 5783 5784 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5785 5786 ids = kmalloc(len, GFP_KERNEL); 5787 if (unlikely(!ids)) 5788 return -ENOMEM; 5789 5790 ids->gaids_number_of_ids = num; 5791 num = 0; 5792 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5793 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5794 } 5795 5796 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5797 kfree(ids); 5798 return -EFAULT; 5799 } 5800 5801 kfree(ids); 5802 return 0; 5803 } 5804 5805 /* 5806 * SCTP_PEER_ADDR_THLDS 5807 * 5808 * This option allows us to fetch the partially failed threshold for one or all 5809 * transports in an association. 
See Section 6.1 of: 5810 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5811 */ 5812 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5813 char __user *optval, 5814 int len, 5815 int __user *optlen) 5816 { 5817 struct sctp_paddrthlds val; 5818 struct sctp_transport *trans; 5819 struct sctp_association *asoc; 5820 5821 if (len < sizeof(struct sctp_paddrthlds)) 5822 return -EINVAL; 5823 len = sizeof(struct sctp_paddrthlds); 5824 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5825 return -EFAULT; 5826 5827 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5828 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5829 if (!asoc) 5830 return -ENOENT; 5831 5832 val.spt_pathpfthld = asoc->pf_retrans; 5833 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5834 } else { 5835 trans = sctp_addr_id2transport(sk, &val.spt_address, 5836 val.spt_assoc_id); 5837 if (!trans) 5838 return -ENOENT; 5839 5840 val.spt_pathmaxrxt = trans->pathmaxrxt; 5841 val.spt_pathpfthld = trans->pf_retrans; 5842 } 5843 5844 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5845 return -EFAULT; 5846 5847 return 0; 5848 } 5849 5850 /* 5851 * SCTP_GET_ASSOC_STATS 5852 * 5853 * This option retrieves local per endpoint statistics. It is modeled 5854 * after OpenSolaris' implementation 5855 */ 5856 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5857 char __user *optval, 5858 int __user *optlen) 5859 { 5860 struct sctp_assoc_stats sas; 5861 struct sctp_association *asoc = NULL; 5862 5863 /* User must provide at least the assoc id */ 5864 if (len < sizeof(sctp_assoc_t)) 5865 return -EINVAL; 5866 5867 /* Allow the struct to grow and fill in as much as possible */ 5868 len = min_t(size_t, len, sizeof(sas)); 5869 5870 if (copy_from_user(&sas, optval, len)) 5871 return -EFAULT; 5872 5873 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5874 if (!asoc) 5875 return -EINVAL; 5876 5877 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5878 sas.sas_gapcnt = asoc->stats.gapcnt; 5879 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5880 sas.sas_osacks = asoc->stats.osacks; 5881 sas.sas_isacks = asoc->stats.isacks; 5882 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5883 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5884 sas.sas_oodchunks = asoc->stats.oodchunks; 5885 sas.sas_iodchunks = asoc->stats.iodchunks; 5886 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5887 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5888 sas.sas_idupchunks = asoc->stats.idupchunks; 5889 sas.sas_opackets = asoc->stats.opackets; 5890 sas.sas_ipackets = asoc->stats.ipackets; 5891 5892 /* New high max rto observed, will return 0 if not a single 5893 * RTO update took place. 
obs_rto_ipaddr will be bogus 5894 * in such a case 5895 */ 5896 sas.sas_maxrto = asoc->stats.max_obs_rto; 5897 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5898 sizeof(struct sockaddr_storage)); 5899 5900 /* Mark beginning of a new observation period */ 5901 asoc->stats.max_obs_rto = asoc->rto_min; 5902 5903 if (put_user(len, optlen)) 5904 return -EFAULT; 5905 5906 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5907 5908 if (copy_to_user(optval, &sas, len)) 5909 return -EFAULT; 5910 5911 return 0; 5912 } 5913 5914 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, 5915 char __user *optval, 5916 int __user *optlen) 5917 { 5918 int val = 0; 5919 5920 if (len < sizeof(int)) 5921 return -EINVAL; 5922 5923 len = sizeof(int); 5924 if (sctp_sk(sk)->recvrcvinfo) 5925 val = 1; 5926 if (put_user(len, optlen)) 5927 return -EFAULT; 5928 if (copy_to_user(optval, &val, len)) 5929 return -EFAULT; 5930 5931 return 0; 5932 } 5933 5934 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, 5935 char __user *optval, 5936 int __user *optlen) 5937 { 5938 int val = 0; 5939 5940 if (len < sizeof(int)) 5941 return -EINVAL; 5942 5943 len = sizeof(int); 5944 if (sctp_sk(sk)->recvnxtinfo) 5945 val = 1; 5946 if (put_user(len, optlen)) 5947 return -EFAULT; 5948 if (copy_to_user(optval, &val, len)) 5949 return -EFAULT; 5950 5951 return 0; 5952 } 5953 5954 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5955 char __user *optval, int __user *optlen) 5956 { 5957 int retval = 0; 5958 int len; 5959 5960 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5961 5962 /* I can hardly begin to describe how wrong this is. This is 5963 * so broken as to be worse than useless. The API draft 5964 * REALLY is NOT helpful here... I am not convinced that the 5965 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5966 * are at all well-founded. 
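 *
 * For any level other than SOL_SCTP we simply defer to the address
 * family's own getsockopt handler below and return its result unchanged.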
5967 */ 5968 if (level != SOL_SCTP) { 5969 struct sctp_af *af = sctp_sk(sk)->pf->af; 5970 5971 retval = af->getsockopt(sk, level, optname, optval, optlen); 5972 return retval; 5973 } 5974 5975 if (get_user(len, optlen)) 5976 return -EFAULT; 5977 5978 lock_sock(sk); 5979 5980 switch (optname) { 5981 case SCTP_STATUS: 5982 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5983 break; 5984 case SCTP_DISABLE_FRAGMENTS: 5985 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5986 optlen); 5987 break; 5988 case SCTP_EVENTS: 5989 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5990 break; 5991 case SCTP_AUTOCLOSE: 5992 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5993 break; 5994 case SCTP_SOCKOPT_PEELOFF: 5995 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 5996 break; 5997 case SCTP_PEER_ADDR_PARAMS: 5998 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5999 optlen); 6000 break; 6001 case SCTP_DELAYED_SACK: 6002 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 6003 optlen); 6004 break; 6005 case SCTP_INITMSG: 6006 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 6007 break; 6008 case SCTP_GET_PEER_ADDRS: 6009 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 6010 optlen); 6011 break; 6012 case SCTP_GET_LOCAL_ADDRS: 6013 retval = sctp_getsockopt_local_addrs(sk, len, optval, 6014 optlen); 6015 break; 6016 case SCTP_SOCKOPT_CONNECTX3: 6017 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 6018 break; 6019 case SCTP_DEFAULT_SEND_PARAM: 6020 retval = sctp_getsockopt_default_send_param(sk, len, 6021 optval, optlen); 6022 break; 6023 case SCTP_DEFAULT_SNDINFO: 6024 retval = sctp_getsockopt_default_sndinfo(sk, len, 6025 optval, optlen); 6026 break; 6027 case SCTP_PRIMARY_ADDR: 6028 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 6029 break; 6030 case SCTP_NODELAY: 6031 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 6032 break; 6033 case SCTP_RTOINFO: 6034 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 6035 break; 6036 case SCTP_ASSOCINFO: 6037 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 6038 break; 6039 case SCTP_I_WANT_MAPPED_V4_ADDR: 6040 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 6041 break; 6042 case SCTP_MAXSEG: 6043 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 6044 break; 6045 case SCTP_GET_PEER_ADDR_INFO: 6046 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 6047 optlen); 6048 break; 6049 case SCTP_ADAPTATION_LAYER: 6050 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, 6051 optlen); 6052 break; 6053 case SCTP_CONTEXT: 6054 retval = sctp_getsockopt_context(sk, len, optval, optlen); 6055 break; 6056 case SCTP_FRAGMENT_INTERLEAVE: 6057 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 6058 optlen); 6059 break; 6060 case SCTP_PARTIAL_DELIVERY_POINT: 6061 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 6062 optlen); 6063 break; 6064 case SCTP_MAX_BURST: 6065 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 6066 break; 6067 case SCTP_AUTH_KEY: 6068 case SCTP_AUTH_CHUNK: 6069 case SCTP_AUTH_DELETE_KEY: 6070 retval = -EOPNOTSUPP; 6071 break; 6072 case SCTP_HMAC_IDENT: 6073 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 6074 break; 6075 case SCTP_AUTH_ACTIVE_KEY: 6076 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 6077 break; 6078 case SCTP_PEER_AUTH_CHUNKS: 6079 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 6080 optlen); 
6081 break; 6082 case SCTP_LOCAL_AUTH_CHUNKS: 6083 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 6084 optlen); 6085 break; 6086 case SCTP_GET_ASSOC_NUMBER: 6087 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 6088 break; 6089 case SCTP_GET_ASSOC_ID_LIST: 6090 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 6091 break; 6092 case SCTP_AUTO_ASCONF: 6093 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 6094 break; 6095 case SCTP_PEER_ADDR_THLDS: 6096 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 6097 break; 6098 case SCTP_GET_ASSOC_STATS: 6099 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 6100 break; 6101 case SCTP_RECVRCVINFO: 6102 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); 6103 break; 6104 case SCTP_RECVNXTINFO: 6105 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); 6106 break; 6107 default: 6108 retval = -ENOPROTOOPT; 6109 break; 6110 } 6111 6112 release_sock(sk); 6113 return retval; 6114 } 6115 6116 static void sctp_hash(struct sock *sk) 6117 { 6118 /* STUB */ 6119 } 6120 6121 static void sctp_unhash(struct sock *sk) 6122 { 6123 /* STUB */ 6124 } 6125 6126 /* Check if port is acceptable. Possibly find first available port. 6127 * 6128 * The port hash table (contained in the 'global' SCTP protocol storage 6129 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 6130 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 6131 * list (the list number is the port number hashed out, so as you 6132 * would expect from a hash function, all the ports in a given list have 6133 * such a number that hashes out to the same list number; you were 6134 * expecting that, right?); so each list has a set of ports, with a 6135 * link to the socket (struct sock) that uses it, the port number and 6136 * a fastreuse flag (FIXME: NPI ipg). 6137 */ 6138 static struct sctp_bind_bucket *sctp_bucket_create( 6139 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 6140 6141 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 6142 { 6143 struct sctp_bind_hashbucket *head; /* hash list */ 6144 struct sctp_bind_bucket *pp; 6145 unsigned short snum; 6146 int ret; 6147 6148 snum = ntohs(addr->v4.sin_port); 6149 6150 pr_debug("%s: begins, snum:%d\n", __func__, snum); 6151 6152 local_bh_disable(); 6153 6154 if (snum == 0) { 6155 /* Search for an available port. */ 6156 int low, high, remaining, index; 6157 unsigned int rover; 6158 struct net *net = sock_net(sk); 6159 6160 inet_get_local_port_range(net, &low, &high); 6161 remaining = (high - low) + 1; 6162 rover = prandom_u32() % remaining + low; 6163 6164 do { 6165 rover++; 6166 if ((rover < low) || (rover > high)) 6167 rover = low; 6168 if (inet_is_local_reserved_port(net, rover)) 6169 continue; 6170 index = sctp_phashfn(sock_net(sk), rover); 6171 head = &sctp_port_hashtable[index]; 6172 spin_lock(&head->lock); 6173 sctp_for_each_hentry(pp, &head->chain) 6174 if ((pp->port == rover) && 6175 net_eq(sock_net(sk), pp->net)) 6176 goto next; 6177 break; 6178 next: 6179 spin_unlock(&head->lock); 6180 } while (--remaining > 0); 6181 6182 /* Exhausted local port range during search? */ 6183 ret = 1; 6184 if (remaining <= 0) 6185 goto fail; 6186 6187 /* OK, here is the one we will use. HEAD (the port 6188 * hash table list entry) is non-NULL and we hold it's 6189 * mutex. 
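 * (Strictly speaking what we hold is head->lock, the per-bucket spinlock
 * taken in the search loop above, rather than a mutex.)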
6190 */ 6191 snum = rover; 6192 } else { 6193 /* We are given a specific port number; we verify 6194 * that it is not being used. If it is used, we will 6195 * exhaust the search in the hash list corresponding 6196 * to the port number (snum) - we detect that with the 6197 * port iterator, pp being NULL. 6198 */ 6199 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 6200 spin_lock(&head->lock); 6201 sctp_for_each_hentry(pp, &head->chain) { 6202 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 6203 goto pp_found; 6204 } 6205 } 6206 pp = NULL; 6207 goto pp_not_found; 6208 pp_found: 6209 if (!hlist_empty(&pp->owner)) { 6210 /* We had a port hash table hit - there is an 6211 * available port (pp != NULL) and it is being 6212 * used by another socket (pp->owner not empty); that other 6213 * socket is going to be sk2. 6214 */ 6215 int reuse = sk->sk_reuse; 6216 struct sock *sk2; 6217 6218 pr_debug("%s: found a possible match\n", __func__); 6219 6220 if (pp->fastreuse && sk->sk_reuse && 6221 sk->sk_state != SCTP_SS_LISTENING) 6222 goto success; 6223 6224 /* Run through the list of sockets bound to the port 6225 * (pp->port) [via the pointers bind_next and 6226 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 6227 * we get the endpoint they describe and run through 6228 * the endpoint's list of IP (v4 or v6) addresses, 6229 * comparing each of the addresses with the address of 6230 * the socket sk. If we find a match, then that means 6231 * that this port/socket (sk) combination is already 6232 * in an endpoint. 6233 */ 6234 sk_for_each_bound(sk2, &pp->owner) { 6235 struct sctp_endpoint *ep2; 6236 ep2 = sctp_sk(sk2)->ep; 6237 6238 if (sk == sk2 || 6239 (reuse && sk2->sk_reuse && 6240 sk2->sk_state != SCTP_SS_LISTENING)) 6241 continue; 6242 6243 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 6244 sctp_sk(sk2), sctp_sk(sk))) { 6245 ret = (long)sk2; 6246 goto fail_unlock; 6247 } 6248 } 6249 6250 pr_debug("%s: found a match\n", __func__); 6251 } 6252 pp_not_found: 6253 /* If there was a hash table miss, create a new port. */ 6254 ret = 1; 6255 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 6256 goto fail_unlock; 6257 6258 /* In either case (hit or miss), make sure fastreuse is 1 only 6259 * if sk->sk_reuse is too (that is, if the caller requested 6260 * SO_REUSEADDR on this socket -sk-). 6261 */ 6262 if (hlist_empty(&pp->owner)) { 6263 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6264 pp->fastreuse = 1; 6265 else 6266 pp->fastreuse = 0; 6267 } else if (pp->fastreuse && 6268 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6269 pp->fastreuse = 0; 6270 6271 /* We are set, so fill up all the data in the hash table 6272 * entry, tie the socket list information with the rest of the 6273 * sockets FIXME: Blurry, NPI (ipg). 6274 */ 6275 success: 6276 if (!sctp_sk(sk)->bind_hash) { 6277 inet_sk(sk)->inet_num = snum; 6278 sk_add_bind_node(sk, &pp->owner); 6279 sctp_sk(sk)->bind_hash = pp; 6280 } 6281 ret = 0; 6282 6283 fail_unlock: 6284 spin_unlock(&head->lock); 6285 6286 fail: 6287 local_bh_enable(); 6288 return ret; 6289 } 6290 6291 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6292 * port is requested. 6293 */ 6294 static int sctp_get_port(struct sock *sk, unsigned short snum) 6295 { 6296 union sctp_addr addr; 6297 struct sctp_af *af = sctp_sk(sk)->pf->af; 6298 6299 /* Set up a dummy address struct from the sk.
*/ 6300 af->from_sk(&addr, sk); 6301 addr.v4.sin_port = htons(snum); 6302 6303 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6304 return !!sctp_get_port_local(sk, &addr); 6305 } 6306 6307 /* 6308 * Move a socket to LISTENING state. 6309 */ 6310 static int sctp_listen_start(struct sock *sk, int backlog) 6311 { 6312 struct sctp_sock *sp = sctp_sk(sk); 6313 struct sctp_endpoint *ep = sp->ep; 6314 struct crypto_hash *tfm = NULL; 6315 char alg[32]; 6316 6317 /* Allocate HMAC for generating cookie. */ 6318 if (!sp->hmac && sp->sctp_hmac_alg) { 6319 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6320 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6321 if (IS_ERR(tfm)) { 6322 net_info_ratelimited("failed to load transform for %s: %ld\n", 6323 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6324 return -ENOSYS; 6325 } 6326 sctp_sk(sk)->hmac = tfm; 6327 } 6328 6329 /* 6330 * If a bind() or sctp_bindx() is not called prior to a listen() 6331 * call that allows new associations to be accepted, the system 6332 * picks an ephemeral port and will choose an address set equivalent 6333 * to binding with a wildcard address. 6334 * 6335 * This is not currently spelled out in the SCTP sockets 6336 * extensions draft, but follows the practice as seen in TCP 6337 * sockets. 6338 * 6339 */ 6340 sk->sk_state = SCTP_SS_LISTENING; 6341 if (!ep->base.bind_addr.port) { 6342 if (sctp_autobind(sk)) 6343 return -EAGAIN; 6344 } else { 6345 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6346 sk->sk_state = SCTP_SS_CLOSED; 6347 return -EADDRINUSE; 6348 } 6349 } 6350 6351 sk->sk_max_ack_backlog = backlog; 6352 sctp_hash_endpoint(ep); 6353 return 0; 6354 } 6355 6356 /* 6357 * 4.1.3 / 5.1.3 listen() 6358 * 6359 * By default, new associations are not accepted for UDP style sockets. 6360 * An application uses listen() to mark a socket as being able to 6361 * accept new associations. 6362 * 6363 * On TCP style sockets, applications use listen() to ready the SCTP 6364 * endpoint for accepting inbound associations. 6365 * 6366 * On both types of endpoints a backlog of '0' disables listening. 6367 * 6368 * Move a socket to LISTENING state. 6369 */ 6370 int sctp_inet_listen(struct socket *sock, int backlog) 6371 { 6372 struct sock *sk = sock->sk; 6373 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6374 int err = -EINVAL; 6375 6376 if (unlikely(backlog < 0)) 6377 return err; 6378 6379 lock_sock(sk); 6380 6381 /* Peeled-off sockets are not allowed to listen(). */ 6382 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6383 goto out; 6384 6385 if (sock->state != SS_UNCONNECTED) 6386 goto out; 6387 6388 /* If backlog is zero, disable listening. */ 6389 if (!backlog) { 6390 if (sctp_sstate(sk, CLOSED)) 6391 goto out; 6392 6393 err = 0; 6394 sctp_unhash_endpoint(ep); 6395 sk->sk_state = SCTP_SS_CLOSED; 6396 if (sk->sk_reuse) 6397 sctp_sk(sk)->bind_hash->fastreuse = 1; 6398 goto out; 6399 } 6400 6401 /* If we are already listening, just update the backlog */ 6402 if (sctp_sstate(sk, LISTENING)) 6403 sk->sk_max_ack_backlog = backlog; 6404 else { 6405 err = sctp_listen_start(sk, backlog); 6406 if (err) 6407 goto out; 6408 } 6409 6410 err = 0; 6411 out: 6412 release_sock(sk); 6413 return err; 6414 } 6415 6416 /* 6417 * This function is done by modeling the current datagram_poll() and the 6418 * tcp_poll(). 
Note that, based on these implementations, we don't 6419 * lock the socket in this function, even though it seems that, 6420 * ideally, locking or some other mechanisms can be used to ensure 6421 * the integrity of the counters (sndbuf and wmem_alloc) used 6422 * in this place. We assume that we don't need locks either until proven 6423 * otherwise. 6424 * 6425 * Another thing to note is that we include the Async I/O support 6426 * here, again, by modeling the current TCP/UDP code. We don't have 6427 * a good way to test with it yet. 6428 */ 6429 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6430 { 6431 struct sock *sk = sock->sk; 6432 struct sctp_sock *sp = sctp_sk(sk); 6433 unsigned int mask; 6434 6435 poll_wait(file, sk_sleep(sk), wait); 6436 6437 /* A TCP-style listening socket becomes readable when the accept queue 6438 * is not empty. 6439 */ 6440 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6441 return (!list_empty(&sp->ep->asocs)) ? 6442 (POLLIN | POLLRDNORM) : 0; 6443 6444 mask = 0; 6445 6446 /* Is there any exceptional events? */ 6447 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6448 mask |= POLLERR | 6449 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 6450 if (sk->sk_shutdown & RCV_SHUTDOWN) 6451 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6452 if (sk->sk_shutdown == SHUTDOWN_MASK) 6453 mask |= POLLHUP; 6454 6455 /* Is it readable? Reconsider this code with TCP-style support. */ 6456 if (!skb_queue_empty(&sk->sk_receive_queue)) 6457 mask |= POLLIN | POLLRDNORM; 6458 6459 /* The association is either gone or not ready. */ 6460 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6461 return mask; 6462 6463 /* Is it writable? */ 6464 if (sctp_writeable(sk)) { 6465 mask |= POLLOUT | POLLWRNORM; 6466 } else { 6467 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6468 /* 6469 * Since the socket is not locked, the buffer 6470 * might be made available after the writeable check and 6471 * before the bit is set. This could cause a lost I/O 6472 * signal. tcp_poll() has a race breaker for this race 6473 * condition. Based on their implementation, we put 6474 * in the following code to cover it as well. 6475 */ 6476 if (sctp_writeable(sk)) 6477 mask |= POLLOUT | POLLWRNORM; 6478 } 6479 return mask; 6480 } 6481 6482 /******************************************************************** 6483 * 2nd Level Abstractions 6484 ********************************************************************/ 6485 6486 static struct sctp_bind_bucket *sctp_bucket_create( 6487 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6488 { 6489 struct sctp_bind_bucket *pp; 6490 6491 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6492 if (pp) { 6493 SCTP_DBG_OBJCNT_INC(bind_bucket); 6494 pp->port = snum; 6495 pp->fastreuse = 0; 6496 INIT_HLIST_HEAD(&pp->owner); 6497 pp->net = net; 6498 hlist_add_head(&pp->node, &head->chain); 6499 } 6500 return pp; 6501 } 6502 6503 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6504 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6505 { 6506 if (pp && hlist_empty(&pp->owner)) { 6507 __hlist_del(&pp->node); 6508 kmem_cache_free(sctp_bucket_cachep, pp); 6509 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6510 } 6511 } 6512 6513 /* Release this socket's reference to a local port. 
*/ 6514 static inline void __sctp_put_port(struct sock *sk) 6515 { 6516 struct sctp_bind_hashbucket *head = 6517 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6518 inet_sk(sk)->inet_num)]; 6519 struct sctp_bind_bucket *pp; 6520 6521 spin_lock(&head->lock); 6522 pp = sctp_sk(sk)->bind_hash; 6523 __sk_del_bind_node(sk); 6524 sctp_sk(sk)->bind_hash = NULL; 6525 inet_sk(sk)->inet_num = 0; 6526 sctp_bucket_destroy(pp); 6527 spin_unlock(&head->lock); 6528 } 6529 6530 void sctp_put_port(struct sock *sk) 6531 { 6532 local_bh_disable(); 6533 __sctp_put_port(sk); 6534 local_bh_enable(); 6535 } 6536 6537 /* 6538 * The system picks an ephemeral port and choose an address set equivalent 6539 * to binding with a wildcard address. 6540 * One of those addresses will be the primary address for the association. 6541 * This automatically enables the multihoming capability of SCTP. 6542 */ 6543 static int sctp_autobind(struct sock *sk) 6544 { 6545 union sctp_addr autoaddr; 6546 struct sctp_af *af; 6547 __be16 port; 6548 6549 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6550 af = sctp_sk(sk)->pf->af; 6551 6552 port = htons(inet_sk(sk)->inet_num); 6553 af->inaddr_any(&autoaddr, port); 6554 6555 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6556 } 6557 6558 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6559 * 6560 * From RFC 2292 6561 * 4.2 The cmsghdr Structure * 6562 * 6563 * When ancillary data is sent or received, any number of ancillary data 6564 * objects can be specified by the msg_control and msg_controllen members of 6565 * the msghdr structure, because each object is preceded by 6566 * a cmsghdr structure defining the object's length (the cmsg_len member). 6567 * Historically Berkeley-derived implementations have passed only one object 6568 * at a time, but this API allows multiple objects to be 6569 * passed in a single call to sendmsg() or recvmsg(). The following example 6570 * shows two ancillary data objects in a control buffer. 6571 * 6572 * |<--------------------------- msg_controllen -------------------------->| 6573 * | | 6574 * 6575 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6576 * 6577 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6578 * | | | 6579 * 6580 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6581 * 6582 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6583 * | | | | | 6584 * 6585 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6586 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6587 * 6588 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6589 * 6590 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6591 * ^ 6592 * | 6593 * 6594 * msg_control 6595 * points here 6596 */ 6597 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6598 { 6599 struct cmsghdr *cmsg; 6600 struct msghdr *my_msg = (struct msghdr *)msg; 6601 6602 for_each_cmsghdr(cmsg, my_msg) { 6603 if (!CMSG_OK(my_msg, cmsg)) 6604 return -EINVAL; 6605 6606 /* Should we parse this header or ignore? */ 6607 if (cmsg->cmsg_level != IPPROTO_SCTP) 6608 continue; 6609 6610 /* Strictly check lengths following example in SCM code. 
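 *
 * (Editorial illustration, not from the original sources: a sender that
 * wants per-message parameters builds the ancillary object with the
 * CMSG_* macros so that cmsg_len matches exactly what is checked below;
 * "msg" is an assumed struct msghdr about to be passed to sendmsg().)
 *
 *	struct sctp_sndrcvinfo *sinfo;
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDRCV;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	memset(sinfo, 0, sizeof(*sinfo));
 *	sinfo->sinfo_stream = 1;
 *	sinfo->sinfo_flags = SCTP_UNORDERED;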
*/ 6611 switch (cmsg->cmsg_type) { 6612 case SCTP_INIT: 6613 /* SCTP Socket API Extension 6614 * 5.3.1 SCTP Initiation Structure (SCTP_INIT) 6615 * 6616 * This cmsghdr structure provides information for 6617 * initializing new SCTP associations with sendmsg(). 6618 * The SCTP_INITMSG socket option uses this same data 6619 * structure. This structure is not used for 6620 * recvmsg(). 6621 * 6622 * cmsg_level cmsg_type cmsg_data[] 6623 * ------------ ------------ ---------------------- 6624 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6625 */ 6626 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) 6627 return -EINVAL; 6628 6629 cmsgs->init = CMSG_DATA(cmsg); 6630 break; 6631 6632 case SCTP_SNDRCV: 6633 /* SCTP Socket API Extension 6634 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) 6635 * 6636 * This cmsghdr structure specifies SCTP options for 6637 * sendmsg() and describes SCTP header information 6638 * about a received message through recvmsg(). 6639 * 6640 * cmsg_level cmsg_type cmsg_data[] 6641 * ------------ ------------ ---------------------- 6642 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6643 */ 6644 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6645 return -EINVAL; 6646 6647 cmsgs->srinfo = CMSG_DATA(cmsg); 6648 6649 if (cmsgs->srinfo->sinfo_flags & 6650 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6651 SCTP_ABORT | SCTP_EOF)) 6652 return -EINVAL; 6653 break; 6654 6655 case SCTP_SNDINFO: 6656 /* SCTP Socket API Extension 6657 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) 6658 * 6659 * This cmsghdr structure specifies SCTP options for 6660 * sendmsg(). This structure and SCTP_RCVINFO replaces 6661 * SCTP_SNDRCV which has been deprecated. 6662 * 6663 * cmsg_level cmsg_type cmsg_data[] 6664 * ------------ ------------ --------------------- 6665 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo 6666 */ 6667 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) 6668 return -EINVAL; 6669 6670 cmsgs->sinfo = CMSG_DATA(cmsg); 6671 6672 if (cmsgs->sinfo->snd_flags & 6673 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6674 SCTP_ABORT | SCTP_EOF)) 6675 return -EINVAL; 6676 break; 6677 default: 6678 return -EINVAL; 6679 } 6680 } 6681 6682 return 0; 6683 } 6684 6685 /* 6686 * Wait for a packet.. 6687 * Note: This function is the same function as in core/datagram.c 6688 * with a few modifications to make lksctp work. 6689 */ 6690 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) 6691 { 6692 int error; 6693 DEFINE_WAIT(wait); 6694 6695 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6696 6697 /* Socket errors? */ 6698 error = sock_error(sk); 6699 if (error) 6700 goto out; 6701 6702 if (!skb_queue_empty(&sk->sk_receive_queue)) 6703 goto ready; 6704 6705 /* Socket shut down? */ 6706 if (sk->sk_shutdown & RCV_SHUTDOWN) 6707 goto out; 6708 6709 /* Sequenced packets can come disconnected. If so we report the 6710 * problem. 6711 */ 6712 error = -ENOTCONN; 6713 6714 /* Is there a good reason to think that we may receive some data? */ 6715 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6716 goto out; 6717 6718 /* Handle signals. */ 6719 if (signal_pending(current)) 6720 goto interrupted; 6721 6722 /* Let another process have a go. Since we are going to sleep 6723 * anyway. Note: This may cause odd behaviors if the message 6724 * does not fit in the user's buffer, but this seems to be the 6725 * only way to honor MSG_DONTWAIT realistically. 
6726 */ 6727 release_sock(sk); 6728 *timeo_p = schedule_timeout(*timeo_p); 6729 lock_sock(sk); 6730 6731 ready: 6732 finish_wait(sk_sleep(sk), &wait); 6733 return 0; 6734 6735 interrupted: 6736 error = sock_intr_errno(*timeo_p); 6737 6738 out: 6739 finish_wait(sk_sleep(sk), &wait); 6740 *err = error; 6741 return error; 6742 } 6743 6744 /* Receive a datagram. 6745 * Note: This is pretty much the same routine as in core/datagram.c 6746 * with a few changes to make lksctp work. 6747 */ 6748 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6749 int noblock, int *err) 6750 { 6751 int error; 6752 struct sk_buff *skb; 6753 long timeo; 6754 6755 timeo = sock_rcvtimeo(sk, noblock); 6756 6757 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6758 MAX_SCHEDULE_TIMEOUT); 6759 6760 do { 6761 /* Again only user level code calls this function, 6762 * so nothing interrupt level 6763 * will suddenly eat the receive_queue. 6764 * 6765 * Look at current nfs client by the way... 6766 * However, this function was correct in any case. 8) 6767 */ 6768 if (flags & MSG_PEEK) { 6769 spin_lock_bh(&sk->sk_receive_queue.lock); 6770 skb = skb_peek(&sk->sk_receive_queue); 6771 if (skb) 6772 atomic_inc(&skb->users); 6773 spin_unlock_bh(&sk->sk_receive_queue.lock); 6774 } else { 6775 skb = skb_dequeue(&sk->sk_receive_queue); 6776 } 6777 6778 if (skb) 6779 return skb; 6780 6781 /* Caller is allowed not to check sk->sk_err before calling. */ 6782 error = sock_error(sk); 6783 if (error) 6784 goto no_packet; 6785 6786 if (sk->sk_shutdown & RCV_SHUTDOWN) 6787 break; 6788 6789 if (sk_can_busy_loop(sk) && 6790 sk_busy_loop(sk, noblock)) 6791 continue; 6792 6793 /* User doesn't want to wait. */ 6794 error = -EAGAIN; 6795 if (!timeo) 6796 goto no_packet; 6797 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6798 6799 return NULL; 6800 6801 no_packet: 6802 *err = error; 6803 return NULL; 6804 } 6805 6806 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6807 static void __sctp_write_space(struct sctp_association *asoc) 6808 { 6809 struct sock *sk = asoc->base.sk; 6810 struct socket *sock = sk->sk_socket; 6811 6812 if ((sctp_wspace(asoc) > 0) && sock) { 6813 if (waitqueue_active(&asoc->wait)) 6814 wake_up_interruptible(&asoc->wait); 6815 6816 if (sctp_writeable(sk)) { 6817 wait_queue_head_t *wq = sk_sleep(sk); 6818 6819 if (wq && waitqueue_active(wq)) 6820 wake_up_interruptible(wq); 6821 6822 /* Note that we try to include the Async I/O support 6823 * here by modeling from the current TCP/UDP code. 6824 * We have not tested with it yet. 6825 */ 6826 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6827 sock_wake_async(sock, 6828 SOCK_WAKE_SPACE, POLL_OUT); 6829 } 6830 } 6831 } 6832 6833 static void sctp_wake_up_waiters(struct sock *sk, 6834 struct sctp_association *asoc) 6835 { 6836 struct sctp_association *tmp = asoc; 6837 6838 /* We do accounting for the sndbuf space per association, 6839 * so we only need to wake our own association. 6840 */ 6841 if (asoc->ep->sndbuf_policy) 6842 return __sctp_write_space(asoc); 6843 6844 /* If association goes down and is just flushing its 6845 * outq, then just normally notify others. 6846 */ 6847 if (asoc->base.dead) 6848 return sctp_write_space(sk); 6849 6850 /* Accounting for the sndbuf space is per socket, so we 6851 * need to wake up others, try to be fair and in case of 6852 * other associations, let them have a go first instead 6853 * of just doing a sctp_write_space() call. 
6854 * 6855 * Note that we reach sctp_wake_up_waiters() only when 6856 * associations free up queued chunks, thus we are under 6857 * lock and the list of associations on a socket is 6858 * guaranteed not to change. 6859 */ 6860 for (tmp = list_next_entry(tmp, asocs); 1; 6861 tmp = list_next_entry(tmp, asocs)) { 6862 /* Manually skip the head element. */ 6863 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) 6864 continue; 6865 /* Wake up association. */ 6866 __sctp_write_space(tmp); 6867 /* We've reached the end. */ 6868 if (tmp == asoc) 6869 break; 6870 } 6871 } 6872 6873 /* Do accounting for the sndbuf space. 6874 * Decrement the used sndbuf space of the corresponding association by the 6875 * data size which was just transmitted(freed). 6876 */ 6877 static void sctp_wfree(struct sk_buff *skb) 6878 { 6879 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; 6880 struct sctp_association *asoc = chunk->asoc; 6881 struct sock *sk = asoc->base.sk; 6882 6883 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6884 sizeof(struct sk_buff) + 6885 sizeof(struct sctp_chunk); 6886 6887 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6888 6889 /* 6890 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6891 */ 6892 sk->sk_wmem_queued -= skb->truesize; 6893 sk_mem_uncharge(sk, skb->truesize); 6894 6895 sock_wfree(skb); 6896 sctp_wake_up_waiters(sk, asoc); 6897 6898 sctp_association_put(asoc); 6899 } 6900 6901 /* Do accounting for the receive space on the socket. 6902 * Accounting for the association is done in ulpevent.c 6903 * We set this as a destructor for the cloned data skbs so that 6904 * accounting is done at the correct time. 6905 */ 6906 void sctp_sock_rfree(struct sk_buff *skb) 6907 { 6908 struct sock *sk = skb->sk; 6909 struct sctp_ulpevent *event = sctp_skb2event(skb); 6910 6911 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6912 6913 /* 6914 * Mimic the behavior of sock_rfree 6915 */ 6916 sk_mem_uncharge(sk, event->rmem_len); 6917 } 6918 6919 6920 /* Helper function to wait for space in the sndbuf. */ 6921 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6922 size_t msg_len) 6923 { 6924 struct sock *sk = asoc->base.sk; 6925 int err = 0; 6926 long current_timeo = *timeo_p; 6927 DEFINE_WAIT(wait); 6928 6929 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6930 *timeo_p, msg_len); 6931 6932 /* Increment the association's refcnt. */ 6933 sctp_association_hold(asoc); 6934 6935 /* Wait on the association specific sndbuf space. */ 6936 for (;;) { 6937 prepare_to_wait_exclusive(&asoc->wait, &wait, 6938 TASK_INTERRUPTIBLE); 6939 if (!*timeo_p) 6940 goto do_nonblock; 6941 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6942 asoc->base.dead) 6943 goto do_error; 6944 if (signal_pending(current)) 6945 goto do_interrupted; 6946 if (msg_len <= sctp_wspace(asoc)) 6947 break; 6948 6949 /* Let another process have a go. Since we are going 6950 * to sleep anyway. 6951 */ 6952 release_sock(sk); 6953 current_timeo = schedule_timeout(current_timeo); 6954 BUG_ON(sk != asoc->base.sk); 6955 lock_sock(sk); 6956 6957 *timeo_p = current_timeo; 6958 } 6959 6960 out: 6961 finish_wait(&asoc->wait, &wait); 6962 6963 /* Release the association's refcnt. 
*/ 6964 sctp_association_put(asoc); 6965 6966 return err; 6967 6968 do_error: 6969 err = -EPIPE; 6970 goto out; 6971 6972 do_interrupted: 6973 err = sock_intr_errno(*timeo_p); 6974 goto out; 6975 6976 do_nonblock: 6977 err = -EAGAIN; 6978 goto out; 6979 } 6980 6981 void sctp_data_ready(struct sock *sk) 6982 { 6983 struct socket_wq *wq; 6984 6985 rcu_read_lock(); 6986 wq = rcu_dereference(sk->sk_wq); 6987 if (wq_has_sleeper(wq)) 6988 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6989 POLLRDNORM | POLLRDBAND); 6990 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6991 rcu_read_unlock(); 6992 } 6993 6994 /* If socket sndbuf has changed, wake up all per association waiters. */ 6995 void sctp_write_space(struct sock *sk) 6996 { 6997 struct sctp_association *asoc; 6998 6999 /* Wake up the tasks in each wait queue. */ 7000 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 7001 __sctp_write_space(asoc); 7002 } 7003 } 7004 7005 /* Is there any sndbuf space available on the socket? 7006 * 7007 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 7008 * associations on the same socket. For a UDP-style socket with 7009 * multiple associations, it is possible for it to be "unwriteable" 7010 * prematurely. I assume that this is acceptable because 7011 * a premature "unwriteable" is better than an accidental "writeable" which 7012 * would cause an unwanted block under certain circumstances. For the 1-1 7013 * UDP-style sockets or TCP-style sockets, this code should work. 7014 * - Daisy 7015 */ 7016 static int sctp_writeable(struct sock *sk) 7017 { 7018 int amt = 0; 7019 7020 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 7021 if (amt < 0) 7022 amt = 0; 7023 return amt; 7024 } 7025 7026 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 7027 * returns immediately with EINPROGRESS. 7028 */ 7029 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 7030 { 7031 struct sock *sk = asoc->base.sk; 7032 int err = 0; 7033 long current_timeo = *timeo_p; 7034 DEFINE_WAIT(wait); 7035 7036 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 7037 7038 /* Increment the association's refcnt. */ 7039 sctp_association_hold(asoc); 7040 7041 for (;;) { 7042 prepare_to_wait_exclusive(&asoc->wait, &wait, 7043 TASK_INTERRUPTIBLE); 7044 if (!*timeo_p) 7045 goto do_nonblock; 7046 if (sk->sk_shutdown & RCV_SHUTDOWN) 7047 break; 7048 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 7049 asoc->base.dead) 7050 goto do_error; 7051 if (signal_pending(current)) 7052 goto do_interrupted; 7053 7054 if (sctp_state(asoc, ESTABLISHED)) 7055 break; 7056 7057 /* Let another process have a go. Since we are going 7058 * to sleep anyway. 7059 */ 7060 release_sock(sk); 7061 current_timeo = schedule_timeout(current_timeo); 7062 lock_sock(sk); 7063 7064 *timeo_p = current_timeo; 7065 } 7066 7067 out: 7068 finish_wait(&asoc->wait, &wait); 7069 7070 /* Release the association's refcnt. 
*/ 7071 sctp_association_put(asoc); 7072 7073 return err; 7074 7075 do_error: 7076 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 7077 err = -ETIMEDOUT; 7078 else 7079 err = -ECONNREFUSED; 7080 goto out; 7081 7082 do_interrupted: 7083 err = sock_intr_errno(*timeo_p); 7084 goto out; 7085 7086 do_nonblock: 7087 err = -EINPROGRESS; 7088 goto out; 7089 } 7090 7091 static int sctp_wait_for_accept(struct sock *sk, long timeo) 7092 { 7093 struct sctp_endpoint *ep; 7094 int err = 0; 7095 DEFINE_WAIT(wait); 7096 7097 ep = sctp_sk(sk)->ep; 7098 7099 7100 for (;;) { 7101 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 7102 TASK_INTERRUPTIBLE); 7103 7104 if (list_empty(&ep->asocs)) { 7105 release_sock(sk); 7106 timeo = schedule_timeout(timeo); 7107 lock_sock(sk); 7108 } 7109 7110 err = -EINVAL; 7111 if (!sctp_sstate(sk, LISTENING)) 7112 break; 7113 7114 err = 0; 7115 if (!list_empty(&ep->asocs)) 7116 break; 7117 7118 err = sock_intr_errno(timeo); 7119 if (signal_pending(current)) 7120 break; 7121 7122 err = -EAGAIN; 7123 if (!timeo) 7124 break; 7125 } 7126 7127 finish_wait(sk_sleep(sk), &wait); 7128 7129 return err; 7130 } 7131 7132 static void sctp_wait_for_close(struct sock *sk, long timeout) 7133 { 7134 DEFINE_WAIT(wait); 7135 7136 do { 7137 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 7138 if (list_empty(&sctp_sk(sk)->ep->asocs)) 7139 break; 7140 release_sock(sk); 7141 timeout = schedule_timeout(timeout); 7142 lock_sock(sk); 7143 } while (!signal_pending(current) && timeout); 7144 7145 finish_wait(sk_sleep(sk), &wait); 7146 } 7147 7148 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 7149 { 7150 struct sk_buff *frag; 7151 7152 if (!skb->data_len) 7153 goto done; 7154 7155 /* Don't forget the fragments. */ 7156 skb_walk_frags(skb, frag) 7157 sctp_skb_set_owner_r_frag(frag, sk); 7158 7159 done: 7160 sctp_skb_set_owner_r(skb, sk); 7161 } 7162 7163 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 7164 struct sctp_association *asoc) 7165 { 7166 struct inet_sock *inet = inet_sk(sk); 7167 struct inet_sock *newinet; 7168 7169 newsk->sk_type = sk->sk_type; 7170 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 7171 newsk->sk_flags = sk->sk_flags; 7172 newsk->sk_no_check_tx = sk->sk_no_check_tx; 7173 newsk->sk_no_check_rx = sk->sk_no_check_rx; 7174 newsk->sk_reuse = sk->sk_reuse; 7175 7176 newsk->sk_shutdown = sk->sk_shutdown; 7177 newsk->sk_destruct = sctp_destruct_sock; 7178 newsk->sk_family = sk->sk_family; 7179 newsk->sk_protocol = IPPROTO_SCTP; 7180 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 7181 newsk->sk_sndbuf = sk->sk_sndbuf; 7182 newsk->sk_rcvbuf = sk->sk_rcvbuf; 7183 newsk->sk_lingertime = sk->sk_lingertime; 7184 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 7185 newsk->sk_sndtimeo = sk->sk_sndtimeo; 7186 7187 newinet = inet_sk(newsk); 7188 7189 /* Initialize sk's sport, dport, rcv_saddr and daddr for 7190 * getsockname() and getpeername() 7191 */ 7192 newinet->inet_sport = inet->inet_sport; 7193 newinet->inet_saddr = inet->inet_saddr; 7194 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 7195 newinet->inet_dport = htons(asoc->peer.port); 7196 newinet->pmtudisc = inet->pmtudisc; 7197 newinet->inet_id = asoc->next_tsn ^ jiffies; 7198 7199 newinet->uc_ttl = inet->uc_ttl; 7200 newinet->mc_loop = 1; 7201 newinet->mc_ttl = 1; 7202 newinet->mc_index = 0; 7203 newinet->mc_list = NULL; 7204 } 7205 7206 static inline void sctp_copy_descendant(struct sock *sk_to, 7207 const struct sock *sk_from) 7208 { 7209 int ancestor_size = sizeof(struct inet_sock) + 
7210 sizeof(struct sctp_sock) - 7211 offsetof(struct sctp_sock, auto_asconf_list); 7212 7213 if (sk_from->sk_family == PF_INET6) 7214 ancestor_size += sizeof(struct ipv6_pinfo); 7215 7216 __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); 7217 } 7218 7219 /* Populate the fields of the newsk from the oldsk and migrate the assoc 7220 * and its messages to the newsk. 7221 */ 7222 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 7223 struct sctp_association *assoc, 7224 sctp_socket_type_t type) 7225 { 7226 struct sctp_sock *oldsp = sctp_sk(oldsk); 7227 struct sctp_sock *newsp = sctp_sk(newsk); 7228 struct sctp_bind_bucket *pp; /* hash list port iterator */ 7229 struct sctp_endpoint *newep = newsp->ep; 7230 struct sk_buff *skb, *tmp; 7231 struct sctp_ulpevent *event; 7232 struct sctp_bind_hashbucket *head; 7233 7234 /* Migrate socket buffer sizes and all the socket level options to the 7235 * new socket. 7236 */ 7237 newsk->sk_sndbuf = oldsk->sk_sndbuf; 7238 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 7239 /* Brute force copy old sctp opt. */ 7240 sctp_copy_descendant(newsk, oldsk); 7241 7242 /* Restore the ep value that was overwritten with the above structure 7243 * copy. 7244 */ 7245 newsp->ep = newep; 7246 newsp->hmac = NULL; 7247 7248 /* Hook this new socket in to the bind_hash list. */ 7249 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 7250 inet_sk(oldsk)->inet_num)]; 7251 local_bh_disable(); 7252 spin_lock(&head->lock); 7253 pp = sctp_sk(oldsk)->bind_hash; 7254 sk_add_bind_node(newsk, &pp->owner); 7255 sctp_sk(newsk)->bind_hash = pp; 7256 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 7257 spin_unlock(&head->lock); 7258 local_bh_enable(); 7259 7260 /* Copy the bind_addr list from the original endpoint to the new 7261 * endpoint so that we can handle restarts properly 7262 */ 7263 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 7264 &oldsp->ep->base.bind_addr, GFP_KERNEL); 7265 7266 /* Move any messages in the old socket's receive queue that are for the 7267 * peeled off association to the new socket's receive queue. 7268 */ 7269 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 7270 event = sctp_skb2event(skb); 7271 if (event->asoc == assoc) { 7272 __skb_unlink(skb, &oldsk->sk_receive_queue); 7273 __skb_queue_tail(&newsk->sk_receive_queue, skb); 7274 sctp_skb_set_owner_r_frag(skb, newsk); 7275 } 7276 } 7277 7278 /* Clean up any messages pending delivery due to partial 7279 * delivery. Three cases: 7280 * 1) No partial deliver; no work. 7281 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 7282 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 7283 */ 7284 skb_queue_head_init(&newsp->pd_lobby); 7285 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 7286 7287 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 7288 struct sk_buff_head *queue; 7289 7290 /* Decide which queue to move pd_lobby skbs to. */ 7291 if (assoc->ulpq.pd_mode) { 7292 queue = &newsp->pd_lobby; 7293 } else 7294 queue = &newsk->sk_receive_queue; 7295 7296 /* Walk through the pd_lobby, looking for skbs that 7297 * need moved to the new socket. 7298 */ 7299 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 7300 event = sctp_skb2event(skb); 7301 if (event->asoc == assoc) { 7302 __skb_unlink(skb, &oldsp->pd_lobby); 7303 __skb_queue_tail(queue, skb); 7304 sctp_skb_set_owner_r_frag(skb, newsk); 7305 } 7306 } 7307 7308 /* Clear up any skbs waiting for the partial 7309 * delivery to finish. 
7310 */ 7311 if (assoc->ulpq.pd_mode) 7312 sctp_clear_pd(oldsk, NULL); 7313 7314 } 7315 7316 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 7317 sctp_skb_set_owner_r_frag(skb, newsk); 7318 7319 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 7320 sctp_skb_set_owner_r_frag(skb, newsk); 7321 7322 /* Set the type of socket to indicate that it is peeled off from the 7323 * original UDP-style socket or created with the accept() call on a 7324 * TCP-style socket.. 7325 */ 7326 newsp->type = type; 7327 7328 /* Mark the new socket "in-use" by the user so that any packets 7329 * that may arrive on the association after we've moved it are 7330 * queued to the backlog. This prevents a potential race between 7331 * backlog processing on the old socket and new-packet processing 7332 * on the new socket. 7333 * 7334 * The caller has just allocated newsk so we can guarantee that other 7335 * paths won't try to lock it and then oldsk. 7336 */ 7337 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7338 sctp_assoc_migrate(assoc, newsk); 7339 7340 /* If the association on the newsk is already closed before accept() 7341 * is called, set RCV_SHUTDOWN flag. 7342 */ 7343 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7344 newsk->sk_shutdown |= RCV_SHUTDOWN; 7345 7346 newsk->sk_state = SCTP_SS_ESTABLISHED; 7347 release_sock(newsk); 7348 } 7349 7350 7351 /* This proto struct describes the ULP interface for SCTP. */ 7352 struct proto sctp_prot = { 7353 .name = "SCTP", 7354 .owner = THIS_MODULE, 7355 .close = sctp_close, 7356 .connect = sctp_connect, 7357 .disconnect = sctp_disconnect, 7358 .accept = sctp_accept, 7359 .ioctl = sctp_ioctl, 7360 .init = sctp_init_sock, 7361 .destroy = sctp_destroy_sock, 7362 .shutdown = sctp_shutdown, 7363 .setsockopt = sctp_setsockopt, 7364 .getsockopt = sctp_getsockopt, 7365 .sendmsg = sctp_sendmsg, 7366 .recvmsg = sctp_recvmsg, 7367 .bind = sctp_bind, 7368 .backlog_rcv = sctp_backlog_rcv, 7369 .hash = sctp_hash, 7370 .unhash = sctp_unhash, 7371 .get_port = sctp_get_port, 7372 .obj_size = sizeof(struct sctp_sock), 7373 .sysctl_mem = sysctl_sctp_mem, 7374 .sysctl_rmem = sysctl_sctp_rmem, 7375 .sysctl_wmem = sysctl_sctp_wmem, 7376 .memory_pressure = &sctp_memory_pressure, 7377 .enter_memory_pressure = sctp_enter_memory_pressure, 7378 .memory_allocated = &sctp_memory_allocated, 7379 .sockets_allocated = &sctp_sockets_allocated, 7380 }; 7381 7382 #if IS_ENABLED(CONFIG_IPV6) 7383 7384 struct proto sctpv6_prot = { 7385 .name = "SCTPv6", 7386 .owner = THIS_MODULE, 7387 .close = sctp_close, 7388 .connect = sctp_connect, 7389 .disconnect = sctp_disconnect, 7390 .accept = sctp_accept, 7391 .ioctl = sctp_ioctl, 7392 .init = sctp_init_sock, 7393 .destroy = sctp_destroy_sock, 7394 .shutdown = sctp_shutdown, 7395 .setsockopt = sctp_setsockopt, 7396 .getsockopt = sctp_getsockopt, 7397 .sendmsg = sctp_sendmsg, 7398 .recvmsg = sctp_recvmsg, 7399 .bind = sctp_bind, 7400 .backlog_rcv = sctp_backlog_rcv, 7401 .hash = sctp_hash, 7402 .unhash = sctp_unhash, 7403 .get_port = sctp_get_port, 7404 .obj_size = sizeof(struct sctp6_sock), 7405 .sysctl_mem = sysctl_sctp_mem, 7406 .sysctl_rmem = sysctl_sctp_rmem, 7407 .sysctl_wmem = sysctl_sctp_wmem, 7408 .memory_pressure = &sctp_memory_pressure, 7409 .enter_memory_pressure = sctp_enter_memory_pressure, 7410 .memory_allocated = &sctp_memory_allocated, 7411 .sockets_allocated = &sctp_sockets_allocated, 7412 }; 7413 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7414
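/*
 * Editorial note (not part of the original sources): the two proto
 * structures above are what back SCTP sockets created from userspace.
 * Assuming a libc that exposes IPPROTO_SCTP, an application reaches
 * sctp_prot or sctpv6_prot with calls along the lines of:
 *
 *	int udp_style = socket(AF_INET,  SOCK_SEQPACKET, IPPROTO_SCTP);
 *	int tcp_style = socket(AF_INET6, SOCK_STREAM,    IPPROTO_SCTP);
 *
 * The first yields a one-to-many (UDP-style) socket served by sctp_prot,
 * the second a one-to-one (TCP-style) IPv6 socket served by sctpv6_prot;
 * both dispatch their socket calls through the function pointers listed
 * in these structures.
 */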