1 /* SCTP kernel implementation 2 * (C) Copyright IBM Corp. 2001, 2004 3 * Copyright (c) 1999-2000 Cisco, Inc. 4 * Copyright (c) 1999-2001 Motorola, Inc. 5 * Copyright (c) 2001-2003 Intel Corp. 6 * Copyright (c) 2001-2002 Nokia, Inc. 7 * Copyright (c) 2001 La Monte H.P. Yarroll 8 * 9 * This file is part of the SCTP kernel implementation 10 * 11 * These functions interface with the sockets layer to implement the 12 * SCTP Extensions for the Sockets API. 13 * 14 * Note that the descriptions from the specification are USER level 15 * functions--this file is the functions which populate the struct proto 16 * for SCTP which is the BOTTOM of the sockets interface. 17 * 18 * This SCTP implementation is free software; 19 * you can redistribute it and/or modify it under the terms of 20 * the GNU General Public License as published by 21 * the Free Software Foundation; either version 2, or (at your option) 22 * any later version. 23 * 24 * This SCTP implementation is distributed in the hope that it 25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied 26 * ************************ 27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 28 * See the GNU General Public License for more details. 29 * 30 * You should have received a copy of the GNU General Public License 31 * along with GNU CC; see the file COPYING. If not, see 32 * <http://www.gnu.org/licenses/>. 33 * 34 * Please send any bug reports or fixes you make to the 35 * email address(es): 36 * lksctp developers <linux-sctp@vger.kernel.org> 37 * 38 * Written or modified by: 39 * La Monte H.P. Yarroll <piggy@acm.org> 40 * Narasimha Budihal <narsi@refcode.org> 41 * Karl Knutson <karl@athena.chicago.il.us> 42 * Jon Grimm <jgrimm@us.ibm.com> 43 * Xingang Guo <xingang.guo@intel.com> 44 * Daisy Chang <daisyc@us.ibm.com> 45 * Sridhar Samudrala <samudrala@us.ibm.com> 46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> 47 * Ardelle Fan <ardelle.fan@intel.com> 48 * Ryan Layer <rmlayer@us.ibm.com> 49 * Anup Pemmaiah <pemmaiah@cc.usu.edu> 50 * Kevin Gao <kevin.gao@intel.com> 51 */ 52 53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 54 55 #include <linux/types.h> 56 #include <linux/kernel.h> 57 #include <linux/wait.h> 58 #include <linux/time.h> 59 #include <linux/ip.h> 60 #include <linux/capability.h> 61 #include <linux/fcntl.h> 62 #include <linux/poll.h> 63 #include <linux/init.h> 64 #include <linux/crypto.h> 65 #include <linux/slab.h> 66 #include <linux/file.h> 67 #include <linux/compat.h> 68 69 #include <net/ip.h> 70 #include <net/icmp.h> 71 #include <net/route.h> 72 #include <net/ipv6.h> 73 #include <net/inet_common.h> 74 #include <net/busy_poll.h> 75 76 #include <linux/socket.h> /* for sa_family_t */ 77 #include <linux/export.h> 78 #include <net/sock.h> 79 #include <net/sctp/sctp.h> 80 #include <net/sctp/sm.h> 81 82 /* Forward declarations for internal helper functions. 
*/ 83 static int sctp_writeable(struct sock *sk); 84 static void sctp_wfree(struct sk_buff *skb); 85 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, 86 size_t msg_len); 87 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); 88 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); 89 static int sctp_wait_for_accept(struct sock *sk, long timeo); 90 static void sctp_wait_for_close(struct sock *sk, long timeo); 91 static void sctp_destruct_sock(struct sock *sk); 92 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 93 union sctp_addr *addr, int len); 94 static int sctp_bindx_add(struct sock *, struct sockaddr *, int); 95 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); 96 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); 97 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); 98 static int sctp_send_asconf(struct sctp_association *asoc, 99 struct sctp_chunk *chunk); 100 static int sctp_do_bind(struct sock *, union sctp_addr *, int); 101 static int sctp_autobind(struct sock *sk); 102 static void sctp_sock_migrate(struct sock *, struct sock *, 103 struct sctp_association *, sctp_socket_type_t); 104 105 static int sctp_memory_pressure; 106 static atomic_long_t sctp_memory_allocated; 107 struct percpu_counter sctp_sockets_allocated; 108 109 static void sctp_enter_memory_pressure(struct sock *sk) 110 { 111 sctp_memory_pressure = 1; 112 } 113 114 115 /* Get the sndbuf space available at the time on the association. */ 116 static inline int sctp_wspace(struct sctp_association *asoc) 117 { 118 int amt; 119 120 if (asoc->ep->sndbuf_policy) 121 amt = asoc->sndbuf_used; 122 else 123 amt = sk_wmem_alloc_get(asoc->base.sk); 124 125 if (amt >= asoc->base.sk->sk_sndbuf) { 126 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) 127 amt = 0; 128 else { 129 amt = sk_stream_wspace(asoc->base.sk); 130 if (amt < 0) 131 amt = 0; 132 } 133 } else { 134 amt = asoc->base.sk->sk_sndbuf - amt; 135 } 136 return amt; 137 } 138 139 /* Increment the used sndbuf space count of the corresponding association by 140 * the size of the outgoing data chunk. 141 * Also, set the skb destructor for sndbuf accounting later. 142 * 143 * Since it is always 1-1 between chunk and skb, and also a new skb is always 144 * allocated for chunk bundling in sctp_packet_transmit(), we can use the 145 * destructor in the data chunk skb for the purpose of the sndbuf space 146 * tracking. 147 */ 148 static inline void sctp_set_owner_w(struct sctp_chunk *chunk) 149 { 150 struct sctp_association *asoc = chunk->asoc; 151 struct sock *sk = asoc->base.sk; 152 153 /* The sndbuf space is tracked per association. */ 154 sctp_association_hold(asoc); 155 156 skb_set_owner_w(chunk->skb, sk); 157 158 chunk->skb->destructor = sctp_wfree; 159 /* Save the chunk pointer in skb for sctp_wfree to use later. */ 160 skb_shinfo(chunk->skb)->destructor_arg = chunk; 161 162 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + 163 sizeof(struct sk_buff) + 164 sizeof(struct sctp_chunk); 165 166 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 167 sk->sk_wmem_queued += chunk->skb->truesize; 168 sk_mem_charge(sk, chunk->skb->truesize); 169 } 170 171 /* Verify that this is a valid address. */ 172 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, 173 int len) 174 { 175 struct sctp_af *af; 176 177 /* Verify basic sockaddr. 
*/ 178 af = sctp_sockaddr_af(sctp_sk(sk), addr, len); 179 if (!af) 180 return -EINVAL; 181 182 /* Is this a valid SCTP address? */ 183 if (!af->addr_valid(addr, sctp_sk(sk), NULL)) 184 return -EINVAL; 185 186 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) 187 return -EINVAL; 188 189 return 0; 190 } 191 192 /* Look up the association by its id. If this is not a UDP-style 193 * socket, the ID field is always ignored. 194 */ 195 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) 196 { 197 struct sctp_association *asoc = NULL; 198 199 /* If this is not a UDP-style socket, assoc id should be ignored. */ 200 if (!sctp_style(sk, UDP)) { 201 /* Return NULL if the socket state is not ESTABLISHED. It 202 * could be a TCP-style listening socket or a socket which 203 * hasn't yet called connect() to establish an association. 204 */ 205 if (!sctp_sstate(sk, ESTABLISHED)) 206 return NULL; 207 208 /* Get the first and the only association from the list. */ 209 if (!list_empty(&sctp_sk(sk)->ep->asocs)) 210 asoc = list_entry(sctp_sk(sk)->ep->asocs.next, 211 struct sctp_association, asocs); 212 return asoc; 213 } 214 215 /* Otherwise this is a UDP-style socket. */ 216 if (!id || (id == (sctp_assoc_t)-1)) 217 return NULL; 218 219 spin_lock_bh(&sctp_assocs_id_lock); 220 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); 221 spin_unlock_bh(&sctp_assocs_id_lock); 222 223 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) 224 return NULL; 225 226 return asoc; 227 } 228 229 /* Look up the transport from an address and an assoc id. If both address and 230 * id are specified, the associations matching the address and the id should be 231 * the same. 232 */ 233 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, 234 struct sockaddr_storage *addr, 235 sctp_assoc_t id) 236 { 237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 238 struct sctp_transport *transport; 239 union sctp_addr *laddr = (union sctp_addr *)addr; 240 241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 242 laddr, 243 &transport); 244 245 if (!addr_asoc) 246 return NULL; 247 248 id_asoc = sctp_id2assoc(sk, id); 249 if (id_asoc && (id_asoc != addr_asoc)) 250 return NULL; 251 252 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 253 (union sctp_addr *)addr); 254 255 return transport; 256 } 257 258 /* API 3.1.2 bind() - UDP Style Syntax 259 * The syntax of bind() is, 260 * 261 * ret = bind(int sd, struct sockaddr *addr, int addrlen); 262 * 263 * sd - the socket descriptor returned by socket(). 264 * addr - the address structure (struct sockaddr_in or struct 265 * sockaddr_in6 [RFC 2553]), 266 * addr_len - the size of the address structure. 267 */ 268 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) 269 { 270 int retval = 0; 271 272 lock_sock(sk); 273 274 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, 275 addr, addr_len); 276 277 /* Disallow binding twice. */ 278 if (!sctp_sk(sk)->ep->base.bind_addr.port) 279 retval = sctp_do_bind(sk, (union sctp_addr *)addr, 280 addr_len); 281 else 282 retval = -EINVAL; 283 284 release_sock(sk); 285 286 return retval; 287 } 288 289 static long sctp_get_port_local(struct sock *, union sctp_addr *); 290 291 /* Verify this is a valid sockaddr. */ 292 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 293 union sctp_addr *addr, int len) 294 { 295 struct sctp_af *af; 296 297 /* Check minimum size. 
*/ 298 if (len < sizeof (struct sockaddr)) 299 return NULL; 300 301 /* V4 mapped address are really of AF_INET family */ 302 if (addr->sa.sa_family == AF_INET6 && 303 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { 304 if (!opt->pf->af_supported(AF_INET, opt)) 305 return NULL; 306 } else { 307 /* Does this PF support this AF? */ 308 if (!opt->pf->af_supported(addr->sa.sa_family, opt)) 309 return NULL; 310 } 311 312 /* If we get this far, af is valid. */ 313 af = sctp_get_af_specific(addr->sa.sa_family); 314 315 if (len < af->sockaddr_len) 316 return NULL; 317 318 return af; 319 } 320 321 /* Bind a local address either to an endpoint or to an association. */ 322 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) 323 { 324 struct net *net = sock_net(sk); 325 struct sctp_sock *sp = sctp_sk(sk); 326 struct sctp_endpoint *ep = sp->ep; 327 struct sctp_bind_addr *bp = &ep->base.bind_addr; 328 struct sctp_af *af; 329 unsigned short snum; 330 int ret = 0; 331 332 /* Common sockaddr verification. */ 333 af = sctp_sockaddr_af(sp, addr, len); 334 if (!af) { 335 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", 336 __func__, sk, addr, len); 337 return -EINVAL; 338 } 339 340 snum = ntohs(addr->v4.sin_port); 341 342 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", 343 __func__, sk, &addr->sa, bp->port, snum, len); 344 345 /* PF specific bind() address verification. */ 346 if (!sp->pf->bind_verify(sp, addr)) 347 return -EADDRNOTAVAIL; 348 349 /* We must either be unbound, or bind to the same port. 350 * It's OK to allow 0 ports if we are already bound. 351 * We'll just inhert an already bound port in this case 352 */ 353 if (bp->port) { 354 if (!snum) 355 snum = bp->port; 356 else if (snum != bp->port) { 357 pr_debug("%s: new port %d doesn't match existing port " 358 "%d\n", __func__, snum, bp->port); 359 return -EINVAL; 360 } 361 } 362 363 if (snum && snum < PROT_SOCK && 364 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) 365 return -EACCES; 366 367 /* See if the address matches any of the addresses we may have 368 * already bound before checking against other endpoints. 369 */ 370 if (sctp_bind_addr_match(bp, addr, sp)) 371 return -EINVAL; 372 373 /* Make sure we are allowed to bind here. 374 * The function sctp_get_port_local() does duplicate address 375 * detection. 376 */ 377 addr->v4.sin_port = htons(snum); 378 if ((ret = sctp_get_port_local(sk, addr))) { 379 return -EADDRINUSE; 380 } 381 382 /* Refresh ephemeral port. */ 383 if (!bp->port) 384 bp->port = inet_sk(sk)->inet_num; 385 386 /* Add the address to the bind address list. 387 * Use GFP_ATOMIC since BHs will be disabled. 388 */ 389 ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC); 390 391 /* Copy back into socket for getsockname() use. */ 392 if (!ret) { 393 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); 394 sp->pf->to_sk_saddr(addr, sk); 395 } 396 397 return ret; 398 } 399 400 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks 401 * 402 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged 403 * at any one time. If a sender, after sending an ASCONF chunk, decides 404 * it needs to transfer another ASCONF Chunk, it MUST wait until the 405 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a 406 * subsequent ASCONF. Note this restriction binds each side, so at any 407 * time two ASCONF may be in-transit on any given association (one sent 408 * from each endpoint). 
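 *
 * A rough sketch of how the sender side below honours R1 (illustrative
 * only; chunk_a and chunk_b are hypothetical chunks built by the caller):
 *
 *	sctp_send_asconf(asoc, chunk_a);   // nothing outstanding: chunk_a is
 *	                                   // handed to sctp_primitive_ASCONF()
 *	                                   // and remembered in addip_last_asconf
 *	sctp_send_asconf(asoc, chunk_b);   // chunk_a still unacked: chunk_b is
 *	                                   // only queued on addip_chunk_list
 *	// Once the ASCONF-ACK for chunk_a arrives, the next queued ASCONF
 *	// (here chunk_b) can be transmitted.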
409 */ 410 static int sctp_send_asconf(struct sctp_association *asoc, 411 struct sctp_chunk *chunk) 412 { 413 struct net *net = sock_net(asoc->base.sk); 414 int retval = 0; 415 416 /* If there is an outstanding ASCONF chunk, queue it for later 417 * transmission. 418 */ 419 if (asoc->addip_last_asconf) { 420 list_add_tail(&chunk->list, &asoc->addip_chunk_list); 421 goto out; 422 } 423 424 /* Hold the chunk until an ASCONF_ACK is received. */ 425 sctp_chunk_hold(chunk); 426 retval = sctp_primitive_ASCONF(net, asoc, chunk); 427 if (retval) 428 sctp_chunk_free(chunk); 429 else 430 asoc->addip_last_asconf = chunk; 431 432 out: 433 return retval; 434 } 435 436 /* Add a list of addresses as bind addresses to local endpoint or 437 * association. 438 * 439 * Basically run through each address specified in the addrs/addrcnt 440 * array/length pair, determine if it is IPv6 or IPv4 and call 441 * sctp_do_bind() on it. 442 * 443 * If any of them fails, then the operation will be reversed and the 444 * ones that were added will be removed. 445 * 446 * Only sctp_setsockopt_bindx() is supposed to call this function. 447 */ 448 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) 449 { 450 int cnt; 451 int retval = 0; 452 void *addr_buf; 453 struct sockaddr *sa_addr; 454 struct sctp_af *af; 455 456 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, 457 addrs, addrcnt); 458 459 addr_buf = addrs; 460 for (cnt = 0; cnt < addrcnt; cnt++) { 461 /* The list may contain either IPv4 or IPv6 address; 462 * determine the address length for walking thru the list. 463 */ 464 sa_addr = addr_buf; 465 af = sctp_get_af_specific(sa_addr->sa_family); 466 if (!af) { 467 retval = -EINVAL; 468 goto err_bindx_add; 469 } 470 471 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, 472 af->sockaddr_len); 473 474 addr_buf += af->sockaddr_len; 475 476 err_bindx_add: 477 if (retval < 0) { 478 /* Failed. Cleanup the ones that have been added */ 479 if (cnt > 0) 480 sctp_bindx_rem(sk, addrs, cnt); 481 return retval; 482 } 483 } 484 485 return retval; 486 } 487 488 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the 489 * associations that are part of the endpoint indicating that a list of local 490 * addresses are added to the endpoint. 491 * 492 * If any of the addresses is already in the bind address list of the 493 * association, we do not send the chunk for that association. But it will not 494 * affect other associations. 495 * 496 * Only sctp_setsockopt_bindx() is supposed to call this function. 
497 */ 498 static int sctp_send_asconf_add_ip(struct sock *sk, 499 struct sockaddr *addrs, 500 int addrcnt) 501 { 502 struct net *net = sock_net(sk); 503 struct sctp_sock *sp; 504 struct sctp_endpoint *ep; 505 struct sctp_association *asoc; 506 struct sctp_bind_addr *bp; 507 struct sctp_chunk *chunk; 508 struct sctp_sockaddr_entry *laddr; 509 union sctp_addr *addr; 510 union sctp_addr saveaddr; 511 void *addr_buf; 512 struct sctp_af *af; 513 struct list_head *p; 514 int i; 515 int retval = 0; 516 517 if (!net->sctp.addip_enable) 518 return retval; 519 520 sp = sctp_sk(sk); 521 ep = sp->ep; 522 523 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 524 __func__, sk, addrs, addrcnt); 525 526 list_for_each_entry(asoc, &ep->asocs, asocs) { 527 if (!asoc->peer.asconf_capable) 528 continue; 529 530 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) 531 continue; 532 533 if (!sctp_state(asoc, ESTABLISHED)) 534 continue; 535 536 /* Check if any address in the packed array of addresses is 537 * in the bind address list of the association. If so, 538 * do not send the asconf chunk to its peer, but continue with 539 * other associations. 540 */ 541 addr_buf = addrs; 542 for (i = 0; i < addrcnt; i++) { 543 addr = addr_buf; 544 af = sctp_get_af_specific(addr->v4.sin_family); 545 if (!af) { 546 retval = -EINVAL; 547 goto out; 548 } 549 550 if (sctp_assoc_lookup_laddr(asoc, addr)) 551 break; 552 553 addr_buf += af->sockaddr_len; 554 } 555 if (i < addrcnt) 556 continue; 557 558 /* Use the first valid address in bind addr list of 559 * association as Address Parameter of ASCONF CHUNK. 560 */ 561 bp = &asoc->base.bind_addr; 562 p = bp->address_list.next; 563 laddr = list_entry(p, struct sctp_sockaddr_entry, list); 564 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, 565 addrcnt, SCTP_PARAM_ADD_IP); 566 if (!chunk) { 567 retval = -ENOMEM; 568 goto out; 569 } 570 571 /* Add the new addresses to the bind address list with 572 * use_as_src set to 0. 573 */ 574 addr_buf = addrs; 575 for (i = 0; i < addrcnt; i++) { 576 addr = addr_buf; 577 af = sctp_get_af_specific(addr->v4.sin_family); 578 memcpy(&saveaddr, addr, af->sockaddr_len); 579 retval = sctp_add_bind_addr(bp, &saveaddr, 580 SCTP_ADDR_NEW, GFP_ATOMIC); 581 addr_buf += af->sockaddr_len; 582 } 583 if (asoc->src_out_of_asoc_ok) { 584 struct sctp_transport *trans; 585 586 list_for_each_entry(trans, 587 &asoc->peer.transport_addr_list, transports) { 588 /* Clear the source and route cache */ 589 dst_release(trans->dst); 590 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 591 2*asoc->pathmtu, 4380)); 592 trans->ssthresh = asoc->peer.i.a_rwnd; 593 trans->rto = asoc->rto_initial; 594 sctp_max_rto(asoc, trans); 595 trans->rtt = trans->srtt = trans->rttvar = 0; 596 sctp_transport_route(trans, NULL, 597 sctp_sk(asoc->base.sk)); 598 } 599 } 600 retval = sctp_send_asconf(asoc, chunk); 601 } 602 603 out: 604 return retval; 605 } 606 607 /* Remove a list of addresses from bind addresses list. Do not remove the 608 * last address. 609 * 610 * Basically run through each address specified in the addrs/addrcnt 611 * array/length pair, determine if it is IPv6 or IPv4 and call 612 * sctp_del_bind() on it. 613 * 614 * If any of them fails, then the operation will be reversed and the 615 * ones that were removed will be added back. 616 * 617 * At least one address has to be left; if only one address is 618 * available, the operation will return -EBUSY. 619 * 620 * Only sctp_setsockopt_bindx() is supposed to call this function. 
621 */ 622 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) 623 { 624 struct sctp_sock *sp = sctp_sk(sk); 625 struct sctp_endpoint *ep = sp->ep; 626 int cnt; 627 struct sctp_bind_addr *bp = &ep->base.bind_addr; 628 int retval = 0; 629 void *addr_buf; 630 union sctp_addr *sa_addr; 631 struct sctp_af *af; 632 633 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 634 __func__, sk, addrs, addrcnt); 635 636 addr_buf = addrs; 637 for (cnt = 0; cnt < addrcnt; cnt++) { 638 /* If the bind address list is empty or if there is only one 639 * bind address, there is nothing more to be removed (we need 640 * at least one address here). 641 */ 642 if (list_empty(&bp->address_list) || 643 (sctp_list_single_entry(&bp->address_list))) { 644 retval = -EBUSY; 645 goto err_bindx_rem; 646 } 647 648 sa_addr = addr_buf; 649 af = sctp_get_af_specific(sa_addr->sa.sa_family); 650 if (!af) { 651 retval = -EINVAL; 652 goto err_bindx_rem; 653 } 654 655 if (!af->addr_valid(sa_addr, sp, NULL)) { 656 retval = -EADDRNOTAVAIL; 657 goto err_bindx_rem; 658 } 659 660 if (sa_addr->v4.sin_port && 661 sa_addr->v4.sin_port != htons(bp->port)) { 662 retval = -EINVAL; 663 goto err_bindx_rem; 664 } 665 666 if (!sa_addr->v4.sin_port) 667 sa_addr->v4.sin_port = htons(bp->port); 668 669 /* FIXME - There is probably a need to check if sk->sk_saddr and 670 * sk->sk_rcv_addr are currently set to one of the addresses to 671 * be removed. This is something which needs to be looked into 672 * when we are fixing the outstanding issues with multi-homing 673 * socket routing and failover schemes. Refer to comments in 674 * sctp_do_bind(). -daisy 675 */ 676 retval = sctp_del_bind_addr(bp, sa_addr); 677 678 addr_buf += af->sockaddr_len; 679 err_bindx_rem: 680 if (retval < 0) { 681 /* Failed. Add the ones that has been removed back */ 682 if (cnt > 0) 683 sctp_bindx_add(sk, addrs, cnt); 684 return retval; 685 } 686 } 687 688 return retval; 689 } 690 691 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of 692 * the associations that are part of the endpoint indicating that a list of 693 * local addresses are removed from the endpoint. 694 * 695 * If any of the addresses is already in the bind address list of the 696 * association, we do not send the chunk for that association. But it will not 697 * affect other associations. 698 * 699 * Only sctp_setsockopt_bindx() is supposed to call this function. 700 */ 701 static int sctp_send_asconf_del_ip(struct sock *sk, 702 struct sockaddr *addrs, 703 int addrcnt) 704 { 705 struct net *net = sock_net(sk); 706 struct sctp_sock *sp; 707 struct sctp_endpoint *ep; 708 struct sctp_association *asoc; 709 struct sctp_transport *transport; 710 struct sctp_bind_addr *bp; 711 struct sctp_chunk *chunk; 712 union sctp_addr *laddr; 713 void *addr_buf; 714 struct sctp_af *af; 715 struct sctp_sockaddr_entry *saddr; 716 int i; 717 int retval = 0; 718 int stored = 0; 719 720 chunk = NULL; 721 if (!net->sctp.addip_enable) 722 return retval; 723 724 sp = sctp_sk(sk); 725 ep = sp->ep; 726 727 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 728 __func__, sk, addrs, addrcnt); 729 730 list_for_each_entry(asoc, &ep->asocs, asocs) { 731 732 if (!asoc->peer.asconf_capable) 733 continue; 734 735 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) 736 continue; 737 738 if (!sctp_state(asoc, ESTABLISHED)) 739 continue; 740 741 /* Check if any address in the packed array of addresses is 742 * not present in the bind address list of the association. 
743 * If so, do not send the asconf chunk to its peer, but 744 * continue with other associations. 745 */ 746 addr_buf = addrs; 747 for (i = 0; i < addrcnt; i++) { 748 laddr = addr_buf; 749 af = sctp_get_af_specific(laddr->v4.sin_family); 750 if (!af) { 751 retval = -EINVAL; 752 goto out; 753 } 754 755 if (!sctp_assoc_lookup_laddr(asoc, laddr)) 756 break; 757 758 addr_buf += af->sockaddr_len; 759 } 760 if (i < addrcnt) 761 continue; 762 763 /* Find one address in the association's bind address list 764 * that is not in the packed array of addresses. This is to 765 * make sure that we do not delete all the addresses in the 766 * association. 767 */ 768 bp = &asoc->base.bind_addr; 769 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, 770 addrcnt, sp); 771 if ((laddr == NULL) && (addrcnt == 1)) { 772 if (asoc->asconf_addr_del_pending) 773 continue; 774 asoc->asconf_addr_del_pending = 775 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); 776 if (asoc->asconf_addr_del_pending == NULL) { 777 retval = -ENOMEM; 778 goto out; 779 } 780 asoc->asconf_addr_del_pending->sa.sa_family = 781 addrs->sa_family; 782 asoc->asconf_addr_del_pending->v4.sin_port = 783 htons(bp->port); 784 if (addrs->sa_family == AF_INET) { 785 struct sockaddr_in *sin; 786 787 sin = (struct sockaddr_in *)addrs; 788 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; 789 } else if (addrs->sa_family == AF_INET6) { 790 struct sockaddr_in6 *sin6; 791 792 sin6 = (struct sockaddr_in6 *)addrs; 793 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; 794 } 795 796 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", 797 __func__, asoc, &asoc->asconf_addr_del_pending->sa, 798 asoc->asconf_addr_del_pending); 799 800 asoc->src_out_of_asoc_ok = 1; 801 stored = 1; 802 goto skip_mkasconf; 803 } 804 805 if (laddr == NULL) 806 return -EINVAL; 807 808 /* We do not need RCU protection throughout this loop 809 * because this is done under a socket lock from the 810 * setsockopt call. 811 */ 812 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, 813 SCTP_PARAM_DEL_IP); 814 if (!chunk) { 815 retval = -ENOMEM; 816 goto out; 817 } 818 819 skip_mkasconf: 820 /* Reset use_as_src flag for the addresses in the bind address 821 * list that are to be deleted. 822 */ 823 addr_buf = addrs; 824 for (i = 0; i < addrcnt; i++) { 825 laddr = addr_buf; 826 af = sctp_get_af_specific(laddr->v4.sin_family); 827 list_for_each_entry(saddr, &bp->address_list, list) { 828 if (sctp_cmp_addr_exact(&saddr->a, laddr)) 829 saddr->state = SCTP_ADDR_DEL; 830 } 831 addr_buf += af->sockaddr_len; 832 } 833 834 /* Update the route and saddr entries for all the transports 835 * as some of the addresses in the bind address list are 836 * about to be deleted and cannot be used as source addresses. 837 */ 838 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 839 transports) { 840 dst_release(transport->dst); 841 sctp_transport_route(transport, NULL, 842 sctp_sk(asoc->base.sk)); 843 } 844 845 if (stored) 846 /* We don't need to transmit ASCONF */ 847 continue; 848 retval = sctp_send_asconf(asoc, chunk); 849 } 850 out: 851 return retval; 852 } 853 854 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ 855 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) 856 { 857 struct sock *sk = sctp_opt2sk(sp); 858 union sctp_addr *addr; 859 struct sctp_af *af; 860 861 /* It is safe to write port space in caller. 
*/ 862 addr = &addrw->a; 863 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); 864 af = sctp_get_af_specific(addr->sa.sa_family); 865 if (!af) 866 return -EINVAL; 867 if (sctp_verify_addr(sk, addr, af->sockaddr_len)) 868 return -EINVAL; 869 870 if (addrw->state == SCTP_ADDR_NEW) 871 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); 872 else 873 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); 874 } 875 876 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() 877 * 878 * API 8.1 879 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, 880 * int flags); 881 * 882 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 883 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 884 * or IPv6 addresses. 885 * 886 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see 887 * Section 3.1.2 for this usage. 888 * 889 * addrs is a pointer to an array of one or more socket addresses. Each 890 * address is contained in its appropriate structure (i.e. struct 891 * sockaddr_in or struct sockaddr_in6) the family of the address type 892 * must be used to distinguish the address length (note that this 893 * representation is termed a "packed array" of addresses). The caller 894 * specifies the number of addresses in the array with addrcnt. 895 * 896 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns 897 * -1, and sets errno to the appropriate error code. 898 * 899 * For SCTP, the port given in each socket address must be the same, or 900 * sctp_bindx() will fail, setting errno to EINVAL. 901 * 902 * The flags parameter is formed from the bitwise OR of zero or more of 903 * the following currently defined flags: 904 * 905 * SCTP_BINDX_ADD_ADDR 906 * 907 * SCTP_BINDX_REM_ADDR 908 * 909 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the 910 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given 911 * addresses from the association. The two flags are mutually exclusive; 912 * if both are given, sctp_bindx() will fail with EINVAL. A caller may 913 * not remove all addresses from an association; sctp_bindx() will 914 * reject such an attempt with EINVAL. 915 * 916 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate 917 * additional addresses with an endpoint after calling bind(). Or use 918 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening 919 * socket is associated with so that no new association accepted will be 920 * associated with those addresses. If the endpoint supports dynamic 921 * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a 922 * endpoint to send the appropriate message to the peer to change the 923 * peers address lists. 924 * 925 * Adding and removing addresses from a connected association is 926 * optional functionality. Implementations that do not support this 927 * functionality should return EOPNOTSUPP. 928 * 929 * Basically do nothing but copying the addresses from user to kernel 930 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. 931 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() 932 * from userspace. 933 * 934 * We don't use copy_from_user() for optimization: we first do the 935 * sanity checks (buffer size -fast- and access check-healthy 936 * pointer); if all of those succeed, then we can alloc the memory 937 * (expensive operation) needed to copy the data to kernel. 
Then we do 938 * the copying without checking the user space area 939 * (__copy_from_user()). 940 * 941 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 942 * it. 943 * 944 * sk The sk of the socket 945 * addrs The pointer to the addresses in user land 946 * addrssize Size of the addrs buffer 947 * op Operation to perform (add or remove, see the flags of 948 * sctp_bindx) 949 * 950 * Returns 0 if ok, <0 errno code on error. 951 */ 952 static int sctp_setsockopt_bindx(struct sock *sk, 953 struct sockaddr __user *addrs, 954 int addrs_size, int op) 955 { 956 struct sockaddr *kaddrs; 957 int err; 958 int addrcnt = 0; 959 int walk_size = 0; 960 struct sockaddr *sa_addr; 961 void *addr_buf; 962 struct sctp_af *af; 963 964 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", 965 __func__, sk, addrs, addrs_size, op); 966 967 if (unlikely(addrs_size <= 0)) 968 return -EINVAL; 969 970 /* Check the user passed a healthy pointer. */ 971 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 972 return -EFAULT; 973 974 /* Alloc space for the address array in kernel memory. */ 975 kaddrs = kmalloc(addrs_size, GFP_KERNEL); 976 if (unlikely(!kaddrs)) 977 return -ENOMEM; 978 979 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 980 kfree(kaddrs); 981 return -EFAULT; 982 } 983 984 /* Walk through the addrs buffer and count the number of addresses. */ 985 addr_buf = kaddrs; 986 while (walk_size < addrs_size) { 987 if (walk_size + sizeof(sa_family_t) > addrs_size) { 988 kfree(kaddrs); 989 return -EINVAL; 990 } 991 992 sa_addr = addr_buf; 993 af = sctp_get_af_specific(sa_addr->sa_family); 994 995 /* If the address family is not supported or if this address 996 * causes the address buffer to overflow return EINVAL. 997 */ 998 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 999 kfree(kaddrs); 1000 return -EINVAL; 1001 } 1002 addrcnt++; 1003 addr_buf += af->sockaddr_len; 1004 walk_size += af->sockaddr_len; 1005 } 1006 1007 /* Do the work. */ 1008 switch (op) { 1009 case SCTP_BINDX_ADD_ADDR: 1010 err = sctp_bindx_add(sk, kaddrs, addrcnt); 1011 if (err) 1012 goto out; 1013 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); 1014 break; 1015 1016 case SCTP_BINDX_REM_ADDR: 1017 err = sctp_bindx_rem(sk, kaddrs, addrcnt); 1018 if (err) 1019 goto out; 1020 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); 1021 break; 1022 1023 default: 1024 err = -EINVAL; 1025 break; 1026 } 1027 1028 out: 1029 kfree(kaddrs); 1030 1031 return err; 1032 } 1033 1034 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) 1035 * 1036 * Common routine for handling connect() and sctp_connectx(). 1037 * Connect will come in with just a single address. 1038 */ 1039 static int __sctp_connect(struct sock *sk, 1040 struct sockaddr *kaddrs, 1041 int addrs_size, 1042 sctp_assoc_t *assoc_id) 1043 { 1044 struct net *net = sock_net(sk); 1045 struct sctp_sock *sp; 1046 struct sctp_endpoint *ep; 1047 struct sctp_association *asoc = NULL; 1048 struct sctp_association *asoc2; 1049 struct sctp_transport *transport; 1050 union sctp_addr to; 1051 sctp_scope_t scope; 1052 long timeo; 1053 int err = 0; 1054 int addrcnt = 0; 1055 int walk_size = 0; 1056 union sctp_addr *sa_addr = NULL; 1057 void *addr_buf; 1058 unsigned short port; 1059 unsigned int f_flags = 0; 1060 1061 sp = sctp_sk(sk); 1062 ep = sp->ep; 1063 1064 /* connect() cannot be done on a socket that is already in ESTABLISHED 1065 * state - UDP-style peeled off socket or a TCP-style socket that 1066 * is already connected. 
1067 * It cannot be done even on a TCP-style listening socket. 1068 */ 1069 if (sctp_sstate(sk, ESTABLISHED) || 1070 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { 1071 err = -EISCONN; 1072 goto out_free; 1073 } 1074 1075 /* Walk through the addrs buffer and count the number of addresses. */ 1076 addr_buf = kaddrs; 1077 while (walk_size < addrs_size) { 1078 struct sctp_af *af; 1079 1080 if (walk_size + sizeof(sa_family_t) > addrs_size) { 1081 err = -EINVAL; 1082 goto out_free; 1083 } 1084 1085 sa_addr = addr_buf; 1086 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1087 1088 /* If the address family is not supported or if this address 1089 * causes the address buffer to overflow return EINVAL. 1090 */ 1091 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1092 err = -EINVAL; 1093 goto out_free; 1094 } 1095 1096 port = ntohs(sa_addr->v4.sin_port); 1097 1098 /* Save current address so we can work with it */ 1099 memcpy(&to, sa_addr, af->sockaddr_len); 1100 1101 err = sctp_verify_addr(sk, &to, af->sockaddr_len); 1102 if (err) 1103 goto out_free; 1104 1105 /* Make sure the destination port is correctly set 1106 * in all addresses. 1107 */ 1108 if (asoc && asoc->peer.port && asoc->peer.port != port) { 1109 err = -EINVAL; 1110 goto out_free; 1111 } 1112 1113 /* Check if there already is a matching association on the 1114 * endpoint (other than the one created here). 1115 */ 1116 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1117 if (asoc2 && asoc2 != asoc) { 1118 if (asoc2->state >= SCTP_STATE_ESTABLISHED) 1119 err = -EISCONN; 1120 else 1121 err = -EALREADY; 1122 goto out_free; 1123 } 1124 1125 /* If we could not find a matching association on the endpoint, 1126 * make sure that there is no peeled-off association matching 1127 * the peer address even on another socket. 1128 */ 1129 if (sctp_endpoint_is_peeled_off(ep, &to)) { 1130 err = -EADDRNOTAVAIL; 1131 goto out_free; 1132 } 1133 1134 if (!asoc) { 1135 /* If a bind() or sctp_bindx() is not called prior to 1136 * an sctp_connectx() call, the system picks an 1137 * ephemeral port and will choose an address set 1138 * equivalent to binding with a wildcard address. 1139 */ 1140 if (!ep->base.bind_addr.port) { 1141 if (sctp_autobind(sk)) { 1142 err = -EAGAIN; 1143 goto out_free; 1144 } 1145 } else { 1146 /* 1147 * If an unprivileged user inherits a 1-many 1148 * style socket with open associations on a 1149 * privileged port, it MAY be permitted to 1150 * accept new associations, but it SHOULD NOT 1151 * be permitted to open new associations. 1152 */ 1153 if (ep->base.bind_addr.port < PROT_SOCK && 1154 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1155 err = -EACCES; 1156 goto out_free; 1157 } 1158 } 1159 1160 scope = sctp_scope(&to); 1161 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1162 if (!asoc) { 1163 err = -ENOMEM; 1164 goto out_free; 1165 } 1166 1167 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, 1168 GFP_KERNEL); 1169 if (err < 0) { 1170 goto out_free; 1171 } 1172 1173 } 1174 1175 /* Prime the peer's transport structures. */ 1176 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, 1177 SCTP_UNKNOWN); 1178 if (!transport) { 1179 err = -ENOMEM; 1180 goto out_free; 1181 } 1182 1183 addrcnt++; 1184 addr_buf += af->sockaddr_len; 1185 walk_size += af->sockaddr_len; 1186 } 1187 1188 /* In case the user of sctp_connectx() wants an association 1189 * id back, assign one now. 
1190 */ 1191 if (assoc_id) { 1192 err = sctp_assoc_set_id(asoc, GFP_KERNEL); 1193 if (err < 0) 1194 goto out_free; 1195 } 1196 1197 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1198 if (err < 0) { 1199 goto out_free; 1200 } 1201 1202 /* Initialize sk's dport and daddr for getpeername() */ 1203 inet_sk(sk)->inet_dport = htons(asoc->peer.port); 1204 sp->pf->to_sk_daddr(sa_addr, sk); 1205 sk->sk_err = 0; 1206 1207 /* in-kernel sockets don't generally have a file allocated to them 1208 * if all they do is call sock_create_kern(). 1209 */ 1210 if (sk->sk_socket->file) 1211 f_flags = sk->sk_socket->file->f_flags; 1212 1213 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1214 1215 err = sctp_wait_for_connect(asoc, &timeo); 1216 if ((err == 0 || err == -EINPROGRESS) && assoc_id) 1217 *assoc_id = asoc->assoc_id; 1218 1219 /* Don't free association on exit. */ 1220 asoc = NULL; 1221 1222 out_free: 1223 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", 1224 __func__, asoc, kaddrs, err); 1225 1226 if (asoc) { 1227 /* sctp_primitive_ASSOCIATE may have added this association 1228 * To the hash table, try to unhash it, just in case, its a noop 1229 * if it wasn't hashed so we're safe 1230 */ 1231 sctp_unhash_established(asoc); 1232 sctp_association_free(asoc); 1233 } 1234 return err; 1235 } 1236 1237 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() 1238 * 1239 * API 8.9 1240 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt, 1241 * sctp_assoc_t *asoc); 1242 * 1243 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 1244 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 1245 * or IPv6 addresses. 1246 * 1247 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see 1248 * Section 3.1.2 for this usage. 1249 * 1250 * addrs is a pointer to an array of one or more socket addresses. Each 1251 * address is contained in its appropriate structure (i.e. struct 1252 * sockaddr_in or struct sockaddr_in6) the family of the address type 1253 * must be used to distengish the address length (note that this 1254 * representation is termed a "packed array" of addresses). The caller 1255 * specifies the number of addresses in the array with addrcnt. 1256 * 1257 * On success, sctp_connectx() returns 0. It also sets the assoc_id to 1258 * the association id of the new association. On failure, sctp_connectx() 1259 * returns -1, and sets errno to the appropriate error code. The assoc_id 1260 * is not touched by the kernel. 1261 * 1262 * For SCTP, the port given in each socket address must be the same, or 1263 * sctp_connectx() will fail, setting errno to EINVAL. 1264 * 1265 * An application can use sctp_connectx to initiate an association with 1266 * an endpoint that is multi-homed. Much like sctp_bindx() this call 1267 * allows a caller to specify multiple addresses at which a peer can be 1268 * reached. The way the SCTP stack uses the list of addresses to set up 1269 * the association is implementation dependent. This function only 1270 * specifies that the stack will try to make use of all the addresses in 1271 * the list when needed. 1272 * 1273 * Note that the list of addresses passed in is only used for setting up 1274 * the association. It does not necessarily equal the set of addresses 1275 * the peer uses for the resulting association. If the caller wants to 1276 * find out the set of peer addresses, it must use sctp_getpaddrs() to 1277 * retrieve them after the association has been set up. 
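 *
 * A minimal user-space sketch of such a call (lksctp-tools style; sd is
 * assumed to be an already created IPv4 SCTP socket, and the two
 * documentation addresses and port 5000 are placeholders):
 *
 *	struct sockaddr_in addrs[2];
 *	sctp_assoc_t assoc_id;
 *
 *	memset(addrs, 0, sizeof(addrs));
 *	addrs[0].sin_family = AF_INET;
 *	addrs[0].sin_port   = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
 *	addrs[1] = addrs[0];	// same port in every address, as required above
 *	inet_pton(AF_INET, "198.51.100.1", &addrs[1].sin_addr);
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)addrs, 2, &assoc_id) < 0)
 *		perror("sctp_connectx");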
1278 * 1279 * Basically do nothing but copying the addresses from user to kernel 1280 * land and invoking either sctp_connectx(). This is used for tunneling 1281 * the sctp_connectx() request through sctp_setsockopt() from userspace. 1282 * 1283 * We don't use copy_from_user() for optimization: we first do the 1284 * sanity checks (buffer size -fast- and access check-healthy 1285 * pointer); if all of those succeed, then we can alloc the memory 1286 * (expensive operation) needed to copy the data to kernel. Then we do 1287 * the copying without checking the user space area 1288 * (__copy_from_user()). 1289 * 1290 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 1291 * it. 1292 * 1293 * sk The sk of the socket 1294 * addrs The pointer to the addresses in user land 1295 * addrssize Size of the addrs buffer 1296 * 1297 * Returns >=0 if ok, <0 errno code on error. 1298 */ 1299 static int __sctp_setsockopt_connectx(struct sock *sk, 1300 struct sockaddr __user *addrs, 1301 int addrs_size, 1302 sctp_assoc_t *assoc_id) 1303 { 1304 int err = 0; 1305 struct sockaddr *kaddrs; 1306 1307 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", 1308 __func__, sk, addrs, addrs_size); 1309 1310 if (unlikely(addrs_size <= 0)) 1311 return -EINVAL; 1312 1313 /* Check the user passed a healthy pointer. */ 1314 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 1315 return -EFAULT; 1316 1317 /* Alloc space for the address array in kernel memory. */ 1318 kaddrs = kmalloc(addrs_size, GFP_KERNEL); 1319 if (unlikely(!kaddrs)) 1320 return -ENOMEM; 1321 1322 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 1323 err = -EFAULT; 1324 } else { 1325 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); 1326 } 1327 1328 kfree(kaddrs); 1329 1330 return err; 1331 } 1332 1333 /* 1334 * This is an older interface. It's kept for backward compatibility 1335 * to the option that doesn't provide association id. 1336 */ 1337 static int sctp_setsockopt_connectx_old(struct sock *sk, 1338 struct sockaddr __user *addrs, 1339 int addrs_size) 1340 { 1341 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); 1342 } 1343 1344 /* 1345 * New interface for the API. The since the API is done with a socket 1346 * option, to make it simple we feed back the association id is as a return 1347 * indication to the call. Error is always negative and association id is 1348 * always positive. 1349 */ 1350 static int sctp_setsockopt_connectx(struct sock *sk, 1351 struct sockaddr __user *addrs, 1352 int addrs_size) 1353 { 1354 sctp_assoc_t assoc_id = 0; 1355 int err = 0; 1356 1357 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id); 1358 1359 if (err) 1360 return err; 1361 else 1362 return assoc_id; 1363 } 1364 1365 /* 1366 * New (hopefully final) interface for the API. 1367 * We use the sctp_getaddrs_old structure so that use-space library 1368 * can avoid any unnecessary allocations. The only different part 1369 * is that we store the actual length of the address buffer into the 1370 * addrs_num structure member. That way we can re-use the existing 1371 * code. 
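 *
 * From user space this looks roughly like the sketch below (assuming the
 * SCTP_SOCKOPT_CONNECTX3 option id from the uapi headers; sd, addrs,
 * addrs_size and assoc_id are the caller's socket, packed address buffer,
 * its length and the returned id):
 *
 *	struct sctp_getaddrs_old param;
 *	socklen_t len = sizeof(param);
 *
 *	param.assoc_id = 0;
 *	param.addr_num = addrs_size;	// buffer length, not an address count
 *	param.addrs    = (struct sockaddr *)addrs;
 *
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX3,
 *		       &param, &len) == 0)
 *		assoc_id = param.assoc_id;	// written back on success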
1372 */ 1373 #ifdef CONFIG_COMPAT 1374 struct compat_sctp_getaddrs_old { 1375 sctp_assoc_t assoc_id; 1376 s32 addr_num; 1377 compat_uptr_t addrs; /* struct sockaddr * */ 1378 }; 1379 #endif 1380 1381 static int sctp_getsockopt_connectx3(struct sock *sk, int len, 1382 char __user *optval, 1383 int __user *optlen) 1384 { 1385 struct sctp_getaddrs_old param; 1386 sctp_assoc_t assoc_id = 0; 1387 int err = 0; 1388 1389 #ifdef CONFIG_COMPAT 1390 if (is_compat_task()) { 1391 struct compat_sctp_getaddrs_old param32; 1392 1393 if (len < sizeof(param32)) 1394 return -EINVAL; 1395 if (copy_from_user(¶m32, optval, sizeof(param32))) 1396 return -EFAULT; 1397 1398 param.assoc_id = param32.assoc_id; 1399 param.addr_num = param32.addr_num; 1400 param.addrs = compat_ptr(param32.addrs); 1401 } else 1402 #endif 1403 { 1404 if (len < sizeof(param)) 1405 return -EINVAL; 1406 if (copy_from_user(¶m, optval, sizeof(param))) 1407 return -EFAULT; 1408 } 1409 1410 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) 1411 param.addrs, param.addr_num, 1412 &assoc_id); 1413 if (err == 0 || err == -EINPROGRESS) { 1414 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1415 return -EFAULT; 1416 if (put_user(sizeof(assoc_id), optlen)) 1417 return -EFAULT; 1418 } 1419 1420 return err; 1421 } 1422 1423 /* API 3.1.4 close() - UDP Style Syntax 1424 * Applications use close() to perform graceful shutdown (as described in 1425 * Section 10.1 of [SCTP]) on ALL the associations currently represented 1426 * by a UDP-style socket. 1427 * 1428 * The syntax is 1429 * 1430 * ret = close(int sd); 1431 * 1432 * sd - the socket descriptor of the associations to be closed. 1433 * 1434 * To gracefully shutdown a specific association represented by the 1435 * UDP-style socket, an application should use the sendmsg() call, 1436 * passing no user data, but including the appropriate flag in the 1437 * ancillary data (see Section xxxx). 1438 * 1439 * If sd in the close() call is a branched-off socket representing only 1440 * one association, the shutdown is performed on that association only. 1441 * 1442 * 4.1.6 close() - TCP Style Syntax 1443 * 1444 * Applications use close() to gracefully close down an association. 1445 * 1446 * The syntax is: 1447 * 1448 * int close(int sd); 1449 * 1450 * sd - the socket descriptor of the association to be closed. 1451 * 1452 * After an application calls close() on a socket descriptor, no further 1453 * socket operations will succeed on that descriptor. 1454 * 1455 * API 7.1.4 SO_LINGER 1456 * 1457 * An application using the TCP-style socket can use this option to 1458 * perform the SCTP ABORT primitive. The linger option structure is: 1459 * 1460 * struct linger { 1461 * int l_onoff; // option on/off 1462 * int l_linger; // linger time 1463 * }; 1464 * 1465 * To enable the option, set l_onoff to 1. If the l_linger value is set 1466 * to 0, calling close() is the same as the ABORT primitive. If the 1467 * value is set to a negative value, the setsockopt() call will return 1468 * an error. If the value is set to a positive value linger_time, the 1469 * close() can be blocked for at most linger_time ms. If the graceful 1470 * shutdown phase does not finish during this period, close() will 1471 * return but the graceful shutdown phase continues in the system. 
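 *
 * For example, a TCP-style application that wants close() to abort the
 * association instead of shutting it down gracefully could do the
 * following (illustrative sketch; sd is the connected socket):
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(sd);	// close() now behaves like the ABORT primitive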
1472 */ 1473 static void sctp_close(struct sock *sk, long timeout) 1474 { 1475 struct net *net = sock_net(sk); 1476 struct sctp_endpoint *ep; 1477 struct sctp_association *asoc; 1478 struct list_head *pos, *temp; 1479 unsigned int data_was_unread; 1480 1481 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); 1482 1483 lock_sock(sk); 1484 sk->sk_shutdown = SHUTDOWN_MASK; 1485 sk->sk_state = SCTP_SS_CLOSING; 1486 1487 ep = sctp_sk(sk)->ep; 1488 1489 /* Clean up any skbs sitting on the receive queue. */ 1490 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); 1491 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); 1492 1493 /* Walk all associations on an endpoint. */ 1494 list_for_each_safe(pos, temp, &ep->asocs) { 1495 asoc = list_entry(pos, struct sctp_association, asocs); 1496 1497 if (sctp_style(sk, TCP)) { 1498 /* A closed association can still be in the list if 1499 * it belongs to a TCP-style listening socket that is 1500 * not yet accepted. If so, free it. If not, send an 1501 * ABORT or SHUTDOWN based on the linger options. 1502 */ 1503 if (sctp_state(asoc, CLOSED)) { 1504 sctp_unhash_established(asoc); 1505 sctp_association_free(asoc); 1506 continue; 1507 } 1508 } 1509 1510 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || 1511 !skb_queue_empty(&asoc->ulpq.reasm) || 1512 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { 1513 struct sctp_chunk *chunk; 1514 1515 chunk = sctp_make_abort_user(asoc, NULL, 0); 1516 if (chunk) 1517 sctp_primitive_ABORT(net, asoc, chunk); 1518 } else 1519 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1520 } 1521 1522 /* On a TCP-style socket, block for at most linger_time if set. */ 1523 if (sctp_style(sk, TCP) && timeout) 1524 sctp_wait_for_close(sk, timeout); 1525 1526 /* This will run the backlog queue. */ 1527 release_sock(sk); 1528 1529 /* Supposedly, no process has access to the socket, but 1530 * the net layers still may. 1531 */ 1532 local_bh_disable(); 1533 bh_lock_sock(sk); 1534 1535 /* Hold the sock, since sk_common_release() will put sock_put() 1536 * and we have just a little more cleanup. 1537 */ 1538 sock_hold(sk); 1539 sk_common_release(sk); 1540 1541 bh_unlock_sock(sk); 1542 local_bh_enable(); 1543 1544 sock_put(sk); 1545 1546 SCTP_DBG_OBJCNT_DEC(sock); 1547 } 1548 1549 /* Handle EPIPE error. */ 1550 static int sctp_error(struct sock *sk, int flags, int err) 1551 { 1552 if (err == -EPIPE) 1553 err = sock_error(sk) ? : -EPIPE; 1554 if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) 1555 send_sig(SIGPIPE, current, 0); 1556 return err; 1557 } 1558 1559 /* API 3.1.3 sendmsg() - UDP Style Syntax 1560 * 1561 * An application uses sendmsg() and recvmsg() calls to transmit data to 1562 * and receive data from its peer. 1563 * 1564 * ssize_t sendmsg(int socket, const struct msghdr *message, 1565 * int flags); 1566 * 1567 * socket - the socket descriptor of the endpoint. 1568 * message - pointer to the msghdr structure which contains a single 1569 * user message and possibly some ancillary data. 1570 * 1571 * See Section 5 for complete description of the data 1572 * structures. 1573 * 1574 * flags - flags sent or received with the user message, see Section 1575 * 5 for complete description of the flags. 1576 * 1577 * Note: This function could use a rewrite especially when explicit 1578 * connect support comes in. 1579 */ 1580 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). 
*/ 1581 1582 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); 1583 1584 static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) 1585 { 1586 struct net *net = sock_net(sk); 1587 struct sctp_sock *sp; 1588 struct sctp_endpoint *ep; 1589 struct sctp_association *new_asoc = NULL, *asoc = NULL; 1590 struct sctp_transport *transport, *chunk_tp; 1591 struct sctp_chunk *chunk; 1592 union sctp_addr to; 1593 struct sockaddr *msg_name = NULL; 1594 struct sctp_sndrcvinfo default_sinfo; 1595 struct sctp_sndrcvinfo *sinfo; 1596 struct sctp_initmsg *sinit; 1597 sctp_assoc_t associd = 0; 1598 sctp_cmsgs_t cmsgs = { NULL }; 1599 sctp_scope_t scope; 1600 bool fill_sinfo_ttl = false, wait_connect = false; 1601 struct sctp_datamsg *datamsg; 1602 int msg_flags = msg->msg_flags; 1603 __u16 sinfo_flags = 0; 1604 long timeo; 1605 int err; 1606 1607 err = 0; 1608 sp = sctp_sk(sk); 1609 ep = sp->ep; 1610 1611 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, 1612 msg, msg_len, ep); 1613 1614 /* We cannot send a message over a TCP-style listening socket. */ 1615 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { 1616 err = -EPIPE; 1617 goto out_nounlock; 1618 } 1619 1620 /* Parse out the SCTP CMSGs. */ 1621 err = sctp_msghdr_parse(msg, &cmsgs); 1622 if (err) { 1623 pr_debug("%s: msghdr parse err:%x\n", __func__, err); 1624 goto out_nounlock; 1625 } 1626 1627 /* Fetch the destination address for this packet. This 1628 * address only selects the association--it is not necessarily 1629 * the address we will send to. 1630 * For a peeled-off socket, msg_name is ignored. 1631 */ 1632 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { 1633 int msg_namelen = msg->msg_namelen; 1634 1635 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, 1636 msg_namelen); 1637 if (err) 1638 return err; 1639 1640 if (msg_namelen > sizeof(to)) 1641 msg_namelen = sizeof(to); 1642 memcpy(&to, msg->msg_name, msg_namelen); 1643 msg_name = msg->msg_name; 1644 } 1645 1646 sinit = cmsgs.init; 1647 if (cmsgs.sinfo != NULL) { 1648 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1649 default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; 1650 default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; 1651 default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; 1652 default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; 1653 default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; 1654 1655 sinfo = &default_sinfo; 1656 fill_sinfo_ttl = true; 1657 } else { 1658 sinfo = cmsgs.srinfo; 1659 } 1660 /* Did the user specify SNDINFO/SNDRCVINFO? */ 1661 if (sinfo) { 1662 sinfo_flags = sinfo->sinfo_flags; 1663 associd = sinfo->sinfo_assoc_id; 1664 } 1665 1666 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, 1667 msg_len, sinfo_flags); 1668 1669 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ 1670 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { 1671 err = -EINVAL; 1672 goto out_nounlock; 1673 } 1674 1675 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero 1676 * length messages when SCTP_EOF|SCTP_ABORT is not set. 1677 * If SCTP_ABORT is set, the message length could be non zero with 1678 * the msg_iov set to the user abort reason. 1679 */ 1680 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || 1681 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { 1682 err = -EINVAL; 1683 goto out_nounlock; 1684 } 1685 1686 /* If SCTP_ADDR_OVER is set, there must be an address 1687 * specified in msg_name. 
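 *
 * In user-space terms, a sender that overrides the destination address
 * has to supply both pieces, roughly as in this sketch (peer, buf and
 * buflen are the caller's; only the SCTP-specific parts are shown):
 *
 *	struct sctp_sndrcvinfo *sinfo;
 *	char cbuf[CMSG_SPACE(sizeof(*sinfo))];
 *	struct iovec iov = { buf, buflen };
 *	struct msghdr mh = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	mh.msg_name       = &peer;	// mandatory with SCTP_ADDR_OVER
 *	mh.msg_namelen    = sizeof(peer);
 *	mh.msg_iov        = &iov;
 *	mh.msg_iovlen     = 1;
 *	mh.msg_control    = cbuf;
 *	mh.msg_controllen = sizeof(cbuf);
 *
 *	cmsg = CMSG_FIRSTHDR(&mh);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_SNDRCV;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(*sinfo));
 *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	memset(sinfo, 0, sizeof(*sinfo));
 *	sinfo->sinfo_flags = SCTP_ADDR_OVER;
 *	sendmsg(sd, &mh, 0);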
1688 */ 1689 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { 1690 err = -EINVAL; 1691 goto out_nounlock; 1692 } 1693 1694 transport = NULL; 1695 1696 pr_debug("%s: about to look up association\n", __func__); 1697 1698 lock_sock(sk); 1699 1700 /* If a msg_name has been specified, assume this is to be used. */ 1701 if (msg_name) { 1702 /* Look for a matching association on the endpoint. */ 1703 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1704 if (!asoc) { 1705 /* If we could not find a matching association on the 1706 * endpoint, make sure that it is not a TCP-style 1707 * socket that already has an association or there is 1708 * no peeled-off association on another socket. 1709 */ 1710 if ((sctp_style(sk, TCP) && 1711 sctp_sstate(sk, ESTABLISHED)) || 1712 sctp_endpoint_is_peeled_off(ep, &to)) { 1713 err = -EADDRNOTAVAIL; 1714 goto out_unlock; 1715 } 1716 } 1717 } else { 1718 asoc = sctp_id2assoc(sk, associd); 1719 if (!asoc) { 1720 err = -EPIPE; 1721 goto out_unlock; 1722 } 1723 } 1724 1725 if (asoc) { 1726 pr_debug("%s: just looked up association:%p\n", __func__, asoc); 1727 1728 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED 1729 * socket that has an association in CLOSED state. This can 1730 * happen when an accepted socket has an association that is 1731 * already CLOSED. 1732 */ 1733 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { 1734 err = -EPIPE; 1735 goto out_unlock; 1736 } 1737 1738 if (sinfo_flags & SCTP_EOF) { 1739 pr_debug("%s: shutting down association:%p\n", 1740 __func__, asoc); 1741 1742 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1743 err = 0; 1744 goto out_unlock; 1745 } 1746 if (sinfo_flags & SCTP_ABORT) { 1747 1748 chunk = sctp_make_abort_user(asoc, msg, msg_len); 1749 if (!chunk) { 1750 err = -ENOMEM; 1751 goto out_unlock; 1752 } 1753 1754 pr_debug("%s: aborting association:%p\n", 1755 __func__, asoc); 1756 1757 sctp_primitive_ABORT(net, asoc, chunk); 1758 err = 0; 1759 goto out_unlock; 1760 } 1761 } 1762 1763 /* Do we need to create the association? */ 1764 if (!asoc) { 1765 pr_debug("%s: there is no association yet\n", __func__); 1766 1767 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { 1768 err = -EINVAL; 1769 goto out_unlock; 1770 } 1771 1772 /* Check for invalid stream against the stream counts, 1773 * either the default or the user specified stream counts. 1774 */ 1775 if (sinfo) { 1776 if (!sinit || !sinit->sinit_num_ostreams) { 1777 /* Check against the defaults. */ 1778 if (sinfo->sinfo_stream >= 1779 sp->initmsg.sinit_num_ostreams) { 1780 err = -EINVAL; 1781 goto out_unlock; 1782 } 1783 } else { 1784 /* Check against the requested. */ 1785 if (sinfo->sinfo_stream >= 1786 sinit->sinit_num_ostreams) { 1787 err = -EINVAL; 1788 goto out_unlock; 1789 } 1790 } 1791 } 1792 1793 /* 1794 * API 3.1.2 bind() - UDP Style Syntax 1795 * If a bind() or sctp_bindx() is not called prior to a 1796 * sendmsg() call that initiates a new association, the 1797 * system picks an ephemeral port and will choose an address 1798 * set equivalent to binding with a wildcard address. 1799 */ 1800 if (!ep->base.bind_addr.port) { 1801 if (sctp_autobind(sk)) { 1802 err = -EAGAIN; 1803 goto out_unlock; 1804 } 1805 } else { 1806 /* 1807 * If an unprivileged user inherits a one-to-many 1808 * style socket with open associations on a privileged 1809 * port, it MAY be permitted to accept new associations, 1810 * but it SHOULD NOT be permitted to open new 1811 * associations. 
1812 */ 1813 if (ep->base.bind_addr.port < PROT_SOCK && 1814 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1815 err = -EACCES; 1816 goto out_unlock; 1817 } 1818 } 1819 1820 scope = sctp_scope(&to); 1821 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1822 if (!new_asoc) { 1823 err = -ENOMEM; 1824 goto out_unlock; 1825 } 1826 asoc = new_asoc; 1827 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1828 if (err < 0) { 1829 err = -ENOMEM; 1830 goto out_free; 1831 } 1832 1833 /* If the SCTP_INIT ancillary data is specified, set all 1834 * the association init values accordingly. 1835 */ 1836 if (sinit) { 1837 if (sinit->sinit_num_ostreams) { 1838 asoc->c.sinit_num_ostreams = 1839 sinit->sinit_num_ostreams; 1840 } 1841 if (sinit->sinit_max_instreams) { 1842 asoc->c.sinit_max_instreams = 1843 sinit->sinit_max_instreams; 1844 } 1845 if (sinit->sinit_max_attempts) { 1846 asoc->max_init_attempts 1847 = sinit->sinit_max_attempts; 1848 } 1849 if (sinit->sinit_max_init_timeo) { 1850 asoc->max_init_timeo = 1851 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1852 } 1853 } 1854 1855 /* Prime the peer's transport structures. */ 1856 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1857 if (!transport) { 1858 err = -ENOMEM; 1859 goto out_free; 1860 } 1861 } 1862 1863 /* ASSERT: we have a valid association at this point. */ 1864 pr_debug("%s: we have a valid association\n", __func__); 1865 1866 if (!sinfo) { 1867 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up 1868 * one with some defaults. 1869 */ 1870 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1871 default_sinfo.sinfo_stream = asoc->default_stream; 1872 default_sinfo.sinfo_flags = asoc->default_flags; 1873 default_sinfo.sinfo_ppid = asoc->default_ppid; 1874 default_sinfo.sinfo_context = asoc->default_context; 1875 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1876 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1877 1878 sinfo = &default_sinfo; 1879 } else if (fill_sinfo_ttl) { 1880 /* In case SNDINFO was specified, we still need to fill 1881 * it with a default ttl from the assoc here. 1882 */ 1883 sinfo->sinfo_timetolive = asoc->default_timetolive; 1884 } 1885 1886 /* API 7.1.7, the sndbuf size per association bounds the 1887 * maximum size of data that can be sent in a single send call. 1888 */ 1889 if (msg_len > sk->sk_sndbuf) { 1890 err = -EMSGSIZE; 1891 goto out_free; 1892 } 1893 1894 if (asoc->pmtu_pending) 1895 sctp_assoc_pending_pmtu(sk, asoc); 1896 1897 /* If fragmentation is disabled and the message length exceeds the 1898 * association fragmentation point, return EMSGSIZE. The I-D 1899 * does not specify what this error is, but this looks like 1900 * a great fit. 1901 */ 1902 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1903 err = -EMSGSIZE; 1904 goto out_free; 1905 } 1906 1907 /* Check for invalid stream. */ 1908 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1909 err = -EINVAL; 1910 goto out_free; 1911 } 1912 1913 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1914 if (!sctp_wspace(asoc)) { 1915 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1916 if (err) 1917 goto out_free; 1918 } 1919 1920 /* If an address is passed with the sendto/sendmsg call, it is used 1921 * to override the primary destination address in the TCP model, or 1922 * when SCTP_ADDR_OVER flag is set in the UDP model. 
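 *
 * Illustrative sketch for the one-to-one case: an ordinary sendto()
 * naming one of the peer's other addresses steers just that message to
 * the matching transport (address and variable names are examples):
 *
 *      struct sockaddr_in alt = {
 *              .sin_family = AF_INET,
 *              .sin_port = htons(5000),
 *      };
 *
 *      inet_pton(AF_INET, "198.51.100.7", &alt.sin_addr);
 *      sendto(tcp_style_fd, buf, buflen, 0,
 *             (struct sockaddr *)&alt, sizeof(alt));
 *
 * If the given address is not one of the peer's known transports, the
 * lookup below fails and the send is rejected with EINVAL.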
1923 */ 1924 if ((sctp_style(sk, TCP) && msg_name) || 1925 (sinfo_flags & SCTP_ADDR_OVER)) { 1926 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1927 if (!chunk_tp) { 1928 err = -EINVAL; 1929 goto out_free; 1930 } 1931 } else 1932 chunk_tp = NULL; 1933 1934 /* Auto-connect, if we aren't connected already. */ 1935 if (sctp_state(asoc, CLOSED)) { 1936 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1937 if (err < 0) 1938 goto out_free; 1939 1940 wait_connect = true; 1941 pr_debug("%s: we associated primitively\n", __func__); 1942 } 1943 1944 /* Break the message into multiple chunks of maximum size. */ 1945 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); 1946 if (IS_ERR(datamsg)) { 1947 err = PTR_ERR(datamsg); 1948 goto out_free; 1949 } 1950 1951 /* Now send the (possibly) fragmented message. */ 1952 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1953 sctp_chunk_hold(chunk); 1954 1955 /* Do accounting for the write space. */ 1956 sctp_set_owner_w(chunk); 1957 1958 chunk->transport = chunk_tp; 1959 } 1960 1961 /* Send it to the lower layers. Note: all chunks 1962 * must either fail or succeed. The lower layer 1963 * works that way today. Keep it that way or this 1964 * breaks. 1965 */ 1966 err = sctp_primitive_SEND(net, asoc, datamsg); 1967 /* Did the lower layer accept the chunk? */ 1968 if (err) { 1969 sctp_datamsg_free(datamsg); 1970 goto out_free; 1971 } 1972 1973 pr_debug("%s: we sent primitively\n", __func__); 1974 1975 sctp_datamsg_put(datamsg); 1976 err = msg_len; 1977 1978 if (unlikely(wait_connect)) { 1979 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); 1980 sctp_wait_for_connect(asoc, &timeo); 1981 } 1982 1983 /* If we are already past ASSOCIATE, the lower 1984 * layers are responsible for association cleanup. 1985 */ 1986 goto out_unlock; 1987 1988 out_free: 1989 if (new_asoc) { 1990 sctp_unhash_established(asoc); 1991 sctp_association_free(asoc); 1992 } 1993 out_unlock: 1994 release_sock(sk); 1995 1996 out_nounlock: 1997 return sctp_error(sk, msg_flags, err); 1998 1999 #if 0 2000 do_sock_err: 2001 if (msg_len) 2002 err = msg_len; 2003 else 2004 err = sock_error(sk); 2005 goto out; 2006 2007 do_interrupted: 2008 if (msg_len) 2009 err = msg_len; 2010 goto out; 2011 #endif /* 0 */ 2012 } 2013 2014 /* This is an extended version of skb_pull() that removes the data from the 2015 * start of a skb even when data is spread across the list of skb's in the 2016 * frag_list. len specifies the total amount of data that needs to be removed. 2017 * when 'len' bytes could be removed from the skb, it returns 0. 2018 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2019 * could not be removed. 2020 */ 2021 static int sctp_skb_pull(struct sk_buff *skb, int len) 2022 { 2023 struct sk_buff *list; 2024 int skb_len = skb_headlen(skb); 2025 int rlen; 2026 2027 if (len <= skb_len) { 2028 __skb_pull(skb, len); 2029 return 0; 2030 } 2031 len -= skb_len; 2032 __skb_pull(skb, skb_len); 2033 2034 skb_walk_frags(skb, list) { 2035 rlen = sctp_skb_pull(list, len); 2036 skb->len -= (len-rlen); 2037 skb->data_len -= (len-rlen); 2038 2039 if (!rlen) 2040 return 0; 2041 2042 len = rlen; 2043 } 2044 2045 return len; 2046 } 2047 2048 /* API 3.1.3 recvmsg() - UDP Style Syntax 2049 * 2050 * ssize_t recvmsg(int socket, struct msghdr *message, 2051 * int flags); 2052 * 2053 * socket - the socket descriptor of the endpoint. 2054 * message - pointer to the msghdr structure which contains a single 2055 * user message and possibly some ancillary data. 
2056 * 2057 * See Section 5 for complete description of the data 2058 * structures. 2059 * 2060 * flags - flags sent or received with the user message, see Section 2061 * 5 for complete description of the flags. 2062 */ 2063 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2064 int noblock, int flags, int *addr_len) 2065 { 2066 struct sctp_ulpevent *event = NULL; 2067 struct sctp_sock *sp = sctp_sk(sk); 2068 struct sk_buff *skb; 2069 int copied; 2070 int err = 0; 2071 int skb_len; 2072 2073 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2074 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2075 addr_len); 2076 2077 lock_sock(sk); 2078 2079 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2080 err = -ENOTCONN; 2081 goto out; 2082 } 2083 2084 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2085 if (!skb) 2086 goto out; 2087 2088 /* Get the total length of the skb including any skb's in the 2089 * frag_list. 2090 */ 2091 skb_len = skb->len; 2092 2093 copied = skb_len; 2094 if (copied > len) 2095 copied = len; 2096 2097 err = skb_copy_datagram_msg(skb, 0, msg, copied); 2098 2099 event = sctp_skb2event(skb); 2100 2101 if (err) 2102 goto out_free; 2103 2104 sock_recv_ts_and_drops(msg, sk, skb); 2105 if (sctp_ulpevent_is_notification(event)) { 2106 msg->msg_flags |= MSG_NOTIFICATION; 2107 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2108 } else { 2109 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2110 } 2111 2112 /* Check if we allow SCTP_NXTINFO. */ 2113 if (sp->recvnxtinfo) 2114 sctp_ulpevent_read_nxtinfo(event, msg, sk); 2115 /* Check if we allow SCTP_RCVINFO. */ 2116 if (sp->recvrcvinfo) 2117 sctp_ulpevent_read_rcvinfo(event, msg); 2118 /* Check if we allow SCTP_SNDRCVINFO. */ 2119 if (sp->subscribe.sctp_data_io_event) 2120 sctp_ulpevent_read_sndrcvinfo(event, msg); 2121 2122 #if 0 2123 /* FIXME: we should be calling IP/IPv6 layers. */ 2124 if (sk->sk_protinfo.af_inet.cmsg_flags) 2125 ip_cmsg_recv(msg, skb); 2126 #endif 2127 2128 err = copied; 2129 2130 /* If skb's length exceeds the user's buffer, update the skb and 2131 * push it back to the receive_queue so that the next call to 2132 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2133 */ 2134 if (skb_len > copied) { 2135 msg->msg_flags &= ~MSG_EOR; 2136 if (flags & MSG_PEEK) 2137 goto out_free; 2138 sctp_skb_pull(skb, copied); 2139 skb_queue_head(&sk->sk_receive_queue, skb); 2140 2141 /* When only partial message is copied to the user, increase 2142 * rwnd by that amount. If all the data in the skb is read, 2143 * rwnd is updated when the event is freed. 2144 */ 2145 if (!sctp_ulpevent_is_notification(event)) 2146 sctp_assoc_rwnd_increase(event->asoc, copied); 2147 goto out; 2148 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2149 (event->msg_flags & MSG_EOR)) 2150 msg->msg_flags |= MSG_EOR; 2151 else 2152 msg->msg_flags &= ~MSG_EOR; 2153 2154 out_free: 2155 if (flags & MSG_PEEK) { 2156 /* Release the skb reference acquired after peeking the skb in 2157 * sctp_skb_recv_datagram(). 2158 */ 2159 kfree_skb(skb); 2160 } else { 2161 /* Free the event which includes releasing the reference to 2162 * the owner of the skb, freeing the skb and updating the 2163 * rwnd. 2164 */ 2165 sctp_ulpevent_free(event); 2166 } 2167 out: 2168 release_sock(sk); 2169 return err; 2170 } 2171 2172 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2173 * 2174 * This option is a on/off flag. 
If enabled no SCTP message 2175 * fragmentation will be performed. Instead if a message being sent 2176 * exceeds the current PMTU size, the message will NOT be sent and 2177 * instead a error will be indicated to the user. 2178 */ 2179 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2180 char __user *optval, 2181 unsigned int optlen) 2182 { 2183 int val; 2184 2185 if (optlen < sizeof(int)) 2186 return -EINVAL; 2187 2188 if (get_user(val, (int __user *)optval)) 2189 return -EFAULT; 2190 2191 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2192 2193 return 0; 2194 } 2195 2196 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2197 unsigned int optlen) 2198 { 2199 struct sctp_association *asoc; 2200 struct sctp_ulpevent *event; 2201 2202 if (optlen > sizeof(struct sctp_event_subscribe)) 2203 return -EINVAL; 2204 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2205 return -EFAULT; 2206 2207 if (sctp_sk(sk)->subscribe.sctp_data_io_event) 2208 pr_warn_ratelimited(DEPRECATED "%s (pid %d) " 2209 "Requested SCTP_SNDRCVINFO event.\n" 2210 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n", 2211 current->comm, task_pid_nr(current)); 2212 2213 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2214 * if there is no data to be sent or retransmit, the stack will 2215 * immediately send up this notification. 2216 */ 2217 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2218 &sctp_sk(sk)->subscribe)) { 2219 asoc = sctp_id2assoc(sk, 0); 2220 2221 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2222 event = sctp_ulpevent_make_sender_dry_event(asoc, 2223 GFP_ATOMIC); 2224 if (!event) 2225 return -ENOMEM; 2226 2227 sctp_ulpq_tail_event(&asoc->ulpq, event); 2228 } 2229 } 2230 2231 return 0; 2232 } 2233 2234 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2235 * 2236 * This socket option is applicable to the UDP-style socket only. When 2237 * set it will cause associations that are idle for more than the 2238 * specified number of seconds to automatically close. An association 2239 * being idle is defined an association that has NOT sent or received 2240 * user data. The special value of '0' indicates that no automatic 2241 * close of any associations should be performed. The option expects an 2242 * integer defining the number of seconds of idle time before an 2243 * association is closed. 2244 */ 2245 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2246 unsigned int optlen) 2247 { 2248 struct sctp_sock *sp = sctp_sk(sk); 2249 struct net *net = sock_net(sk); 2250 2251 /* Applicable to UDP-style socket only */ 2252 if (sctp_style(sk, TCP)) 2253 return -EOPNOTSUPP; 2254 if (optlen != sizeof(int)) 2255 return -EINVAL; 2256 if (copy_from_user(&sp->autoclose, optval, optlen)) 2257 return -EFAULT; 2258 2259 if (sp->autoclose > net->sctp.max_autoclose) 2260 sp->autoclose = net->sctp.max_autoclose; 2261 2262 return 0; 2263 } 2264 2265 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2266 * 2267 * Applications can enable or disable heartbeats for any peer address of 2268 * an association, modify an address's heartbeat interval, force a 2269 * heartbeat to be sent immediately, and adjust the address's maximum 2270 * number of retransmissions sent before an address is considered 2271 * unreachable. 
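 *
 * A hedged usage sketch, using the sctp_paddrparams structure shown
 * just below (hypothetical names; this enables heartbeats roughly every
 * 5 seconds on one particular peer address):
 *
 *      struct sctp_paddrparams pp;
 *
 *      memset(&pp, 0, sizeof(pp));
 *      pp.spp_assoc_id = assoc_id;
 *      memcpy(&pp.spp_address, &peer_addr, sizeof(peer_addr));
 *      pp.spp_hbinterval = 5000;
 *      pp.spp_flags = SPP_HB_ENABLE;
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *                 &pp, sizeof(pp));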
The following structure is used to access and modify an 2272 * address's parameters: 2273 * 2274 * struct sctp_paddrparams { 2275 * sctp_assoc_t spp_assoc_id; 2276 * struct sockaddr_storage spp_address; 2277 * uint32_t spp_hbinterval; 2278 * uint16_t spp_pathmaxrxt; 2279 * uint32_t spp_pathmtu; 2280 * uint32_t spp_sackdelay; 2281 * uint32_t spp_flags; 2282 * }; 2283 * 2284 * spp_assoc_id - (one-to-many style socket) This is filled in the 2285 * application, and identifies the association for 2286 * this query. 2287 * spp_address - This specifies which address is of interest. 2288 * spp_hbinterval - This contains the value of the heartbeat interval, 2289 * in milliseconds. If a value of zero 2290 * is present in this field then no changes are to 2291 * be made to this parameter. 2292 * spp_pathmaxrxt - This contains the maximum number of 2293 * retransmissions before this address shall be 2294 * considered unreachable. If a value of zero 2295 * is present in this field then no changes are to 2296 * be made to this parameter. 2297 * spp_pathmtu - When Path MTU discovery is disabled the value 2298 * specified here will be the "fixed" path mtu. 2299 * Note that if the spp_address field is empty 2300 * then all associations on this address will 2301 * have this fixed path mtu set upon them. 2302 * 2303 * spp_sackdelay - When delayed sack is enabled, this value specifies 2304 * the number of milliseconds that sacks will be delayed 2305 * for. This value will apply to all addresses of an 2306 * association if the spp_address field is empty. Note 2307 * also, that if delayed sack is enabled and this 2308 * value is set to 0, no change is made to the last 2309 * recorded delayed sack timer value. 2310 * 2311 * spp_flags - These flags are used to control various features 2312 * on an association. The flag field may contain 2313 * zero or more of the following options. 2314 * 2315 * SPP_HB_ENABLE - Enable heartbeats on the 2316 * specified address. Note that if the address 2317 * field is empty all addresses for the association 2318 * have heartbeats enabled upon them. 2319 * 2320 * SPP_HB_DISABLE - Disable heartbeats on the 2321 * speicifed address. Note that if the address 2322 * field is empty all addresses for the association 2323 * will have their heartbeats disabled. Note also 2324 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2325 * mutually exclusive, only one of these two should 2326 * be specified. Enabling both fields will have 2327 * undetermined results. 2328 * 2329 * SPP_HB_DEMAND - Request a user initiated heartbeat 2330 * to be made immediately. 2331 * 2332 * SPP_HB_TIME_IS_ZERO - Specify's that the time for 2333 * heartbeat delayis to be set to the value of 0 2334 * milliseconds. 2335 * 2336 * SPP_PMTUD_ENABLE - This field will enable PMTU 2337 * discovery upon the specified address. Note that 2338 * if the address feild is empty then all addresses 2339 * on the association are effected. 2340 * 2341 * SPP_PMTUD_DISABLE - This field will disable PMTU 2342 * discovery upon the specified address. Note that 2343 * if the address feild is empty then all addresses 2344 * on the association are effected. Not also that 2345 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2346 * exclusive. Enabling both will have undetermined 2347 * results. 2348 * 2349 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2350 * on delayed sack. The time specified in spp_sackdelay 2351 * is used to specify the sack delay for this address. 
Note 2352 * that if spp_address is empty then all addresses will 2353 * enable delayed sack and take on the sack delay 2354 * value specified in spp_sackdelay. 2355 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2356 * off delayed sack. If the spp_address field is blank then 2357 * delayed sack is disabled for the entire association. Note 2358 * also that this field is mutually exclusive to 2359 * SPP_SACKDELAY_ENABLE, setting both will have undefined 2360 * results. 2361 */ 2362 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2363 struct sctp_transport *trans, 2364 struct sctp_association *asoc, 2365 struct sctp_sock *sp, 2366 int hb_change, 2367 int pmtud_change, 2368 int sackdelay_change) 2369 { 2370 int error; 2371 2372 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2373 struct net *net = sock_net(trans->asoc->base.sk); 2374 2375 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2376 if (error) 2377 return error; 2378 } 2379 2380 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2381 * this field is ignored. Note also that a value of zero indicates 2382 * the current setting should be left unchanged. 2383 */ 2384 if (params->spp_flags & SPP_HB_ENABLE) { 2385 2386 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2387 * set. This lets us use 0 value when this flag 2388 * is set. 2389 */ 2390 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2391 params->spp_hbinterval = 0; 2392 2393 if (params->spp_hbinterval || 2394 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2395 if (trans) { 2396 trans->hbinterval = 2397 msecs_to_jiffies(params->spp_hbinterval); 2398 } else if (asoc) { 2399 asoc->hbinterval = 2400 msecs_to_jiffies(params->spp_hbinterval); 2401 } else { 2402 sp->hbinterval = params->spp_hbinterval; 2403 } 2404 } 2405 } 2406 2407 if (hb_change) { 2408 if (trans) { 2409 trans->param_flags = 2410 (trans->param_flags & ~SPP_HB) | hb_change; 2411 } else if (asoc) { 2412 asoc->param_flags = 2413 (asoc->param_flags & ~SPP_HB) | hb_change; 2414 } else { 2415 sp->param_flags = 2416 (sp->param_flags & ~SPP_HB) | hb_change; 2417 } 2418 } 2419 2420 /* When Path MTU discovery is disabled the value specified here will 2421 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2422 * include the flag SPP_PMTUD_DISABLE for this field to have any 2423 * effect). 2424 */ 2425 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2426 if (trans) { 2427 trans->pathmtu = params->spp_pathmtu; 2428 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2429 } else if (asoc) { 2430 asoc->pathmtu = params->spp_pathmtu; 2431 sctp_frag_point(asoc, params->spp_pathmtu); 2432 } else { 2433 sp->pathmtu = params->spp_pathmtu; 2434 } 2435 } 2436 2437 if (pmtud_change) { 2438 if (trans) { 2439 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2440 (params->spp_flags & SPP_PMTUD_ENABLE); 2441 trans->param_flags = 2442 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2443 if (update) { 2444 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2445 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2446 } 2447 } else if (asoc) { 2448 asoc->param_flags = 2449 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2450 } else { 2451 sp->param_flags = 2452 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2453 } 2454 } 2455 2456 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2457 * value of this field is ignored. Note also that a value of zero 2458 * indicates the current setting should be left unchanged. 
2459 */ 2460 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2461 if (trans) { 2462 trans->sackdelay = 2463 msecs_to_jiffies(params->spp_sackdelay); 2464 } else if (asoc) { 2465 asoc->sackdelay = 2466 msecs_to_jiffies(params->spp_sackdelay); 2467 } else { 2468 sp->sackdelay = params->spp_sackdelay; 2469 } 2470 } 2471 2472 if (sackdelay_change) { 2473 if (trans) { 2474 trans->param_flags = 2475 (trans->param_flags & ~SPP_SACKDELAY) | 2476 sackdelay_change; 2477 } else if (asoc) { 2478 asoc->param_flags = 2479 (asoc->param_flags & ~SPP_SACKDELAY) | 2480 sackdelay_change; 2481 } else { 2482 sp->param_flags = 2483 (sp->param_flags & ~SPP_SACKDELAY) | 2484 sackdelay_change; 2485 } 2486 } 2487 2488 /* Note that a value of zero indicates the current setting should be 2489 left unchanged. 2490 */ 2491 if (params->spp_pathmaxrxt) { 2492 if (trans) { 2493 trans->pathmaxrxt = params->spp_pathmaxrxt; 2494 } else if (asoc) { 2495 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2496 } else { 2497 sp->pathmaxrxt = params->spp_pathmaxrxt; 2498 } 2499 } 2500 2501 return 0; 2502 } 2503 2504 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2505 char __user *optval, 2506 unsigned int optlen) 2507 { 2508 struct sctp_paddrparams params; 2509 struct sctp_transport *trans = NULL; 2510 struct sctp_association *asoc = NULL; 2511 struct sctp_sock *sp = sctp_sk(sk); 2512 int error; 2513 int hb_change, pmtud_change, sackdelay_change; 2514 2515 if (optlen != sizeof(struct sctp_paddrparams)) 2516 return -EINVAL; 2517 2518 if (copy_from_user(¶ms, optval, optlen)) 2519 return -EFAULT; 2520 2521 /* Validate flags and value parameters. */ 2522 hb_change = params.spp_flags & SPP_HB; 2523 pmtud_change = params.spp_flags & SPP_PMTUD; 2524 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2525 2526 if (hb_change == SPP_HB || 2527 pmtud_change == SPP_PMTUD || 2528 sackdelay_change == SPP_SACKDELAY || 2529 params.spp_sackdelay > 500 || 2530 (params.spp_pathmtu && 2531 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2532 return -EINVAL; 2533 2534 /* If an address other than INADDR_ANY is specified, and 2535 * no transport is found, then the request is invalid. 2536 */ 2537 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { 2538 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 2539 params.spp_assoc_id); 2540 if (!trans) 2541 return -EINVAL; 2542 } 2543 2544 /* Get association, if assoc_id != 0 and the socket is a one 2545 * to many style socket, and an association was not found, then 2546 * the id was invalid. 2547 */ 2548 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2549 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2550 return -EINVAL; 2551 2552 /* Heartbeat demand can only be sent on a transport or 2553 * association, but not a socket. 2554 */ 2555 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2556 return -EINVAL; 2557 2558 /* Process parameters. */ 2559 error = sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2560 hb_change, pmtud_change, 2561 sackdelay_change); 2562 2563 if (error) 2564 return error; 2565 2566 /* If changes are for association, also apply parameters to each 2567 * transport. 
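 *
 * Informative: user space selects this association-wide behaviour by
 * passing the wildcard address, e.g. a zeroed sockaddr_in with only
 * sin_family set, so that no single transport is matched above
 * (hypothetical sketch):
 *
 *      struct sctp_paddrparams pp;
 *      struct sockaddr_in *any = (struct sockaddr_in *)&pp.spp_address;
 *
 *      memset(&pp, 0, sizeof(pp));
 *      any->sin_family = AF_INET;
 *      pp.spp_assoc_id = assoc_id;
 *      pp.spp_pathmaxrxt = 3;
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *                 &pp, sizeof(pp));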
2568 */ 2569 if (!trans && asoc) { 2570 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2571 transports) { 2572 sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2573 hb_change, pmtud_change, 2574 sackdelay_change); 2575 } 2576 } 2577 2578 return 0; 2579 } 2580 2581 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) 2582 { 2583 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; 2584 } 2585 2586 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) 2587 { 2588 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; 2589 } 2590 2591 /* 2592 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2593 * 2594 * This option will effect the way delayed acks are performed. This 2595 * option allows you to get or set the delayed ack time, in 2596 * milliseconds. It also allows changing the delayed ack frequency. 2597 * Changing the frequency to 1 disables the delayed sack algorithm. If 2598 * the assoc_id is 0, then this sets or gets the endpoints default 2599 * values. If the assoc_id field is non-zero, then the set or get 2600 * effects the specified association for the one to many model (the 2601 * assoc_id field is ignored by the one to one model). Note that if 2602 * sack_delay or sack_freq are 0 when setting this option, then the 2603 * current values will remain unchanged. 2604 * 2605 * struct sctp_sack_info { 2606 * sctp_assoc_t sack_assoc_id; 2607 * uint32_t sack_delay; 2608 * uint32_t sack_freq; 2609 * }; 2610 * 2611 * sack_assoc_id - This parameter, indicates which association the user 2612 * is performing an action upon. Note that if this field's value is 2613 * zero then the endpoints default value is changed (effecting future 2614 * associations only). 2615 * 2616 * sack_delay - This parameter contains the number of milliseconds that 2617 * the user is requesting the delayed ACK timer be set to. Note that 2618 * this value is defined in the standard to be between 200 and 500 2619 * milliseconds. 2620 * 2621 * sack_freq - This parameter contains the number of packets that must 2622 * be received before a sack is sent without waiting for the delay 2623 * timer to expire. The default value for this is 2, setting this 2624 * value to 1 will disable the delayed sack algorithm. 2625 */ 2626 2627 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2628 char __user *optval, unsigned int optlen) 2629 { 2630 struct sctp_sack_info params; 2631 struct sctp_transport *trans = NULL; 2632 struct sctp_association *asoc = NULL; 2633 struct sctp_sock *sp = sctp_sk(sk); 2634 2635 if (optlen == sizeof(struct sctp_sack_info)) { 2636 if (copy_from_user(¶ms, optval, optlen)) 2637 return -EFAULT; 2638 2639 if (params.sack_delay == 0 && params.sack_freq == 0) 2640 return 0; 2641 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2642 pr_warn_ratelimited(DEPRECATED 2643 "%s (pid %d) " 2644 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2645 "Use struct sctp_sack_info instead\n", 2646 current->comm, task_pid_nr(current)); 2647 if (copy_from_user(¶ms, optval, optlen)) 2648 return -EFAULT; 2649 2650 if (params.sack_delay == 0) 2651 params.sack_freq = 1; 2652 else 2653 params.sack_freq = 0; 2654 } else 2655 return -EINVAL; 2656 2657 /* Validate value parameter. */ 2658 if (params.sack_delay > 500) 2659 return -EINVAL; 2660 2661 /* Get association, if sack_assoc_id != 0 and the socket is a one 2662 * to many style socket, and an association was not found, then 2663 * the id was invalid. 
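 *
 * Side note, informative only: a typical caller fills struct
 * sctp_sack_info like this (names are hypothetical; 200 ms is just an
 * example inside the 0..500 ms range validated above):
 *
 *      struct sctp_sack_info si;
 *
 *      memset(&si, 0, sizeof(si));
 *      si.sack_assoc_id = assoc_id;
 *      si.sack_delay = 200;
 *      si.sack_freq = 2;
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));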
2664 */ 2665 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2666 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2667 return -EINVAL; 2668 2669 if (params.sack_delay) { 2670 if (asoc) { 2671 asoc->sackdelay = 2672 msecs_to_jiffies(params.sack_delay); 2673 asoc->param_flags = 2674 sctp_spp_sackdelay_enable(asoc->param_flags); 2675 } else { 2676 sp->sackdelay = params.sack_delay; 2677 sp->param_flags = 2678 sctp_spp_sackdelay_enable(sp->param_flags); 2679 } 2680 } 2681 2682 if (params.sack_freq == 1) { 2683 if (asoc) { 2684 asoc->param_flags = 2685 sctp_spp_sackdelay_disable(asoc->param_flags); 2686 } else { 2687 sp->param_flags = 2688 sctp_spp_sackdelay_disable(sp->param_flags); 2689 } 2690 } else if (params.sack_freq > 1) { 2691 if (asoc) { 2692 asoc->sackfreq = params.sack_freq; 2693 asoc->param_flags = 2694 sctp_spp_sackdelay_enable(asoc->param_flags); 2695 } else { 2696 sp->sackfreq = params.sack_freq; 2697 sp->param_flags = 2698 sctp_spp_sackdelay_enable(sp->param_flags); 2699 } 2700 } 2701 2702 /* If change is for association, also apply to each transport. */ 2703 if (asoc) { 2704 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2705 transports) { 2706 if (params.sack_delay) { 2707 trans->sackdelay = 2708 msecs_to_jiffies(params.sack_delay); 2709 trans->param_flags = 2710 sctp_spp_sackdelay_enable(trans->param_flags); 2711 } 2712 if (params.sack_freq == 1) { 2713 trans->param_flags = 2714 sctp_spp_sackdelay_disable(trans->param_flags); 2715 } else if (params.sack_freq > 1) { 2716 trans->sackfreq = params.sack_freq; 2717 trans->param_flags = 2718 sctp_spp_sackdelay_enable(trans->param_flags); 2719 } 2720 } 2721 } 2722 2723 return 0; 2724 } 2725 2726 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2727 * 2728 * Applications can specify protocol parameters for the default association 2729 * initialization. The option name argument to setsockopt() and getsockopt() 2730 * is SCTP_INITMSG. 2731 * 2732 * Setting initialization parameters is effective only on an unconnected 2733 * socket (for UDP-style sockets only future associations are effected 2734 * by the change). With TCP-style sockets, this option is inherited by 2735 * sockets derived from a listener socket. 2736 */ 2737 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2738 { 2739 struct sctp_initmsg sinit; 2740 struct sctp_sock *sp = sctp_sk(sk); 2741 2742 if (optlen != sizeof(struct sctp_initmsg)) 2743 return -EINVAL; 2744 if (copy_from_user(&sinit, optval, optlen)) 2745 return -EFAULT; 2746 2747 if (sinit.sinit_num_ostreams) 2748 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2749 if (sinit.sinit_max_instreams) 2750 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2751 if (sinit.sinit_max_attempts) 2752 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2753 if (sinit.sinit_max_init_timeo) 2754 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2755 2756 return 0; 2757 } 2758 2759 /* 2760 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2761 * 2762 * Applications that wish to use the sendto() system call may wish to 2763 * specify a default set of parameters that would normally be supplied 2764 * through the inclusion of ancillary data. This socket option allows 2765 * such an application to set the default sctp_sndrcvinfo structure. 
2766 * The application that wishes to use this socket option simply passes 2767 * in to this call the sctp_sndrcvinfo structure defined in Section 2768 * 5.2.2) The input parameters accepted by this call include 2769 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2770 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2771 * to this call if the caller is using the UDP model. 2772 */ 2773 static int sctp_setsockopt_default_send_param(struct sock *sk, 2774 char __user *optval, 2775 unsigned int optlen) 2776 { 2777 struct sctp_sock *sp = sctp_sk(sk); 2778 struct sctp_association *asoc; 2779 struct sctp_sndrcvinfo info; 2780 2781 if (optlen != sizeof(info)) 2782 return -EINVAL; 2783 if (copy_from_user(&info, optval, optlen)) 2784 return -EFAULT; 2785 if (info.sinfo_flags & 2786 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2787 SCTP_ABORT | SCTP_EOF)) 2788 return -EINVAL; 2789 2790 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2791 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2792 return -EINVAL; 2793 if (asoc) { 2794 asoc->default_stream = info.sinfo_stream; 2795 asoc->default_flags = info.sinfo_flags; 2796 asoc->default_ppid = info.sinfo_ppid; 2797 asoc->default_context = info.sinfo_context; 2798 asoc->default_timetolive = info.sinfo_timetolive; 2799 } else { 2800 sp->default_stream = info.sinfo_stream; 2801 sp->default_flags = info.sinfo_flags; 2802 sp->default_ppid = info.sinfo_ppid; 2803 sp->default_context = info.sinfo_context; 2804 sp->default_timetolive = info.sinfo_timetolive; 2805 } 2806 2807 return 0; 2808 } 2809 2810 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters 2811 * (SCTP_DEFAULT_SNDINFO) 2812 */ 2813 static int sctp_setsockopt_default_sndinfo(struct sock *sk, 2814 char __user *optval, 2815 unsigned int optlen) 2816 { 2817 struct sctp_sock *sp = sctp_sk(sk); 2818 struct sctp_association *asoc; 2819 struct sctp_sndinfo info; 2820 2821 if (optlen != sizeof(info)) 2822 return -EINVAL; 2823 if (copy_from_user(&info, optval, optlen)) 2824 return -EFAULT; 2825 if (info.snd_flags & 2826 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2827 SCTP_ABORT | SCTP_EOF)) 2828 return -EINVAL; 2829 2830 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 2831 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 2832 return -EINVAL; 2833 if (asoc) { 2834 asoc->default_stream = info.snd_sid; 2835 asoc->default_flags = info.snd_flags; 2836 asoc->default_ppid = info.snd_ppid; 2837 asoc->default_context = info.snd_context; 2838 } else { 2839 sp->default_stream = info.snd_sid; 2840 sp->default_flags = info.snd_flags; 2841 sp->default_ppid = info.snd_ppid; 2842 sp->default_context = info.snd_context; 2843 } 2844 2845 return 0; 2846 } 2847 2848 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2849 * 2850 * Requests that the local SCTP stack use the enclosed peer address as 2851 * the association primary. The enclosed address must be one of the 2852 * association peer's addresses. 
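 *
 * A short usage sketch (hypothetical names); ssp_addr must hold one of
 * the peer's addresses, otherwise the transport lookup below fails:
 *
 *      struct sctp_prim prim;
 *
 *      memset(&prim, 0, sizeof(prim));
 *      prim.ssp_assoc_id = assoc_id;
 *      memcpy(&prim.ssp_addr, &peer_addr, sizeof(peer_addr));
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
 *                 &prim, sizeof(prim));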
2853 */ 2854 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2855 unsigned int optlen) 2856 { 2857 struct sctp_prim prim; 2858 struct sctp_transport *trans; 2859 2860 if (optlen != sizeof(struct sctp_prim)) 2861 return -EINVAL; 2862 2863 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2864 return -EFAULT; 2865 2866 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2867 if (!trans) 2868 return -EINVAL; 2869 2870 sctp_assoc_set_primary(trans->asoc, trans); 2871 2872 return 0; 2873 } 2874 2875 /* 2876 * 7.1.5 SCTP_NODELAY 2877 * 2878 * Turn on/off any Nagle-like algorithm. This means that packets are 2879 * generally sent as soon as possible and no unnecessary delays are 2880 * introduced, at the cost of more packets in the network. Expects an 2881 * integer boolean flag. 2882 */ 2883 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2884 unsigned int optlen) 2885 { 2886 int val; 2887 2888 if (optlen < sizeof(int)) 2889 return -EINVAL; 2890 if (get_user(val, (int __user *)optval)) 2891 return -EFAULT; 2892 2893 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2894 return 0; 2895 } 2896 2897 /* 2898 * 2899 * 7.1.1 SCTP_RTOINFO 2900 * 2901 * The protocol parameters used to initialize and bound retransmission 2902 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2903 * and modify these parameters. 2904 * All parameters are time values, in milliseconds. A value of 0, when 2905 * modifying the parameters, indicates that the current value should not 2906 * be changed. 2907 * 2908 */ 2909 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2910 { 2911 struct sctp_rtoinfo rtoinfo; 2912 struct sctp_association *asoc; 2913 unsigned long rto_min, rto_max; 2914 struct sctp_sock *sp = sctp_sk(sk); 2915 2916 if (optlen != sizeof (struct sctp_rtoinfo)) 2917 return -EINVAL; 2918 2919 if (copy_from_user(&rtoinfo, optval, optlen)) 2920 return -EFAULT; 2921 2922 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2923 2924 /* Set the values to the specific association */ 2925 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2926 return -EINVAL; 2927 2928 rto_max = rtoinfo.srto_max; 2929 rto_min = rtoinfo.srto_min; 2930 2931 if (rto_max) 2932 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2933 else 2934 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2935 2936 if (rto_min) 2937 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2938 else 2939 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2940 2941 if (rto_min > rto_max) 2942 return -EINVAL; 2943 2944 if (asoc) { 2945 if (rtoinfo.srto_initial != 0) 2946 asoc->rto_initial = 2947 msecs_to_jiffies(rtoinfo.srto_initial); 2948 asoc->rto_max = rto_max; 2949 asoc->rto_min = rto_min; 2950 } else { 2951 /* If there is no association or the association-id = 0 2952 * set the values to the endpoint. 2953 */ 2954 if (rtoinfo.srto_initial != 0) 2955 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2956 sp->rtoinfo.srto_max = rto_max; 2957 sp->rtoinfo.srto_min = rto_min; 2958 } 2959 2960 return 0; 2961 } 2962 2963 /* 2964 * 2965 * 7.1.2 SCTP_ASSOCINFO 2966 * 2967 * This option is used to tune the maximum retransmission attempts 2968 * of the association. 2969 * Returns an error if the new association retransmission value is 2970 * greater than the sum of the retransmission value of the peer. 2971 * See [SCTP] for more information. 
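 *
 * Usage sketch, informative only (field values are arbitrary examples;
 * a zero in any field leaves that parameter unchanged, as the setter
 * below implements):
 *
 *      struct sctp_assocparams ap;
 *
 *      memset(&ap, 0, sizeof(ap));
 *      ap.sasoc_assoc_id = assoc_id;
 *      ap.sasoc_asocmaxrxt = 8;
 *      ap.sasoc_cookie_life = 60000;
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));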
2972 * 2973 */ 2974 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2975 { 2976 2977 struct sctp_assocparams assocparams; 2978 struct sctp_association *asoc; 2979 2980 if (optlen != sizeof(struct sctp_assocparams)) 2981 return -EINVAL; 2982 if (copy_from_user(&assocparams, optval, optlen)) 2983 return -EFAULT; 2984 2985 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2986 2987 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2988 return -EINVAL; 2989 2990 /* Set the values to the specific association */ 2991 if (asoc) { 2992 if (assocparams.sasoc_asocmaxrxt != 0) { 2993 __u32 path_sum = 0; 2994 int paths = 0; 2995 struct sctp_transport *peer_addr; 2996 2997 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 2998 transports) { 2999 path_sum += peer_addr->pathmaxrxt; 3000 paths++; 3001 } 3002 3003 /* Only validate asocmaxrxt if we have more than 3004 * one path/transport. We do this because path 3005 * retransmissions are only counted when we have more 3006 * then one path. 3007 */ 3008 if (paths > 1 && 3009 assocparams.sasoc_asocmaxrxt > path_sum) 3010 return -EINVAL; 3011 3012 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 3013 } 3014 3015 if (assocparams.sasoc_cookie_life != 0) 3016 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 3017 } else { 3018 /* Set the values to the endpoint */ 3019 struct sctp_sock *sp = sctp_sk(sk); 3020 3021 if (assocparams.sasoc_asocmaxrxt != 0) 3022 sp->assocparams.sasoc_asocmaxrxt = 3023 assocparams.sasoc_asocmaxrxt; 3024 if (assocparams.sasoc_cookie_life != 0) 3025 sp->assocparams.sasoc_cookie_life = 3026 assocparams.sasoc_cookie_life; 3027 } 3028 return 0; 3029 } 3030 3031 /* 3032 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 3033 * 3034 * This socket option is a boolean flag which turns on or off mapped V4 3035 * addresses. If this option is turned on and the socket is type 3036 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 3037 * If this option is turned off, then no mapping will be done of V4 3038 * addresses and a user will receive both PF_INET6 and PF_INET type 3039 * addresses on the socket. 3040 */ 3041 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 3042 { 3043 int val; 3044 struct sctp_sock *sp = sctp_sk(sk); 3045 3046 if (optlen < sizeof(int)) 3047 return -EINVAL; 3048 if (get_user(val, (int __user *)optval)) 3049 return -EFAULT; 3050 if (val) 3051 sp->v4mapped = 1; 3052 else 3053 sp->v4mapped = 0; 3054 3055 return 0; 3056 } 3057 3058 /* 3059 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 3060 * This option will get or set the maximum size to put in any outgoing 3061 * SCTP DATA chunk. If a message is larger than this size it will be 3062 * fragmented by SCTP into the specified size. Note that the underlying 3063 * SCTP implementation may fragment into smaller sized chunks when the 3064 * PMTU of the underlying association is smaller than the value set by 3065 * the user. The default value for this option is '0' which indicates 3066 * the user is NOT limiting fragmentation and only the PMTU will effect 3067 * SCTP's choice of DATA chunk size. Note also that values set larger 3068 * than the maximum size of an IP datagram will effectively let SCTP 3069 * control fragmentation (i.e. the same as setting this option to 0). 
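 *
 * Illustrative call, using the sctp_assoc_value structure shown next
 * (the 1200 byte limit is an arbitrary example):
 *
 *      struct sctp_assoc_value av;
 *
 *      memset(&av, 0, sizeof(av));
 *      av.assoc_id = assoc_id;
 *      av.assoc_value = 1200;
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));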
3070 * 3071 * The following structure is used to access and modify this parameter: 3072 * 3073 * struct sctp_assoc_value { 3074 * sctp_assoc_t assoc_id; 3075 * uint32_t assoc_value; 3076 * }; 3077 * 3078 * assoc_id: This parameter is ignored for one-to-one style sockets. 3079 * For one-to-many style sockets this parameter indicates which 3080 * association the user is performing an action upon. Note that if 3081 * this field's value is zero then the endpoints default value is 3082 * changed (effecting future associations only). 3083 * assoc_value: This parameter specifies the maximum size in bytes. 3084 */ 3085 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3086 { 3087 struct sctp_assoc_value params; 3088 struct sctp_association *asoc; 3089 struct sctp_sock *sp = sctp_sk(sk); 3090 int val; 3091 3092 if (optlen == sizeof(int)) { 3093 pr_warn_ratelimited(DEPRECATED 3094 "%s (pid %d) " 3095 "Use of int in maxseg socket option.\n" 3096 "Use struct sctp_assoc_value instead\n", 3097 current->comm, task_pid_nr(current)); 3098 if (copy_from_user(&val, optval, optlen)) 3099 return -EFAULT; 3100 params.assoc_id = 0; 3101 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3102 if (copy_from_user(¶ms, optval, optlen)) 3103 return -EFAULT; 3104 val = params.assoc_value; 3105 } else 3106 return -EINVAL; 3107 3108 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3109 return -EINVAL; 3110 3111 asoc = sctp_id2assoc(sk, params.assoc_id); 3112 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3113 return -EINVAL; 3114 3115 if (asoc) { 3116 if (val == 0) { 3117 val = asoc->pathmtu; 3118 val -= sp->pf->af->net_header_len; 3119 val -= sizeof(struct sctphdr) + 3120 sizeof(struct sctp_data_chunk); 3121 } 3122 asoc->user_frag = val; 3123 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3124 } else { 3125 sp->user_frag = val; 3126 } 3127 3128 return 0; 3129 } 3130 3131 3132 /* 3133 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3134 * 3135 * Requests that the peer mark the enclosed address as the association 3136 * primary. The enclosed address must be one of the association's 3137 * locally bound addresses. 
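 *
 * Hedged example of such a request (hypothetical names); sspp_addr must
 * be one of this endpoint's own bound addresses and the peer must
 * support ASCONF, or the setter below rejects the call:
 *
 *      struct sctp_setpeerprim prim;
 *
 *      memset(&prim, 0, sizeof(prim));
 *      prim.sspp_assoc_id = assoc_id;
 *      memcpy(&prim.sspp_addr, &local_addr, sizeof(local_addr));
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_SET_PEER_PRIMARY_ADDR,
 *                 &prim, sizeof(prim));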
The following structure is used to make a 3138 * set primary request: 3139 */ 3140 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3141 unsigned int optlen) 3142 { 3143 struct net *net = sock_net(sk); 3144 struct sctp_sock *sp; 3145 struct sctp_association *asoc = NULL; 3146 struct sctp_setpeerprim prim; 3147 struct sctp_chunk *chunk; 3148 struct sctp_af *af; 3149 int err; 3150 3151 sp = sctp_sk(sk); 3152 3153 if (!net->sctp.addip_enable) 3154 return -EPERM; 3155 3156 if (optlen != sizeof(struct sctp_setpeerprim)) 3157 return -EINVAL; 3158 3159 if (copy_from_user(&prim, optval, optlen)) 3160 return -EFAULT; 3161 3162 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3163 if (!asoc) 3164 return -EINVAL; 3165 3166 if (!asoc->peer.asconf_capable) 3167 return -EPERM; 3168 3169 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3170 return -EPERM; 3171 3172 if (!sctp_state(asoc, ESTABLISHED)) 3173 return -ENOTCONN; 3174 3175 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3176 if (!af) 3177 return -EINVAL; 3178 3179 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3180 return -EADDRNOTAVAIL; 3181 3182 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3183 return -EADDRNOTAVAIL; 3184 3185 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3186 chunk = sctp_make_asconf_set_prim(asoc, 3187 (union sctp_addr *)&prim.sspp_addr); 3188 if (!chunk) 3189 return -ENOMEM; 3190 3191 err = sctp_send_asconf(asoc, chunk); 3192 3193 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3194 3195 return err; 3196 } 3197 3198 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3199 unsigned int optlen) 3200 { 3201 struct sctp_setadaptation adaptation; 3202 3203 if (optlen != sizeof(struct sctp_setadaptation)) 3204 return -EINVAL; 3205 if (copy_from_user(&adaptation, optval, optlen)) 3206 return -EFAULT; 3207 3208 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3209 3210 return 0; 3211 } 3212 3213 /* 3214 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3215 * 3216 * The context field in the sctp_sndrcvinfo structure is normally only 3217 * used when a failed message is retrieved holding the value that was 3218 * sent down on the actual send call. This option allows the setting of 3219 * a default context on an association basis that will be received on 3220 * reading messages from the peer. This is especially helpful in the 3221 * one-2-many model for an application to keep some reference to an 3222 * internal state machine that is processing messages on the 3223 * association. Note that the setting of this value only effects 3224 * received messages from the peer and does not effect the value that is 3225 * saved with outbound messages. 3226 */ 3227 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3228 unsigned int optlen) 3229 { 3230 struct sctp_assoc_value params; 3231 struct sctp_sock *sp; 3232 struct sctp_association *asoc; 3233 3234 if (optlen != sizeof(struct sctp_assoc_value)) 3235 return -EINVAL; 3236 if (copy_from_user(¶ms, optval, optlen)) 3237 return -EFAULT; 3238 3239 sp = sctp_sk(sk); 3240 3241 if (params.assoc_id != 0) { 3242 asoc = sctp_id2assoc(sk, params.assoc_id); 3243 if (!asoc) 3244 return -EINVAL; 3245 asoc->default_rcv_context = params.assoc_value; 3246 } else { 3247 sp->default_rcv_context = params.assoc_value; 3248 } 3249 3250 return 0; 3251 } 3252 3253 /* 3254 * 7.1.24. 
Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3255 * 3256 * This options will at a minimum specify if the implementation is doing 3257 * fragmented interleave. Fragmented interleave, for a one to many 3258 * socket, is when subsequent calls to receive a message may return 3259 * parts of messages from different associations. Some implementations 3260 * may allow you to turn this value on or off. If so, when turned off, 3261 * no fragment interleave will occur (which will cause a head of line 3262 * blocking amongst multiple associations sharing the same one to many 3263 * socket). When this option is turned on, then each receive call may 3264 * come from a different association (thus the user must receive data 3265 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3266 * association each receive belongs to. 3267 * 3268 * This option takes a boolean value. A non-zero value indicates that 3269 * fragmented interleave is on. A value of zero indicates that 3270 * fragmented interleave is off. 3271 * 3272 * Note that it is important that an implementation that allows this 3273 * option to be turned on, have it off by default. Otherwise an unaware 3274 * application using the one to many model may become confused and act 3275 * incorrectly. 3276 */ 3277 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3278 char __user *optval, 3279 unsigned int optlen) 3280 { 3281 int val; 3282 3283 if (optlen != sizeof(int)) 3284 return -EINVAL; 3285 if (get_user(val, (int __user *)optval)) 3286 return -EFAULT; 3287 3288 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3289 3290 return 0; 3291 } 3292 3293 /* 3294 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3295 * (SCTP_PARTIAL_DELIVERY_POINT) 3296 * 3297 * This option will set or get the SCTP partial delivery point. This 3298 * point is the size of a message where the partial delivery API will be 3299 * invoked to help free up rwnd space for the peer. Setting this to a 3300 * lower value will cause partial deliveries to happen more often. The 3301 * calls argument is an integer that sets or gets the partial delivery 3302 * point. Note also that the call will fail if the user attempts to set 3303 * this value larger than the socket receive buffer size. 3304 * 3305 * Note that any single message having a length smaller than or equal to 3306 * the SCTP partial delivery point will be delivered in one single read 3307 * call as long as the user provided buffer is large enough to hold the 3308 * message. 3309 */ 3310 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3311 char __user *optval, 3312 unsigned int optlen) 3313 { 3314 u32 val; 3315 3316 if (optlen != sizeof(u32)) 3317 return -EINVAL; 3318 if (get_user(val, (int __user *)optval)) 3319 return -EFAULT; 3320 3321 /* Note: We double the receive buffer from what the user sets 3322 * it to be, also initial rwnd is based on rcvbuf/2. 3323 */ 3324 if (val > (sk->sk_rcvbuf >> 1)) 3325 return -EINVAL; 3326 3327 sctp_sk(sk)->pd_point = val; 3328 3329 return 0; /* is this the right error code? */ 3330 } 3331 3332 /* 3333 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3334 * 3335 * This option will allow a user to change the maximum burst of packets 3336 * that can be emitted by this association. Note that the default value 3337 * is 4, and some implementations may restrict this setting so that it 3338 * can only be lowered. 3339 * 3340 * NOTE: This text doesn't seem right. 
Do this on a socket basis with 3341 * future associations inheriting the socket value. 3342 */ 3343 static int sctp_setsockopt_maxburst(struct sock *sk, 3344 char __user *optval, 3345 unsigned int optlen) 3346 { 3347 struct sctp_assoc_value params; 3348 struct sctp_sock *sp; 3349 struct sctp_association *asoc; 3350 int val; 3351 int assoc_id = 0; 3352 3353 if (optlen == sizeof(int)) { 3354 pr_warn_ratelimited(DEPRECATED 3355 "%s (pid %d) " 3356 "Use of int in max_burst socket option deprecated.\n" 3357 "Use struct sctp_assoc_value instead\n", 3358 current->comm, task_pid_nr(current)); 3359 if (copy_from_user(&val, optval, optlen)) 3360 return -EFAULT; 3361 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3362 if (copy_from_user(¶ms, optval, optlen)) 3363 return -EFAULT; 3364 val = params.assoc_value; 3365 assoc_id = params.assoc_id; 3366 } else 3367 return -EINVAL; 3368 3369 sp = sctp_sk(sk); 3370 3371 if (assoc_id != 0) { 3372 asoc = sctp_id2assoc(sk, assoc_id); 3373 if (!asoc) 3374 return -EINVAL; 3375 asoc->max_burst = val; 3376 } else 3377 sp->max_burst = val; 3378 3379 return 0; 3380 } 3381 3382 /* 3383 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3384 * 3385 * This set option adds a chunk type that the user is requesting to be 3386 * received only in an authenticated way. Changes to the list of chunks 3387 * will only effect future associations on the socket. 3388 */ 3389 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3390 char __user *optval, 3391 unsigned int optlen) 3392 { 3393 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3394 struct sctp_authchunk val; 3395 3396 if (!ep->auth_enable) 3397 return -EACCES; 3398 3399 if (optlen != sizeof(struct sctp_authchunk)) 3400 return -EINVAL; 3401 if (copy_from_user(&val, optval, optlen)) 3402 return -EFAULT; 3403 3404 switch (val.sauth_chunk) { 3405 case SCTP_CID_INIT: 3406 case SCTP_CID_INIT_ACK: 3407 case SCTP_CID_SHUTDOWN_COMPLETE: 3408 case SCTP_CID_AUTH: 3409 return -EINVAL; 3410 } 3411 3412 /* add this chunk id to the endpoint */ 3413 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3414 } 3415 3416 /* 3417 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3418 * 3419 * This option gets or sets the list of HMAC algorithms that the local 3420 * endpoint requires the peer to use. 3421 */ 3422 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3423 char __user *optval, 3424 unsigned int optlen) 3425 { 3426 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3427 struct sctp_hmacalgo *hmacs; 3428 u32 idents; 3429 int err; 3430 3431 if (!ep->auth_enable) 3432 return -EACCES; 3433 3434 if (optlen < sizeof(struct sctp_hmacalgo)) 3435 return -EINVAL; 3436 3437 hmacs = memdup_user(optval, optlen); 3438 if (IS_ERR(hmacs)) 3439 return PTR_ERR(hmacs); 3440 3441 idents = hmacs->shmac_num_idents; 3442 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3443 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3444 err = -EINVAL; 3445 goto out; 3446 } 3447 3448 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3449 out: 3450 kfree(hmacs); 3451 return err; 3452 } 3453 3454 /* 3455 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3456 * 3457 * This option will set a shared secret key which is used to build an 3458 * association shared key. 
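 *
 * Usage sketch with a hypothetical 16 byte key; sca_keylength counts
 * only the key bytes and the whole variable-length structure is passed
 * in one setsockopt() call (SCTP AUTH must be enabled, see the
 * auth_enable check below):
 *
 *      struct sctp_authkey *akey;
 *      size_t optsize = sizeof(*akey) + 16;
 *
 *      akey = calloc(1, optsize);
 *      akey->sca_assoc_id = assoc_id;
 *      akey->sca_keynumber = 1;
 *      akey->sca_keylength = 16;
 *      memcpy(akey->sca_key, secret, 16);
 *      setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_KEY, akey, optsize);
 *      free(akey);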
3459 */ 3460 static int sctp_setsockopt_auth_key(struct sock *sk, 3461 char __user *optval, 3462 unsigned int optlen) 3463 { 3464 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3465 struct sctp_authkey *authkey; 3466 struct sctp_association *asoc; 3467 int ret; 3468 3469 if (!ep->auth_enable) 3470 return -EACCES; 3471 3472 if (optlen <= sizeof(struct sctp_authkey)) 3473 return -EINVAL; 3474 3475 authkey = memdup_user(optval, optlen); 3476 if (IS_ERR(authkey)) 3477 return PTR_ERR(authkey); 3478 3479 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3480 ret = -EINVAL; 3481 goto out; 3482 } 3483 3484 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3485 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3486 ret = -EINVAL; 3487 goto out; 3488 } 3489 3490 ret = sctp_auth_set_key(ep, asoc, authkey); 3491 out: 3492 kzfree(authkey); 3493 return ret; 3494 } 3495 3496 /* 3497 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3498 * 3499 * This option will get or set the active shared key to be used to build 3500 * the association shared key. 3501 */ 3502 static int sctp_setsockopt_active_key(struct sock *sk, 3503 char __user *optval, 3504 unsigned int optlen) 3505 { 3506 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3507 struct sctp_authkeyid val; 3508 struct sctp_association *asoc; 3509 3510 if (!ep->auth_enable) 3511 return -EACCES; 3512 3513 if (optlen != sizeof(struct sctp_authkeyid)) 3514 return -EINVAL; 3515 if (copy_from_user(&val, optval, optlen)) 3516 return -EFAULT; 3517 3518 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3519 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3520 return -EINVAL; 3521 3522 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3523 } 3524 3525 /* 3526 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3527 * 3528 * This set option will delete a shared secret key from use. 3529 */ 3530 static int sctp_setsockopt_del_key(struct sock *sk, 3531 char __user *optval, 3532 unsigned int optlen) 3533 { 3534 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3535 struct sctp_authkeyid val; 3536 struct sctp_association *asoc; 3537 3538 if (!ep->auth_enable) 3539 return -EACCES; 3540 3541 if (optlen != sizeof(struct sctp_authkeyid)) 3542 return -EINVAL; 3543 if (copy_from_user(&val, optval, optlen)) 3544 return -EFAULT; 3545 3546 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3547 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3548 return -EINVAL; 3549 3550 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3551 3552 } 3553 3554 /* 3555 * 8.1.23 SCTP_AUTO_ASCONF 3556 * 3557 * This option will enable or disable the use of the automatic generation of 3558 * ASCONF chunks to add and delete addresses to an existing association. Note 3559 * that this option has two caveats namely: a) it only affects sockets that 3560 * are bound to all addresses available to the SCTP stack, and b) the system 3561 * administrator may have an overriding control that turns the ASCONF feature 3562 * off no matter what setting the socket option may have. 3563 * This option expects an integer boolean flag, where a non-zero value turns on 3564 * the option, and a zero value turns off the option. 3565 * Note. 
In this implementation, socket operation overrides default parameter 3566 * being set by sysctl as well as FreeBSD implementation 3567 */ 3568 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3569 unsigned int optlen) 3570 { 3571 int val; 3572 struct sctp_sock *sp = sctp_sk(sk); 3573 3574 if (optlen < sizeof(int)) 3575 return -EINVAL; 3576 if (get_user(val, (int __user *)optval)) 3577 return -EFAULT; 3578 if (!sctp_is_ep_boundall(sk) && val) 3579 return -EINVAL; 3580 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3581 return 0; 3582 3583 if (val == 0 && sp->do_auto_asconf) { 3584 list_del(&sp->auto_asconf_list); 3585 sp->do_auto_asconf = 0; 3586 } else if (val && !sp->do_auto_asconf) { 3587 list_add_tail(&sp->auto_asconf_list, 3588 &sock_net(sk)->sctp.auto_asconf_splist); 3589 sp->do_auto_asconf = 1; 3590 } 3591 return 0; 3592 } 3593 3594 /* 3595 * SCTP_PEER_ADDR_THLDS 3596 * 3597 * This option allows us to alter the partially failed threshold for one or all 3598 * transports in an association. See Section 6.1 of: 3599 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3600 */ 3601 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3602 char __user *optval, 3603 unsigned int optlen) 3604 { 3605 struct sctp_paddrthlds val; 3606 struct sctp_transport *trans; 3607 struct sctp_association *asoc; 3608 3609 if (optlen < sizeof(struct sctp_paddrthlds)) 3610 return -EINVAL; 3611 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3612 sizeof(struct sctp_paddrthlds))) 3613 return -EFAULT; 3614 3615 3616 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3617 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3618 if (!asoc) 3619 return -ENOENT; 3620 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3621 transports) { 3622 if (val.spt_pathmaxrxt) 3623 trans->pathmaxrxt = val.spt_pathmaxrxt; 3624 trans->pf_retrans = val.spt_pathpfthld; 3625 } 3626 3627 if (val.spt_pathmaxrxt) 3628 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3629 asoc->pf_retrans = val.spt_pathpfthld; 3630 } else { 3631 trans = sctp_addr_id2transport(sk, &val.spt_address, 3632 val.spt_assoc_id); 3633 if (!trans) 3634 return -ENOENT; 3635 3636 if (val.spt_pathmaxrxt) 3637 trans->pathmaxrxt = val.spt_pathmaxrxt; 3638 trans->pf_retrans = val.spt_pathpfthld; 3639 } 3640 3641 return 0; 3642 } 3643 3644 static int sctp_setsockopt_recvrcvinfo(struct sock *sk, 3645 char __user *optval, 3646 unsigned int optlen) 3647 { 3648 int val; 3649 3650 if (optlen < sizeof(int)) 3651 return -EINVAL; 3652 if (get_user(val, (int __user *) optval)) 3653 return -EFAULT; 3654 3655 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; 3656 3657 return 0; 3658 } 3659 3660 static int sctp_setsockopt_recvnxtinfo(struct sock *sk, 3661 char __user *optval, 3662 unsigned int optlen) 3663 { 3664 int val; 3665 3666 if (optlen < sizeof(int)) 3667 return -EINVAL; 3668 if (get_user(val, (int __user *) optval)) 3669 return -EFAULT; 3670 3671 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; 3672 3673 return 0; 3674 } 3675 3676 /* API 6.2 setsockopt(), getsockopt() 3677 * 3678 * Applications use setsockopt() and getsockopt() to set or retrieve 3679 * socket options. Socket options are used to change the default 3680 * behavior of sockets calls. They are described in Section 7. 
3681 * 3682 * The syntax is: 3683 * 3684 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3685 * int __user *optlen); 3686 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3687 * int optlen); 3688 * 3689 * sd - the socket descript. 3690 * level - set to IPPROTO_SCTP for all SCTP options. 3691 * optname - the option name. 3692 * optval - the buffer to store the value of the option. 3693 * optlen - the size of the buffer. 3694 */ 3695 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3696 char __user *optval, unsigned int optlen) 3697 { 3698 int retval = 0; 3699 3700 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3701 3702 /* I can hardly begin to describe how wrong this is. This is 3703 * so broken as to be worse than useless. The API draft 3704 * REALLY is NOT helpful here... I am not convinced that the 3705 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3706 * are at all well-founded. 3707 */ 3708 if (level != SOL_SCTP) { 3709 struct sctp_af *af = sctp_sk(sk)->pf->af; 3710 retval = af->setsockopt(sk, level, optname, optval, optlen); 3711 goto out_nounlock; 3712 } 3713 3714 lock_sock(sk); 3715 3716 switch (optname) { 3717 case SCTP_SOCKOPT_BINDX_ADD: 3718 /* 'optlen' is the size of the addresses buffer. */ 3719 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3720 optlen, SCTP_BINDX_ADD_ADDR); 3721 break; 3722 3723 case SCTP_SOCKOPT_BINDX_REM: 3724 /* 'optlen' is the size of the addresses buffer. */ 3725 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3726 optlen, SCTP_BINDX_REM_ADDR); 3727 break; 3728 3729 case SCTP_SOCKOPT_CONNECTX_OLD: 3730 /* 'optlen' is the size of the addresses buffer. */ 3731 retval = sctp_setsockopt_connectx_old(sk, 3732 (struct sockaddr __user *)optval, 3733 optlen); 3734 break; 3735 3736 case SCTP_SOCKOPT_CONNECTX: 3737 /* 'optlen' is the size of the addresses buffer. 
*/ 3738 retval = sctp_setsockopt_connectx(sk, 3739 (struct sockaddr __user *)optval, 3740 optlen); 3741 break; 3742 3743 case SCTP_DISABLE_FRAGMENTS: 3744 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3745 break; 3746 3747 case SCTP_EVENTS: 3748 retval = sctp_setsockopt_events(sk, optval, optlen); 3749 break; 3750 3751 case SCTP_AUTOCLOSE: 3752 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3753 break; 3754 3755 case SCTP_PEER_ADDR_PARAMS: 3756 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3757 break; 3758 3759 case SCTP_DELAYED_SACK: 3760 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3761 break; 3762 case SCTP_PARTIAL_DELIVERY_POINT: 3763 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3764 break; 3765 3766 case SCTP_INITMSG: 3767 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3768 break; 3769 case SCTP_DEFAULT_SEND_PARAM: 3770 retval = sctp_setsockopt_default_send_param(sk, optval, 3771 optlen); 3772 break; 3773 case SCTP_DEFAULT_SNDINFO: 3774 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); 3775 break; 3776 case SCTP_PRIMARY_ADDR: 3777 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3778 break; 3779 case SCTP_SET_PEER_PRIMARY_ADDR: 3780 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3781 break; 3782 case SCTP_NODELAY: 3783 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3784 break; 3785 case SCTP_RTOINFO: 3786 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3787 break; 3788 case SCTP_ASSOCINFO: 3789 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3790 break; 3791 case SCTP_I_WANT_MAPPED_V4_ADDR: 3792 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3793 break; 3794 case SCTP_MAXSEG: 3795 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3796 break; 3797 case SCTP_ADAPTATION_LAYER: 3798 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3799 break; 3800 case SCTP_CONTEXT: 3801 retval = sctp_setsockopt_context(sk, optval, optlen); 3802 break; 3803 case SCTP_FRAGMENT_INTERLEAVE: 3804 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3805 break; 3806 case SCTP_MAX_BURST: 3807 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3808 break; 3809 case SCTP_AUTH_CHUNK: 3810 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3811 break; 3812 case SCTP_HMAC_IDENT: 3813 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3814 break; 3815 case SCTP_AUTH_KEY: 3816 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3817 break; 3818 case SCTP_AUTH_ACTIVE_KEY: 3819 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3820 break; 3821 case SCTP_AUTH_DELETE_KEY: 3822 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3823 break; 3824 case SCTP_AUTO_ASCONF: 3825 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3826 break; 3827 case SCTP_PEER_ADDR_THLDS: 3828 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3829 break; 3830 case SCTP_RECVRCVINFO: 3831 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); 3832 break; 3833 case SCTP_RECVNXTINFO: 3834 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); 3835 break; 3836 default: 3837 retval = -ENOPROTOOPT; 3838 break; 3839 } 3840 3841 release_sock(sk); 3842 3843 out_nounlock: 3844 return retval; 3845 } 3846 3847 /* API 3.1.6 connect() - UDP Style Syntax 3848 * 3849 * An application may use the connect() call in the UDP model to initiate an 3850 * association without sending data. 
3851 * 3852 * The syntax is: 3853 * 3854 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3855 * 3856 * sd: the socket descriptor to have a new association added to. 3857 * 3858 * nam: the address structure (either struct sockaddr_in or struct 3859 * sockaddr_in6 defined in RFC2553 [7]). 3860 * 3861 * len: the size of the address. 3862 */ 3863 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3864 int addr_len) 3865 { 3866 int err = 0; 3867 struct sctp_af *af; 3868 3869 lock_sock(sk); 3870 3871 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3872 addr, addr_len); 3873 3874 /* Validate addr_len before calling common connect/connectx routine. */ 3875 af = sctp_get_af_specific(addr->sa_family); 3876 if (!af || addr_len < af->sockaddr_len) { 3877 err = -EINVAL; 3878 } else { 3879 /* Pass correct addr len to common routine (so it knows there 3880 * is only one address being passed. 3881 */ 3882 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3883 } 3884 3885 release_sock(sk); 3886 return err; 3887 } 3888 3889 /* FIXME: Write comments. */ 3890 static int sctp_disconnect(struct sock *sk, int flags) 3891 { 3892 return -EOPNOTSUPP; /* STUB */ 3893 } 3894 3895 /* 4.1.4 accept() - TCP Style Syntax 3896 * 3897 * Applications use accept() call to remove an established SCTP 3898 * association from the accept queue of the endpoint. A new socket 3899 * descriptor will be returned from accept() to represent the newly 3900 * formed association. 3901 */ 3902 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3903 { 3904 struct sctp_sock *sp; 3905 struct sctp_endpoint *ep; 3906 struct sock *newsk = NULL; 3907 struct sctp_association *asoc; 3908 long timeo; 3909 int error = 0; 3910 3911 lock_sock(sk); 3912 3913 sp = sctp_sk(sk); 3914 ep = sp->ep; 3915 3916 if (!sctp_style(sk, TCP)) { 3917 error = -EOPNOTSUPP; 3918 goto out; 3919 } 3920 3921 if (!sctp_sstate(sk, LISTENING)) { 3922 error = -EINVAL; 3923 goto out; 3924 } 3925 3926 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3927 3928 error = sctp_wait_for_accept(sk, timeo); 3929 if (error) 3930 goto out; 3931 3932 /* We treat the list of associations on the endpoint as the accept 3933 * queue and pick the first association on the list. 3934 */ 3935 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3936 3937 newsk = sp->pf->create_accept_sk(sk, asoc); 3938 if (!newsk) { 3939 error = -ENOMEM; 3940 goto out; 3941 } 3942 3943 /* Populate the fields of the newsk from the oldsk and migrate the 3944 * asoc to the newsk. 3945 */ 3946 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3947 3948 out: 3949 release_sock(sk); 3950 *err = error; 3951 return newsk; 3952 } 3953 3954 /* The SCTP ioctl handler. */ 3955 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3956 { 3957 int rc = -ENOTCONN; 3958 3959 lock_sock(sk); 3960 3961 /* 3962 * SEQPACKET-style sockets in LISTENING state are valid, for 3963 * SCTP, so only discard TCP-style sockets in LISTENING state. 3964 */ 3965 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3966 goto out; 3967 3968 switch (cmd) { 3969 case SIOCINQ: { 3970 struct sk_buff *skb; 3971 unsigned int amount = 0; 3972 3973 skb = skb_peek(&sk->sk_receive_queue); 3974 if (skb != NULL) { 3975 /* 3976 * We will only return the amount of this packet since 3977 * that is all that will be read. 
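* An illustrative user-space counterpart (assuming an SCTP socket
* descriptor "fd" and <sys/ioctl.h>) would be:
*
*	int avail = 0;
*
*	if (ioctl(fd, SIOCINQ, &avail) < 0)
*		perror("SIOCINQ");
*
* where "avail" then covers only the next queued message.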
3978 */ 3979 amount = skb->len; 3980 } 3981 rc = put_user(amount, (int __user *)arg); 3982 break; 3983 } 3984 default: 3985 rc = -ENOIOCTLCMD; 3986 break; 3987 } 3988 out: 3989 release_sock(sk); 3990 return rc; 3991 } 3992 3993 /* This is the function which gets called during socket creation to 3994 * initialized the SCTP-specific portion of the sock. 3995 * The sock structure should already be zero-filled memory. 3996 */ 3997 static int sctp_init_sock(struct sock *sk) 3998 { 3999 struct net *net = sock_net(sk); 4000 struct sctp_sock *sp; 4001 4002 pr_debug("%s: sk:%p\n", __func__, sk); 4003 4004 sp = sctp_sk(sk); 4005 4006 /* Initialize the SCTP per socket area. */ 4007 switch (sk->sk_type) { 4008 case SOCK_SEQPACKET: 4009 sp->type = SCTP_SOCKET_UDP; 4010 break; 4011 case SOCK_STREAM: 4012 sp->type = SCTP_SOCKET_TCP; 4013 break; 4014 default: 4015 return -ESOCKTNOSUPPORT; 4016 } 4017 4018 /* Initialize default send parameters. These parameters can be 4019 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4020 */ 4021 sp->default_stream = 0; 4022 sp->default_ppid = 0; 4023 sp->default_flags = 0; 4024 sp->default_context = 0; 4025 sp->default_timetolive = 0; 4026 4027 sp->default_rcv_context = 0; 4028 sp->max_burst = net->sctp.max_burst; 4029 4030 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 4031 4032 /* Initialize default setup parameters. These parameters 4033 * can be modified with the SCTP_INITMSG socket option or 4034 * overridden by the SCTP_INIT CMSG. 4035 */ 4036 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 4037 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 4038 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 4039 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 4040 4041 /* Initialize default RTO related parameters. These parameters can 4042 * be modified for with the SCTP_RTOINFO socket option. 4043 */ 4044 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 4045 sp->rtoinfo.srto_max = net->sctp.rto_max; 4046 sp->rtoinfo.srto_min = net->sctp.rto_min; 4047 4048 /* Initialize default association related parameters. These parameters 4049 * can be modified with the SCTP_ASSOCINFO socket option. 4050 */ 4051 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 4052 sp->assocparams.sasoc_number_peer_destinations = 0; 4053 sp->assocparams.sasoc_peer_rwnd = 0; 4054 sp->assocparams.sasoc_local_rwnd = 0; 4055 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 4056 4057 /* Initialize default event subscriptions. By default, all the 4058 * options are off. 4059 */ 4060 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 4061 4062 /* Default Peer Address Parameters. These defaults can 4063 * be modified via SCTP_PEER_ADDR_PARAMS 4064 */ 4065 sp->hbinterval = net->sctp.hb_interval; 4066 sp->pathmaxrxt = net->sctp.max_retrans_path; 4067 sp->pathmtu = 0; /* allow default discovery */ 4068 sp->sackdelay = net->sctp.sack_timeout; 4069 sp->sackfreq = 2; 4070 sp->param_flags = SPP_HB_ENABLE | 4071 SPP_PMTUD_ENABLE | 4072 SPP_SACKDELAY_ENABLE; 4073 4074 /* If enabled no SCTP message fragmentation will be performed. 4075 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 4076 */ 4077 sp->disable_fragments = 0; 4078 4079 /* Enable Nagle algorithm by default. */ 4080 sp->nodelay = 0; 4081 4082 sp->recvrcvinfo = 0; 4083 sp->recvnxtinfo = 0; 4084 4085 /* Enable by default. */ 4086 sp->v4mapped = 1; 4087 4088 /* Auto-close idle associations after the configured 4089 * number of seconds. 
A value of 0 disables this 4090 * feature. Configure through the SCTP_AUTOCLOSE socket option, 4091 * for UDP-style sockets only. 4092 */ 4093 sp->autoclose = 0; 4094 4095 /* User specified fragmentation limit. */ 4096 sp->user_frag = 0; 4097 4098 sp->adaptation_ind = 0; 4099 4100 sp->pf = sctp_get_pf_specific(sk->sk_family); 4101 4102 /* Control variables for partial data delivery. */ 4103 atomic_set(&sp->pd_mode, 0); 4104 skb_queue_head_init(&sp->pd_lobby); 4105 sp->frag_interleave = 0; 4106 4107 /* Create a per socket endpoint structure. Even if we 4108 * change the data structure relationships, this may still 4109 * be useful for storing pre-connect address information. 4110 */ 4111 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4112 if (!sp->ep) 4113 return -ENOMEM; 4114 4115 sp->hmac = NULL; 4116 4117 sk->sk_destruct = sctp_destruct_sock; 4118 4119 SCTP_DBG_OBJCNT_INC(sock); 4120 4121 local_bh_disable(); 4122 percpu_counter_inc(&sctp_sockets_allocated); 4123 sock_prot_inuse_add(net, sk->sk_prot, 1); 4124 if (net->sctp.default_auto_asconf) { 4125 list_add_tail(&sp->auto_asconf_list, 4126 &net->sctp.auto_asconf_splist); 4127 sp->do_auto_asconf = 1; 4128 } else 4129 sp->do_auto_asconf = 0; 4130 local_bh_enable(); 4131 4132 return 0; 4133 } 4134 4135 /* Cleanup any SCTP per socket resources. */ 4136 static void sctp_destroy_sock(struct sock *sk) 4137 { 4138 struct sctp_sock *sp; 4139 4140 pr_debug("%s: sk:%p\n", __func__, sk); 4141 4142 /* Release our hold on the endpoint. */ 4143 sp = sctp_sk(sk); 4144 /* This could happen during socket init, thus we bail out 4145 * early, since the rest of the below is not setup either. 4146 */ 4147 if (sp->ep == NULL) 4148 return; 4149 4150 if (sp->do_auto_asconf) { 4151 sp->do_auto_asconf = 0; 4152 list_del(&sp->auto_asconf_list); 4153 } 4154 sctp_endpoint_free(sp->ep); 4155 local_bh_disable(); 4156 percpu_counter_dec(&sctp_sockets_allocated); 4157 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4158 local_bh_enable(); 4159 } 4160 4161 /* Triggered when there are no references on the socket anymore */ 4162 static void sctp_destruct_sock(struct sock *sk) 4163 { 4164 struct sctp_sock *sp = sctp_sk(sk); 4165 4166 /* Free up the HMAC transform. */ 4167 crypto_free_hash(sp->hmac); 4168 4169 inet_sock_destruct(sk); 4170 } 4171 4172 /* API 4.1.7 shutdown() - TCP Style Syntax 4173 * int shutdown(int socket, int how); 4174 * 4175 * sd - the socket descriptor of the association to be closed. 4176 * how - Specifies the type of shutdown. The values are 4177 * as follows: 4178 * SHUT_RD 4179 * Disables further receive operations. No SCTP 4180 * protocol action is taken. 4181 * SHUT_WR 4182 * Disables further send operations, and initiates 4183 * the SCTP shutdown sequence. 4184 * SHUT_RDWR 4185 * Disables further send and receive operations 4186 * and initiates the SCTP shutdown sequence. 
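* As a sketch only (assuming a connected TCP-style SCTP socket "fd" and
* the standard <sys/socket.h> header), a graceful close that still allows
* pending inbound data to be read might look like:
*
*	if (shutdown(fd, SHUT_WR) < 0)
*		perror("shutdown");
*
* after which the application keeps reading until EOF and then calls
* close(fd).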
4187 */ 4188 static void sctp_shutdown(struct sock *sk, int how) 4189 { 4190 struct net *net = sock_net(sk); 4191 struct sctp_endpoint *ep; 4192 struct sctp_association *asoc; 4193 4194 if (!sctp_style(sk, TCP)) 4195 return; 4196 4197 if (how & SEND_SHUTDOWN) { 4198 ep = sctp_sk(sk)->ep; 4199 if (!list_empty(&ep->asocs)) { 4200 asoc = list_entry(ep->asocs.next, 4201 struct sctp_association, asocs); 4202 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4203 } 4204 } 4205 } 4206 4207 /* 7.2.1 Association Status (SCTP_STATUS) 4208 4209 * Applications can retrieve current status information about an 4210 * association, including association state, peer receiver window size, 4211 * number of unacked data chunks, and number of data chunks pending 4212 * receipt. This information is read-only. 4213 */ 4214 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4215 char __user *optval, 4216 int __user *optlen) 4217 { 4218 struct sctp_status status; 4219 struct sctp_association *asoc = NULL; 4220 struct sctp_transport *transport; 4221 sctp_assoc_t associd; 4222 int retval = 0; 4223 4224 if (len < sizeof(status)) { 4225 retval = -EINVAL; 4226 goto out; 4227 } 4228 4229 len = sizeof(status); 4230 if (copy_from_user(&status, optval, len)) { 4231 retval = -EFAULT; 4232 goto out; 4233 } 4234 4235 associd = status.sstat_assoc_id; 4236 asoc = sctp_id2assoc(sk, associd); 4237 if (!asoc) { 4238 retval = -EINVAL; 4239 goto out; 4240 } 4241 4242 transport = asoc->peer.primary_path; 4243 4244 status.sstat_assoc_id = sctp_assoc2id(asoc); 4245 status.sstat_state = sctp_assoc_to_state(asoc); 4246 status.sstat_rwnd = asoc->peer.rwnd; 4247 status.sstat_unackdata = asoc->unack_data; 4248 4249 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4250 status.sstat_instrms = asoc->c.sinit_max_instreams; 4251 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4252 status.sstat_fragmentation_point = asoc->frag_point; 4253 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4254 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4255 transport->af_specific->sockaddr_len); 4256 /* Map ipv4 address into v4-mapped-on-v6 address. */ 4257 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 4258 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4259 status.sstat_primary.spinfo_state = transport->state; 4260 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4261 status.sstat_primary.spinfo_srtt = transport->srtt; 4262 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4263 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4264 4265 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4266 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4267 4268 if (put_user(len, optlen)) { 4269 retval = -EFAULT; 4270 goto out; 4271 } 4272 4273 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4274 __func__, len, status.sstat_state, status.sstat_rwnd, 4275 status.sstat_assoc_id); 4276 4277 if (copy_to_user(optval, &status, len)) { 4278 retval = -EFAULT; 4279 goto out; 4280 } 4281 4282 out: 4283 return retval; 4284 } 4285 4286 4287 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4288 * 4289 * Applications can retrieve information about a specific peer address 4290 * of an association, including its reachability state, congestion 4291 * window, and retransmission timer values. This information is 4292 * read-only. 
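* For illustration, a user-space query could be shaped roughly as below.
* This is a sketch assuming the lksctp-tools <netinet/sctp.h> header;
* "fd", "assoc_id" and "peer" (a struct sockaddr_in holding one of the
* peer's addresses) are assumptions:
*
*	struct sctp_paddrinfo pinfo;
*	socklen_t len = sizeof(pinfo);
*
*	memset(&pinfo, 0, sizeof(pinfo));
*	pinfo.spinfo_assoc_id = assoc_id;
*	memcpy(&pinfo.spinfo_address, &peer, sizeof(peer));
*	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
*		       &pinfo, &len) == 0)
*		printf("state=%d cwnd=%u rto=%u ms\n", pinfo.spinfo_state,
*		       pinfo.spinfo_cwnd, pinfo.spinfo_rto);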
4293 */ 4294 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4295 char __user *optval, 4296 int __user *optlen) 4297 { 4298 struct sctp_paddrinfo pinfo; 4299 struct sctp_transport *transport; 4300 int retval = 0; 4301 4302 if (len < sizeof(pinfo)) { 4303 retval = -EINVAL; 4304 goto out; 4305 } 4306 4307 len = sizeof(pinfo); 4308 if (copy_from_user(&pinfo, optval, len)) { 4309 retval = -EFAULT; 4310 goto out; 4311 } 4312 4313 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4314 pinfo.spinfo_assoc_id); 4315 if (!transport) 4316 return -EINVAL; 4317 4318 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4319 pinfo.spinfo_state = transport->state; 4320 pinfo.spinfo_cwnd = transport->cwnd; 4321 pinfo.spinfo_srtt = transport->srtt; 4322 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4323 pinfo.spinfo_mtu = transport->pathmtu; 4324 4325 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4326 pinfo.spinfo_state = SCTP_ACTIVE; 4327 4328 if (put_user(len, optlen)) { 4329 retval = -EFAULT; 4330 goto out; 4331 } 4332 4333 if (copy_to_user(optval, &pinfo, len)) { 4334 retval = -EFAULT; 4335 goto out; 4336 } 4337 4338 out: 4339 return retval; 4340 } 4341 4342 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4343 * 4344 * This option is a on/off flag. If enabled no SCTP message 4345 * fragmentation will be performed. Instead if a message being sent 4346 * exceeds the current PMTU size, the message will NOT be sent and 4347 * instead a error will be indicated to the user. 4348 */ 4349 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4350 char __user *optval, int __user *optlen) 4351 { 4352 int val; 4353 4354 if (len < sizeof(int)) 4355 return -EINVAL; 4356 4357 len = sizeof(int); 4358 val = (sctp_sk(sk)->disable_fragments == 1); 4359 if (put_user(len, optlen)) 4360 return -EFAULT; 4361 if (copy_to_user(optval, &val, len)) 4362 return -EFAULT; 4363 return 0; 4364 } 4365 4366 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4367 * 4368 * This socket option is used to specify various notifications and 4369 * ancillary data the user wishes to receive. 4370 */ 4371 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4372 int __user *optlen) 4373 { 4374 if (len <= 0) 4375 return -EINVAL; 4376 if (len > sizeof(struct sctp_event_subscribe)) 4377 len = sizeof(struct sctp_event_subscribe); 4378 if (put_user(len, optlen)) 4379 return -EFAULT; 4380 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4381 return -EFAULT; 4382 return 0; 4383 } 4384 4385 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4386 * 4387 * This socket option is applicable to the UDP-style socket only. When 4388 * set it will cause associations that are idle for more than the 4389 * specified number of seconds to automatically close. An association 4390 * being idle is defined an association that has NOT sent or received 4391 * user data. The special value of '0' indicates that no automatic 4392 * close of any associations should be performed. The option expects an 4393 * integer defining the number of seconds of idle time before an 4394 * association is closed. 
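* A minimal user-space sketch (assumptions: a one-to-many SOCK_SEQPACKET
* socket "fd" and the lksctp-tools <netinet/sctp.h> header) asking for
* idle associations to be closed after two minutes:
*
*	int idle_secs = 120;
*
*	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
*		       &idle_secs, sizeof(idle_secs)) < 0)
*		perror("SCTP_AUTOCLOSE");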
4395 */ 4396 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4397 { 4398 /* Applicable to UDP-style socket only */ 4399 if (sctp_style(sk, TCP)) 4400 return -EOPNOTSUPP; 4401 if (len < sizeof(int)) 4402 return -EINVAL; 4403 len = sizeof(int); 4404 if (put_user(len, optlen)) 4405 return -EFAULT; 4406 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4407 return -EFAULT; 4408 return 0; 4409 } 4410 4411 /* Helper routine to branch off an association to a new socket. */ 4412 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4413 { 4414 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4415 struct sctp_sock *sp = sctp_sk(sk); 4416 struct socket *sock; 4417 int err = 0; 4418 4419 if (!asoc) 4420 return -EINVAL; 4421 4422 /* An association cannot be branched off from an already peeled-off 4423 * socket, nor is this supported for tcp style sockets. 4424 */ 4425 if (!sctp_style(sk, UDP)) 4426 return -EINVAL; 4427 4428 /* Create a new socket. */ 4429 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4430 if (err < 0) 4431 return err; 4432 4433 sctp_copy_sock(sock->sk, sk, asoc); 4434 4435 /* Make peeled-off sockets more like 1-1 accepted sockets. 4436 * Set the daddr and initialize id to something more random 4437 */ 4438 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); 4439 4440 /* Populate the fields of the newsk from the oldsk and migrate the 4441 * asoc to the newsk. 4442 */ 4443 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4444 4445 *sockp = sock; 4446 4447 return err; 4448 } 4449 EXPORT_SYMBOL(sctp_do_peeloff); 4450 4451 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4452 { 4453 sctp_peeloff_arg_t peeloff; 4454 struct socket *newsock; 4455 struct file *newfile; 4456 int retval = 0; 4457 4458 if (len < sizeof(sctp_peeloff_arg_t)) 4459 return -EINVAL; 4460 len = sizeof(sctp_peeloff_arg_t); 4461 if (copy_from_user(&peeloff, optval, len)) 4462 return -EFAULT; 4463 4464 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4465 if (retval < 0) 4466 goto out; 4467 4468 /* Map the socket to an unused fd that can be returned to the user. */ 4469 retval = get_unused_fd_flags(0); 4470 if (retval < 0) { 4471 sock_release(newsock); 4472 goto out; 4473 } 4474 4475 newfile = sock_alloc_file(newsock, 0, NULL); 4476 if (unlikely(IS_ERR(newfile))) { 4477 put_unused_fd(retval); 4478 sock_release(newsock); 4479 return PTR_ERR(newfile); 4480 } 4481 4482 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4483 retval); 4484 4485 /* Return the fd mapped to the new socket. */ 4486 if (put_user(len, optlen)) { 4487 fput(newfile); 4488 put_unused_fd(retval); 4489 return -EFAULT; 4490 } 4491 peeloff.sd = retval; 4492 if (copy_to_user(optval, &peeloff, len)) { 4493 fput(newfile); 4494 put_unused_fd(retval); 4495 return -EFAULT; 4496 } 4497 fd_install(retval, newfile); 4498 out: 4499 return retval; 4500 } 4501 4502 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4503 * 4504 * Applications can enable or disable heartbeats for any peer address of 4505 * an association, modify an address's heartbeat interval, force a 4506 * heartbeat to be sent immediately, and adjust the address's maximum 4507 * number of retransmissions sent before an address is considered 4508 * unreachable. 
The following structure is used to access and modify an 4509 * address's parameters: 4510 * 4511 * struct sctp_paddrparams { 4512 * sctp_assoc_t spp_assoc_id; 4513 * struct sockaddr_storage spp_address; 4514 * uint32_t spp_hbinterval; 4515 * uint16_t spp_pathmaxrxt; 4516 * uint32_t spp_pathmtu; 4517 * uint32_t spp_sackdelay; 4518 * uint32_t spp_flags; 4519 * }; 4520 * 4521 * spp_assoc_id - (one-to-many style socket) This is filled in by the 4522 * application, and identifies the association for 4523 * this query. 4524 * spp_address - This specifies which address is of interest. 4525 * spp_hbinterval - This contains the value of the heartbeat interval, 4526 * in milliseconds. If a value of zero 4527 * is present in this field then no changes are to 4528 * be made to this parameter. 4529 * spp_pathmaxrxt - This contains the maximum number of 4530 * retransmissions before this address shall be 4531 * considered unreachable. If a value of zero 4532 * is present in this field then no changes are to 4533 * be made to this parameter. 4534 * spp_pathmtu - When Path MTU discovery is disabled the value 4535 * specified here will be the "fixed" path mtu. 4536 * Note that if the spp_address field is empty 4537 * then all associations on this address will 4538 * have this fixed path mtu set upon them. 4539 * 4540 * spp_sackdelay - When delayed sack is enabled, this value specifies 4541 * the number of milliseconds that sacks will be delayed 4542 * for. This value will apply to all addresses of an 4543 * association if the spp_address field is empty. Note 4544 * also, that if delayed sack is enabled and this 4545 * value is set to 0, no change is made to the last 4546 * recorded delayed sack timer value. 4547 * 4548 * spp_flags - These flags are used to control various features 4549 * on an association. The flag field may contain 4550 * zero or more of the following options. 4551 * 4552 * SPP_HB_ENABLE - Enable heartbeats on the 4553 * specified address. Note that if the address 4554 * field is empty all addresses for the association 4555 * have heartbeats enabled upon them. 4556 * 4557 * SPP_HB_DISABLE - Disable heartbeats on the 4558 * specified address. Note that if the address 4559 * field is empty all addresses for the association 4560 * will have their heartbeats disabled. Note also 4561 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4562 * mutually exclusive, only one of these two should 4563 * be specified. Enabling both fields will have 4564 * undetermined results. 4565 * 4566 * SPP_HB_DEMAND - Request a user initiated heartbeat 4567 * to be made immediately. 4568 * 4569 * SPP_PMTUD_ENABLE - This field will enable PMTU 4570 * discovery upon the specified address. Note that 4571 * if the address field is empty then all addresses 4572 * on the association are affected. 4573 * 4574 * SPP_PMTUD_DISABLE - This field will disable PMTU 4575 * discovery upon the specified address. Note that 4576 * if the address field is empty then all addresses 4577 * on the association are affected. Note also that 4578 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4579 * exclusive. Enabling both will have undetermined 4580 * results. 4581 * 4582 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4583 * on delayed sack. The time specified in spp_sackdelay 4584 * is used to specify the sack delay for this address. Note 4585 * that if spp_address is empty then all addresses will 4586 * enable delayed sack and take on the sack delay 4587 * value specified in spp_sackdelay.
4588 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4589 * off delayed sack. If the spp_address field is blank then 4590 * delayed sack is disabled for the entire association. Note 4591 * also that this field is mutually exclusive to 4592 * SPP_SACKDELAY_ENABLE, setting both will have undefined 4593 * results. 4594 */ 4595 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4596 char __user *optval, int __user *optlen) 4597 { 4598 struct sctp_paddrparams params; 4599 struct sctp_transport *trans = NULL; 4600 struct sctp_association *asoc = NULL; 4601 struct sctp_sock *sp = sctp_sk(sk); 4602 4603 if (len < sizeof(struct sctp_paddrparams)) 4604 return -EINVAL; 4605 len = sizeof(struct sctp_paddrparams); 4606 if (copy_from_user(&params, optval, len)) 4607 return -EFAULT; 4608 4609 /* If an address other than INADDR_ANY is specified, and 4610 * no transport is found, then the request is invalid. 4611 */ 4612 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 4613 trans = sctp_addr_id2transport(sk, &params.spp_address, 4614 params.spp_assoc_id); 4615 if (!trans) { 4616 pr_debug("%s: failed no transport\n", __func__); 4617 return -EINVAL; 4618 } 4619 } 4620 4621 /* Get association, if assoc_id != 0 and the socket is a one 4622 * to many style socket, and an association was not found, then 4623 * the id was invalid. 4624 */ 4625 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4626 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4627 pr_debug("%s: failed no association\n", __func__); 4628 return -EINVAL; 4629 } 4630 4631 if (trans) { 4632 /* Fetch transport values. */ 4633 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4634 params.spp_pathmtu = trans->pathmtu; 4635 params.spp_pathmaxrxt = trans->pathmaxrxt; 4636 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4637 4638 /*draft-11 doesn't say what to return in spp_flags*/ 4639 params.spp_flags = trans->param_flags; 4640 } else if (asoc) { 4641 /* Fetch association values. */ 4642 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4643 params.spp_pathmtu = asoc->pathmtu; 4644 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4645 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4646 4647 /*draft-11 doesn't say what to return in spp_flags*/ 4648 params.spp_flags = asoc->param_flags; 4649 } else { 4650 /* Fetch socket values. */ 4651 params.spp_hbinterval = sp->hbinterval; 4652 params.spp_pathmtu = sp->pathmtu; 4653 params.spp_sackdelay = sp->sackdelay; 4654 params.spp_pathmaxrxt = sp->pathmaxrxt; 4655 4656 /*draft-11 doesn't say what to return in spp_flags*/ 4657 params.spp_flags = sp->param_flags; 4658 } 4659 4660 if (copy_to_user(optval, &params, len)) 4661 return -EFAULT; 4662 4663 if (put_user(len, optlen)) 4664 return -EFAULT; 4665 4666 return 0; 4667 } 4668 4669 /* 4670 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4671 * 4672 * This option will affect the way delayed acks are performed. This 4673 * option allows you to get or set the delayed ack time, in 4674 * milliseconds. It also allows changing the delayed ack frequency. 4675 * Changing the frequency to 1 disables the delayed sack algorithm. If 4676 * the assoc_id is 0, then this sets or gets the endpoint's default 4677 * values. If the assoc_id field is non-zero, then the set or get 4678 * affects the specified association for the one to many model (the 4679 * assoc_id field is ignored by the one to one model).
Note that if 4680 * sack_delay or sack_freq are 0 when setting this option, then the 4681 * current values will remain unchanged. 4682 * 4683 * struct sctp_sack_info { 4684 * sctp_assoc_t sack_assoc_id; 4685 * uint32_t sack_delay; 4686 * uint32_t sack_freq; 4687 * }; 4688 * 4689 * sack_assoc_id - This parameter indicates which association the user 4690 * is performing an action upon. Note that if this field's value is 4691 * zero then the endpoint's default value is changed (affecting future 4692 * associations only). 4693 * 4694 * sack_delay - This parameter contains the number of milliseconds that 4695 * the user is requesting the delayed ACK timer be set to. Note that 4696 * this value is defined in the standard to be between 200 and 500 4697 * milliseconds. 4698 * 4699 * sack_freq - This parameter contains the number of packets that must 4700 * be received before a sack is sent without waiting for the delay 4701 * timer to expire. The default value for this is 2, setting this 4702 * value to 1 will disable the delayed sack algorithm. 4703 */ 4704 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 4705 char __user *optval, 4706 int __user *optlen) 4707 { 4708 struct sctp_sack_info params; 4709 struct sctp_association *asoc = NULL; 4710 struct sctp_sock *sp = sctp_sk(sk); 4711 4712 if (len >= sizeof(struct sctp_sack_info)) { 4713 len = sizeof(struct sctp_sack_info); 4714 4715 if (copy_from_user(&params, optval, len)) 4716 return -EFAULT; 4717 } else if (len == sizeof(struct sctp_assoc_value)) { 4718 pr_warn_ratelimited(DEPRECATED 4719 "%s (pid %d) " 4720 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 4721 "Use struct sctp_sack_info instead\n", 4722 current->comm, task_pid_nr(current)); 4723 if (copy_from_user(&params, optval, len)) 4724 return -EFAULT; 4725 } else 4726 return -EINVAL; 4727 4728 /* Get association, if sack_assoc_id != 0 and the socket is a one 4729 * to many style socket, and an association was not found, then 4730 * the id was invalid. 4731 */ 4732 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 4733 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 4734 return -EINVAL; 4735 4736 if (asoc) { 4737 /* Fetch association values. */ 4738 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 4739 params.sack_delay = jiffies_to_msecs( 4740 asoc->sackdelay); 4741 params.sack_freq = asoc->sackfreq; 4742 4743 } else { 4744 params.sack_delay = 0; 4745 params.sack_freq = 1; 4746 } 4747 } else { 4748 /* Fetch socket values. */ 4749 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 4750 params.sack_delay = sp->sackdelay; 4751 params.sack_freq = sp->sackfreq; 4752 } else { 4753 params.sack_delay = 0; 4754 params.sack_freq = 1; 4755 } 4756 } 4757 4758 if (copy_to_user(optval, &params, len)) 4759 return -EFAULT; 4760 4761 if (put_user(len, optlen)) 4762 return -EFAULT; 4763 4764 return 0; 4765 } 4766 4767 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 4768 * 4769 * Applications can specify protocol parameters for the default association 4770 * initialization. The option name argument to setsockopt() and getsockopt() 4771 * is SCTP_INITMSG. 4772 * 4773 * Setting initialization parameters is effective only on an unconnected 4774 * socket (for UDP-style sockets only future associations are affected 4775 * by the change). With TCP-style sockets, this option is inherited by 4776 * sockets derived from a listener socket.
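* An illustrative sketch (assuming <netinet/sctp.h> and a socket "fd"
* that has not yet connected or listened; the stream and attempt counts
* are arbitrary example values):
*
*	struct sctp_initmsg init;
*
*	memset(&init, 0, sizeof(init));
*	init.sinit_num_ostreams = 8;
*	init.sinit_max_instreams = 8;
*	init.sinit_max_attempts = 4;
*	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG,
*		       &init, sizeof(init)) < 0)
*		perror("SCTP_INITMSG");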
4777 */ 4778 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4779 { 4780 if (len < sizeof(struct sctp_initmsg)) 4781 return -EINVAL; 4782 len = sizeof(struct sctp_initmsg); 4783 if (put_user(len, optlen)) 4784 return -EFAULT; 4785 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4786 return -EFAULT; 4787 return 0; 4788 } 4789 4790 4791 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4792 char __user *optval, int __user *optlen) 4793 { 4794 struct sctp_association *asoc; 4795 int cnt = 0; 4796 struct sctp_getaddrs getaddrs; 4797 struct sctp_transport *from; 4798 void __user *to; 4799 union sctp_addr temp; 4800 struct sctp_sock *sp = sctp_sk(sk); 4801 int addrlen; 4802 size_t space_left; 4803 int bytes_copied; 4804 4805 if (len < sizeof(struct sctp_getaddrs)) 4806 return -EINVAL; 4807 4808 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4809 return -EFAULT; 4810 4811 /* For UDP-style sockets, id specifies the association to query. */ 4812 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4813 if (!asoc) 4814 return -EINVAL; 4815 4816 to = optval + offsetof(struct sctp_getaddrs, addrs); 4817 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4818 4819 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4820 transports) { 4821 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4822 addrlen = sctp_get_pf_specific(sk->sk_family) 4823 ->addr_to_user(sp, &temp); 4824 if (space_left < addrlen) 4825 return -ENOMEM; 4826 if (copy_to_user(to, &temp, addrlen)) 4827 return -EFAULT; 4828 to += addrlen; 4829 cnt++; 4830 space_left -= addrlen; 4831 } 4832 4833 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4834 return -EFAULT; 4835 bytes_copied = ((char __user *)to) - optval; 4836 if (put_user(bytes_copied, optlen)) 4837 return -EFAULT; 4838 4839 return 0; 4840 } 4841 4842 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4843 size_t space_left, int *bytes_copied) 4844 { 4845 struct sctp_sockaddr_entry *addr; 4846 union sctp_addr temp; 4847 int cnt = 0; 4848 int addrlen; 4849 struct net *net = sock_net(sk); 4850 4851 rcu_read_lock(); 4852 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4853 if (!addr->valid) 4854 continue; 4855 4856 if ((PF_INET == sk->sk_family) && 4857 (AF_INET6 == addr->a.sa.sa_family)) 4858 continue; 4859 if ((PF_INET6 == sk->sk_family) && 4860 inet_v6_ipv6only(sk) && 4861 (AF_INET == addr->a.sa.sa_family)) 4862 continue; 4863 memcpy(&temp, &addr->a, sizeof(temp)); 4864 if (!temp.v4.sin_port) 4865 temp.v4.sin_port = htons(port); 4866 4867 addrlen = sctp_get_pf_specific(sk->sk_family) 4868 ->addr_to_user(sctp_sk(sk), &temp); 4869 4870 if (space_left < addrlen) { 4871 cnt = -ENOMEM; 4872 break; 4873 } 4874 memcpy(to, &temp, addrlen); 4875 4876 to += addrlen; 4877 cnt++; 4878 space_left -= addrlen; 4879 *bytes_copied += addrlen; 4880 } 4881 rcu_read_unlock(); 4882 4883 return cnt; 4884 } 4885 4886 4887 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4888 char __user *optval, int __user *optlen) 4889 { 4890 struct sctp_bind_addr *bp; 4891 struct sctp_association *asoc; 4892 int cnt = 0; 4893 struct sctp_getaddrs getaddrs; 4894 struct sctp_sockaddr_entry *addr; 4895 void __user *to; 4896 union sctp_addr temp; 4897 struct sctp_sock *sp = sctp_sk(sk); 4898 int addrlen; 4899 int err = 0; 4900 size_t space_left; 4901 int bytes_copied = 0; 4902 void *addrs; 4903 void *buf; 4904 4905 if (len < sizeof(struct sctp_getaddrs)) 
4906 return -EINVAL; 4907 4908 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4909 return -EFAULT; 4910 4911 /* 4912 * For UDP-style sockets, id specifies the association to query. 4913 * If the id field is set to the value '0' then the locally bound 4914 * addresses are returned without regard to any particular 4915 * association. 4916 */ 4917 if (0 == getaddrs.assoc_id) { 4918 bp = &sctp_sk(sk)->ep->base.bind_addr; 4919 } else { 4920 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4921 if (!asoc) 4922 return -EINVAL; 4923 bp = &asoc->base.bind_addr; 4924 } 4925 4926 to = optval + offsetof(struct sctp_getaddrs, addrs); 4927 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4928 4929 addrs = kmalloc(space_left, GFP_KERNEL); 4930 if (!addrs) 4931 return -ENOMEM; 4932 4933 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4934 * addresses from the global local address list. 4935 */ 4936 if (sctp_list_single_entry(&bp->address_list)) { 4937 addr = list_entry(bp->address_list.next, 4938 struct sctp_sockaddr_entry, list); 4939 if (sctp_is_any(sk, &addr->a)) { 4940 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4941 space_left, &bytes_copied); 4942 if (cnt < 0) { 4943 err = cnt; 4944 goto out; 4945 } 4946 goto copy_getaddrs; 4947 } 4948 } 4949 4950 buf = addrs; 4951 /* Protection on the bound address list is not needed since 4952 * in the socket option context we hold a socket lock and 4953 * thus the bound address list can't change. 4954 */ 4955 list_for_each_entry(addr, &bp->address_list, list) { 4956 memcpy(&temp, &addr->a, sizeof(temp)); 4957 addrlen = sctp_get_pf_specific(sk->sk_family) 4958 ->addr_to_user(sp, &temp); 4959 if (space_left < addrlen) { 4960 err = -ENOMEM; /*fixme: right error?*/ 4961 goto out; 4962 } 4963 memcpy(buf, &temp, addrlen); 4964 buf += addrlen; 4965 bytes_copied += addrlen; 4966 cnt++; 4967 space_left -= addrlen; 4968 } 4969 4970 copy_getaddrs: 4971 if (copy_to_user(to, addrs, bytes_copied)) { 4972 err = -EFAULT; 4973 goto out; 4974 } 4975 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4976 err = -EFAULT; 4977 goto out; 4978 } 4979 if (put_user(bytes_copied, optlen)) 4980 err = -EFAULT; 4981 out: 4982 kfree(addrs); 4983 return err; 4984 } 4985 4986 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 4987 * 4988 * Requests that the local SCTP stack use the enclosed peer address as 4989 * the association primary. The enclosed address must be one of the 4990 * association peer's addresses. 
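* For illustration, reading the current primary back from user space
* might look roughly like this (a sketch; "fd" and "assoc_id" are
* assumptions, and the association id only matters for one-to-many
* style sockets):
*
*	struct sctp_prim prim;
*	socklen_t len = sizeof(prim);
*
*	memset(&prim, 0, sizeof(prim));
*	prim.ssp_assoc_id = assoc_id;
*	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
*		       &prim, &len) < 0)
*		perror("SCTP_PRIMARY_ADDR");
*
* on success, prim.ssp_addr holds the current primary peer address.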
4991 */ 4992 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 4993 char __user *optval, int __user *optlen) 4994 { 4995 struct sctp_prim prim; 4996 struct sctp_association *asoc; 4997 struct sctp_sock *sp = sctp_sk(sk); 4998 4999 if (len < sizeof(struct sctp_prim)) 5000 return -EINVAL; 5001 5002 len = sizeof(struct sctp_prim); 5003 5004 if (copy_from_user(&prim, optval, len)) 5005 return -EFAULT; 5006 5007 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 5008 if (!asoc) 5009 return -EINVAL; 5010 5011 if (!asoc->peer.primary_path) 5012 return -ENOTCONN; 5013 5014 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 5015 asoc->peer.primary_path->af_specific->sockaddr_len); 5016 5017 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, 5018 (union sctp_addr *)&prim.ssp_addr); 5019 5020 if (put_user(len, optlen)) 5021 return -EFAULT; 5022 if (copy_to_user(optval, &prim, len)) 5023 return -EFAULT; 5024 5025 return 0; 5026 } 5027 5028 /* 5029 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 5030 * 5031 * Requests that the local endpoint set the specified Adaptation Layer 5032 * Indication parameter for all future INIT and INIT-ACK exchanges. 5033 */ 5034 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 5035 char __user *optval, int __user *optlen) 5036 { 5037 struct sctp_setadaptation adaptation; 5038 5039 if (len < sizeof(struct sctp_setadaptation)) 5040 return -EINVAL; 5041 5042 len = sizeof(struct sctp_setadaptation); 5043 5044 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 5045 5046 if (put_user(len, optlen)) 5047 return -EFAULT; 5048 if (copy_to_user(optval, &adaptation, len)) 5049 return -EFAULT; 5050 5051 return 0; 5052 } 5053 5054 /* 5055 * 5056 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 5057 * 5058 * Applications that wish to use the sendto() system call may wish to 5059 * specify a default set of parameters that would normally be supplied 5060 * through the inclusion of ancillary data. This socket option allows 5061 * such an application to set the default sctp_sndrcvinfo structure. 5062 5063 5064 * The application that wishes to use this socket option simply passes 5065 * in to this call the sctp_sndrcvinfo structure defined in Section 5066 * 5.2.2) The input parameters accepted by this call include 5067 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 5068 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 5069 * to this call if the caller is using the UDP model. 5070 * 5071 * For getsockopt, it get the default sctp_sndrcvinfo structure. 
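* A short sketch of setting these defaults from user space (assumptions:
* <netinet/sctp.h>, a one-to-many socket "fd" and an established
* association id "assoc_id"; the stream number and ppid are arbitrary
* example values):
*
*	struct sctp_sndrcvinfo def;
*
*	memset(&def, 0, sizeof(def));
*	def.sinfo_stream = 1;
*	def.sinfo_ppid = htonl(42);
*	def.sinfo_assoc_id = assoc_id;
*	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
*		       &def, sizeof(def)) < 0)
*		perror("SCTP_DEFAULT_SEND_PARAM");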
5072 */ 5073 static int sctp_getsockopt_default_send_param(struct sock *sk, 5074 int len, char __user *optval, 5075 int __user *optlen) 5076 { 5077 struct sctp_sock *sp = sctp_sk(sk); 5078 struct sctp_association *asoc; 5079 struct sctp_sndrcvinfo info; 5080 5081 if (len < sizeof(info)) 5082 return -EINVAL; 5083 5084 len = sizeof(info); 5085 5086 if (copy_from_user(&info, optval, len)) 5087 return -EFAULT; 5088 5089 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 5090 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 5091 return -EINVAL; 5092 if (asoc) { 5093 info.sinfo_stream = asoc->default_stream; 5094 info.sinfo_flags = asoc->default_flags; 5095 info.sinfo_ppid = asoc->default_ppid; 5096 info.sinfo_context = asoc->default_context; 5097 info.sinfo_timetolive = asoc->default_timetolive; 5098 } else { 5099 info.sinfo_stream = sp->default_stream; 5100 info.sinfo_flags = sp->default_flags; 5101 info.sinfo_ppid = sp->default_ppid; 5102 info.sinfo_context = sp->default_context; 5103 info.sinfo_timetolive = sp->default_timetolive; 5104 } 5105 5106 if (put_user(len, optlen)) 5107 return -EFAULT; 5108 if (copy_to_user(optval, &info, len)) 5109 return -EFAULT; 5110 5111 return 0; 5112 } 5113 5114 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters 5115 * (SCTP_DEFAULT_SNDINFO) 5116 */ 5117 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, 5118 char __user *optval, 5119 int __user *optlen) 5120 { 5121 struct sctp_sock *sp = sctp_sk(sk); 5122 struct sctp_association *asoc; 5123 struct sctp_sndinfo info; 5124 5125 if (len < sizeof(info)) 5126 return -EINVAL; 5127 5128 len = sizeof(info); 5129 5130 if (copy_from_user(&info, optval, len)) 5131 return -EFAULT; 5132 5133 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 5134 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 5135 return -EINVAL; 5136 if (asoc) { 5137 info.snd_sid = asoc->default_stream; 5138 info.snd_flags = asoc->default_flags; 5139 info.snd_ppid = asoc->default_ppid; 5140 info.snd_context = asoc->default_context; 5141 } else { 5142 info.snd_sid = sp->default_stream; 5143 info.snd_flags = sp->default_flags; 5144 info.snd_ppid = sp->default_ppid; 5145 info.snd_context = sp->default_context; 5146 } 5147 5148 if (put_user(len, optlen)) 5149 return -EFAULT; 5150 if (copy_to_user(optval, &info, len)) 5151 return -EFAULT; 5152 5153 return 0; 5154 } 5155 5156 /* 5157 * 5158 * 7.1.5 SCTP_NODELAY 5159 * 5160 * Turn on/off any Nagle-like algorithm. This means that packets are 5161 * generally sent as soon as possible and no unnecessary delays are 5162 * introduced, at the cost of more packets in the network. Expects an 5163 * integer boolean flag. 5164 */ 5165 5166 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5167 char __user *optval, int __user *optlen) 5168 { 5169 int val; 5170 5171 if (len < sizeof(int)) 5172 return -EINVAL; 5173 5174 len = sizeof(int); 5175 val = (sctp_sk(sk)->nodelay == 1); 5176 if (put_user(len, optlen)) 5177 return -EFAULT; 5178 if (copy_to_user(optval, &val, len)) 5179 return -EFAULT; 5180 return 0; 5181 } 5182 5183 /* 5184 * 5185 * 7.1.1 SCTP_RTOINFO 5186 * 5187 * The protocol parameters used to initialize and bound retransmission 5188 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5189 * and modify these parameters. 5190 * All parameters are time values, in milliseconds. A value of 0, when 5191 * modifying the parameters, indicates that the current value should not 5192 * be changed. 
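* For illustration, a sketch that narrows the RTO bounds of one
* association (assumptions: <netinet/sctp.h>, socket "fd" and association
* id "assoc_id"; the millisecond values are arbitrary and the zero
* srto_initial leaves that parameter unchanged):
*
*	struct sctp_rtoinfo rto;
*
*	memset(&rto, 0, sizeof(rto));
*	rto.srto_assoc_id = assoc_id;
*	rto.srto_min = 200;
*	rto.srto_max = 10000;
*	if (setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO,
*		       &rto, sizeof(rto)) < 0)
*		perror("SCTP_RTOINFO");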
5193 * 5194 */ 5195 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5196 char __user *optval, 5197 int __user *optlen) { 5198 struct sctp_rtoinfo rtoinfo; 5199 struct sctp_association *asoc; 5200 5201 if (len < sizeof (struct sctp_rtoinfo)) 5202 return -EINVAL; 5203 5204 len = sizeof(struct sctp_rtoinfo); 5205 5206 if (copy_from_user(&rtoinfo, optval, len)) 5207 return -EFAULT; 5208 5209 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5210 5211 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5212 return -EINVAL; 5213 5214 /* Values corresponding to the specific association. */ 5215 if (asoc) { 5216 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5217 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5218 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5219 } else { 5220 /* Values corresponding to the endpoint. */ 5221 struct sctp_sock *sp = sctp_sk(sk); 5222 5223 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5224 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5225 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5226 } 5227 5228 if (put_user(len, optlen)) 5229 return -EFAULT; 5230 5231 if (copy_to_user(optval, &rtoinfo, len)) 5232 return -EFAULT; 5233 5234 return 0; 5235 } 5236 5237 /* 5238 * 5239 * 7.1.2 SCTP_ASSOCINFO 5240 * 5241 * This option is used to tune the maximum retransmission attempts 5242 * of the association. 5243 * Returns an error if the new association retransmission value is 5244 * greater than the sum of the retransmission value of the peer. 5245 * See [SCTP] for more information. 5246 * 5247 */ 5248 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5249 char __user *optval, 5250 int __user *optlen) 5251 { 5252 5253 struct sctp_assocparams assocparams; 5254 struct sctp_association *asoc; 5255 struct list_head *pos; 5256 int cnt = 0; 5257 5258 if (len < sizeof (struct sctp_assocparams)) 5259 return -EINVAL; 5260 5261 len = sizeof(struct sctp_assocparams); 5262 5263 if (copy_from_user(&assocparams, optval, len)) 5264 return -EFAULT; 5265 5266 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5267 5268 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5269 return -EINVAL; 5270 5271 /* Values correspoinding to the specific association */ 5272 if (asoc) { 5273 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5274 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5275 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5276 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5277 5278 list_for_each(pos, &asoc->peer.transport_addr_list) { 5279 cnt++; 5280 } 5281 5282 assocparams.sasoc_number_peer_destinations = cnt; 5283 } else { 5284 /* Values corresponding to the endpoint */ 5285 struct sctp_sock *sp = sctp_sk(sk); 5286 5287 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5288 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5289 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5290 assocparams.sasoc_cookie_life = 5291 sp->assocparams.sasoc_cookie_life; 5292 assocparams.sasoc_number_peer_destinations = 5293 sp->assocparams. 5294 sasoc_number_peer_destinations; 5295 } 5296 5297 if (put_user(len, optlen)) 5298 return -EFAULT; 5299 5300 if (copy_to_user(optval, &assocparams, len)) 5301 return -EFAULT; 5302 5303 return 0; 5304 } 5305 5306 /* 5307 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5308 * 5309 * This socket option is a boolean flag which turns on or off mapped V4 5310 * addresses. 
If this option is turned on and the socket is type 5311 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5312 * If this option is turned off, then no mapping will be done of V4 5313 * addresses and a user will receive both PF_INET6 and PF_INET type 5314 * addresses on the socket. 5315 */ 5316 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5317 char __user *optval, int __user *optlen) 5318 { 5319 int val; 5320 struct sctp_sock *sp = sctp_sk(sk); 5321 5322 if (len < sizeof(int)) 5323 return -EINVAL; 5324 5325 len = sizeof(int); 5326 val = sp->v4mapped; 5327 if (put_user(len, optlen)) 5328 return -EFAULT; 5329 if (copy_to_user(optval, &val, len)) 5330 return -EFAULT; 5331 5332 return 0; 5333 } 5334 5335 /* 5336 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 5337 * (chapter and verse is quoted at sctp_setsockopt_context()) 5338 */ 5339 static int sctp_getsockopt_context(struct sock *sk, int len, 5340 char __user *optval, int __user *optlen) 5341 { 5342 struct sctp_assoc_value params; 5343 struct sctp_sock *sp; 5344 struct sctp_association *asoc; 5345 5346 if (len < sizeof(struct sctp_assoc_value)) 5347 return -EINVAL; 5348 5349 len = sizeof(struct sctp_assoc_value); 5350 5351 if (copy_from_user(&params, optval, len)) 5352 return -EFAULT; 5353 5354 sp = sctp_sk(sk); 5355 5356 if (params.assoc_id != 0) { 5357 asoc = sctp_id2assoc(sk, params.assoc_id); 5358 if (!asoc) 5359 return -EINVAL; 5360 params.assoc_value = asoc->default_rcv_context; 5361 } else { 5362 params.assoc_value = sp->default_rcv_context; 5363 } 5364 5365 if (put_user(len, optlen)) 5366 return -EFAULT; 5367 if (copy_to_user(optval, &params, len)) 5368 return -EFAULT; 5369 5370 return 0; 5371 } 5372 5373 /* 5374 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5375 * This option will get or set the maximum size to put in any outgoing 5376 * SCTP DATA chunk. If a message is larger than this size it will be 5377 * fragmented by SCTP into the specified size. Note that the underlying 5378 * SCTP implementation may fragment into smaller sized chunks when the 5379 * PMTU of the underlying association is smaller than the value set by 5380 * the user. The default value for this option is '0' which indicates 5381 * the user is NOT limiting fragmentation and only the PMTU will affect 5382 * SCTP's choice of DATA chunk size. Note also that values set larger 5383 * than the maximum size of an IP datagram will effectively let SCTP 5384 * control fragmentation (i.e. the same as setting this option to 0). 5385 * 5386 * The following structure is used to access and modify this parameter: 5387 * 5388 * struct sctp_assoc_value { 5389 * sctp_assoc_t assoc_id; 5390 * uint32_t assoc_value; 5391 * }; 5392 * 5393 * assoc_id: This parameter is ignored for one-to-one style sockets. 5394 * For one-to-many style sockets this parameter indicates which 5395 * association the user is performing an action upon. Note that if 5396 * this field's value is zero then the endpoint's default value is 5397 * changed (affecting future associations only). 5398 * assoc_value: This parameter specifies the maximum size in bytes.
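* An illustrative sketch (assuming <netinet/sctp.h> and a socket "fd";
* the 1200-byte limit is an arbitrary example and an assoc_id of zero
* changes the endpoint default):
*
*	struct sctp_assoc_value av;
*
*	memset(&av, 0, sizeof(av));
*	av.assoc_id = 0;
*	av.assoc_value = 1200;
*	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG,
*		       &av, sizeof(av)) < 0)
*		perror("SCTP_MAXSEG");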
5399 */ 5400 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5401 char __user *optval, int __user *optlen) 5402 { 5403 struct sctp_assoc_value params; 5404 struct sctp_association *asoc; 5405 5406 if (len == sizeof(int)) { 5407 pr_warn_ratelimited(DEPRECATED 5408 "%s (pid %d) " 5409 "Use of int in maxseg socket option.\n" 5410 "Use struct sctp_assoc_value instead\n", 5411 current->comm, task_pid_nr(current)); 5412 params.assoc_id = 0; 5413 } else if (len >= sizeof(struct sctp_assoc_value)) { 5414 len = sizeof(struct sctp_assoc_value); 5415 if (copy_from_user(&params, optval, sizeof(params))) 5416 return -EFAULT; 5417 } else 5418 return -EINVAL; 5419 5420 asoc = sctp_id2assoc(sk, params.assoc_id); 5421 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5422 return -EINVAL; 5423 5424 if (asoc) 5425 params.assoc_value = asoc->frag_point; 5426 else 5427 params.assoc_value = sctp_sk(sk)->user_frag; 5428 5429 if (put_user(len, optlen)) 5430 return -EFAULT; 5431 if (len == sizeof(int)) { 5432 if (copy_to_user(optval, &params.assoc_value, len)) 5433 return -EFAULT; 5434 } else { 5435 if (copy_to_user(optval, &params, len)) 5436 return -EFAULT; 5437 } 5438 5439 return 0; 5440 } 5441 5442 /* 5443 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5444 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5445 */ 5446 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5447 char __user *optval, int __user *optlen) 5448 { 5449 int val; 5450 5451 if (len < sizeof(int)) 5452 return -EINVAL; 5453 5454 len = sizeof(int); 5455 5456 val = sctp_sk(sk)->frag_interleave; 5457 if (put_user(len, optlen)) 5458 return -EFAULT; 5459 if (copy_to_user(optval, &val, len)) 5460 return -EFAULT; 5461 5462 return 0; 5463 } 5464 5465 /* 5466 * 7.1.25. Set or Get the sctp partial delivery point 5467 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5468 */ 5469 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5470 char __user *optval, 5471 int __user *optlen) 5472 { 5473 u32 val; 5474 5475 if (len < sizeof(u32)) 5476 return -EINVAL; 5477 5478 len = sizeof(u32); 5479 5480 val = sctp_sk(sk)->pd_point; 5481 if (put_user(len, optlen)) 5482 return -EFAULT; 5483 if (copy_to_user(optval, &val, len)) 5484 return -EFAULT; 5485 5486 return 0; 5487 } 5488 5489 /* 5490 * 7.1.28.
Set or Get the maximum burst (SCTP_MAX_BURST) 5491 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5492 */ 5493 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5494 char __user *optval, 5495 int __user *optlen) 5496 { 5497 struct sctp_assoc_value params; 5498 struct sctp_sock *sp; 5499 struct sctp_association *asoc; 5500 5501 if (len == sizeof(int)) { 5502 pr_warn_ratelimited(DEPRECATED 5503 "%s (pid %d) " 5504 "Use of int in max_burst socket option.\n" 5505 "Use struct sctp_assoc_value instead\n", 5506 current->comm, task_pid_nr(current)); 5507 params.assoc_id = 0; 5508 } else if (len >= sizeof(struct sctp_assoc_value)) { 5509 len = sizeof(struct sctp_assoc_value); 5510 if (copy_from_user(&params, optval, len)) 5511 return -EFAULT; 5512 } else 5513 return -EINVAL; 5514 5515 sp = sctp_sk(sk); 5516 5517 if (params.assoc_id != 0) { 5518 asoc = sctp_id2assoc(sk, params.assoc_id); 5519 if (!asoc) 5520 return -EINVAL; 5521 params.assoc_value = asoc->max_burst; 5522 } else 5523 params.assoc_value = sp->max_burst; 5524 5525 if (len == sizeof(int)) { 5526 if (copy_to_user(optval, &params.assoc_value, len)) 5527 return -EFAULT; 5528 } else { 5529 if (copy_to_user(optval, &params, len)) 5530 return -EFAULT; 5531 } 5532 5533 return 0; 5534 5535 } 5536 5537 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5538 char __user *optval, int __user *optlen) 5539 { 5540 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5541 struct sctp_hmacalgo __user *p = (void __user *)optval; 5542 struct sctp_hmac_algo_param *hmacs; 5543 __u16 data_len = 0; 5544 u32 num_idents; 5545 5546 if (!ep->auth_enable) 5547 return -EACCES; 5548 5549 hmacs = ep->auth_hmacs_list; 5550 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5551 5552 if (len < sizeof(struct sctp_hmacalgo) + data_len) 5553 return -EINVAL; 5554 5555 len = sizeof(struct sctp_hmacalgo) + data_len; 5556 num_idents = data_len / sizeof(u16); 5557 5558 if (put_user(len, optlen)) 5559 return -EFAULT; 5560 if (put_user(num_idents, &p->shmac_num_idents)) 5561 return -EFAULT; 5562 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5563 return -EFAULT; 5564 return 0; 5565 } 5566 5567 static int sctp_getsockopt_active_key(struct sock *sk, int len, 5568 char __user *optval, int __user *optlen) 5569 { 5570 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5571 struct sctp_authkeyid val; 5572 struct sctp_association *asoc; 5573 5574 if (!ep->auth_enable) 5575 return -EACCES; 5576 5577 if (len < sizeof(struct sctp_authkeyid)) 5578 return -EINVAL; 5579 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5580 return -EFAULT; 5581 5582 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5583 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5584 return -EINVAL; 5585 5586 if (asoc) 5587 val.scact_keynumber = asoc->active_key_id; 5588 else 5589 val.scact_keynumber = ep->active_key_id; 5590 5591 len = sizeof(struct sctp_authkeyid); 5592 if (put_user(len, optlen)) 5593 return -EFAULT; 5594 if (copy_to_user(optval, &val, len)) 5595 return -EFAULT; 5596 5597 return 0; 5598 } 5599 5600 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5601 char __user *optval, int __user *optlen) 5602 { 5603 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5604 struct sctp_authchunks __user *p = (void __user *)optval; 5605 struct sctp_authchunks val; 5606 struct sctp_association *asoc; 5607 struct sctp_chunks_param *ch; 5608 u32 num_chunks = 0; 5609 char __user *to; 5610 5611 if (!ep->auth_enable) 5612 return -EACCES; 5613
5614 if (len < sizeof(struct sctp_authchunks)) 5615 return -EINVAL; 5616 5617 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5618 return -EFAULT; 5619 5620 to = p->gauth_chunks; 5621 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5622 if (!asoc) 5623 return -EINVAL; 5624 5625 ch = asoc->peer.peer_chunks; 5626 if (!ch) 5627 goto num; 5628 5629 /* See if the user provided enough room for all the data */ 5630 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5631 if (len < num_chunks) 5632 return -EINVAL; 5633 5634 if (copy_to_user(to, ch->chunks, num_chunks)) 5635 return -EFAULT; 5636 num: 5637 len = sizeof(struct sctp_authchunks) + num_chunks; 5638 if (put_user(len, optlen)) 5639 return -EFAULT; 5640 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5641 return -EFAULT; 5642 return 0; 5643 } 5644 5645 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5646 char __user *optval, int __user *optlen) 5647 { 5648 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5649 struct sctp_authchunks __user *p = (void __user *)optval; 5650 struct sctp_authchunks val; 5651 struct sctp_association *asoc; 5652 struct sctp_chunks_param *ch; 5653 u32 num_chunks = 0; 5654 char __user *to; 5655 5656 if (!ep->auth_enable) 5657 return -EACCES; 5658 5659 if (len < sizeof(struct sctp_authchunks)) 5660 return -EINVAL; 5661 5662 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5663 return -EFAULT; 5664 5665 to = p->gauth_chunks; 5666 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5667 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5668 return -EINVAL; 5669 5670 if (asoc) 5671 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 5672 else 5673 ch = ep->auth_chunk_list; 5674 5675 if (!ch) 5676 goto num; 5677 5678 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5679 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5680 return -EINVAL; 5681 5682 if (copy_to_user(to, ch->chunks, num_chunks)) 5683 return -EFAULT; 5684 num: 5685 len = sizeof(struct sctp_authchunks) + num_chunks; 5686 if (put_user(len, optlen)) 5687 return -EFAULT; 5688 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5689 return -EFAULT; 5690 5691 return 0; 5692 } 5693 5694 /* 5695 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5696 * This option gets the current number of associations that are attached 5697 * to a one-to-many style socket. The option value is an uint32_t. 
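 *
 * A one-to-many style caller might query it as follows (userspace
 * sketch; 'fd' is an assumed socket descriptor):
 *
 *	uint32_t assoc_cnt = 0;
 *	socklen_t optlen = sizeof(assoc_cnt);
 *
 *	if (getsockopt(fd, SOL_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &assoc_cnt, &optlen) == 0)
 *		printf("associations: %u\n", assoc_cnt);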
5698 */ 5699 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5700 char __user *optval, int __user *optlen) 5701 { 5702 struct sctp_sock *sp = sctp_sk(sk); 5703 struct sctp_association *asoc; 5704 u32 val = 0; 5705 5706 if (sctp_style(sk, TCP)) 5707 return -EOPNOTSUPP; 5708 5709 if (len < sizeof(u32)) 5710 return -EINVAL; 5711 5712 len = sizeof(u32); 5713 5714 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5715 val++; 5716 } 5717 5718 if (put_user(len, optlen)) 5719 return -EFAULT; 5720 if (copy_to_user(optval, &val, len)) 5721 return -EFAULT; 5722 5723 return 0; 5724 } 5725 5726 /* 5727 * 8.1.23 SCTP_AUTO_ASCONF 5728 * See the corresponding setsockopt entry as description 5729 */ 5730 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5731 char __user *optval, int __user *optlen) 5732 { 5733 int val = 0; 5734 5735 if (len < sizeof(int)) 5736 return -EINVAL; 5737 5738 len = sizeof(int); 5739 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5740 val = 1; 5741 if (put_user(len, optlen)) 5742 return -EFAULT; 5743 if (copy_to_user(optval, &val, len)) 5744 return -EFAULT; 5745 return 0; 5746 } 5747 5748 /* 5749 * 8.2.6. Get the Current Identifiers of Associations 5750 * (SCTP_GET_ASSOC_ID_LIST) 5751 * 5752 * This option gets the current list of SCTP association identifiers of 5753 * the SCTP associations handled by a one-to-many style socket. 5754 */ 5755 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5756 char __user *optval, int __user *optlen) 5757 { 5758 struct sctp_sock *sp = sctp_sk(sk); 5759 struct sctp_association *asoc; 5760 struct sctp_assoc_ids *ids; 5761 u32 num = 0; 5762 5763 if (sctp_style(sk, TCP)) 5764 return -EOPNOTSUPP; 5765 5766 if (len < sizeof(struct sctp_assoc_ids)) 5767 return -EINVAL; 5768 5769 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5770 num++; 5771 } 5772 5773 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5774 return -EINVAL; 5775 5776 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5777 5778 ids = kmalloc(len, GFP_KERNEL); 5779 if (unlikely(!ids)) 5780 return -ENOMEM; 5781 5782 ids->gaids_number_of_ids = num; 5783 num = 0; 5784 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5785 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5786 } 5787 5788 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5789 kfree(ids); 5790 return -EFAULT; 5791 } 5792 5793 kfree(ids); 5794 return 0; 5795 } 5796 5797 /* 5798 * SCTP_PEER_ADDR_THLDS 5799 * 5800 * This option allows us to fetch the partially failed threshold for one or all 5801 * transports in an association. 
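 *
 * A caller might fetch the association-wide values like this (userspace
 * sketch; 'fd' and 'assoc_id' are assumed names; leaving spt_address
 * zeroed makes the code below report the association values rather than
 * those of a single transport):
 *
 *	struct sctp_paddrthlds th = { .spt_assoc_id = assoc_id };
 *	socklen_t optlen = sizeof(th);
 *
 *	getsockopt(fd, SOL_SCTP, SCTP_PEER_ADDR_THLDS, &th, &optlen);
 *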
See Section 6.1 of: 5802 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5803 */ 5804 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5805 char __user *optval, 5806 int len, 5807 int __user *optlen) 5808 { 5809 struct sctp_paddrthlds val; 5810 struct sctp_transport *trans; 5811 struct sctp_association *asoc; 5812 5813 if (len < sizeof(struct sctp_paddrthlds)) 5814 return -EINVAL; 5815 len = sizeof(struct sctp_paddrthlds); 5816 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5817 return -EFAULT; 5818 5819 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5820 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5821 if (!asoc) 5822 return -ENOENT; 5823 5824 val.spt_pathpfthld = asoc->pf_retrans; 5825 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5826 } else { 5827 trans = sctp_addr_id2transport(sk, &val.spt_address, 5828 val.spt_assoc_id); 5829 if (!trans) 5830 return -ENOENT; 5831 5832 val.spt_pathmaxrxt = trans->pathmaxrxt; 5833 val.spt_pathpfthld = trans->pf_retrans; 5834 } 5835 5836 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5837 return -EFAULT; 5838 5839 return 0; 5840 } 5841 5842 /* 5843 * SCTP_GET_ASSOC_STATS 5844 * 5845 * This option retrieves local per endpoint statistics. It is modeled 5846 * after OpenSolaris' implementation 5847 */ 5848 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5849 char __user *optval, 5850 int __user *optlen) 5851 { 5852 struct sctp_assoc_stats sas; 5853 struct sctp_association *asoc = NULL; 5854 5855 /* User must provide at least the assoc id */ 5856 if (len < sizeof(sctp_assoc_t)) 5857 return -EINVAL; 5858 5859 /* Allow the struct to grow and fill in as much as possible */ 5860 len = min_t(size_t, len, sizeof(sas)); 5861 5862 if (copy_from_user(&sas, optval, len)) 5863 return -EFAULT; 5864 5865 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5866 if (!asoc) 5867 return -EINVAL; 5868 5869 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5870 sas.sas_gapcnt = asoc->stats.gapcnt; 5871 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5872 sas.sas_osacks = asoc->stats.osacks; 5873 sas.sas_isacks = asoc->stats.isacks; 5874 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5875 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5876 sas.sas_oodchunks = asoc->stats.oodchunks; 5877 sas.sas_iodchunks = asoc->stats.iodchunks; 5878 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5879 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5880 sas.sas_idupchunks = asoc->stats.idupchunks; 5881 sas.sas_opackets = asoc->stats.opackets; 5882 sas.sas_ipackets = asoc->stats.ipackets; 5883 5884 /* New high max rto observed, will return 0 if not a single 5885 * RTO update took place. 
obs_rto_ipaddr will be bogus 5886 * in such a case 5887 */ 5888 sas.sas_maxrto = asoc->stats.max_obs_rto; 5889 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5890 sizeof(struct sockaddr_storage)); 5891 5892 /* Mark beginning of a new observation period */ 5893 asoc->stats.max_obs_rto = asoc->rto_min; 5894 5895 if (put_user(len, optlen)) 5896 return -EFAULT; 5897 5898 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5899 5900 if (copy_to_user(optval, &sas, len)) 5901 return -EFAULT; 5902 5903 return 0; 5904 } 5905 5906 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, 5907 char __user *optval, 5908 int __user *optlen) 5909 { 5910 int val = 0; 5911 5912 if (len < sizeof(int)) 5913 return -EINVAL; 5914 5915 len = sizeof(int); 5916 if (sctp_sk(sk)->recvrcvinfo) 5917 val = 1; 5918 if (put_user(len, optlen)) 5919 return -EFAULT; 5920 if (copy_to_user(optval, &val, len)) 5921 return -EFAULT; 5922 5923 return 0; 5924 } 5925 5926 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, 5927 char __user *optval, 5928 int __user *optlen) 5929 { 5930 int val = 0; 5931 5932 if (len < sizeof(int)) 5933 return -EINVAL; 5934 5935 len = sizeof(int); 5936 if (sctp_sk(sk)->recvnxtinfo) 5937 val = 1; 5938 if (put_user(len, optlen)) 5939 return -EFAULT; 5940 if (copy_to_user(optval, &val, len)) 5941 return -EFAULT; 5942 5943 return 0; 5944 } 5945 5946 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5947 char __user *optval, int __user *optlen) 5948 { 5949 int retval = 0; 5950 int len; 5951 5952 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5953 5954 /* I can hardly begin to describe how wrong this is. This is 5955 * so broken as to be worse than useless. The API draft 5956 * REALLY is NOT helpful here... I am not convinced that the 5957 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5958 * are at all well-founded. 
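 *
 * For reference, the requests this file does define are the SOL_SCTP
 * ones dispatched in the switch below, e.g. (userspace sketch; 'fd' is
 * an assumed descriptor):
 *
 *	struct sctp_status status = { 0 };
 *	socklen_t optlen = sizeof(status);
 *
 *	getsockopt(fd, SOL_SCTP, SCTP_STATUS, &status, &optlen);
 *
 * Every other level is simply passed through to the address family's
 * own getsockopt() handler.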
5959 */ 5960 if (level != SOL_SCTP) { 5961 struct sctp_af *af = sctp_sk(sk)->pf->af; 5962 5963 retval = af->getsockopt(sk, level, optname, optval, optlen); 5964 return retval; 5965 } 5966 5967 if (get_user(len, optlen)) 5968 return -EFAULT; 5969 5970 lock_sock(sk); 5971 5972 switch (optname) { 5973 case SCTP_STATUS: 5974 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5975 break; 5976 case SCTP_DISABLE_FRAGMENTS: 5977 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5978 optlen); 5979 break; 5980 case SCTP_EVENTS: 5981 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5982 break; 5983 case SCTP_AUTOCLOSE: 5984 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5985 break; 5986 case SCTP_SOCKOPT_PEELOFF: 5987 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 5988 break; 5989 case SCTP_PEER_ADDR_PARAMS: 5990 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5991 optlen); 5992 break; 5993 case SCTP_DELAYED_SACK: 5994 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 5995 optlen); 5996 break; 5997 case SCTP_INITMSG: 5998 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5999 break; 6000 case SCTP_GET_PEER_ADDRS: 6001 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 6002 optlen); 6003 break; 6004 case SCTP_GET_LOCAL_ADDRS: 6005 retval = sctp_getsockopt_local_addrs(sk, len, optval, 6006 optlen); 6007 break; 6008 case SCTP_SOCKOPT_CONNECTX3: 6009 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 6010 break; 6011 case SCTP_DEFAULT_SEND_PARAM: 6012 retval = sctp_getsockopt_default_send_param(sk, len, 6013 optval, optlen); 6014 break; 6015 case SCTP_DEFAULT_SNDINFO: 6016 retval = sctp_getsockopt_default_sndinfo(sk, len, 6017 optval, optlen); 6018 break; 6019 case SCTP_PRIMARY_ADDR: 6020 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 6021 break; 6022 case SCTP_NODELAY: 6023 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 6024 break; 6025 case SCTP_RTOINFO: 6026 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 6027 break; 6028 case SCTP_ASSOCINFO: 6029 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 6030 break; 6031 case SCTP_I_WANT_MAPPED_V4_ADDR: 6032 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 6033 break; 6034 case SCTP_MAXSEG: 6035 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 6036 break; 6037 case SCTP_GET_PEER_ADDR_INFO: 6038 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 6039 optlen); 6040 break; 6041 case SCTP_ADAPTATION_LAYER: 6042 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, 6043 optlen); 6044 break; 6045 case SCTP_CONTEXT: 6046 retval = sctp_getsockopt_context(sk, len, optval, optlen); 6047 break; 6048 case SCTP_FRAGMENT_INTERLEAVE: 6049 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 6050 optlen); 6051 break; 6052 case SCTP_PARTIAL_DELIVERY_POINT: 6053 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 6054 optlen); 6055 break; 6056 case SCTP_MAX_BURST: 6057 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 6058 break; 6059 case SCTP_AUTH_KEY: 6060 case SCTP_AUTH_CHUNK: 6061 case SCTP_AUTH_DELETE_KEY: 6062 retval = -EOPNOTSUPP; 6063 break; 6064 case SCTP_HMAC_IDENT: 6065 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 6066 break; 6067 case SCTP_AUTH_ACTIVE_KEY: 6068 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 6069 break; 6070 case SCTP_PEER_AUTH_CHUNKS: 6071 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 6072 optlen); 
6073 break; 6074 case SCTP_LOCAL_AUTH_CHUNKS: 6075 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 6076 optlen); 6077 break; 6078 case SCTP_GET_ASSOC_NUMBER: 6079 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 6080 break; 6081 case SCTP_GET_ASSOC_ID_LIST: 6082 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 6083 break; 6084 case SCTP_AUTO_ASCONF: 6085 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 6086 break; 6087 case SCTP_PEER_ADDR_THLDS: 6088 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 6089 break; 6090 case SCTP_GET_ASSOC_STATS: 6091 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 6092 break; 6093 case SCTP_RECVRCVINFO: 6094 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); 6095 break; 6096 case SCTP_RECVNXTINFO: 6097 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); 6098 break; 6099 default: 6100 retval = -ENOPROTOOPT; 6101 break; 6102 } 6103 6104 release_sock(sk); 6105 return retval; 6106 } 6107 6108 static void sctp_hash(struct sock *sk) 6109 { 6110 /* STUB */ 6111 } 6112 6113 static void sctp_unhash(struct sock *sk) 6114 { 6115 /* STUB */ 6116 } 6117 6118 /* Check if port is acceptable. Possibly find first available port. 6119 * 6120 * The port hash table (contained in the 'global' SCTP protocol storage 6121 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 6122 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 6123 * list (the list number is the port number hashed out, so as you 6124 * would expect from a hash function, all the ports in a given list have 6125 * such a number that hashes out to the same list number; you were 6126 * expecting that, right?); so each list has a set of ports, with a 6127 * link to the socket (struct sock) that uses it, the port number and 6128 * a fastreuse flag (FIXME: NPI ipg). 6129 */ 6130 static struct sctp_bind_bucket *sctp_bucket_create( 6131 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 6132 6133 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 6134 { 6135 struct sctp_bind_hashbucket *head; /* hash list */ 6136 struct sctp_bind_bucket *pp; 6137 unsigned short snum; 6138 int ret; 6139 6140 snum = ntohs(addr->v4.sin_port); 6141 6142 pr_debug("%s: begins, snum:%d\n", __func__, snum); 6143 6144 local_bh_disable(); 6145 6146 if (snum == 0) { 6147 /* Search for an available port. */ 6148 int low, high, remaining, index; 6149 unsigned int rover; 6150 struct net *net = sock_net(sk); 6151 6152 inet_get_local_port_range(net, &low, &high); 6153 remaining = (high - low) + 1; 6154 rover = prandom_u32() % remaining + low; 6155 6156 do { 6157 rover++; 6158 if ((rover < low) || (rover > high)) 6159 rover = low; 6160 if (inet_is_local_reserved_port(net, rover)) 6161 continue; 6162 index = sctp_phashfn(sock_net(sk), rover); 6163 head = &sctp_port_hashtable[index]; 6164 spin_lock(&head->lock); 6165 sctp_for_each_hentry(pp, &head->chain) 6166 if ((pp->port == rover) && 6167 net_eq(sock_net(sk), pp->net)) 6168 goto next; 6169 break; 6170 next: 6171 spin_unlock(&head->lock); 6172 } while (--remaining > 0); 6173 6174 /* Exhausted local port range during search? */ 6175 ret = 1; 6176 if (remaining <= 0) 6177 goto fail; 6178 6179 /* OK, here is the one we will use. HEAD (the port 6180 * hash table list entry) is non-NULL and we hold it's 6181 * mutex. 
6182 */ 6183 snum = rover; 6184 } else { 6185 /* We are given an specific port number; we verify 6186 * that it is not being used. If it is used, we will 6187 * exahust the search in the hash list corresponding 6188 * to the port number (snum) - we detect that with the 6189 * port iterator, pp being NULL. 6190 */ 6191 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 6192 spin_lock(&head->lock); 6193 sctp_for_each_hentry(pp, &head->chain) { 6194 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 6195 goto pp_found; 6196 } 6197 } 6198 pp = NULL; 6199 goto pp_not_found; 6200 pp_found: 6201 if (!hlist_empty(&pp->owner)) { 6202 /* We had a port hash table hit - there is an 6203 * available port (pp != NULL) and it is being 6204 * used by other socket (pp->owner not empty); that other 6205 * socket is going to be sk2. 6206 */ 6207 int reuse = sk->sk_reuse; 6208 struct sock *sk2; 6209 6210 pr_debug("%s: found a possible match\n", __func__); 6211 6212 if (pp->fastreuse && sk->sk_reuse && 6213 sk->sk_state != SCTP_SS_LISTENING) 6214 goto success; 6215 6216 /* Run through the list of sockets bound to the port 6217 * (pp->port) [via the pointers bind_next and 6218 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 6219 * we get the endpoint they describe and run through 6220 * the endpoint's list of IP (v4 or v6) addresses, 6221 * comparing each of the addresses with the address of 6222 * the socket sk. If we find a match, then that means 6223 * that this port/socket (sk) combination are already 6224 * in an endpoint. 6225 */ 6226 sk_for_each_bound(sk2, &pp->owner) { 6227 struct sctp_endpoint *ep2; 6228 ep2 = sctp_sk(sk2)->ep; 6229 6230 if (sk == sk2 || 6231 (reuse && sk2->sk_reuse && 6232 sk2->sk_state != SCTP_SS_LISTENING)) 6233 continue; 6234 6235 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 6236 sctp_sk(sk2), sctp_sk(sk))) { 6237 ret = (long)sk2; 6238 goto fail_unlock; 6239 } 6240 } 6241 6242 pr_debug("%s: found a match\n", __func__); 6243 } 6244 pp_not_found: 6245 /* If there was a hash table miss, create a new port. */ 6246 ret = 1; 6247 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 6248 goto fail_unlock; 6249 6250 /* In either case (hit or miss), make sure fastreuse is 1 only 6251 * if sk->sk_reuse is too (that is, if the caller requested 6252 * SO_REUSEADDR on this socket -sk-). 6253 */ 6254 if (hlist_empty(&pp->owner)) { 6255 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6256 pp->fastreuse = 1; 6257 else 6258 pp->fastreuse = 0; 6259 } else if (pp->fastreuse && 6260 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6261 pp->fastreuse = 0; 6262 6263 /* We are set, so fill up all the data in the hash table 6264 * entry, tie the socket list information with the rest of the 6265 * sockets FIXME: Blurry, NPI (ipg). 6266 */ 6267 success: 6268 if (!sctp_sk(sk)->bind_hash) { 6269 inet_sk(sk)->inet_num = snum; 6270 sk_add_bind_node(sk, &pp->owner); 6271 sctp_sk(sk)->bind_hash = pp; 6272 } 6273 ret = 0; 6274 6275 fail_unlock: 6276 spin_unlock(&head->lock); 6277 6278 fail: 6279 local_bh_enable(); 6280 return ret; 6281 } 6282 6283 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6284 * port is requested. 6285 */ 6286 static int sctp_get_port(struct sock *sk, unsigned short snum) 6287 { 6288 union sctp_addr addr; 6289 struct sctp_af *af = sctp_sk(sk)->pf->af; 6290 6291 /* Set up a dummy address struct from the sk. 
*/ 6292 af->from_sk(&addr, sk); 6293 addr.v4.sin_port = htons(snum); 6294 6295 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6296 return !!sctp_get_port_local(sk, &addr); 6297 } 6298 6299 /* 6300 * Move a socket to LISTENING state. 6301 */ 6302 static int sctp_listen_start(struct sock *sk, int backlog) 6303 { 6304 struct sctp_sock *sp = sctp_sk(sk); 6305 struct sctp_endpoint *ep = sp->ep; 6306 struct crypto_hash *tfm = NULL; 6307 char alg[32]; 6308 6309 /* Allocate HMAC for generating cookie. */ 6310 if (!sp->hmac && sp->sctp_hmac_alg) { 6311 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6312 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6313 if (IS_ERR(tfm)) { 6314 net_info_ratelimited("failed to load transform for %s: %ld\n", 6315 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6316 return -ENOSYS; 6317 } 6318 sctp_sk(sk)->hmac = tfm; 6319 } 6320 6321 /* 6322 * If a bind() or sctp_bindx() is not called prior to a listen() 6323 * call that allows new associations to be accepted, the system 6324 * picks an ephemeral port and will choose an address set equivalent 6325 * to binding with a wildcard address. 6326 * 6327 * This is not currently spelled out in the SCTP sockets 6328 * extensions draft, but follows the practice as seen in TCP 6329 * sockets. 6330 * 6331 */ 6332 sk->sk_state = SCTP_SS_LISTENING; 6333 if (!ep->base.bind_addr.port) { 6334 if (sctp_autobind(sk)) 6335 return -EAGAIN; 6336 } else { 6337 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6338 sk->sk_state = SCTP_SS_CLOSED; 6339 return -EADDRINUSE; 6340 } 6341 } 6342 6343 sk->sk_max_ack_backlog = backlog; 6344 sctp_hash_endpoint(ep); 6345 return 0; 6346 } 6347 6348 /* 6349 * 4.1.3 / 5.1.3 listen() 6350 * 6351 * By default, new associations are not accepted for UDP style sockets. 6352 * An application uses listen() to mark a socket as being able to 6353 * accept new associations. 6354 * 6355 * On TCP style sockets, applications use listen() to ready the SCTP 6356 * endpoint for accepting inbound associations. 6357 * 6358 * On both types of endpoints a backlog of '0' disables listening. 6359 * 6360 * Move a socket to LISTENING state. 6361 */ 6362 int sctp_inet_listen(struct socket *sock, int backlog) 6363 { 6364 struct sock *sk = sock->sk; 6365 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6366 int err = -EINVAL; 6367 6368 if (unlikely(backlog < 0)) 6369 return err; 6370 6371 lock_sock(sk); 6372 6373 /* Peeled-off sockets are not allowed to listen(). */ 6374 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6375 goto out; 6376 6377 if (sock->state != SS_UNCONNECTED) 6378 goto out; 6379 6380 /* If backlog is zero, disable listening. */ 6381 if (!backlog) { 6382 if (sctp_sstate(sk, CLOSED)) 6383 goto out; 6384 6385 err = 0; 6386 sctp_unhash_endpoint(ep); 6387 sk->sk_state = SCTP_SS_CLOSED; 6388 if (sk->sk_reuse) 6389 sctp_sk(sk)->bind_hash->fastreuse = 1; 6390 goto out; 6391 } 6392 6393 /* If we are already listening, just update the backlog */ 6394 if (sctp_sstate(sk, LISTENING)) 6395 sk->sk_max_ack_backlog = backlog; 6396 else { 6397 err = sctp_listen_start(sk, backlog); 6398 if (err) 6399 goto out; 6400 } 6401 6402 err = 0; 6403 out: 6404 release_sock(sk); 6405 return err; 6406 } 6407 6408 /* 6409 * This function is done by modeling the current datagram_poll() and the 6410 * tcp_poll(). 
Note that, based on these implementations, we don't 6411 * lock the socket in this function, even though it seems that, 6412 * ideally, locking or some other mechanisms can be used to ensure 6413 * the integrity of the counters (sndbuf and wmem_alloc) used 6414 * in this place. We assume that we don't need locks either until proven 6415 * otherwise. 6416 * 6417 * Another thing to note is that we include the Async I/O support 6418 * here, again, by modeling the current TCP/UDP code. We don't have 6419 * a good way to test with it yet. 6420 */ 6421 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6422 { 6423 struct sock *sk = sock->sk; 6424 struct sctp_sock *sp = sctp_sk(sk); 6425 unsigned int mask; 6426 6427 poll_wait(file, sk_sleep(sk), wait); 6428 6429 /* A TCP-style listening socket becomes readable when the accept queue 6430 * is not empty. 6431 */ 6432 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6433 return (!list_empty(&sp->ep->asocs)) ? 6434 (POLLIN | POLLRDNORM) : 0; 6435 6436 mask = 0; 6437 6438 /* Is there any exceptional events? */ 6439 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6440 mask |= POLLERR | 6441 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 6442 if (sk->sk_shutdown & RCV_SHUTDOWN) 6443 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6444 if (sk->sk_shutdown == SHUTDOWN_MASK) 6445 mask |= POLLHUP; 6446 6447 /* Is it readable? Reconsider this code with TCP-style support. */ 6448 if (!skb_queue_empty(&sk->sk_receive_queue)) 6449 mask |= POLLIN | POLLRDNORM; 6450 6451 /* The association is either gone or not ready. */ 6452 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6453 return mask; 6454 6455 /* Is it writable? */ 6456 if (sctp_writeable(sk)) { 6457 mask |= POLLOUT | POLLWRNORM; 6458 } else { 6459 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6460 /* 6461 * Since the socket is not locked, the buffer 6462 * might be made available after the writeable check and 6463 * before the bit is set. This could cause a lost I/O 6464 * signal. tcp_poll() has a race breaker for this race 6465 * condition. Based on their implementation, we put 6466 * in the following code to cover it as well. 6467 */ 6468 if (sctp_writeable(sk)) 6469 mask |= POLLOUT | POLLWRNORM; 6470 } 6471 return mask; 6472 } 6473 6474 /******************************************************************** 6475 * 2nd Level Abstractions 6476 ********************************************************************/ 6477 6478 static struct sctp_bind_bucket *sctp_bucket_create( 6479 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6480 { 6481 struct sctp_bind_bucket *pp; 6482 6483 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6484 if (pp) { 6485 SCTP_DBG_OBJCNT_INC(bind_bucket); 6486 pp->port = snum; 6487 pp->fastreuse = 0; 6488 INIT_HLIST_HEAD(&pp->owner); 6489 pp->net = net; 6490 hlist_add_head(&pp->node, &head->chain); 6491 } 6492 return pp; 6493 } 6494 6495 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6496 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6497 { 6498 if (pp && hlist_empty(&pp->owner)) { 6499 __hlist_del(&pp->node); 6500 kmem_cache_free(sctp_bucket_cachep, pp); 6501 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6502 } 6503 } 6504 6505 /* Release this socket's reference to a local port. 
*/ 6506 static inline void __sctp_put_port(struct sock *sk) 6507 { 6508 struct sctp_bind_hashbucket *head = 6509 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6510 inet_sk(sk)->inet_num)]; 6511 struct sctp_bind_bucket *pp; 6512 6513 spin_lock(&head->lock); 6514 pp = sctp_sk(sk)->bind_hash; 6515 __sk_del_bind_node(sk); 6516 sctp_sk(sk)->bind_hash = NULL; 6517 inet_sk(sk)->inet_num = 0; 6518 sctp_bucket_destroy(pp); 6519 spin_unlock(&head->lock); 6520 } 6521 6522 void sctp_put_port(struct sock *sk) 6523 { 6524 local_bh_disable(); 6525 __sctp_put_port(sk); 6526 local_bh_enable(); 6527 } 6528 6529 /* 6530 * The system picks an ephemeral port and choose an address set equivalent 6531 * to binding with a wildcard address. 6532 * One of those addresses will be the primary address for the association. 6533 * This automatically enables the multihoming capability of SCTP. 6534 */ 6535 static int sctp_autobind(struct sock *sk) 6536 { 6537 union sctp_addr autoaddr; 6538 struct sctp_af *af; 6539 __be16 port; 6540 6541 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6542 af = sctp_sk(sk)->pf->af; 6543 6544 port = htons(inet_sk(sk)->inet_num); 6545 af->inaddr_any(&autoaddr, port); 6546 6547 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6548 } 6549 6550 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6551 * 6552 * From RFC 2292 6553 * 4.2 The cmsghdr Structure * 6554 * 6555 * When ancillary data is sent or received, any number of ancillary data 6556 * objects can be specified by the msg_control and msg_controllen members of 6557 * the msghdr structure, because each object is preceded by 6558 * a cmsghdr structure defining the object's length (the cmsg_len member). 6559 * Historically Berkeley-derived implementations have passed only one object 6560 * at a time, but this API allows multiple objects to be 6561 * passed in a single call to sendmsg() or recvmsg(). The following example 6562 * shows two ancillary data objects in a control buffer. 6563 * 6564 * |<--------------------------- msg_controllen -------------------------->| 6565 * | | 6566 * 6567 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6568 * 6569 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6570 * | | | 6571 * 6572 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6573 * 6574 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6575 * | | | | | 6576 * 6577 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6578 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6579 * 6580 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6581 * 6582 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6583 * ^ 6584 * | 6585 * 6586 * msg_control 6587 * points here 6588 */ 6589 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6590 { 6591 struct cmsghdr *cmsg; 6592 struct msghdr *my_msg = (struct msghdr *)msg; 6593 6594 for_each_cmsghdr(cmsg, my_msg) { 6595 if (!CMSG_OK(my_msg, cmsg)) 6596 return -EINVAL; 6597 6598 /* Should we parse this header or ignore? */ 6599 if (cmsg->cmsg_level != IPPROTO_SCTP) 6600 continue; 6601 6602 /* Strictly check lengths following example in SCM code. 
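 *
 * For orientation, a sender would typically have built such a header
 * along these lines (userspace sketch; 'msg' and 'cbuf' are assumed
 * names, not part of this file):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *
 *	c->cmsg_level = IPPROTO_SCTP;
 *	c->cmsg_type  = SCTP_SNDINFO;
 *	c->cmsg_len   = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *
 * which is exactly the layout the per-type checks below insist on.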
*/ 6603 switch (cmsg->cmsg_type) { 6604 case SCTP_INIT: 6605 /* SCTP Socket API Extension 6606 * 5.3.1 SCTP Initiation Structure (SCTP_INIT) 6607 * 6608 * This cmsghdr structure provides information for 6609 * initializing new SCTP associations with sendmsg(). 6610 * The SCTP_INITMSG socket option uses this same data 6611 * structure. This structure is not used for 6612 * recvmsg(). 6613 * 6614 * cmsg_level cmsg_type cmsg_data[] 6615 * ------------ ------------ ---------------------- 6616 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6617 */ 6618 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) 6619 return -EINVAL; 6620 6621 cmsgs->init = CMSG_DATA(cmsg); 6622 break; 6623 6624 case SCTP_SNDRCV: 6625 /* SCTP Socket API Extension 6626 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) 6627 * 6628 * This cmsghdr structure specifies SCTP options for 6629 * sendmsg() and describes SCTP header information 6630 * about a received message through recvmsg(). 6631 * 6632 * cmsg_level cmsg_type cmsg_data[] 6633 * ------------ ------------ ---------------------- 6634 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6635 */ 6636 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6637 return -EINVAL; 6638 6639 cmsgs->srinfo = CMSG_DATA(cmsg); 6640 6641 if (cmsgs->srinfo->sinfo_flags & 6642 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6643 SCTP_ABORT | SCTP_EOF)) 6644 return -EINVAL; 6645 break; 6646 6647 case SCTP_SNDINFO: 6648 /* SCTP Socket API Extension 6649 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) 6650 * 6651 * This cmsghdr structure specifies SCTP options for 6652 * sendmsg(). This structure and SCTP_RCVINFO replaces 6653 * SCTP_SNDRCV which has been deprecated. 6654 * 6655 * cmsg_level cmsg_type cmsg_data[] 6656 * ------------ ------------ --------------------- 6657 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo 6658 */ 6659 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) 6660 return -EINVAL; 6661 6662 cmsgs->sinfo = CMSG_DATA(cmsg); 6663 6664 if (cmsgs->sinfo->snd_flags & 6665 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6666 SCTP_ABORT | SCTP_EOF)) 6667 return -EINVAL; 6668 break; 6669 default: 6670 return -EINVAL; 6671 } 6672 } 6673 6674 return 0; 6675 } 6676 6677 /* 6678 * Wait for a packet.. 6679 * Note: This function is the same function as in core/datagram.c 6680 * with a few modifications to make lksctp work. 6681 */ 6682 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) 6683 { 6684 int error; 6685 DEFINE_WAIT(wait); 6686 6687 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6688 6689 /* Socket errors? */ 6690 error = sock_error(sk); 6691 if (error) 6692 goto out; 6693 6694 if (!skb_queue_empty(&sk->sk_receive_queue)) 6695 goto ready; 6696 6697 /* Socket shut down? */ 6698 if (sk->sk_shutdown & RCV_SHUTDOWN) 6699 goto out; 6700 6701 /* Sequenced packets can come disconnected. If so we report the 6702 * problem. 6703 */ 6704 error = -ENOTCONN; 6705 6706 /* Is there a good reason to think that we may receive some data? */ 6707 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6708 goto out; 6709 6710 /* Handle signals. */ 6711 if (signal_pending(current)) 6712 goto interrupted; 6713 6714 /* Let another process have a go. Since we are going to sleep 6715 * anyway. Note: This may cause odd behaviors if the message 6716 * does not fit in the user's buffer, but this seems to be the 6717 * only way to honor MSG_DONTWAIT realistically. 
6718 */ 6719 release_sock(sk); 6720 *timeo_p = schedule_timeout(*timeo_p); 6721 lock_sock(sk); 6722 6723 ready: 6724 finish_wait(sk_sleep(sk), &wait); 6725 return 0; 6726 6727 interrupted: 6728 error = sock_intr_errno(*timeo_p); 6729 6730 out: 6731 finish_wait(sk_sleep(sk), &wait); 6732 *err = error; 6733 return error; 6734 } 6735 6736 /* Receive a datagram. 6737 * Note: This is pretty much the same routine as in core/datagram.c 6738 * with a few changes to make lksctp work. 6739 */ 6740 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6741 int noblock, int *err) 6742 { 6743 int error; 6744 struct sk_buff *skb; 6745 long timeo; 6746 6747 timeo = sock_rcvtimeo(sk, noblock); 6748 6749 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6750 MAX_SCHEDULE_TIMEOUT); 6751 6752 do { 6753 /* Again only user level code calls this function, 6754 * so nothing interrupt level 6755 * will suddenly eat the receive_queue. 6756 * 6757 * Look at current nfs client by the way... 6758 * However, this function was correct in any case. 8) 6759 */ 6760 if (flags & MSG_PEEK) { 6761 spin_lock_bh(&sk->sk_receive_queue.lock); 6762 skb = skb_peek(&sk->sk_receive_queue); 6763 if (skb) 6764 atomic_inc(&skb->users); 6765 spin_unlock_bh(&sk->sk_receive_queue.lock); 6766 } else { 6767 skb = skb_dequeue(&sk->sk_receive_queue); 6768 } 6769 6770 if (skb) 6771 return skb; 6772 6773 /* Caller is allowed not to check sk->sk_err before calling. */ 6774 error = sock_error(sk); 6775 if (error) 6776 goto no_packet; 6777 6778 if (sk->sk_shutdown & RCV_SHUTDOWN) 6779 break; 6780 6781 if (sk_can_busy_loop(sk) && 6782 sk_busy_loop(sk, noblock)) 6783 continue; 6784 6785 /* User doesn't want to wait. */ 6786 error = -EAGAIN; 6787 if (!timeo) 6788 goto no_packet; 6789 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6790 6791 return NULL; 6792 6793 no_packet: 6794 *err = error; 6795 return NULL; 6796 } 6797 6798 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6799 static void __sctp_write_space(struct sctp_association *asoc) 6800 { 6801 struct sock *sk = asoc->base.sk; 6802 struct socket *sock = sk->sk_socket; 6803 6804 if ((sctp_wspace(asoc) > 0) && sock) { 6805 if (waitqueue_active(&asoc->wait)) 6806 wake_up_interruptible(&asoc->wait); 6807 6808 if (sctp_writeable(sk)) { 6809 wait_queue_head_t *wq = sk_sleep(sk); 6810 6811 if (wq && waitqueue_active(wq)) 6812 wake_up_interruptible(wq); 6813 6814 /* Note that we try to include the Async I/O support 6815 * here by modeling from the current TCP/UDP code. 6816 * We have not tested with it yet. 6817 */ 6818 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6819 sock_wake_async(sock, 6820 SOCK_WAKE_SPACE, POLL_OUT); 6821 } 6822 } 6823 } 6824 6825 static void sctp_wake_up_waiters(struct sock *sk, 6826 struct sctp_association *asoc) 6827 { 6828 struct sctp_association *tmp = asoc; 6829 6830 /* We do accounting for the sndbuf space per association, 6831 * so we only need to wake our own association. 6832 */ 6833 if (asoc->ep->sndbuf_policy) 6834 return __sctp_write_space(asoc); 6835 6836 /* If association goes down and is just flushing its 6837 * outq, then just normally notify others. 6838 */ 6839 if (asoc->base.dead) 6840 return sctp_write_space(sk); 6841 6842 /* Accounting for the sndbuf space is per socket, so we 6843 * need to wake up others, try to be fair and in case of 6844 * other associations, let them have a go first instead 6845 * of just doing a sctp_write_space() call. 
6846 * 6847 * Note that we reach sctp_wake_up_waiters() only when 6848 * associations free up queued chunks, thus we are under 6849 * lock and the list of associations on a socket is 6850 * guaranteed not to change. 6851 */ 6852 for (tmp = list_next_entry(tmp, asocs); 1; 6853 tmp = list_next_entry(tmp, asocs)) { 6854 /* Manually skip the head element. */ 6855 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) 6856 continue; 6857 /* Wake up association. */ 6858 __sctp_write_space(tmp); 6859 /* We've reached the end. */ 6860 if (tmp == asoc) 6861 break; 6862 } 6863 } 6864 6865 /* Do accounting for the sndbuf space. 6866 * Decrement the used sndbuf space of the corresponding association by the 6867 * data size which was just transmitted(freed). 6868 */ 6869 static void sctp_wfree(struct sk_buff *skb) 6870 { 6871 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; 6872 struct sctp_association *asoc = chunk->asoc; 6873 struct sock *sk = asoc->base.sk; 6874 6875 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6876 sizeof(struct sk_buff) + 6877 sizeof(struct sctp_chunk); 6878 6879 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6880 6881 /* 6882 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6883 */ 6884 sk->sk_wmem_queued -= skb->truesize; 6885 sk_mem_uncharge(sk, skb->truesize); 6886 6887 sock_wfree(skb); 6888 sctp_wake_up_waiters(sk, asoc); 6889 6890 sctp_association_put(asoc); 6891 } 6892 6893 /* Do accounting for the receive space on the socket. 6894 * Accounting for the association is done in ulpevent.c 6895 * We set this as a destructor for the cloned data skbs so that 6896 * accounting is done at the correct time. 6897 */ 6898 void sctp_sock_rfree(struct sk_buff *skb) 6899 { 6900 struct sock *sk = skb->sk; 6901 struct sctp_ulpevent *event = sctp_skb2event(skb); 6902 6903 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6904 6905 /* 6906 * Mimic the behavior of sock_rfree 6907 */ 6908 sk_mem_uncharge(sk, event->rmem_len); 6909 } 6910 6911 6912 /* Helper function to wait for space in the sndbuf. */ 6913 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6914 size_t msg_len) 6915 { 6916 struct sock *sk = asoc->base.sk; 6917 int err = 0; 6918 long current_timeo = *timeo_p; 6919 DEFINE_WAIT(wait); 6920 6921 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6922 *timeo_p, msg_len); 6923 6924 /* Increment the association's refcnt. */ 6925 sctp_association_hold(asoc); 6926 6927 /* Wait on the association specific sndbuf space. */ 6928 for (;;) { 6929 prepare_to_wait_exclusive(&asoc->wait, &wait, 6930 TASK_INTERRUPTIBLE); 6931 if (!*timeo_p) 6932 goto do_nonblock; 6933 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6934 asoc->base.dead) 6935 goto do_error; 6936 if (signal_pending(current)) 6937 goto do_interrupted; 6938 if (msg_len <= sctp_wspace(asoc)) 6939 break; 6940 6941 /* Let another process have a go. Since we are going 6942 * to sleep anyway. 6943 */ 6944 release_sock(sk); 6945 current_timeo = schedule_timeout(current_timeo); 6946 BUG_ON(sk != asoc->base.sk); 6947 lock_sock(sk); 6948 6949 *timeo_p = current_timeo; 6950 } 6951 6952 out: 6953 finish_wait(&asoc->wait, &wait); 6954 6955 /* Release the association's refcnt. 
*/ 6956 sctp_association_put(asoc); 6957 6958 return err; 6959 6960 do_error: 6961 err = -EPIPE; 6962 goto out; 6963 6964 do_interrupted: 6965 err = sock_intr_errno(*timeo_p); 6966 goto out; 6967 6968 do_nonblock: 6969 err = -EAGAIN; 6970 goto out; 6971 } 6972 6973 void sctp_data_ready(struct sock *sk) 6974 { 6975 struct socket_wq *wq; 6976 6977 rcu_read_lock(); 6978 wq = rcu_dereference(sk->sk_wq); 6979 if (wq_has_sleeper(wq)) 6980 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6981 POLLRDNORM | POLLRDBAND); 6982 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6983 rcu_read_unlock(); 6984 } 6985 6986 /* If socket sndbuf has changed, wake up all per association waiters. */ 6987 void sctp_write_space(struct sock *sk) 6988 { 6989 struct sctp_association *asoc; 6990 6991 /* Wake up the tasks in each wait queue. */ 6992 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 6993 __sctp_write_space(asoc); 6994 } 6995 } 6996 6997 /* Is there any sndbuf space available on the socket? 6998 * 6999 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 7000 * associations on the same socket. For a UDP-style socket with 7001 * multiple associations, it is possible for it to be "unwriteable" 7002 * prematurely. I assume that this is acceptable because 7003 * a premature "unwriteable" is better than an accidental "writeable" which 7004 * would cause an unwanted block under certain circumstances. For the 1-1 7005 * UDP-style sockets or TCP-style sockets, this code should work. 7006 * - Daisy 7007 */ 7008 static int sctp_writeable(struct sock *sk) 7009 { 7010 int amt = 0; 7011 7012 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 7013 if (amt < 0) 7014 amt = 0; 7015 return amt; 7016 } 7017 7018 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 7019 * returns immediately with EINPROGRESS. 7020 */ 7021 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 7022 { 7023 struct sock *sk = asoc->base.sk; 7024 int err = 0; 7025 long current_timeo = *timeo_p; 7026 DEFINE_WAIT(wait); 7027 7028 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 7029 7030 /* Increment the association's refcnt. */ 7031 sctp_association_hold(asoc); 7032 7033 for (;;) { 7034 prepare_to_wait_exclusive(&asoc->wait, &wait, 7035 TASK_INTERRUPTIBLE); 7036 if (!*timeo_p) 7037 goto do_nonblock; 7038 if (sk->sk_shutdown & RCV_SHUTDOWN) 7039 break; 7040 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 7041 asoc->base.dead) 7042 goto do_error; 7043 if (signal_pending(current)) 7044 goto do_interrupted; 7045 7046 if (sctp_state(asoc, ESTABLISHED)) 7047 break; 7048 7049 /* Let another process have a go. Since we are going 7050 * to sleep anyway. 7051 */ 7052 release_sock(sk); 7053 current_timeo = schedule_timeout(current_timeo); 7054 lock_sock(sk); 7055 7056 *timeo_p = current_timeo; 7057 } 7058 7059 out: 7060 finish_wait(&asoc->wait, &wait); 7061 7062 /* Release the association's refcnt. 
*/ 7063 sctp_association_put(asoc); 7064 7065 return err; 7066 7067 do_error: 7068 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 7069 err = -ETIMEDOUT; 7070 else 7071 err = -ECONNREFUSED; 7072 goto out; 7073 7074 do_interrupted: 7075 err = sock_intr_errno(*timeo_p); 7076 goto out; 7077 7078 do_nonblock: 7079 err = -EINPROGRESS; 7080 goto out; 7081 } 7082 7083 static int sctp_wait_for_accept(struct sock *sk, long timeo) 7084 { 7085 struct sctp_endpoint *ep; 7086 int err = 0; 7087 DEFINE_WAIT(wait); 7088 7089 ep = sctp_sk(sk)->ep; 7090 7091 7092 for (;;) { 7093 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 7094 TASK_INTERRUPTIBLE); 7095 7096 if (list_empty(&ep->asocs)) { 7097 release_sock(sk); 7098 timeo = schedule_timeout(timeo); 7099 lock_sock(sk); 7100 } 7101 7102 err = -EINVAL; 7103 if (!sctp_sstate(sk, LISTENING)) 7104 break; 7105 7106 err = 0; 7107 if (!list_empty(&ep->asocs)) 7108 break; 7109 7110 err = sock_intr_errno(timeo); 7111 if (signal_pending(current)) 7112 break; 7113 7114 err = -EAGAIN; 7115 if (!timeo) 7116 break; 7117 } 7118 7119 finish_wait(sk_sleep(sk), &wait); 7120 7121 return err; 7122 } 7123 7124 static void sctp_wait_for_close(struct sock *sk, long timeout) 7125 { 7126 DEFINE_WAIT(wait); 7127 7128 do { 7129 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 7130 if (list_empty(&sctp_sk(sk)->ep->asocs)) 7131 break; 7132 release_sock(sk); 7133 timeout = schedule_timeout(timeout); 7134 lock_sock(sk); 7135 } while (!signal_pending(current) && timeout); 7136 7137 finish_wait(sk_sleep(sk), &wait); 7138 } 7139 7140 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 7141 { 7142 struct sk_buff *frag; 7143 7144 if (!skb->data_len) 7145 goto done; 7146 7147 /* Don't forget the fragments. */ 7148 skb_walk_frags(skb, frag) 7149 sctp_skb_set_owner_r_frag(frag, sk); 7150 7151 done: 7152 sctp_skb_set_owner_r(skb, sk); 7153 } 7154 7155 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 7156 struct sctp_association *asoc) 7157 { 7158 struct inet_sock *inet = inet_sk(sk); 7159 struct inet_sock *newinet; 7160 7161 newsk->sk_type = sk->sk_type; 7162 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 7163 newsk->sk_flags = sk->sk_flags; 7164 newsk->sk_no_check_tx = sk->sk_no_check_tx; 7165 newsk->sk_no_check_rx = sk->sk_no_check_rx; 7166 newsk->sk_reuse = sk->sk_reuse; 7167 7168 newsk->sk_shutdown = sk->sk_shutdown; 7169 newsk->sk_destruct = sctp_destruct_sock; 7170 newsk->sk_family = sk->sk_family; 7171 newsk->sk_protocol = IPPROTO_SCTP; 7172 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 7173 newsk->sk_sndbuf = sk->sk_sndbuf; 7174 newsk->sk_rcvbuf = sk->sk_rcvbuf; 7175 newsk->sk_lingertime = sk->sk_lingertime; 7176 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 7177 newsk->sk_sndtimeo = sk->sk_sndtimeo; 7178 7179 newinet = inet_sk(newsk); 7180 7181 /* Initialize sk's sport, dport, rcv_saddr and daddr for 7182 * getsockname() and getpeername() 7183 */ 7184 newinet->inet_sport = inet->inet_sport; 7185 newinet->inet_saddr = inet->inet_saddr; 7186 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 7187 newinet->inet_dport = htons(asoc->peer.port); 7188 newinet->pmtudisc = inet->pmtudisc; 7189 newinet->inet_id = asoc->next_tsn ^ jiffies; 7190 7191 newinet->uc_ttl = inet->uc_ttl; 7192 newinet->mc_loop = 1; 7193 newinet->mc_ttl = 1; 7194 newinet->mc_index = 0; 7195 newinet->mc_list = NULL; 7196 } 7197 7198 /* Populate the fields of the newsk from the oldsk and migrate the assoc 7199 * and its messages to the newsk. 
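 *
 * This path is reached both when a one-to-many association is peeled off
 * and when accept() completes on a one-to-one style socket. A userspace
 * sketch of the peel-off side, using the lksctp-tools helper ('fd' and
 * 'assoc_id' are assumed names):
 *
 *	int new_fd = sctp_peeloff(fd, assoc_id);
 *
 * after which the association's queued messages are read on new_fd.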
7200 */ 7201 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 7202 struct sctp_association *assoc, 7203 sctp_socket_type_t type) 7204 { 7205 struct sctp_sock *oldsp = sctp_sk(oldsk); 7206 struct sctp_sock *newsp = sctp_sk(newsk); 7207 struct sctp_bind_bucket *pp; /* hash list port iterator */ 7208 struct sctp_endpoint *newep = newsp->ep; 7209 struct sk_buff *skb, *tmp; 7210 struct sctp_ulpevent *event; 7211 struct sctp_bind_hashbucket *head; 7212 struct list_head tmplist; 7213 7214 /* Migrate socket buffer sizes and all the socket level options to the 7215 * new socket. 7216 */ 7217 newsk->sk_sndbuf = oldsk->sk_sndbuf; 7218 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 7219 /* Brute force copy old sctp opt. */ 7220 if (oldsp->do_auto_asconf) { 7221 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); 7222 inet_sk_copy_descendant(newsk, oldsk); 7223 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); 7224 } else 7225 inet_sk_copy_descendant(newsk, oldsk); 7226 7227 /* Restore the ep value that was overwritten with the above structure 7228 * copy. 7229 */ 7230 newsp->ep = newep; 7231 newsp->hmac = NULL; 7232 7233 /* Hook this new socket in to the bind_hash list. */ 7234 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 7235 inet_sk(oldsk)->inet_num)]; 7236 local_bh_disable(); 7237 spin_lock(&head->lock); 7238 pp = sctp_sk(oldsk)->bind_hash; 7239 sk_add_bind_node(newsk, &pp->owner); 7240 sctp_sk(newsk)->bind_hash = pp; 7241 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 7242 spin_unlock(&head->lock); 7243 local_bh_enable(); 7244 7245 /* Copy the bind_addr list from the original endpoint to the new 7246 * endpoint so that we can handle restarts properly 7247 */ 7248 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 7249 &oldsp->ep->base.bind_addr, GFP_KERNEL); 7250 7251 /* Move any messages in the old socket's receive queue that are for the 7252 * peeled off association to the new socket's receive queue. 7253 */ 7254 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 7255 event = sctp_skb2event(skb); 7256 if (event->asoc == assoc) { 7257 __skb_unlink(skb, &oldsk->sk_receive_queue); 7258 __skb_queue_tail(&newsk->sk_receive_queue, skb); 7259 sctp_skb_set_owner_r_frag(skb, newsk); 7260 } 7261 } 7262 7263 /* Clean up any messages pending delivery due to partial 7264 * delivery. Three cases: 7265 * 1) No partial deliver; no work. 7266 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 7267 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 7268 */ 7269 skb_queue_head_init(&newsp->pd_lobby); 7270 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 7271 7272 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 7273 struct sk_buff_head *queue; 7274 7275 /* Decide which queue to move pd_lobby skbs to. */ 7276 if (assoc->ulpq.pd_mode) { 7277 queue = &newsp->pd_lobby; 7278 } else 7279 queue = &newsk->sk_receive_queue; 7280 7281 /* Walk through the pd_lobby, looking for skbs that 7282 * need moved to the new socket. 7283 */ 7284 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 7285 event = sctp_skb2event(skb); 7286 if (event->asoc == assoc) { 7287 __skb_unlink(skb, &oldsp->pd_lobby); 7288 __skb_queue_tail(queue, skb); 7289 sctp_skb_set_owner_r_frag(skb, newsk); 7290 } 7291 } 7292 7293 /* Clear up any skbs waiting for the partial 7294 * delivery to finish. 
7295 */ 7296 if (assoc->ulpq.pd_mode) 7297 sctp_clear_pd(oldsk, NULL); 7298 7299 } 7300 7301 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 7302 sctp_skb_set_owner_r_frag(skb, newsk); 7303 7304 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 7305 sctp_skb_set_owner_r_frag(skb, newsk); 7306 7307 /* Set the type of socket to indicate that it is peeled off from the 7308 * original UDP-style socket or created with the accept() call on a 7309 * TCP-style socket.. 7310 */ 7311 newsp->type = type; 7312 7313 /* Mark the new socket "in-use" by the user so that any packets 7314 * that may arrive on the association after we've moved it are 7315 * queued to the backlog. This prevents a potential race between 7316 * backlog processing on the old socket and new-packet processing 7317 * on the new socket. 7318 * 7319 * The caller has just allocated newsk so we can guarantee that other 7320 * paths won't try to lock it and then oldsk. 7321 */ 7322 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7323 sctp_assoc_migrate(assoc, newsk); 7324 7325 /* If the association on the newsk is already closed before accept() 7326 * is called, set RCV_SHUTDOWN flag. 7327 */ 7328 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7329 newsk->sk_shutdown |= RCV_SHUTDOWN; 7330 7331 newsk->sk_state = SCTP_SS_ESTABLISHED; 7332 release_sock(newsk); 7333 } 7334 7335 7336 /* This proto struct describes the ULP interface for SCTP. */ 7337 struct proto sctp_prot = { 7338 .name = "SCTP", 7339 .owner = THIS_MODULE, 7340 .close = sctp_close, 7341 .connect = sctp_connect, 7342 .disconnect = sctp_disconnect, 7343 .accept = sctp_accept, 7344 .ioctl = sctp_ioctl, 7345 .init = sctp_init_sock, 7346 .destroy = sctp_destroy_sock, 7347 .shutdown = sctp_shutdown, 7348 .setsockopt = sctp_setsockopt, 7349 .getsockopt = sctp_getsockopt, 7350 .sendmsg = sctp_sendmsg, 7351 .recvmsg = sctp_recvmsg, 7352 .bind = sctp_bind, 7353 .backlog_rcv = sctp_backlog_rcv, 7354 .hash = sctp_hash, 7355 .unhash = sctp_unhash, 7356 .get_port = sctp_get_port, 7357 .obj_size = sizeof(struct sctp_sock), 7358 .sysctl_mem = sysctl_sctp_mem, 7359 .sysctl_rmem = sysctl_sctp_rmem, 7360 .sysctl_wmem = sysctl_sctp_wmem, 7361 .memory_pressure = &sctp_memory_pressure, 7362 .enter_memory_pressure = sctp_enter_memory_pressure, 7363 .memory_allocated = &sctp_memory_allocated, 7364 .sockets_allocated = &sctp_sockets_allocated, 7365 }; 7366 7367 #if IS_ENABLED(CONFIG_IPV6) 7368 7369 struct proto sctpv6_prot = { 7370 .name = "SCTPv6", 7371 .owner = THIS_MODULE, 7372 .close = sctp_close, 7373 .connect = sctp_connect, 7374 .disconnect = sctp_disconnect, 7375 .accept = sctp_accept, 7376 .ioctl = sctp_ioctl, 7377 .init = sctp_init_sock, 7378 .destroy = sctp_destroy_sock, 7379 .shutdown = sctp_shutdown, 7380 .setsockopt = sctp_setsockopt, 7381 .getsockopt = sctp_getsockopt, 7382 .sendmsg = sctp_sendmsg, 7383 .recvmsg = sctp_recvmsg, 7384 .bind = sctp_bind, 7385 .backlog_rcv = sctp_backlog_rcv, 7386 .hash = sctp_hash, 7387 .unhash = sctp_unhash, 7388 .get_port = sctp_get_port, 7389 .obj_size = sizeof(struct sctp6_sock), 7390 .sysctl_mem = sysctl_sctp_mem, 7391 .sysctl_rmem = sysctl_sctp_rmem, 7392 .sysctl_wmem = sysctl_sctp_wmem, 7393 .memory_pressure = &sctp_memory_pressure, 7394 .enter_memory_pressure = sctp_enter_memory_pressure, 7395 .memory_allocated = &sctp_memory_allocated, 7396 .sockets_allocated = &sctp_sockets_allocated, 7397 }; 7398 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7399