/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal <narsi@refcode.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *    Ryan Layer <rmlayer@us.ibm.com>
 *    Anup Pemmaiah <pemmaiah@cc.usu.edu>
 *    Kevin Gao <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
			      struct sctp_association *, sctp_socket_type_t);

static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association. */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later. */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}

/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address? */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd       - the socket descriptor returned by socket().
 *   addr     - the address structure (struct sockaddr_in or struct
 *              sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}
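
/* For illustration only (not part of the kernel build): a minimal,
 * hedged user-space sketch of the UDP-style bind() described above.
 * The port is an arbitrary example value; the usual <netinet/in.h>
 * definitions are assumed.
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 *	// A second bind() on the same socket fails with EINVAL, as
 *	// enforced by sctp_bind() above.
 */
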
static long sctp_get_port_local(struct sock *, union sctp_addr *);

/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size. */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association. */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < PROT_SOCK &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port. */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
				 SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF.  Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    sizeof(saveaddr),
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				dst_release(trans->dst);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}

/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			dst_release(transport->dst);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause an endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
 * from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel.  Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
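
/* For illustration only (not part of the kernel build): a hedged
 * user-space sketch of sctp_bindx() as provided by libsctp, adding one
 * extra local address to an already bound one-to-many socket sd.  The
 * address and port are arbitrary example values; the port must match
 * the one already bound, as checked in sctp_do_bind() above.
 *
 *	struct sockaddr_in extra = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.10", &extra.sin_addr);
 *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *		       SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 */
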
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory. */
	kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}
/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	sctp_scope_t scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port < PROT_SOCK &&
				    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures. */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	if (assoc_id)
		*assoc_id = asoc->assoc_id;
	err = sctp_wait_for_connect(asoc, &timeo);
	/* Note: the asoc may be freed after the return of
	 * sctp_wait_for_connect.
	 */

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table.  Try to unhash it, just in case; it's a
		 * noop if it wasn't hashed, so we're safe.
		 */
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *                   sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking sctp_connectx(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel.  Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
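
/* For illustration only (not part of the kernel build): a hedged
 * user-space sketch of libsctp's sctp_connectx(), connecting to a
 * multi-homed peer reachable at two example IPv4 addresses.  Both
 * addresses carry the same port, as required above.
 *
 *	struct sockaddr_in peers[2] = {
 *		{ .sin_family = AF_INET, .sin_port = htons(7000) },
 *		{ .sin_family = AF_INET, .sin_port = htons(7000) },
 *	};
 *	sctp_assoc_t assoc_id;
 *
 *	inet_pton(AF_INET, "198.51.100.1", &peers[0].sin_addr);
 *	inet_pton(AF_INET, "198.51.100.2", &peers[1].sin_addr);
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &assoc_id) < 0)
 *		perror("sctp_connectx");
 */
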
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	gfp_t gfp = GFP_KERNEL;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory. */
	if (sk->sk_socket->file)
		gfp = GFP_USER | __GFP_NOWARN;
	kaddrs = kmalloc(addrs_size, gfp);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations. The only different part
 * is that we store the actual length of the address buffer into the
 * addr_num structure member. That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
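
/* For illustration only (not part of the kernel build): a hedged
 * user-space sketch of the SO_LINGER/ABORT behaviour described above on
 * a TCP-style SCTP socket sd.
 *
 *	struct linger lin = {
 *		.l_onoff  = 1,
 *		.l_linger = 0,	// close() now behaves like the ABORT primitive
 *	};
 *
 *	if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)) < 0)
 *		perror("setsockopt(SO_LINGER)");
 *	close(sd);
 */
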
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue. */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint. */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue. */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}

/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
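
/* For illustration only (not part of the kernel build): a hedged
 * user-space sketch of sending one message on a one-to-many socket sd
 * with libsctp's sctp_sendmsg() wrapper, which builds the SCTP_SNDRCV
 * ancillary data that sctp_sendmsg() below parses.  The peer address,
 * port, stream 0 and ppid 0 are arbitrary example values.
 *
 *	struct sockaddr_in peer = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(7000),
 *	};
 *	const char buf[] = "hello";
 *
 *	inet_pton(AF_INET, "198.51.100.1", &peer.sin_addr);
 *	// Trailing arguments are ppid, flags, stream number, ttl, context.
 *	if (sctp_sendmsg(sd, buf, sizeof(buf),
 *			 (struct sockaddr *)&peer, sizeof(peer),
 *			 0, 0, 0, 0, 0) < 0)
 *		perror("sctp_sendmsg");
 */
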
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);

static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	sctp_scope_t scope;
	bool fill_sinfo_ttl = false, wait_connect = false;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;
	__u16 sinfo_flags = 0;
	long timeo;
	int err;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs. */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinit = cmsgs.init;
	if (cmsgs.sinfo != NULL) {
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
		default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
		default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
		default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
		default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;

		sinfo = &default_sinfo;
		fill_sinfo_ttl = true;
	} else {
		sinfo = cmsgs.srinfo;
	}
	/* Did the user specify SNDINFO/SNDRCVINFO? */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used. */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);

		/* If we could not find a matching association on the
		 * endpoint, make sure that it is not a TCP-style
		 * socket that already has an association or there is
		 * no peeled-off association on another socket.
		 */
		if (!asoc &&
		    ((sctp_style(sk, TCP) &&
		      (sctp_sstate(sk, ESTABLISHED) ||
		       sctp_sstate(sk, CLOSING))) ||
		     sctp_endpoint_is_peeled_off(ep, &to))) {
			err = -EADDRNOTAVAIL;
			goto out_unlock;
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association? */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || !sinit->sinit_num_ostreams) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested. */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
1811 */ 1812 if (!ep->base.bind_addr.port) { 1813 if (sctp_autobind(sk)) { 1814 err = -EAGAIN; 1815 goto out_unlock; 1816 } 1817 } else { 1818 /* 1819 * If an unprivileged user inherits a one-to-many 1820 * style socket with open associations on a privileged 1821 * port, it MAY be permitted to accept new associations, 1822 * but it SHOULD NOT be permitted to open new 1823 * associations. 1824 */ 1825 if (ep->base.bind_addr.port < PROT_SOCK && 1826 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1827 err = -EACCES; 1828 goto out_unlock; 1829 } 1830 } 1831 1832 scope = sctp_scope(&to); 1833 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1834 if (!new_asoc) { 1835 err = -ENOMEM; 1836 goto out_unlock; 1837 } 1838 asoc = new_asoc; 1839 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1840 if (err < 0) { 1841 err = -ENOMEM; 1842 goto out_free; 1843 } 1844 1845 /* If the SCTP_INIT ancillary data is specified, set all 1846 * the association init values accordingly. 1847 */ 1848 if (sinit) { 1849 if (sinit->sinit_num_ostreams) { 1850 asoc->c.sinit_num_ostreams = 1851 sinit->sinit_num_ostreams; 1852 } 1853 if (sinit->sinit_max_instreams) { 1854 asoc->c.sinit_max_instreams = 1855 sinit->sinit_max_instreams; 1856 } 1857 if (sinit->sinit_max_attempts) { 1858 asoc->max_init_attempts 1859 = sinit->sinit_max_attempts; 1860 } 1861 if (sinit->sinit_max_init_timeo) { 1862 asoc->max_init_timeo = 1863 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1864 } 1865 } 1866 1867 /* Prime the peer's transport structures. */ 1868 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1869 if (!transport) { 1870 err = -ENOMEM; 1871 goto out_free; 1872 } 1873 } 1874 1875 /* ASSERT: we have a valid association at this point. */ 1876 pr_debug("%s: we have a valid association\n", __func__); 1877 1878 if (!sinfo) { 1879 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up 1880 * one with some defaults. 1881 */ 1882 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1883 default_sinfo.sinfo_stream = asoc->default_stream; 1884 default_sinfo.sinfo_flags = asoc->default_flags; 1885 default_sinfo.sinfo_ppid = asoc->default_ppid; 1886 default_sinfo.sinfo_context = asoc->default_context; 1887 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1888 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1889 1890 sinfo = &default_sinfo; 1891 } else if (fill_sinfo_ttl) { 1892 /* In case SNDINFO was specified, we still need to fill 1893 * it with a default ttl from the assoc here. 1894 */ 1895 sinfo->sinfo_timetolive = asoc->default_timetolive; 1896 } 1897 1898 /* API 7.1.7, the sndbuf size per association bounds the 1899 * maximum size of data that can be sent in a single send call. 1900 */ 1901 if (msg_len > sk->sk_sndbuf) { 1902 err = -EMSGSIZE; 1903 goto out_free; 1904 } 1905 1906 if (asoc->pmtu_pending) 1907 sctp_assoc_pending_pmtu(sk, asoc); 1908 1909 /* If fragmentation is disabled and the message length exceeds the 1910 * association fragmentation point, return EMSGSIZE. The I-D 1911 * does not specify what this error is, but this looks like 1912 * a great fit. 1913 */ 1914 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1915 err = -EMSGSIZE; 1916 goto out_free; 1917 } 1918 1919 /* Check for invalid stream. 
*/ 1920 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1921 err = -EINVAL; 1922 goto out_free; 1923 } 1924 1925 if (sctp_wspace(asoc) < msg_len) 1926 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); 1927 1928 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1929 if (!sctp_wspace(asoc)) { 1930 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1931 if (err) 1932 goto out_free; 1933 } 1934 1935 /* If an address is passed with the sendto/sendmsg call, it is used 1936 * to override the primary destination address in the TCP model, or 1937 * when SCTP_ADDR_OVER flag is set in the UDP model. 1938 */ 1939 if ((sctp_style(sk, TCP) && msg_name) || 1940 (sinfo_flags & SCTP_ADDR_OVER)) { 1941 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1942 if (!chunk_tp) { 1943 err = -EINVAL; 1944 goto out_free; 1945 } 1946 } else 1947 chunk_tp = NULL; 1948 1949 /* Auto-connect, if we aren't connected already. */ 1950 if (sctp_state(asoc, CLOSED)) { 1951 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1952 if (err < 0) 1953 goto out_free; 1954 1955 wait_connect = true; 1956 pr_debug("%s: we associated primitively\n", __func__); 1957 } 1958 1959 /* Break the message into multiple chunks of maximum size. */ 1960 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); 1961 if (IS_ERR(datamsg)) { 1962 err = PTR_ERR(datamsg); 1963 goto out_free; 1964 } 1965 1966 /* Now send the (possibly) fragmented message. */ 1967 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1968 sctp_chunk_hold(chunk); 1969 1970 /* Do accounting for the write space. */ 1971 sctp_set_owner_w(chunk); 1972 1973 chunk->transport = chunk_tp; 1974 } 1975 1976 /* Send it to the lower layers. Note: all chunks 1977 * must either fail or succeed. The lower layer 1978 * works that way today. Keep it that way or this 1979 * breaks. 1980 */ 1981 err = sctp_primitive_SEND(net, asoc, datamsg); 1982 /* Did the lower layer accept the chunk? */ 1983 if (err) { 1984 sctp_datamsg_free(datamsg); 1985 goto out_free; 1986 } 1987 1988 pr_debug("%s: we sent primitively\n", __func__); 1989 1990 sctp_datamsg_put(datamsg); 1991 err = msg_len; 1992 1993 if (unlikely(wait_connect)) { 1994 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); 1995 sctp_wait_for_connect(asoc, &timeo); 1996 } 1997 1998 /* If we are already past ASSOCIATE, the lower 1999 * layers are responsible for association cleanup. 2000 */ 2001 goto out_unlock; 2002 2003 out_free: 2004 if (new_asoc) 2005 sctp_association_free(asoc); 2006 out_unlock: 2007 release_sock(sk); 2008 2009 out_nounlock: 2010 return sctp_error(sk, msg_flags, err); 2011 2012 #if 0 2013 do_sock_err: 2014 if (msg_len) 2015 err = msg_len; 2016 else 2017 err = sock_error(sk); 2018 goto out; 2019 2020 do_interrupted: 2021 if (msg_len) 2022 err = msg_len; 2023 goto out; 2024 #endif /* 0 */ 2025 } 2026 2027 /* This is an extended version of skb_pull() that removes the data from the 2028 * start of a skb even when data is spread across the list of skb's in the 2029 * frag_list. len specifies the total amount of data that needs to be removed. 2030 * when 'len' bytes could be removed from the skb, it returns 0. 2031 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2032 * could not be removed. 
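 *
 * For example (illustrative): for an skb chain holding 100 bytes in
 * total, sctp_skb_pull(skb, 60) removes 60 bytes and returns 0, while
 * sctp_skb_pull(skb, 150) removes all 100 bytes and returns 50, the
 * number of bytes that could not be removed.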
2033 */ 2034 static int sctp_skb_pull(struct sk_buff *skb, int len) 2035 { 2036 struct sk_buff *list; 2037 int skb_len = skb_headlen(skb); 2038 int rlen; 2039 2040 if (len <= skb_len) { 2041 __skb_pull(skb, len); 2042 return 0; 2043 } 2044 len -= skb_len; 2045 __skb_pull(skb, skb_len); 2046 2047 skb_walk_frags(skb, list) { 2048 rlen = sctp_skb_pull(list, len); 2049 skb->len -= (len-rlen); 2050 skb->data_len -= (len-rlen); 2051 2052 if (!rlen) 2053 return 0; 2054 2055 len = rlen; 2056 } 2057 2058 return len; 2059 } 2060 2061 /* API 3.1.3 recvmsg() - UDP Style Syntax 2062 * 2063 * ssize_t recvmsg(int socket, struct msghdr *message, 2064 * int flags); 2065 * 2066 * socket - the socket descriptor of the endpoint. 2067 * message - pointer to the msghdr structure which contains a single 2068 * user message and possibly some ancillary data. 2069 * 2070 * See Section 5 for complete description of the data 2071 * structures. 2072 * 2073 * flags - flags sent or received with the user message, see Section 2074 * 5 for complete description of the flags. 2075 */ 2076 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2077 int noblock, int flags, int *addr_len) 2078 { 2079 struct sctp_ulpevent *event = NULL; 2080 struct sctp_sock *sp = sctp_sk(sk); 2081 struct sk_buff *skb, *head_skb; 2082 int copied; 2083 int err = 0; 2084 int skb_len; 2085 2086 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2087 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2088 addr_len); 2089 2090 lock_sock(sk); 2091 2092 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) && 2093 !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) { 2094 err = -ENOTCONN; 2095 goto out; 2096 } 2097 2098 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2099 if (!skb) 2100 goto out; 2101 2102 /* Get the total length of the skb including any skb's in the 2103 * frag_list. 2104 */ 2105 skb_len = skb->len; 2106 2107 copied = skb_len; 2108 if (copied > len) 2109 copied = len; 2110 2111 err = skb_copy_datagram_msg(skb, 0, msg, copied); 2112 2113 event = sctp_skb2event(skb); 2114 2115 if (err) 2116 goto out_free; 2117 2118 if (event->chunk && event->chunk->head_skb) 2119 head_skb = event->chunk->head_skb; 2120 else 2121 head_skb = skb; 2122 sock_recv_ts_and_drops(msg, sk, head_skb); 2123 if (sctp_ulpevent_is_notification(event)) { 2124 msg->msg_flags |= MSG_NOTIFICATION; 2125 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2126 } else { 2127 sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len); 2128 } 2129 2130 /* Check if we allow SCTP_NXTINFO. */ 2131 if (sp->recvnxtinfo) 2132 sctp_ulpevent_read_nxtinfo(event, msg, sk); 2133 /* Check if we allow SCTP_RCVINFO. */ 2134 if (sp->recvrcvinfo) 2135 sctp_ulpevent_read_rcvinfo(event, msg); 2136 /* Check if we allow SCTP_SNDRCVINFO. */ 2137 if (sp->subscribe.sctp_data_io_event) 2138 sctp_ulpevent_read_sndrcvinfo(event, msg); 2139 2140 err = copied; 2141 2142 /* If skb's length exceeds the user's buffer, update the skb and 2143 * push it back to the receive_queue so that the next call to 2144 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2145 */ 2146 if (skb_len > copied) { 2147 msg->msg_flags &= ~MSG_EOR; 2148 if (flags & MSG_PEEK) 2149 goto out_free; 2150 sctp_skb_pull(skb, copied); 2151 skb_queue_head(&sk->sk_receive_queue, skb); 2152 2153 /* When only partial message is copied to the user, increase 2154 * rwnd by that amount. If all the data in the skb is read, 2155 * rwnd is updated when the event is freed. 
2156 */ 2157 if (!sctp_ulpevent_is_notification(event)) 2158 sctp_assoc_rwnd_increase(event->asoc, copied); 2159 goto out; 2160 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2161 (event->msg_flags & MSG_EOR)) 2162 msg->msg_flags |= MSG_EOR; 2163 else 2164 msg->msg_flags &= ~MSG_EOR; 2165 2166 out_free: 2167 if (flags & MSG_PEEK) { 2168 /* Release the skb reference acquired after peeking the skb in 2169 * sctp_skb_recv_datagram(). 2170 */ 2171 kfree_skb(skb); 2172 } else { 2173 /* Free the event which includes releasing the reference to 2174 * the owner of the skb, freeing the skb and updating the 2175 * rwnd. 2176 */ 2177 sctp_ulpevent_free(event); 2178 } 2179 out: 2180 release_sock(sk); 2181 return err; 2182 } 2183 2184 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2185 * 2186 * This option is a on/off flag. If enabled no SCTP message 2187 * fragmentation will be performed. Instead if a message being sent 2188 * exceeds the current PMTU size, the message will NOT be sent and 2189 * instead a error will be indicated to the user. 2190 */ 2191 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2192 char __user *optval, 2193 unsigned int optlen) 2194 { 2195 int val; 2196 2197 if (optlen < sizeof(int)) 2198 return -EINVAL; 2199 2200 if (get_user(val, (int __user *)optval)) 2201 return -EFAULT; 2202 2203 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2204 2205 return 0; 2206 } 2207 2208 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2209 unsigned int optlen) 2210 { 2211 struct sctp_association *asoc; 2212 struct sctp_ulpevent *event; 2213 2214 if (optlen > sizeof(struct sctp_event_subscribe)) 2215 return -EINVAL; 2216 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2217 return -EFAULT; 2218 2219 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2220 * if there is no data to be sent or retransmit, the stack will 2221 * immediately send up this notification. 2222 */ 2223 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2224 &sctp_sk(sk)->subscribe)) { 2225 asoc = sctp_id2assoc(sk, 0); 2226 2227 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2228 event = sctp_ulpevent_make_sender_dry_event(asoc, 2229 GFP_ATOMIC); 2230 if (!event) 2231 return -ENOMEM; 2232 2233 sctp_ulpq_tail_event(&asoc->ulpq, event); 2234 } 2235 } 2236 2237 return 0; 2238 } 2239 2240 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2241 * 2242 * This socket option is applicable to the UDP-style socket only. When 2243 * set it will cause associations that are idle for more than the 2244 * specified number of seconds to automatically close. An association 2245 * being idle is defined an association that has NOT sent or received 2246 * user data. The special value of '0' indicates that no automatic 2247 * close of any associations should be performed. The option expects an 2248 * integer defining the number of seconds of idle time before an 2249 * association is closed. 
2250 */ 2251 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2252 unsigned int optlen) 2253 { 2254 struct sctp_sock *sp = sctp_sk(sk); 2255 struct net *net = sock_net(sk); 2256 2257 /* Applicable to UDP-style socket only */ 2258 if (sctp_style(sk, TCP)) 2259 return -EOPNOTSUPP; 2260 if (optlen != sizeof(int)) 2261 return -EINVAL; 2262 if (copy_from_user(&sp->autoclose, optval, optlen)) 2263 return -EFAULT; 2264 2265 if (sp->autoclose > net->sctp.max_autoclose) 2266 sp->autoclose = net->sctp.max_autoclose; 2267 2268 return 0; 2269 } 2270 2271 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2272 * 2273 * Applications can enable or disable heartbeats for any peer address of 2274 * an association, modify an address's heartbeat interval, force a 2275 * heartbeat to be sent immediately, and adjust the address's maximum 2276 * number of retransmissions sent before an address is considered 2277 * unreachable. The following structure is used to access and modify an 2278 * address's parameters: 2279 * 2280 * struct sctp_paddrparams { 2281 * sctp_assoc_t spp_assoc_id; 2282 * struct sockaddr_storage spp_address; 2283 * uint32_t spp_hbinterval; 2284 * uint16_t spp_pathmaxrxt; 2285 * uint32_t spp_pathmtu; 2286 * uint32_t spp_sackdelay; 2287 * uint32_t spp_flags; 2288 * }; 2289 * 2290 * spp_assoc_id - (one-to-many style socket) This is filled in the 2291 * application, and identifies the association for 2292 * this query. 2293 * spp_address - This specifies which address is of interest. 2294 * spp_hbinterval - This contains the value of the heartbeat interval, 2295 * in milliseconds. If a value of zero 2296 * is present in this field then no changes are to 2297 * be made to this parameter. 2298 * spp_pathmaxrxt - This contains the maximum number of 2299 * retransmissions before this address shall be 2300 * considered unreachable. If a value of zero 2301 * is present in this field then no changes are to 2302 * be made to this parameter. 2303 * spp_pathmtu - When Path MTU discovery is disabled the value 2304 * specified here will be the "fixed" path mtu. 2305 * Note that if the spp_address field is empty 2306 * then all associations on this address will 2307 * have this fixed path mtu set upon them. 2308 * 2309 * spp_sackdelay - When delayed sack is enabled, this value specifies 2310 * the number of milliseconds that sacks will be delayed 2311 * for. This value will apply to all addresses of an 2312 * association if the spp_address field is empty. Note 2313 * also, that if delayed sack is enabled and this 2314 * value is set to 0, no change is made to the last 2315 * recorded delayed sack timer value. 2316 * 2317 * spp_flags - These flags are used to control various features 2318 * on an association. The flag field may contain 2319 * zero or more of the following options. 2320 * 2321 * SPP_HB_ENABLE - Enable heartbeats on the 2322 * specified address. Note that if the address 2323 * field is empty all addresses for the association 2324 * have heartbeats enabled upon them. 2325 * 2326 * SPP_HB_DISABLE - Disable heartbeats on the 2327 * speicifed address. Note that if the address 2328 * field is empty all addresses for the association 2329 * will have their heartbeats disabled. Note also 2330 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2331 * mutually exclusive, only one of these two should 2332 * be specified. Enabling both fields will have 2333 * undetermined results. 2334 * 2335 * SPP_HB_DEMAND - Request a user initiated heartbeat 2336 * to be made immediately. 
2337 * 2338 * SPP_HB_TIME_IS_ZERO - Specify's that the time for 2339 * heartbeat delayis to be set to the value of 0 2340 * milliseconds. 2341 * 2342 * SPP_PMTUD_ENABLE - This field will enable PMTU 2343 * discovery upon the specified address. Note that 2344 * if the address feild is empty then all addresses 2345 * on the association are effected. 2346 * 2347 * SPP_PMTUD_DISABLE - This field will disable PMTU 2348 * discovery upon the specified address. Note that 2349 * if the address feild is empty then all addresses 2350 * on the association are effected. Not also that 2351 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2352 * exclusive. Enabling both will have undetermined 2353 * results. 2354 * 2355 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2356 * on delayed sack. The time specified in spp_sackdelay 2357 * is used to specify the sack delay for this address. Note 2358 * that if spp_address is empty then all addresses will 2359 * enable delayed sack and take on the sack delay 2360 * value specified in spp_sackdelay. 2361 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2362 * off delayed sack. If the spp_address field is blank then 2363 * delayed sack is disabled for the entire association. Note 2364 * also that this field is mutually exclusive to 2365 * SPP_SACKDELAY_ENABLE, setting both will have undefined 2366 * results. 2367 */ 2368 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2369 struct sctp_transport *trans, 2370 struct sctp_association *asoc, 2371 struct sctp_sock *sp, 2372 int hb_change, 2373 int pmtud_change, 2374 int sackdelay_change) 2375 { 2376 int error; 2377 2378 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2379 struct net *net = sock_net(trans->asoc->base.sk); 2380 2381 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2382 if (error) 2383 return error; 2384 } 2385 2386 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2387 * this field is ignored. Note also that a value of zero indicates 2388 * the current setting should be left unchanged. 2389 */ 2390 if (params->spp_flags & SPP_HB_ENABLE) { 2391 2392 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2393 * set. This lets us use 0 value when this flag 2394 * is set. 2395 */ 2396 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2397 params->spp_hbinterval = 0; 2398 2399 if (params->spp_hbinterval || 2400 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2401 if (trans) { 2402 trans->hbinterval = 2403 msecs_to_jiffies(params->spp_hbinterval); 2404 } else if (asoc) { 2405 asoc->hbinterval = 2406 msecs_to_jiffies(params->spp_hbinterval); 2407 } else { 2408 sp->hbinterval = params->spp_hbinterval; 2409 } 2410 } 2411 } 2412 2413 if (hb_change) { 2414 if (trans) { 2415 trans->param_flags = 2416 (trans->param_flags & ~SPP_HB) | hb_change; 2417 } else if (asoc) { 2418 asoc->param_flags = 2419 (asoc->param_flags & ~SPP_HB) | hb_change; 2420 } else { 2421 sp->param_flags = 2422 (sp->param_flags & ~SPP_HB) | hb_change; 2423 } 2424 } 2425 2426 /* When Path MTU discovery is disabled the value specified here will 2427 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2428 * include the flag SPP_PMTUD_DISABLE for this field to have any 2429 * effect). 
2430 */ 2431 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2432 if (trans) { 2433 trans->pathmtu = params->spp_pathmtu; 2434 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2435 } else if (asoc) { 2436 asoc->pathmtu = params->spp_pathmtu; 2437 sctp_frag_point(asoc, params->spp_pathmtu); 2438 } else { 2439 sp->pathmtu = params->spp_pathmtu; 2440 } 2441 } 2442 2443 if (pmtud_change) { 2444 if (trans) { 2445 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2446 (params->spp_flags & SPP_PMTUD_ENABLE); 2447 trans->param_flags = 2448 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2449 if (update) { 2450 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2451 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2452 } 2453 } else if (asoc) { 2454 asoc->param_flags = 2455 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2456 } else { 2457 sp->param_flags = 2458 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2459 } 2460 } 2461 2462 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2463 * value of this field is ignored. Note also that a value of zero 2464 * indicates the current setting should be left unchanged. 2465 */ 2466 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2467 if (trans) { 2468 trans->sackdelay = 2469 msecs_to_jiffies(params->spp_sackdelay); 2470 } else if (asoc) { 2471 asoc->sackdelay = 2472 msecs_to_jiffies(params->spp_sackdelay); 2473 } else { 2474 sp->sackdelay = params->spp_sackdelay; 2475 } 2476 } 2477 2478 if (sackdelay_change) { 2479 if (trans) { 2480 trans->param_flags = 2481 (trans->param_flags & ~SPP_SACKDELAY) | 2482 sackdelay_change; 2483 } else if (asoc) { 2484 asoc->param_flags = 2485 (asoc->param_flags & ~SPP_SACKDELAY) | 2486 sackdelay_change; 2487 } else { 2488 sp->param_flags = 2489 (sp->param_flags & ~SPP_SACKDELAY) | 2490 sackdelay_change; 2491 } 2492 } 2493 2494 /* Note that a value of zero indicates the current setting should be 2495 left unchanged. 2496 */ 2497 if (params->spp_pathmaxrxt) { 2498 if (trans) { 2499 trans->pathmaxrxt = params->spp_pathmaxrxt; 2500 } else if (asoc) { 2501 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2502 } else { 2503 sp->pathmaxrxt = params->spp_pathmaxrxt; 2504 } 2505 } 2506 2507 return 0; 2508 } 2509 2510 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2511 char __user *optval, 2512 unsigned int optlen) 2513 { 2514 struct sctp_paddrparams params; 2515 struct sctp_transport *trans = NULL; 2516 struct sctp_association *asoc = NULL; 2517 struct sctp_sock *sp = sctp_sk(sk); 2518 int error; 2519 int hb_change, pmtud_change, sackdelay_change; 2520 2521 if (optlen != sizeof(struct sctp_paddrparams)) 2522 return -EINVAL; 2523 2524 if (copy_from_user(¶ms, optval, optlen)) 2525 return -EFAULT; 2526 2527 /* Validate flags and value parameters. */ 2528 hb_change = params.spp_flags & SPP_HB; 2529 pmtud_change = params.spp_flags & SPP_PMTUD; 2530 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2531 2532 if (hb_change == SPP_HB || 2533 pmtud_change == SPP_PMTUD || 2534 sackdelay_change == SPP_SACKDELAY || 2535 params.spp_sackdelay > 500 || 2536 (params.spp_pathmtu && 2537 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2538 return -EINVAL; 2539 2540 /* If an address other than INADDR_ANY is specified, and 2541 * no transport is found, then the request is invalid. 
2542 */ 2543 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { 2544 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 2545 params.spp_assoc_id); 2546 if (!trans) 2547 return -EINVAL; 2548 } 2549 2550 /* Get association, if assoc_id != 0 and the socket is a one 2551 * to many style socket, and an association was not found, then 2552 * the id was invalid. 2553 */ 2554 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2555 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2556 return -EINVAL; 2557 2558 /* Heartbeat demand can only be sent on a transport or 2559 * association, but not a socket. 2560 */ 2561 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2562 return -EINVAL; 2563 2564 /* Process parameters. */ 2565 error = sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2566 hb_change, pmtud_change, 2567 sackdelay_change); 2568 2569 if (error) 2570 return error; 2571 2572 /* If changes are for association, also apply parameters to each 2573 * transport. 2574 */ 2575 if (!trans && asoc) { 2576 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2577 transports) { 2578 sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2579 hb_change, pmtud_change, 2580 sackdelay_change); 2581 } 2582 } 2583 2584 return 0; 2585 } 2586 2587 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) 2588 { 2589 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; 2590 } 2591 2592 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) 2593 { 2594 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; 2595 } 2596 2597 /* 2598 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2599 * 2600 * This option will effect the way delayed acks are performed. This 2601 * option allows you to get or set the delayed ack time, in 2602 * milliseconds. It also allows changing the delayed ack frequency. 2603 * Changing the frequency to 1 disables the delayed sack algorithm. If 2604 * the assoc_id is 0, then this sets or gets the endpoints default 2605 * values. If the assoc_id field is non-zero, then the set or get 2606 * effects the specified association for the one to many model (the 2607 * assoc_id field is ignored by the one to one model). Note that if 2608 * sack_delay or sack_freq are 0 when setting this option, then the 2609 * current values will remain unchanged. 2610 * 2611 * struct sctp_sack_info { 2612 * sctp_assoc_t sack_assoc_id; 2613 * uint32_t sack_delay; 2614 * uint32_t sack_freq; 2615 * }; 2616 * 2617 * sack_assoc_id - This parameter, indicates which association the user 2618 * is performing an action upon. Note that if this field's value is 2619 * zero then the endpoints default value is changed (effecting future 2620 * associations only). 2621 * 2622 * sack_delay - This parameter contains the number of milliseconds that 2623 * the user is requesting the delayed ACK timer be set to. Note that 2624 * this value is defined in the standard to be between 200 and 500 2625 * milliseconds. 2626 * 2627 * sack_freq - This parameter contains the number of packets that must 2628 * be received before a sack is sent without waiting for the delay 2629 * timer to expire. The default value for this is 2, setting this 2630 * value to 1 will disable the delayed sack algorithm. 
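 *
 * Illustrative user-space sketch (not part of this file): request a
 * 200 ms delayed-SACK timer and a SACK after every second packet for
 * one association on a one-to-many socket (`assoc_id' assumed known):
 *
 *   struct sctp_sack_info si = { 0 };
 *   si.sack_assoc_id = assoc_id;  // 0 would change the endpoint default
 *   si.sack_delay    = 200;       // milliseconds, at most 500 here
 *   si.sack_freq     = 2;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));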
2631 */ 2632 2633 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2634 char __user *optval, unsigned int optlen) 2635 { 2636 struct sctp_sack_info params; 2637 struct sctp_transport *trans = NULL; 2638 struct sctp_association *asoc = NULL; 2639 struct sctp_sock *sp = sctp_sk(sk); 2640 2641 if (optlen == sizeof(struct sctp_sack_info)) { 2642 if (copy_from_user(¶ms, optval, optlen)) 2643 return -EFAULT; 2644 2645 if (params.sack_delay == 0 && params.sack_freq == 0) 2646 return 0; 2647 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2648 pr_warn_ratelimited(DEPRECATED 2649 "%s (pid %d) " 2650 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2651 "Use struct sctp_sack_info instead\n", 2652 current->comm, task_pid_nr(current)); 2653 if (copy_from_user(¶ms, optval, optlen)) 2654 return -EFAULT; 2655 2656 if (params.sack_delay == 0) 2657 params.sack_freq = 1; 2658 else 2659 params.sack_freq = 0; 2660 } else 2661 return -EINVAL; 2662 2663 /* Validate value parameter. */ 2664 if (params.sack_delay > 500) 2665 return -EINVAL; 2666 2667 /* Get association, if sack_assoc_id != 0 and the socket is a one 2668 * to many style socket, and an association was not found, then 2669 * the id was invalid. 2670 */ 2671 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2672 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2673 return -EINVAL; 2674 2675 if (params.sack_delay) { 2676 if (asoc) { 2677 asoc->sackdelay = 2678 msecs_to_jiffies(params.sack_delay); 2679 asoc->param_flags = 2680 sctp_spp_sackdelay_enable(asoc->param_flags); 2681 } else { 2682 sp->sackdelay = params.sack_delay; 2683 sp->param_flags = 2684 sctp_spp_sackdelay_enable(sp->param_flags); 2685 } 2686 } 2687 2688 if (params.sack_freq == 1) { 2689 if (asoc) { 2690 asoc->param_flags = 2691 sctp_spp_sackdelay_disable(asoc->param_flags); 2692 } else { 2693 sp->param_flags = 2694 sctp_spp_sackdelay_disable(sp->param_flags); 2695 } 2696 } else if (params.sack_freq > 1) { 2697 if (asoc) { 2698 asoc->sackfreq = params.sack_freq; 2699 asoc->param_flags = 2700 sctp_spp_sackdelay_enable(asoc->param_flags); 2701 } else { 2702 sp->sackfreq = params.sack_freq; 2703 sp->param_flags = 2704 sctp_spp_sackdelay_enable(sp->param_flags); 2705 } 2706 } 2707 2708 /* If change is for association, also apply to each transport. */ 2709 if (asoc) { 2710 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2711 transports) { 2712 if (params.sack_delay) { 2713 trans->sackdelay = 2714 msecs_to_jiffies(params.sack_delay); 2715 trans->param_flags = 2716 sctp_spp_sackdelay_enable(trans->param_flags); 2717 } 2718 if (params.sack_freq == 1) { 2719 trans->param_flags = 2720 sctp_spp_sackdelay_disable(trans->param_flags); 2721 } else if (params.sack_freq > 1) { 2722 trans->sackfreq = params.sack_freq; 2723 trans->param_flags = 2724 sctp_spp_sackdelay_enable(trans->param_flags); 2725 } 2726 } 2727 } 2728 2729 return 0; 2730 } 2731 2732 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2733 * 2734 * Applications can specify protocol parameters for the default association 2735 * initialization. The option name argument to setsockopt() and getsockopt() 2736 * is SCTP_INITMSG. 2737 * 2738 * Setting initialization parameters is effective only on an unconnected 2739 * socket (for UDP-style sockets only future associations are effected 2740 * by the change). With TCP-style sockets, this option is inherited by 2741 * sockets derived from a listener socket. 
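 *
 * Illustrative user-space sketch (not part of this file): request five
 * outbound streams and allow up to five inbound streams on future
 * associations; zero fields keep their current values:
 *
 *   struct sctp_initmsg im = { 0 };
 *   im.sinit_num_ostreams  = 5;
 *   im.sinit_max_instreams = 5;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));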
2742 */ 2743 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2744 { 2745 struct sctp_initmsg sinit; 2746 struct sctp_sock *sp = sctp_sk(sk); 2747 2748 if (optlen != sizeof(struct sctp_initmsg)) 2749 return -EINVAL; 2750 if (copy_from_user(&sinit, optval, optlen)) 2751 return -EFAULT; 2752 2753 if (sinit.sinit_num_ostreams) 2754 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2755 if (sinit.sinit_max_instreams) 2756 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2757 if (sinit.sinit_max_attempts) 2758 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2759 if (sinit.sinit_max_init_timeo) 2760 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2761 2762 return 0; 2763 } 2764 2765 /* 2766 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2767 * 2768 * Applications that wish to use the sendto() system call may wish to 2769 * specify a default set of parameters that would normally be supplied 2770 * through the inclusion of ancillary data. This socket option allows 2771 * such an application to set the default sctp_sndrcvinfo structure. 2772 * The application that wishes to use this socket option simply passes 2773 * in to this call the sctp_sndrcvinfo structure defined in Section 2774 * 5.2.2) The input parameters accepted by this call include 2775 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2776 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2777 * to this call if the caller is using the UDP model. 2778 */ 2779 static int sctp_setsockopt_default_send_param(struct sock *sk, 2780 char __user *optval, 2781 unsigned int optlen) 2782 { 2783 struct sctp_sock *sp = sctp_sk(sk); 2784 struct sctp_association *asoc; 2785 struct sctp_sndrcvinfo info; 2786 2787 if (optlen != sizeof(info)) 2788 return -EINVAL; 2789 if (copy_from_user(&info, optval, optlen)) 2790 return -EFAULT; 2791 if (info.sinfo_flags & 2792 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2793 SCTP_ABORT | SCTP_EOF)) 2794 return -EINVAL; 2795 2796 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2797 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2798 return -EINVAL; 2799 if (asoc) { 2800 asoc->default_stream = info.sinfo_stream; 2801 asoc->default_flags = info.sinfo_flags; 2802 asoc->default_ppid = info.sinfo_ppid; 2803 asoc->default_context = info.sinfo_context; 2804 asoc->default_timetolive = info.sinfo_timetolive; 2805 } else { 2806 sp->default_stream = info.sinfo_stream; 2807 sp->default_flags = info.sinfo_flags; 2808 sp->default_ppid = info.sinfo_ppid; 2809 sp->default_context = info.sinfo_context; 2810 sp->default_timetolive = info.sinfo_timetolive; 2811 } 2812 2813 return 0; 2814 } 2815 2816 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 2817 * (SCTP_DEFAULT_SNDINFO) 2818 */ 2819 static int sctp_setsockopt_default_sndinfo(struct sock *sk, 2820 char __user *optval, 2821 unsigned int optlen) 2822 { 2823 struct sctp_sock *sp = sctp_sk(sk); 2824 struct sctp_association *asoc; 2825 struct sctp_sndinfo info; 2826 2827 if (optlen != sizeof(info)) 2828 return -EINVAL; 2829 if (copy_from_user(&info, optval, optlen)) 2830 return -EFAULT; 2831 if (info.snd_flags & 2832 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2833 SCTP_ABORT | SCTP_EOF)) 2834 return -EINVAL; 2835 2836 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 2837 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 2838 return -EINVAL; 2839 if (asoc) { 2840 asoc->default_stream = info.snd_sid; 2841 asoc->default_flags = info.snd_flags; 2842 asoc->default_ppid = info.snd_ppid; 2843 asoc->default_context = info.snd_context; 2844 } else { 2845 sp->default_stream = info.snd_sid; 2846 sp->default_flags = info.snd_flags; 2847 sp->default_ppid = info.snd_ppid; 2848 sp->default_context = info.snd_context; 2849 } 2850 2851 return 0; 2852 } 2853 2854 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2855 * 2856 * Requests that the local SCTP stack use the enclosed peer address as 2857 * the association primary. The enclosed address must be one of the 2858 * association peer's addresses. 2859 */ 2860 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2861 unsigned int optlen) 2862 { 2863 struct sctp_prim prim; 2864 struct sctp_transport *trans; 2865 2866 if (optlen != sizeof(struct sctp_prim)) 2867 return -EINVAL; 2868 2869 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2870 return -EFAULT; 2871 2872 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2873 if (!trans) 2874 return -EINVAL; 2875 2876 sctp_assoc_set_primary(trans->asoc, trans); 2877 2878 return 0; 2879 } 2880 2881 /* 2882 * 7.1.5 SCTP_NODELAY 2883 * 2884 * Turn on/off any Nagle-like algorithm. This means that packets are 2885 * generally sent as soon as possible and no unnecessary delays are 2886 * introduced, at the cost of more packets in the network. Expects an 2887 * integer boolean flag. 2888 */ 2889 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2890 unsigned int optlen) 2891 { 2892 int val; 2893 2894 if (optlen < sizeof(int)) 2895 return -EINVAL; 2896 if (get_user(val, (int __user *)optval)) 2897 return -EFAULT; 2898 2899 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2900 return 0; 2901 } 2902 2903 /* 2904 * 2905 * 7.1.1 SCTP_RTOINFO 2906 * 2907 * The protocol parameters used to initialize and bound retransmission 2908 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2909 * and modify these parameters. 2910 * All parameters are time values, in milliseconds. A value of 0, when 2911 * modifying the parameters, indicates that the current value should not 2912 * be changed. 
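 *
 * Illustrative user-space sketch (not part of this file): tighten the
 * RTO bounds on the endpoint (association id 0); any field left at 0
 * keeps its current value:
 *
 *   struct sctp_rtoinfo ri = { 0 };
 *   ri.srto_initial = 300;   // ms
 *   ri.srto_min     = 100;   // ms
 *   ri.srto_max     = 1000;  // ms
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &ri, sizeof(ri));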
2913 * 2914 */ 2915 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2916 { 2917 struct sctp_rtoinfo rtoinfo; 2918 struct sctp_association *asoc; 2919 unsigned long rto_min, rto_max; 2920 struct sctp_sock *sp = sctp_sk(sk); 2921 2922 if (optlen != sizeof (struct sctp_rtoinfo)) 2923 return -EINVAL; 2924 2925 if (copy_from_user(&rtoinfo, optval, optlen)) 2926 return -EFAULT; 2927 2928 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2929 2930 /* Set the values to the specific association */ 2931 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2932 return -EINVAL; 2933 2934 rto_max = rtoinfo.srto_max; 2935 rto_min = rtoinfo.srto_min; 2936 2937 if (rto_max) 2938 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2939 else 2940 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2941 2942 if (rto_min) 2943 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2944 else 2945 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2946 2947 if (rto_min > rto_max) 2948 return -EINVAL; 2949 2950 if (asoc) { 2951 if (rtoinfo.srto_initial != 0) 2952 asoc->rto_initial = 2953 msecs_to_jiffies(rtoinfo.srto_initial); 2954 asoc->rto_max = rto_max; 2955 asoc->rto_min = rto_min; 2956 } else { 2957 /* If there is no association or the association-id = 0 2958 * set the values to the endpoint. 2959 */ 2960 if (rtoinfo.srto_initial != 0) 2961 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2962 sp->rtoinfo.srto_max = rto_max; 2963 sp->rtoinfo.srto_min = rto_min; 2964 } 2965 2966 return 0; 2967 } 2968 2969 /* 2970 * 2971 * 7.1.2 SCTP_ASSOCINFO 2972 * 2973 * This option is used to tune the maximum retransmission attempts 2974 * of the association. 2975 * Returns an error if the new association retransmission value is 2976 * greater than the sum of the retransmission value of the peer. 2977 * See [SCTP] for more information. 2978 * 2979 */ 2980 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2981 { 2982 2983 struct sctp_assocparams assocparams; 2984 struct sctp_association *asoc; 2985 2986 if (optlen != sizeof(struct sctp_assocparams)) 2987 return -EINVAL; 2988 if (copy_from_user(&assocparams, optval, optlen)) 2989 return -EFAULT; 2990 2991 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2992 2993 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2994 return -EINVAL; 2995 2996 /* Set the values to the specific association */ 2997 if (asoc) { 2998 if (assocparams.sasoc_asocmaxrxt != 0) { 2999 __u32 path_sum = 0; 3000 int paths = 0; 3001 struct sctp_transport *peer_addr; 3002 3003 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 3004 transports) { 3005 path_sum += peer_addr->pathmaxrxt; 3006 paths++; 3007 } 3008 3009 /* Only validate asocmaxrxt if we have more than 3010 * one path/transport. We do this because path 3011 * retransmissions are only counted when we have more 3012 * then one path. 
3013 */ 3014 if (paths > 1 && 3015 assocparams.sasoc_asocmaxrxt > path_sum) 3016 return -EINVAL; 3017 3018 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 3019 } 3020 3021 if (assocparams.sasoc_cookie_life != 0) 3022 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 3023 } else { 3024 /* Set the values to the endpoint */ 3025 struct sctp_sock *sp = sctp_sk(sk); 3026 3027 if (assocparams.sasoc_asocmaxrxt != 0) 3028 sp->assocparams.sasoc_asocmaxrxt = 3029 assocparams.sasoc_asocmaxrxt; 3030 if (assocparams.sasoc_cookie_life != 0) 3031 sp->assocparams.sasoc_cookie_life = 3032 assocparams.sasoc_cookie_life; 3033 } 3034 return 0; 3035 } 3036 3037 /* 3038 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 3039 * 3040 * This socket option is a boolean flag which turns on or off mapped V4 3041 * addresses. If this option is turned on and the socket is type 3042 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 3043 * If this option is turned off, then no mapping will be done of V4 3044 * addresses and a user will receive both PF_INET6 and PF_INET type 3045 * addresses on the socket. 3046 */ 3047 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 3048 { 3049 int val; 3050 struct sctp_sock *sp = sctp_sk(sk); 3051 3052 if (optlen < sizeof(int)) 3053 return -EINVAL; 3054 if (get_user(val, (int __user *)optval)) 3055 return -EFAULT; 3056 if (val) 3057 sp->v4mapped = 1; 3058 else 3059 sp->v4mapped = 0; 3060 3061 return 0; 3062 } 3063 3064 /* 3065 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 3066 * This option will get or set the maximum size to put in any outgoing 3067 * SCTP DATA chunk. If a message is larger than this size it will be 3068 * fragmented by SCTP into the specified size. Note that the underlying 3069 * SCTP implementation may fragment into smaller sized chunks when the 3070 * PMTU of the underlying association is smaller than the value set by 3071 * the user. The default value for this option is '0' which indicates 3072 * the user is NOT limiting fragmentation and only the PMTU will effect 3073 * SCTP's choice of DATA chunk size. Note also that values set larger 3074 * than the maximum size of an IP datagram will effectively let SCTP 3075 * control fragmentation (i.e. the same as setting this option to 0). 3076 * 3077 * The following structure is used to access and modify this parameter: 3078 * 3079 * struct sctp_assoc_value { 3080 * sctp_assoc_t assoc_id; 3081 * uint32_t assoc_value; 3082 * }; 3083 * 3084 * assoc_id: This parameter is ignored for one-to-one style sockets. 3085 * For one-to-many style sockets this parameter indicates which 3086 * association the user is performing an action upon. Note that if 3087 * this field's value is zero then the endpoints default value is 3088 * changed (effecting future associations only). 3089 * assoc_value: This parameter specifies the maximum size in bytes. 
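 *
 * Illustrative user-space sketch (not part of this file): cap DATA
 * chunk payloads at 1200 bytes for all future associations on this
 * socket:
 *
 *   struct sctp_assoc_value av = { 0 };
 *   av.assoc_id    = 0;      // 0: change the endpoint default
 *   av.assoc_value = 1200;   // bytes; 0 would mean "limit by PMTU only"
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));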
3090 */ 3091 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3092 { 3093 struct sctp_assoc_value params; 3094 struct sctp_association *asoc; 3095 struct sctp_sock *sp = sctp_sk(sk); 3096 int val; 3097 3098 if (optlen == sizeof(int)) { 3099 pr_warn_ratelimited(DEPRECATED 3100 "%s (pid %d) " 3101 "Use of int in maxseg socket option.\n" 3102 "Use struct sctp_assoc_value instead\n", 3103 current->comm, task_pid_nr(current)); 3104 if (copy_from_user(&val, optval, optlen)) 3105 return -EFAULT; 3106 params.assoc_id = 0; 3107 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3108 if (copy_from_user(¶ms, optval, optlen)) 3109 return -EFAULT; 3110 val = params.assoc_value; 3111 } else 3112 return -EINVAL; 3113 3114 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3115 return -EINVAL; 3116 3117 asoc = sctp_id2assoc(sk, params.assoc_id); 3118 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3119 return -EINVAL; 3120 3121 if (asoc) { 3122 if (val == 0) { 3123 val = asoc->pathmtu; 3124 val -= sp->pf->af->net_header_len; 3125 val -= sizeof(struct sctphdr) + 3126 sizeof(struct sctp_data_chunk); 3127 } 3128 asoc->user_frag = val; 3129 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3130 } else { 3131 sp->user_frag = val; 3132 } 3133 3134 return 0; 3135 } 3136 3137 3138 /* 3139 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3140 * 3141 * Requests that the peer mark the enclosed address as the association 3142 * primary. The enclosed address must be one of the association's 3143 * locally bound addresses. The following structure is used to make a 3144 * set primary request: 3145 */ 3146 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3147 unsigned int optlen) 3148 { 3149 struct net *net = sock_net(sk); 3150 struct sctp_sock *sp; 3151 struct sctp_association *asoc = NULL; 3152 struct sctp_setpeerprim prim; 3153 struct sctp_chunk *chunk; 3154 struct sctp_af *af; 3155 int err; 3156 3157 sp = sctp_sk(sk); 3158 3159 if (!net->sctp.addip_enable) 3160 return -EPERM; 3161 3162 if (optlen != sizeof(struct sctp_setpeerprim)) 3163 return -EINVAL; 3164 3165 if (copy_from_user(&prim, optval, optlen)) 3166 return -EFAULT; 3167 3168 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3169 if (!asoc) 3170 return -EINVAL; 3171 3172 if (!asoc->peer.asconf_capable) 3173 return -EPERM; 3174 3175 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3176 return -EPERM; 3177 3178 if (!sctp_state(asoc, ESTABLISHED)) 3179 return -ENOTCONN; 3180 3181 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3182 if (!af) 3183 return -EINVAL; 3184 3185 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3186 return -EADDRNOTAVAIL; 3187 3188 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3189 return -EADDRNOTAVAIL; 3190 3191 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3192 chunk = sctp_make_asconf_set_prim(asoc, 3193 (union sctp_addr *)&prim.sspp_addr); 3194 if (!chunk) 3195 return -ENOMEM; 3196 3197 err = sctp_send_asconf(asoc, chunk); 3198 3199 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3200 3201 return err; 3202 } 3203 3204 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3205 unsigned int optlen) 3206 { 3207 struct sctp_setadaptation adaptation; 3208 3209 if (optlen != sizeof(struct sctp_setadaptation)) 3210 return -EINVAL; 3211 if (copy_from_user(&adaptation, optval, optlen)) 3212 return -EFAULT; 3213 
3214 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3215 3216 return 0; 3217 } 3218 3219 /* 3220 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3221 * 3222 * The context field in the sctp_sndrcvinfo structure is normally only 3223 * used when a failed message is retrieved holding the value that was 3224 * sent down on the actual send call. This option allows the setting of 3225 * a default context on an association basis that will be received on 3226 * reading messages from the peer. This is especially helpful in the 3227 * one-2-many model for an application to keep some reference to an 3228 * internal state machine that is processing messages on the 3229 * association. Note that the setting of this value only effects 3230 * received messages from the peer and does not effect the value that is 3231 * saved with outbound messages. 3232 */ 3233 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3234 unsigned int optlen) 3235 { 3236 struct sctp_assoc_value params; 3237 struct sctp_sock *sp; 3238 struct sctp_association *asoc; 3239 3240 if (optlen != sizeof(struct sctp_assoc_value)) 3241 return -EINVAL; 3242 if (copy_from_user(¶ms, optval, optlen)) 3243 return -EFAULT; 3244 3245 sp = sctp_sk(sk); 3246 3247 if (params.assoc_id != 0) { 3248 asoc = sctp_id2assoc(sk, params.assoc_id); 3249 if (!asoc) 3250 return -EINVAL; 3251 asoc->default_rcv_context = params.assoc_value; 3252 } else { 3253 sp->default_rcv_context = params.assoc_value; 3254 } 3255 3256 return 0; 3257 } 3258 3259 /* 3260 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3261 * 3262 * This options will at a minimum specify if the implementation is doing 3263 * fragmented interleave. Fragmented interleave, for a one to many 3264 * socket, is when subsequent calls to receive a message may return 3265 * parts of messages from different associations. Some implementations 3266 * may allow you to turn this value on or off. If so, when turned off, 3267 * no fragment interleave will occur (which will cause a head of line 3268 * blocking amongst multiple associations sharing the same one to many 3269 * socket). When this option is turned on, then each receive call may 3270 * come from a different association (thus the user must receive data 3271 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3272 * association each receive belongs to. 3273 * 3274 * This option takes a boolean value. A non-zero value indicates that 3275 * fragmented interleave is on. A value of zero indicates that 3276 * fragmented interleave is off. 3277 * 3278 * Note that it is important that an implementation that allows this 3279 * option to be turned on, have it off by default. Otherwise an unaware 3280 * application using the one to many model may become confused and act 3281 * incorrectly. 3282 */ 3283 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3284 char __user *optval, 3285 unsigned int optlen) 3286 { 3287 int val; 3288 3289 if (optlen != sizeof(int)) 3290 return -EINVAL; 3291 if (get_user(val, (int __user *)optval)) 3292 return -EFAULT; 3293 3294 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3295 3296 return 0; 3297 } 3298 3299 /* 3300 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3301 * (SCTP_PARTIAL_DELIVERY_POINT) 3302 * 3303 * This option will set or get the SCTP partial delivery point. This 3304 * point is the size of a message where the partial delivery API will be 3305 * invoked to help free up rwnd space for the peer. 
Setting this to a 3306 * lower value will cause partial deliveries to happen more often. The 3307 * calls argument is an integer that sets or gets the partial delivery 3308 * point. Note also that the call will fail if the user attempts to set 3309 * this value larger than the socket receive buffer size. 3310 * 3311 * Note that any single message having a length smaller than or equal to 3312 * the SCTP partial delivery point will be delivered in one single read 3313 * call as long as the user provided buffer is large enough to hold the 3314 * message. 3315 */ 3316 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3317 char __user *optval, 3318 unsigned int optlen) 3319 { 3320 u32 val; 3321 3322 if (optlen != sizeof(u32)) 3323 return -EINVAL; 3324 if (get_user(val, (int __user *)optval)) 3325 return -EFAULT; 3326 3327 /* Note: We double the receive buffer from what the user sets 3328 * it to be, also initial rwnd is based on rcvbuf/2. 3329 */ 3330 if (val > (sk->sk_rcvbuf >> 1)) 3331 return -EINVAL; 3332 3333 sctp_sk(sk)->pd_point = val; 3334 3335 return 0; /* is this the right error code? */ 3336 } 3337 3338 /* 3339 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3340 * 3341 * This option will allow a user to change the maximum burst of packets 3342 * that can be emitted by this association. Note that the default value 3343 * is 4, and some implementations may restrict this setting so that it 3344 * can only be lowered. 3345 * 3346 * NOTE: This text doesn't seem right. Do this on a socket basis with 3347 * future associations inheriting the socket value. 3348 */ 3349 static int sctp_setsockopt_maxburst(struct sock *sk, 3350 char __user *optval, 3351 unsigned int optlen) 3352 { 3353 struct sctp_assoc_value params; 3354 struct sctp_sock *sp; 3355 struct sctp_association *asoc; 3356 int val; 3357 int assoc_id = 0; 3358 3359 if (optlen == sizeof(int)) { 3360 pr_warn_ratelimited(DEPRECATED 3361 "%s (pid %d) " 3362 "Use of int in max_burst socket option deprecated.\n" 3363 "Use struct sctp_assoc_value instead\n", 3364 current->comm, task_pid_nr(current)); 3365 if (copy_from_user(&val, optval, optlen)) 3366 return -EFAULT; 3367 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3368 if (copy_from_user(¶ms, optval, optlen)) 3369 return -EFAULT; 3370 val = params.assoc_value; 3371 assoc_id = params.assoc_id; 3372 } else 3373 return -EINVAL; 3374 3375 sp = sctp_sk(sk); 3376 3377 if (assoc_id != 0) { 3378 asoc = sctp_id2assoc(sk, assoc_id); 3379 if (!asoc) 3380 return -EINVAL; 3381 asoc->max_burst = val; 3382 } else 3383 sp->max_burst = val; 3384 3385 return 0; 3386 } 3387 3388 /* 3389 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3390 * 3391 * This set option adds a chunk type that the user is requesting to be 3392 * received only in an authenticated way. Changes to the list of chunks 3393 * will only effect future associations on the socket. 
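 *
 * Illustrative user-space sketch (not part of this file): require that
 * DATA chunks (chunk type 0) from peers of future associations arrive
 * only inside authenticated packets:
 *
 *   struct sctp_authchunk ac;
 *   ac.sauth_chunk = 0;   // SCTP DATA chunk type
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));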
3394 */ 3395 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3396 char __user *optval, 3397 unsigned int optlen) 3398 { 3399 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3400 struct sctp_authchunk val; 3401 3402 if (!ep->auth_enable) 3403 return -EACCES; 3404 3405 if (optlen != sizeof(struct sctp_authchunk)) 3406 return -EINVAL; 3407 if (copy_from_user(&val, optval, optlen)) 3408 return -EFAULT; 3409 3410 switch (val.sauth_chunk) { 3411 case SCTP_CID_INIT: 3412 case SCTP_CID_INIT_ACK: 3413 case SCTP_CID_SHUTDOWN_COMPLETE: 3414 case SCTP_CID_AUTH: 3415 return -EINVAL; 3416 } 3417 3418 /* add this chunk id to the endpoint */ 3419 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3420 } 3421 3422 /* 3423 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3424 * 3425 * This option gets or sets the list of HMAC algorithms that the local 3426 * endpoint requires the peer to use. 3427 */ 3428 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3429 char __user *optval, 3430 unsigned int optlen) 3431 { 3432 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3433 struct sctp_hmacalgo *hmacs; 3434 u32 idents; 3435 int err; 3436 3437 if (!ep->auth_enable) 3438 return -EACCES; 3439 3440 if (optlen < sizeof(struct sctp_hmacalgo)) 3441 return -EINVAL; 3442 3443 hmacs = memdup_user(optval, optlen); 3444 if (IS_ERR(hmacs)) 3445 return PTR_ERR(hmacs); 3446 3447 idents = hmacs->shmac_num_idents; 3448 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3449 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3450 err = -EINVAL; 3451 goto out; 3452 } 3453 3454 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3455 out: 3456 kfree(hmacs); 3457 return err; 3458 } 3459 3460 /* 3461 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3462 * 3463 * This option will set a shared secret key which is used to build an 3464 * association shared key. 3465 */ 3466 static int sctp_setsockopt_auth_key(struct sock *sk, 3467 char __user *optval, 3468 unsigned int optlen) 3469 { 3470 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3471 struct sctp_authkey *authkey; 3472 struct sctp_association *asoc; 3473 int ret; 3474 3475 if (!ep->auth_enable) 3476 return -EACCES; 3477 3478 if (optlen <= sizeof(struct sctp_authkey)) 3479 return -EINVAL; 3480 3481 authkey = memdup_user(optval, optlen); 3482 if (IS_ERR(authkey)) 3483 return PTR_ERR(authkey); 3484 3485 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3486 ret = -EINVAL; 3487 goto out; 3488 } 3489 3490 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3491 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3492 ret = -EINVAL; 3493 goto out; 3494 } 3495 3496 ret = sctp_auth_set_key(ep, asoc, authkey); 3497 out: 3498 kzfree(authkey); 3499 return ret; 3500 } 3501 3502 /* 3503 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3504 * 3505 * This option will get or set the active shared key to be used to build 3506 * the association shared key. 
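 *
 * Illustrative user-space sketch (not part of this file): make a
 * previously installed key number 1 the active key for one association
 * (`assoc_id' assumed known; 0 selects the endpoint on a one-to-many
 * socket):
 *
 *   struct sctp_authkeyid ak = { 0 };
 *   ak.scact_assoc_id  = assoc_id;
 *   ak.scact_keynumber = 1;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY, &ak, sizeof(ak));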
3507 */ 3508 static int sctp_setsockopt_active_key(struct sock *sk, 3509 char __user *optval, 3510 unsigned int optlen) 3511 { 3512 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3513 struct sctp_authkeyid val; 3514 struct sctp_association *asoc; 3515 3516 if (!ep->auth_enable) 3517 return -EACCES; 3518 3519 if (optlen != sizeof(struct sctp_authkeyid)) 3520 return -EINVAL; 3521 if (copy_from_user(&val, optval, optlen)) 3522 return -EFAULT; 3523 3524 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3525 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3526 return -EINVAL; 3527 3528 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3529 } 3530 3531 /* 3532 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3533 * 3534 * This set option will delete a shared secret key from use. 3535 */ 3536 static int sctp_setsockopt_del_key(struct sock *sk, 3537 char __user *optval, 3538 unsigned int optlen) 3539 { 3540 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3541 struct sctp_authkeyid val; 3542 struct sctp_association *asoc; 3543 3544 if (!ep->auth_enable) 3545 return -EACCES; 3546 3547 if (optlen != sizeof(struct sctp_authkeyid)) 3548 return -EINVAL; 3549 if (copy_from_user(&val, optval, optlen)) 3550 return -EFAULT; 3551 3552 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3553 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3554 return -EINVAL; 3555 3556 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3557 3558 } 3559 3560 /* 3561 * 8.1.23 SCTP_AUTO_ASCONF 3562 * 3563 * This option will enable or disable the use of the automatic generation of 3564 * ASCONF chunks to add and delete addresses to an existing association. Note 3565 * that this option has two caveats namely: a) it only affects sockets that 3566 * are bound to all addresses available to the SCTP stack, and b) the system 3567 * administrator may have an overriding control that turns the ASCONF feature 3568 * off no matter what setting the socket option may have. 3569 * This option expects an integer boolean flag, where a non-zero value turns on 3570 * the option, and a zero value turns off the option. 3571 * Note. In this implementation, socket operation overrides default parameter 3572 * being set by sysctl as well as FreeBSD implementation 3573 */ 3574 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3575 unsigned int optlen) 3576 { 3577 int val; 3578 struct sctp_sock *sp = sctp_sk(sk); 3579 3580 if (optlen < sizeof(int)) 3581 return -EINVAL; 3582 if (get_user(val, (int __user *)optval)) 3583 return -EFAULT; 3584 if (!sctp_is_ep_boundall(sk) && val) 3585 return -EINVAL; 3586 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3587 return 0; 3588 3589 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3590 if (val == 0 && sp->do_auto_asconf) { 3591 list_del(&sp->auto_asconf_list); 3592 sp->do_auto_asconf = 0; 3593 } else if (val && !sp->do_auto_asconf) { 3594 list_add_tail(&sp->auto_asconf_list, 3595 &sock_net(sk)->sctp.auto_asconf_splist); 3596 sp->do_auto_asconf = 1; 3597 } 3598 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3599 return 0; 3600 } 3601 3602 /* 3603 * SCTP_PEER_ADDR_THLDS 3604 * 3605 * This option allows us to alter the partially failed threshold for one or all 3606 * transports in an association. 
See Section 6.1 of: 3607 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3608 */ 3609 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3610 char __user *optval, 3611 unsigned int optlen) 3612 { 3613 struct sctp_paddrthlds val; 3614 struct sctp_transport *trans; 3615 struct sctp_association *asoc; 3616 3617 if (optlen < sizeof(struct sctp_paddrthlds)) 3618 return -EINVAL; 3619 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3620 sizeof(struct sctp_paddrthlds))) 3621 return -EFAULT; 3622 3623 3624 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3625 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3626 if (!asoc) 3627 return -ENOENT; 3628 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3629 transports) { 3630 if (val.spt_pathmaxrxt) 3631 trans->pathmaxrxt = val.spt_pathmaxrxt; 3632 trans->pf_retrans = val.spt_pathpfthld; 3633 } 3634 3635 if (val.spt_pathmaxrxt) 3636 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3637 asoc->pf_retrans = val.spt_pathpfthld; 3638 } else { 3639 trans = sctp_addr_id2transport(sk, &val.spt_address, 3640 val.spt_assoc_id); 3641 if (!trans) 3642 return -ENOENT; 3643 3644 if (val.spt_pathmaxrxt) 3645 trans->pathmaxrxt = val.spt_pathmaxrxt; 3646 trans->pf_retrans = val.spt_pathpfthld; 3647 } 3648 3649 return 0; 3650 } 3651 3652 static int sctp_setsockopt_recvrcvinfo(struct sock *sk, 3653 char __user *optval, 3654 unsigned int optlen) 3655 { 3656 int val; 3657 3658 if (optlen < sizeof(int)) 3659 return -EINVAL; 3660 if (get_user(val, (int __user *) optval)) 3661 return -EFAULT; 3662 3663 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; 3664 3665 return 0; 3666 } 3667 3668 static int sctp_setsockopt_recvnxtinfo(struct sock *sk, 3669 char __user *optval, 3670 unsigned int optlen) 3671 { 3672 int val; 3673 3674 if (optlen < sizeof(int)) 3675 return -EINVAL; 3676 if (get_user(val, (int __user *) optval)) 3677 return -EFAULT; 3678 3679 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 
0 : 1; 3680 3681 return 0; 3682 } 3683 3684 static int sctp_setsockopt_pr_supported(struct sock *sk, 3685 char __user *optval, 3686 unsigned int optlen) 3687 { 3688 struct sctp_assoc_value params; 3689 struct sctp_association *asoc; 3690 int retval = -EINVAL; 3691 3692 if (optlen != sizeof(params)) 3693 goto out; 3694
3695 if (copy_from_user(&params, optval, optlen)) { 3696 retval = -EFAULT; 3697 goto out; 3698 } 3699 3700 asoc = sctp_id2assoc(sk, params.assoc_id); 3701 if (asoc) { 3702 asoc->prsctp_enable = !!params.assoc_value; 3703 } else if (!params.assoc_id) { 3704 struct sctp_sock *sp = sctp_sk(sk); 3705 3706 sp->ep->prsctp_enable = !!params.assoc_value; 3707 } else { 3708 goto out; 3709 } 3710 3711 retval = 0; 3712 3713 out: 3714 return retval; 3715 } 3716
3717 static int sctp_setsockopt_default_prinfo(struct sock *sk, 3718 char __user *optval, 3719 unsigned int optlen) 3720 { 3721 struct sctp_default_prinfo info; 3722 struct sctp_association *asoc; 3723 int retval = -EINVAL; 3724 3725 if (optlen != sizeof(info)) 3726 goto out; 3727 3728 if (copy_from_user(&info, optval, sizeof(info))) { 3729 retval = -EFAULT; 3730 goto out; 3731 } 3732 3733 if (info.pr_policy & ~SCTP_PR_SCTP_MASK) 3734 goto out; 3735 3736 if (info.pr_policy == SCTP_PR_SCTP_NONE) 3737 info.pr_value = 0; 3738
3739 asoc = sctp_id2assoc(sk, info.pr_assoc_id); 3740 if (asoc) { 3741 SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); 3742 asoc->default_timetolive = info.pr_value; 3743 } else if (!info.pr_assoc_id) { 3744 struct sctp_sock *sp = sctp_sk(sk); 3745 3746 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); 3747 sp->default_timetolive = info.pr_value; 3748 } else { 3749 goto out; 3750 } 3751 3752 retval = 0; 3753 3754 out: 3755 return retval; 3756 } 3757
3758 /* API 6.2 setsockopt(), getsockopt() 3759 * 3760 * Applications use setsockopt() and getsockopt() to set or retrieve 3761 * socket options. Socket options are used to change the default 3762 * behavior of socket calls. They are described in Section 7. 3763 * 3764 * The syntax is: 3765 * 3766 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3767 * int __user *optlen); 3768 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3769 * int optlen); 3770 * 3771 * sd - the socket descriptor. 3772 * level - set to IPPROTO_SCTP for all SCTP options. 3773 * optname - the option name. 3774 * optval - the buffer to store the value of the option. 3775 * optlen - the size of the buffer. 3776 */ 3777 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3778 char __user *optval, unsigned int optlen) 3779 { 3780 int retval = 0; 3781 3782 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3783
3784 /* I can hardly begin to describe how wrong this is. This is 3785 * so broken as to be worse than useless. The API draft 3786 * REALLY is NOT helpful here... I am not convinced that the 3787 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3788 * are at all well-founded. 3789 */ 3790 if (level != SOL_SCTP) { 3791 struct sctp_af *af = sctp_sk(sk)->pf->af; 3792 retval = af->setsockopt(sk, level, optname, optval, optlen); 3793 goto out_nounlock; 3794 } 3795 3796 lock_sock(sk); 3797 3798 switch (optname) { 3799 case SCTP_SOCKOPT_BINDX_ADD: 3800 /* 'optlen' is the size of the addresses buffer.
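 * User space normally does not issue this setsockopt() directly; the
 * lksctp-tools wrapper sctp_bindx(sd, addrs, addrcnt, SCTP_BINDX_ADD_ADDR)
 * is expected to land here with a packed array of sockaddrs, and 'optlen'
 * must cover that whole array.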
*/ 3801 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3802 optlen, SCTP_BINDX_ADD_ADDR); 3803 break; 3804 3805 case SCTP_SOCKOPT_BINDX_REM: 3806 /* 'optlen' is the size of the addresses buffer. */ 3807 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3808 optlen, SCTP_BINDX_REM_ADDR); 3809 break; 3810 3811 case SCTP_SOCKOPT_CONNECTX_OLD: 3812 /* 'optlen' is the size of the addresses buffer. */ 3813 retval = sctp_setsockopt_connectx_old(sk, 3814 (struct sockaddr __user *)optval, 3815 optlen); 3816 break; 3817 3818 case SCTP_SOCKOPT_CONNECTX: 3819 /* 'optlen' is the size of the addresses buffer. */ 3820 retval = sctp_setsockopt_connectx(sk, 3821 (struct sockaddr __user *)optval, 3822 optlen); 3823 break; 3824 3825 case SCTP_DISABLE_FRAGMENTS: 3826 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3827 break; 3828 3829 case SCTP_EVENTS: 3830 retval = sctp_setsockopt_events(sk, optval, optlen); 3831 break; 3832 3833 case SCTP_AUTOCLOSE: 3834 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3835 break; 3836 3837 case SCTP_PEER_ADDR_PARAMS: 3838 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3839 break; 3840 3841 case SCTP_DELAYED_SACK: 3842 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3843 break; 3844 case SCTP_PARTIAL_DELIVERY_POINT: 3845 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3846 break; 3847 3848 case SCTP_INITMSG: 3849 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3850 break; 3851 case SCTP_DEFAULT_SEND_PARAM: 3852 retval = sctp_setsockopt_default_send_param(sk, optval, 3853 optlen); 3854 break; 3855 case SCTP_DEFAULT_SNDINFO: 3856 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); 3857 break; 3858 case SCTP_PRIMARY_ADDR: 3859 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3860 break; 3861 case SCTP_SET_PEER_PRIMARY_ADDR: 3862 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3863 break; 3864 case SCTP_NODELAY: 3865 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3866 break; 3867 case SCTP_RTOINFO: 3868 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3869 break; 3870 case SCTP_ASSOCINFO: 3871 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3872 break; 3873 case SCTP_I_WANT_MAPPED_V4_ADDR: 3874 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3875 break; 3876 case SCTP_MAXSEG: 3877 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3878 break; 3879 case SCTP_ADAPTATION_LAYER: 3880 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3881 break; 3882 case SCTP_CONTEXT: 3883 retval = sctp_setsockopt_context(sk, optval, optlen); 3884 break; 3885 case SCTP_FRAGMENT_INTERLEAVE: 3886 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3887 break; 3888 case SCTP_MAX_BURST: 3889 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3890 break; 3891 case SCTP_AUTH_CHUNK: 3892 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3893 break; 3894 case SCTP_HMAC_IDENT: 3895 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3896 break; 3897 case SCTP_AUTH_KEY: 3898 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3899 break; 3900 case SCTP_AUTH_ACTIVE_KEY: 3901 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3902 break; 3903 case SCTP_AUTH_DELETE_KEY: 3904 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3905 break; 3906 case SCTP_AUTO_ASCONF: 3907 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3908 break; 3909 case SCTP_PEER_ADDR_THLDS: 3910 
retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3911 break; 3912 case SCTP_RECVRCVINFO: 3913 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); 3914 break; 3915 case SCTP_RECVNXTINFO: 3916 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); 3917 break; 3918 case SCTP_PR_SUPPORTED: 3919 retval = sctp_setsockopt_pr_supported(sk, optval, optlen); 3920 break; 3921 case SCTP_DEFAULT_PRINFO: 3922 retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); 3923 break; 3924 default: 3925 retval = -ENOPROTOOPT; 3926 break; 3927 } 3928 3929 release_sock(sk); 3930 3931 out_nounlock: 3932 return retval; 3933 } 3934 3935 /* API 3.1.6 connect() - UDP Style Syntax 3936 * 3937 * An application may use the connect() call in the UDP model to initiate an 3938 * association without sending data. 3939 * 3940 * The syntax is: 3941 * 3942 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3943 * 3944 * sd: the socket descriptor to have a new association added to. 3945 * 3946 * nam: the address structure (either struct sockaddr_in or struct 3947 * sockaddr_in6 defined in RFC2553 [7]). 3948 * 3949 * len: the size of the address. 3950 */ 3951 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3952 int addr_len) 3953 { 3954 int err = 0; 3955 struct sctp_af *af; 3956 3957 lock_sock(sk); 3958 3959 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3960 addr, addr_len); 3961 3962 /* Validate addr_len before calling common connect/connectx routine. */ 3963 af = sctp_get_af_specific(addr->sa_family); 3964 if (!af || addr_len < af->sockaddr_len) { 3965 err = -EINVAL; 3966 } else { 3967 /* Pass correct addr len to common routine (so it knows there 3968 * is only one address being passed. 3969 */ 3970 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3971 } 3972 3973 release_sock(sk); 3974 return err; 3975 } 3976 3977 /* FIXME: Write comments. */ 3978 static int sctp_disconnect(struct sock *sk, int flags) 3979 { 3980 return -EOPNOTSUPP; /* STUB */ 3981 } 3982 3983 /* 4.1.4 accept() - TCP Style Syntax 3984 * 3985 * Applications use accept() call to remove an established SCTP 3986 * association from the accept queue of the endpoint. A new socket 3987 * descriptor will be returned from accept() to represent the newly 3988 * formed association. 3989 */ 3990 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3991 { 3992 struct sctp_sock *sp; 3993 struct sctp_endpoint *ep; 3994 struct sock *newsk = NULL; 3995 struct sctp_association *asoc; 3996 long timeo; 3997 int error = 0; 3998 3999 lock_sock(sk); 4000 4001 sp = sctp_sk(sk); 4002 ep = sp->ep; 4003 4004 if (!sctp_style(sk, TCP)) { 4005 error = -EOPNOTSUPP; 4006 goto out; 4007 } 4008 4009 if (!sctp_sstate(sk, LISTENING)) { 4010 error = -EINVAL; 4011 goto out; 4012 } 4013 4014 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 4015 4016 error = sctp_wait_for_accept(sk, timeo); 4017 if (error) 4018 goto out; 4019 4020 /* We treat the list of associations on the endpoint as the accept 4021 * queue and pick the first association on the list. 4022 */ 4023 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 4024 4025 newsk = sp->pf->create_accept_sk(sk, asoc); 4026 if (!newsk) { 4027 error = -ENOMEM; 4028 goto out; 4029 } 4030 4031 /* Populate the fields of the newsk from the oldsk and migrate the 4032 * asoc to the newsk. 
4033 */ 4034 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 4035 4036 out: 4037 release_sock(sk); 4038 *err = error; 4039 return newsk; 4040 } 4041 4042 /* The SCTP ioctl handler. */ 4043 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 4044 { 4045 int rc = -ENOTCONN; 4046 4047 lock_sock(sk); 4048 4049 /* 4050 * SEQPACKET-style sockets in LISTENING state are valid, for 4051 * SCTP, so only discard TCP-style sockets in LISTENING state. 4052 */ 4053 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 4054 goto out; 4055 4056 switch (cmd) { 4057 case SIOCINQ: { 4058 struct sk_buff *skb; 4059 unsigned int amount = 0; 4060 4061 skb = skb_peek(&sk->sk_receive_queue); 4062 if (skb != NULL) { 4063 /* 4064 * We will only return the amount of this packet since 4065 * that is all that will be read. 4066 */ 4067 amount = skb->len; 4068 } 4069 rc = put_user(amount, (int __user *)arg); 4070 break; 4071 } 4072 default: 4073 rc = -ENOIOCTLCMD; 4074 break; 4075 } 4076 out: 4077 release_sock(sk); 4078 return rc; 4079 } 4080 4081 /* This is the function which gets called during socket creation to 4082 * initialized the SCTP-specific portion of the sock. 4083 * The sock structure should already be zero-filled memory. 4084 */ 4085 static int sctp_init_sock(struct sock *sk) 4086 { 4087 struct net *net = sock_net(sk); 4088 struct sctp_sock *sp; 4089 4090 pr_debug("%s: sk:%p\n", __func__, sk); 4091 4092 sp = sctp_sk(sk); 4093 4094 /* Initialize the SCTP per socket area. */ 4095 switch (sk->sk_type) { 4096 case SOCK_SEQPACKET: 4097 sp->type = SCTP_SOCKET_UDP; 4098 break; 4099 case SOCK_STREAM: 4100 sp->type = SCTP_SOCKET_TCP; 4101 break; 4102 default: 4103 return -ESOCKTNOSUPPORT; 4104 } 4105 4106 sk->sk_gso_type = SKB_GSO_SCTP; 4107 4108 /* Initialize default send parameters. These parameters can be 4109 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4110 */ 4111 sp->default_stream = 0; 4112 sp->default_ppid = 0; 4113 sp->default_flags = 0; 4114 sp->default_context = 0; 4115 sp->default_timetolive = 0; 4116 4117 sp->default_rcv_context = 0; 4118 sp->max_burst = net->sctp.max_burst; 4119 4120 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 4121 4122 /* Initialize default setup parameters. These parameters 4123 * can be modified with the SCTP_INITMSG socket option or 4124 * overridden by the SCTP_INIT CMSG. 4125 */ 4126 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 4127 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 4128 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 4129 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 4130 4131 /* Initialize default RTO related parameters. These parameters can 4132 * be modified for with the SCTP_RTOINFO socket option. 4133 */ 4134 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 4135 sp->rtoinfo.srto_max = net->sctp.rto_max; 4136 sp->rtoinfo.srto_min = net->sctp.rto_min; 4137 4138 /* Initialize default association related parameters. These parameters 4139 * can be modified with the SCTP_ASSOCINFO socket option. 4140 */ 4141 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 4142 sp->assocparams.sasoc_number_peer_destinations = 0; 4143 sp->assocparams.sasoc_peer_rwnd = 0; 4144 sp->assocparams.sasoc_local_rwnd = 0; 4145 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 4146 4147 /* Initialize default event subscriptions. By default, all the 4148 * options are off. 
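 * They can be switched on per socket later via the SCTP_EVENTS
 * setsockopt() (see sctp_setsockopt_events()).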
4149 */ 4150 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 4151 4152 /* Default Peer Address Parameters. These defaults can 4153 * be modified via SCTP_PEER_ADDR_PARAMS 4154 */ 4155 sp->hbinterval = net->sctp.hb_interval; 4156 sp->pathmaxrxt = net->sctp.max_retrans_path; 4157 sp->pathmtu = 0; /* allow default discovery */ 4158 sp->sackdelay = net->sctp.sack_timeout; 4159 sp->sackfreq = 2; 4160 sp->param_flags = SPP_HB_ENABLE | 4161 SPP_PMTUD_ENABLE | 4162 SPP_SACKDELAY_ENABLE; 4163 4164 /* If enabled no SCTP message fragmentation will be performed. 4165 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 4166 */ 4167 sp->disable_fragments = 0; 4168 4169 /* Enable Nagle algorithm by default. */ 4170 sp->nodelay = 0; 4171 4172 sp->recvrcvinfo = 0; 4173 sp->recvnxtinfo = 0; 4174 4175 /* Enable by default. */ 4176 sp->v4mapped = 1; 4177 4178 /* Auto-close idle associations after the configured 4179 * number of seconds. A value of 0 disables this 4180 * feature. Configure through the SCTP_AUTOCLOSE socket option, 4181 * for UDP-style sockets only. 4182 */ 4183 sp->autoclose = 0; 4184 4185 /* User specified fragmentation limit. */ 4186 sp->user_frag = 0; 4187 4188 sp->adaptation_ind = 0; 4189 4190 sp->pf = sctp_get_pf_specific(sk->sk_family); 4191 4192 /* Control variables for partial data delivery. */ 4193 atomic_set(&sp->pd_mode, 0); 4194 skb_queue_head_init(&sp->pd_lobby); 4195 sp->frag_interleave = 0; 4196 4197 /* Create a per socket endpoint structure. Even if we 4198 * change the data structure relationships, this may still 4199 * be useful for storing pre-connect address information. 4200 */ 4201 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4202 if (!sp->ep) 4203 return -ENOMEM; 4204 4205 sp->hmac = NULL; 4206 4207 sk->sk_destruct = sctp_destruct_sock; 4208 4209 SCTP_DBG_OBJCNT_INC(sock); 4210 4211 local_bh_disable(); 4212 percpu_counter_inc(&sctp_sockets_allocated); 4213 sock_prot_inuse_add(net, sk->sk_prot, 1); 4214 4215 /* Nothing can fail after this block, otherwise 4216 * sctp_destroy_sock() will be called without addr_wq_lock held 4217 */ 4218 if (net->sctp.default_auto_asconf) { 4219 spin_lock(&sock_net(sk)->sctp.addr_wq_lock); 4220 list_add_tail(&sp->auto_asconf_list, 4221 &net->sctp.auto_asconf_splist); 4222 sp->do_auto_asconf = 1; 4223 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); 4224 } else { 4225 sp->do_auto_asconf = 0; 4226 } 4227 4228 local_bh_enable(); 4229 4230 return 0; 4231 } 4232 4233 /* Cleanup any SCTP per socket resources. Must be called with 4234 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true 4235 */ 4236 static void sctp_destroy_sock(struct sock *sk) 4237 { 4238 struct sctp_sock *sp; 4239 4240 pr_debug("%s: sk:%p\n", __func__, sk); 4241 4242 /* Release our hold on the endpoint. */ 4243 sp = sctp_sk(sk); 4244 /* This could happen during socket init, thus we bail out 4245 * early, since the rest of the below is not setup either. 4246 */ 4247 if (sp->ep == NULL) 4248 return; 4249 4250 if (sp->do_auto_asconf) { 4251 sp->do_auto_asconf = 0; 4252 list_del(&sp->auto_asconf_list); 4253 } 4254 sctp_endpoint_free(sp->ep); 4255 local_bh_disable(); 4256 percpu_counter_dec(&sctp_sockets_allocated); 4257 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4258 local_bh_enable(); 4259 } 4260 4261 /* Triggered when there are no references on the socket anymore */ 4262 static void sctp_destruct_sock(struct sock *sk) 4263 { 4264 struct sctp_sock *sp = sctp_sk(sk); 4265 4266 /* Free up the HMAC transform. 
*/ 4267 crypto_free_shash(sp->hmac); 4268 4269 inet_sock_destruct(sk); 4270 } 4271 4272 /* API 4.1.7 shutdown() - TCP Style Syntax 4273 * int shutdown(int socket, int how); 4274 * 4275 * sd - the socket descriptor of the association to be closed. 4276 * how - Specifies the type of shutdown. The values are 4277 * as follows: 4278 * SHUT_RD 4279 * Disables further receive operations. No SCTP 4280 * protocol action is taken. 4281 * SHUT_WR 4282 * Disables further send operations, and initiates 4283 * the SCTP shutdown sequence. 4284 * SHUT_RDWR 4285 * Disables further send and receive operations 4286 * and initiates the SCTP shutdown sequence. 4287 */ 4288 static void sctp_shutdown(struct sock *sk, int how) 4289 { 4290 struct net *net = sock_net(sk); 4291 struct sctp_endpoint *ep; 4292 4293 if (!sctp_style(sk, TCP)) 4294 return; 4295 4296 ep = sctp_sk(sk)->ep; 4297 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { 4298 struct sctp_association *asoc; 4299 4300 sk->sk_state = SCTP_SS_CLOSING; 4301 asoc = list_entry(ep->asocs.next, 4302 struct sctp_association, asocs); 4303 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4304 } 4305 } 4306 4307 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, 4308 struct sctp_info *info) 4309 { 4310 struct sctp_transport *prim; 4311 struct list_head *pos; 4312 int mask; 4313 4314 memset(info, 0, sizeof(*info)); 4315 if (!asoc) { 4316 struct sctp_sock *sp = sctp_sk(sk); 4317 4318 info->sctpi_s_autoclose = sp->autoclose; 4319 info->sctpi_s_adaptation_ind = sp->adaptation_ind; 4320 info->sctpi_s_pd_point = sp->pd_point; 4321 info->sctpi_s_nodelay = sp->nodelay; 4322 info->sctpi_s_disable_fragments = sp->disable_fragments; 4323 info->sctpi_s_v4mapped = sp->v4mapped; 4324 info->sctpi_s_frag_interleave = sp->frag_interleave; 4325 info->sctpi_s_type = sp->type; 4326 4327 return 0; 4328 } 4329 4330 info->sctpi_tag = asoc->c.my_vtag; 4331 info->sctpi_state = asoc->state; 4332 info->sctpi_rwnd = asoc->a_rwnd; 4333 info->sctpi_unackdata = asoc->unack_data; 4334 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4335 info->sctpi_instrms = asoc->c.sinit_max_instreams; 4336 info->sctpi_outstrms = asoc->c.sinit_num_ostreams; 4337 list_for_each(pos, &asoc->base.inqueue.in_chunk_list) 4338 info->sctpi_inqueue++; 4339 list_for_each(pos, &asoc->outqueue.out_chunk_list) 4340 info->sctpi_outqueue++; 4341 info->sctpi_overall_error = asoc->overall_error_count; 4342 info->sctpi_max_burst = asoc->max_burst; 4343 info->sctpi_maxseg = asoc->frag_point; 4344 info->sctpi_peer_rwnd = asoc->peer.rwnd; 4345 info->sctpi_peer_tag = asoc->c.peer_vtag; 4346 4347 mask = asoc->peer.ecn_capable << 1; 4348 mask = (mask | asoc->peer.ipv4_address) << 1; 4349 mask = (mask | asoc->peer.ipv6_address) << 1; 4350 mask = (mask | asoc->peer.hostname_address) << 1; 4351 mask = (mask | asoc->peer.asconf_capable) << 1; 4352 mask = (mask | asoc->peer.prsctp_capable) << 1; 4353 mask = (mask | asoc->peer.auth_capable); 4354 info->sctpi_peer_capable = mask; 4355 mask = asoc->peer.sack_needed << 1; 4356 mask = (mask | asoc->peer.sack_generation) << 1; 4357 mask = (mask | asoc->peer.zero_window_announced); 4358 info->sctpi_peer_sack = mask; 4359 4360 info->sctpi_isacks = asoc->stats.isacks; 4361 info->sctpi_osacks = asoc->stats.osacks; 4362 info->sctpi_opackets = asoc->stats.opackets; 4363 info->sctpi_ipackets = asoc->stats.ipackets; 4364 info->sctpi_rtxchunks = asoc->stats.rtxchunks; 4365 info->sctpi_outofseqtsns = asoc->stats.outofseqtsns; 4366 info->sctpi_idupchunks = 
asoc->stats.idupchunks; 4367 info->sctpi_gapcnt = asoc->stats.gapcnt; 4368 info->sctpi_ouodchunks = asoc->stats.ouodchunks; 4369 info->sctpi_iuodchunks = asoc->stats.iuodchunks; 4370 info->sctpi_oodchunks = asoc->stats.oodchunks; 4371 info->sctpi_iodchunks = asoc->stats.iodchunks; 4372 info->sctpi_octrlchunks = asoc->stats.octrlchunks; 4373 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; 4374 4375 prim = asoc->peer.primary_path; 4376 memcpy(&info->sctpi_p_address, &prim->ipaddr, 4377 sizeof(struct sockaddr_storage)); 4378 info->sctpi_p_state = prim->state; 4379 info->sctpi_p_cwnd = prim->cwnd; 4380 info->sctpi_p_srtt = prim->srtt; 4381 info->sctpi_p_rto = jiffies_to_msecs(prim->rto); 4382 info->sctpi_p_hbinterval = prim->hbinterval; 4383 info->sctpi_p_pathmaxrxt = prim->pathmaxrxt; 4384 info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay); 4385 info->sctpi_p_ssthresh = prim->ssthresh; 4386 info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked; 4387 info->sctpi_p_flight_size = prim->flight_size; 4388 info->sctpi_p_error = prim->error_count; 4389 4390 return 0; 4391 } 4392 EXPORT_SYMBOL_GPL(sctp_get_sctp_info); 4393 4394 /* use callback to avoid exporting the core structure */ 4395 int sctp_transport_walk_start(struct rhashtable_iter *iter) 4396 { 4397 int err; 4398 4399 rhltable_walk_enter(&sctp_transport_hashtable, iter); 4400 4401 err = rhashtable_walk_start(iter); 4402 if (err && err != -EAGAIN) { 4403 rhashtable_walk_stop(iter); 4404 rhashtable_walk_exit(iter); 4405 return err; 4406 } 4407 4408 return 0; 4409 } 4410 4411 void sctp_transport_walk_stop(struct rhashtable_iter *iter) 4412 { 4413 rhashtable_walk_stop(iter); 4414 rhashtable_walk_exit(iter); 4415 } 4416 4417 struct sctp_transport *sctp_transport_get_next(struct net *net, 4418 struct rhashtable_iter *iter) 4419 { 4420 struct sctp_transport *t; 4421 4422 t = rhashtable_walk_next(iter); 4423 for (; t; t = rhashtable_walk_next(iter)) { 4424 if (IS_ERR(t)) { 4425 if (PTR_ERR(t) == -EAGAIN) 4426 continue; 4427 break; 4428 } 4429 4430 if (net_eq(sock_net(t->asoc->base.sk), net) && 4431 t->asoc->peer.primary_path == t) 4432 break; 4433 } 4434 4435 return t; 4436 } 4437 4438 struct sctp_transport *sctp_transport_get_idx(struct net *net, 4439 struct rhashtable_iter *iter, 4440 int pos) 4441 { 4442 void *obj = SEQ_START_TOKEN; 4443 4444 while (pos && (obj = sctp_transport_get_next(net, iter)) && 4445 !IS_ERR(obj)) 4446 pos--; 4447 4448 return obj; 4449 } 4450 4451 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), 4452 void *p) { 4453 int err = 0; 4454 int hash = 0; 4455 struct sctp_ep_common *epb; 4456 struct sctp_hashbucket *head; 4457 4458 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; 4459 hash++, head++) { 4460 read_lock(&head->lock); 4461 sctp_for_each_hentry(epb, &head->chain) { 4462 err = cb(sctp_ep(epb), p); 4463 if (err) 4464 break; 4465 } 4466 read_unlock(&head->lock); 4467 } 4468 4469 return err; 4470 } 4471 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint); 4472 4473 int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), 4474 struct net *net, 4475 const union sctp_addr *laddr, 4476 const union sctp_addr *paddr, void *p) 4477 { 4478 struct sctp_transport *transport; 4479 int err; 4480 4481 rcu_read_lock(); 4482 transport = sctp_addrs_lookup_transport(net, laddr, paddr); 4483 rcu_read_unlock(); 4484 if (!transport) 4485 return -ENOENT; 4486 4487 err = cb(transport, p); 4488 sctp_transport_put(transport); 4489 4490 return err; 4491 } 4492 
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); 4493 4494 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), 4495 struct net *net, int pos, void *p) { 4496 struct rhashtable_iter hti; 4497 void *obj; 4498 int err; 4499 4500 err = sctp_transport_walk_start(&hti); 4501 if (err) 4502 return err; 4503 4504 sctp_transport_get_idx(net, &hti, pos); 4505 obj = sctp_transport_get_next(net, &hti); 4506 for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { 4507 struct sctp_transport *transport = obj; 4508 4509 if (!sctp_transport_hold(transport)) 4510 continue; 4511 err = cb(transport, p); 4512 sctp_transport_put(transport); 4513 if (err) 4514 break; 4515 } 4516 sctp_transport_walk_stop(&hti); 4517 4518 return err; 4519 } 4520 EXPORT_SYMBOL_GPL(sctp_for_each_transport); 4521 4522 /* 7.2.1 Association Status (SCTP_STATUS) 4523 4524 * Applications can retrieve current status information about an 4525 * association, including association state, peer receiver window size, 4526 * number of unacked data chunks, and number of data chunks pending 4527 * receipt. This information is read-only. 4528 */ 4529 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4530 char __user *optval, 4531 int __user *optlen) 4532 { 4533 struct sctp_status status; 4534 struct sctp_association *asoc = NULL; 4535 struct sctp_transport *transport; 4536 sctp_assoc_t associd; 4537 int retval = 0; 4538 4539 if (len < sizeof(status)) { 4540 retval = -EINVAL; 4541 goto out; 4542 } 4543 4544 len = sizeof(status); 4545 if (copy_from_user(&status, optval, len)) { 4546 retval = -EFAULT; 4547 goto out; 4548 } 4549 4550 associd = status.sstat_assoc_id; 4551 asoc = sctp_id2assoc(sk, associd); 4552 if (!asoc) { 4553 retval = -EINVAL; 4554 goto out; 4555 } 4556 4557 transport = asoc->peer.primary_path; 4558 4559 status.sstat_assoc_id = sctp_assoc2id(asoc); 4560 status.sstat_state = sctp_assoc_to_state(asoc); 4561 status.sstat_rwnd = asoc->peer.rwnd; 4562 status.sstat_unackdata = asoc->unack_data; 4563 4564 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4565 status.sstat_instrms = asoc->c.sinit_max_instreams; 4566 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4567 status.sstat_fragmentation_point = asoc->frag_point; 4568 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4569 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4570 transport->af_specific->sockaddr_len); 4571 /* Map ipv4 address into v4-mapped-on-v6 address. 
*/ 4572 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 4573 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4574 status.sstat_primary.spinfo_state = transport->state; 4575 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4576 status.sstat_primary.spinfo_srtt = transport->srtt; 4577 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4578 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4579 4580 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4581 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4582 4583 if (put_user(len, optlen)) { 4584 retval = -EFAULT; 4585 goto out; 4586 } 4587 4588 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4589 __func__, len, status.sstat_state, status.sstat_rwnd, 4590 status.sstat_assoc_id); 4591 4592 if (copy_to_user(optval, &status, len)) { 4593 retval = -EFAULT; 4594 goto out; 4595 } 4596 4597 out: 4598 return retval; 4599 } 4600 4601 4602 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4603 * 4604 * Applications can retrieve information about a specific peer address 4605 * of an association, including its reachability state, congestion 4606 * window, and retransmission timer values. This information is 4607 * read-only. 4608 */ 4609 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4610 char __user *optval, 4611 int __user *optlen) 4612 { 4613 struct sctp_paddrinfo pinfo; 4614 struct sctp_transport *transport; 4615 int retval = 0; 4616 4617 if (len < sizeof(pinfo)) { 4618 retval = -EINVAL; 4619 goto out; 4620 } 4621 4622 len = sizeof(pinfo); 4623 if (copy_from_user(&pinfo, optval, len)) { 4624 retval = -EFAULT; 4625 goto out; 4626 } 4627 4628 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4629 pinfo.spinfo_assoc_id); 4630 if (!transport) 4631 return -EINVAL; 4632 4633 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4634 pinfo.spinfo_state = transport->state; 4635 pinfo.spinfo_cwnd = transport->cwnd; 4636 pinfo.spinfo_srtt = transport->srtt; 4637 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4638 pinfo.spinfo_mtu = transport->pathmtu; 4639 4640 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4641 pinfo.spinfo_state = SCTP_ACTIVE; 4642 4643 if (put_user(len, optlen)) { 4644 retval = -EFAULT; 4645 goto out; 4646 } 4647 4648 if (copy_to_user(optval, &pinfo, len)) { 4649 retval = -EFAULT; 4650 goto out; 4651 } 4652 4653 out: 4654 return retval; 4655 } 4656 4657 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4658 * 4659 * This option is a on/off flag. If enabled no SCTP message 4660 * fragmentation will be performed. Instead if a message being sent 4661 * exceeds the current PMTU size, the message will NOT be sent and 4662 * instead a error will be indicated to the user. 4663 */ 4664 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4665 char __user *optval, int __user *optlen) 4666 { 4667 int val; 4668 4669 if (len < sizeof(int)) 4670 return -EINVAL; 4671 4672 len = sizeof(int); 4673 val = (sctp_sk(sk)->disable_fragments == 1); 4674 if (put_user(len, optlen)) 4675 return -EFAULT; 4676 if (copy_to_user(optval, &val, len)) 4677 return -EFAULT; 4678 return 0; 4679 } 4680 4681 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4682 * 4683 * This socket option is used to specify various notifications and 4684 * ancillary data the user wishes to receive. 
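 *
 * A small user-space sketch, not part of the kernel build ('sd' is an
 * assumed existing SCTP socket): subscribe to data I/O and association
 * change notifications and leave everything else off.
 *
 *	struct sctp_event_subscribe events;
 *
 *	memset(&events, 0, sizeof(events));
 *	events.sctp_data_io_event = 1;
 *	events.sctp_association_event = 1;
 *	if (setsockopt(sd, SOL_SCTP, SCTP_EVENTS, &events, sizeof(events)) < 0)
 *		perror("SCTP_EVENTS");
 *
 * The current subscription can be read back through the getsockopt()
 * handler below.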
4685 */ 4686 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4687 int __user *optlen) 4688 { 4689 if (len == 0) 4690 return -EINVAL; 4691 if (len > sizeof(struct sctp_event_subscribe)) 4692 len = sizeof(struct sctp_event_subscribe); 4693 if (put_user(len, optlen)) 4694 return -EFAULT; 4695 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4696 return -EFAULT; 4697 return 0; 4698 } 4699 4700 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4701 * 4702 * This socket option is applicable to the UDP-style socket only. When 4703 * set it will cause associations that are idle for more than the 4704 * specified number of seconds to automatically close. An association 4705 * being idle is defined an association that has NOT sent or received 4706 * user data. The special value of '0' indicates that no automatic 4707 * close of any associations should be performed. The option expects an 4708 * integer defining the number of seconds of idle time before an 4709 * association is closed. 4710 */ 4711 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4712 { 4713 /* Applicable to UDP-style socket only */ 4714 if (sctp_style(sk, TCP)) 4715 return -EOPNOTSUPP; 4716 if (len < sizeof(int)) 4717 return -EINVAL; 4718 len = sizeof(int); 4719 if (put_user(len, optlen)) 4720 return -EFAULT; 4721 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4722 return -EFAULT; 4723 return 0; 4724 } 4725 4726 /* Helper routine to branch off an association to a new socket. */ 4727 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4728 { 4729 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4730 struct sctp_sock *sp = sctp_sk(sk); 4731 struct socket *sock; 4732 int err = 0; 4733 4734 if (!asoc) 4735 return -EINVAL; 4736 4737 /* An association cannot be branched off from an already peeled-off 4738 * socket, nor is this supported for tcp style sockets. 4739 */ 4740 if (!sctp_style(sk, UDP)) 4741 return -EINVAL; 4742 4743 /* Create a new socket. */ 4744 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4745 if (err < 0) 4746 return err; 4747 4748 sctp_copy_sock(sock->sk, sk, asoc); 4749 4750 /* Make peeled-off sockets more like 1-1 accepted sockets. 4751 * Set the daddr and initialize id to something more random 4752 */ 4753 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); 4754 4755 /* Populate the fields of the newsk from the oldsk and migrate the 4756 * asoc to the newsk. 4757 */ 4758 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4759 4760 *sockp = sock; 4761 4762 return err; 4763 } 4764 EXPORT_SYMBOL(sctp_do_peeloff); 4765 4766 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4767 { 4768 sctp_peeloff_arg_t peeloff; 4769 struct socket *newsock; 4770 struct file *newfile; 4771 int retval = 0; 4772 4773 if (len < sizeof(sctp_peeloff_arg_t)) 4774 return -EINVAL; 4775 len = sizeof(sctp_peeloff_arg_t); 4776 if (copy_from_user(&peeloff, optval, len)) 4777 return -EFAULT; 4778 4779 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4780 if (retval < 0) 4781 goto out; 4782 4783 /* Map the socket to an unused fd that can be returned to the user. 
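 * The usual three-step sequence: reserve a descriptor number, wrap the new
 * socket in a struct file, and only fd_install() it once the peeloff result
 * has been copied out, so the error paths can still unwind with
 * put_unused_fd()/fput() without ever exposing the descriptor to user space.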
*/ 4784 retval = get_unused_fd_flags(0); 4785 if (retval < 0) { 4786 sock_release(newsock); 4787 goto out; 4788 } 4789 4790 newfile = sock_alloc_file(newsock, 0, NULL); 4791 if (IS_ERR(newfile)) { 4792 put_unused_fd(retval); 4793 sock_release(newsock); 4794 return PTR_ERR(newfile); 4795 } 4796 4797 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4798 retval); 4799 4800 /* Return the fd mapped to the new socket. */ 4801 if (put_user(len, optlen)) { 4802 fput(newfile); 4803 put_unused_fd(retval); 4804 return -EFAULT; 4805 } 4806 peeloff.sd = retval; 4807 if (copy_to_user(optval, &peeloff, len)) { 4808 fput(newfile); 4809 put_unused_fd(retval); 4810 return -EFAULT; 4811 } 4812 fd_install(retval, newfile); 4813 out: 4814 return retval; 4815 } 4816 4817 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4818 * 4819 * Applications can enable or disable heartbeats for any peer address of 4820 * an association, modify an address's heartbeat interval, force a 4821 * heartbeat to be sent immediately, and adjust the address's maximum 4822 * number of retransmissions sent before an address is considered 4823 * unreachable. The following structure is used to access and modify an 4824 * address's parameters: 4825 * 4826 * struct sctp_paddrparams { 4827 * sctp_assoc_t spp_assoc_id; 4828 * struct sockaddr_storage spp_address; 4829 * uint32_t spp_hbinterval; 4830 * uint16_t spp_pathmaxrxt; 4831 * uint32_t spp_pathmtu; 4832 * uint32_t spp_sackdelay; 4833 * uint32_t spp_flags; 4834 * }; 4835 * 4836 * spp_assoc_id - (one-to-many style socket) This is filled in the 4837 * application, and identifies the association for 4838 * this query. 4839 * spp_address - This specifies which address is of interest. 4840 * spp_hbinterval - This contains the value of the heartbeat interval, 4841 * in milliseconds. If a value of zero 4842 * is present in this field then no changes are to 4843 * be made to this parameter. 4844 * spp_pathmaxrxt - This contains the maximum number of 4845 * retransmissions before this address shall be 4846 * considered unreachable. If a value of zero 4847 * is present in this field then no changes are to 4848 * be made to this parameter. 4849 * spp_pathmtu - When Path MTU discovery is disabled the value 4850 * specified here will be the "fixed" path mtu. 4851 * Note that if the spp_address field is empty 4852 * then all associations on this address will 4853 * have this fixed path mtu set upon them. 4854 * 4855 * spp_sackdelay - When delayed sack is enabled, this value specifies 4856 * the number of milliseconds that sacks will be delayed 4857 * for. This value will apply to all addresses of an 4858 * association if the spp_address field is empty. Note 4859 * also, that if delayed sack is enabled and this 4860 * value is set to 0, no change is made to the last 4861 * recorded delayed sack timer value. 4862 * 4863 * spp_flags - These flags are used to control various features 4864 * on an association. The flag field may contain 4865 * zero or more of the following options. 4866 * 4867 * SPP_HB_ENABLE - Enable heartbeats on the 4868 * specified address. Note that if the address 4869 * field is empty all addresses for the association 4870 * have heartbeats enabled upon them. 4871 * 4872 * SPP_HB_DISABLE - Disable heartbeats on the 4873 * speicifed address. Note that if the address 4874 * field is empty all addresses for the association 4875 * will have their heartbeats disabled. 
Note also 4876 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4877 * mutually exclusive; only one of these two should 4878 * be specified. Enabling both fields will have 4879 * undetermined results. 4880 * 4881 * SPP_HB_DEMAND - Request a user-initiated heartbeat 4882 * to be made immediately. 4883 * 4884 * SPP_PMTUD_ENABLE - This field will enable PMTU 4885 * discovery upon the specified address. Note that 4886 * if the address field is empty then all addresses 4887 * on the association are affected. 4888 * 4889 * SPP_PMTUD_DISABLE - This field will disable PMTU 4890 * discovery upon the specified address. Note that 4891 * if the address field is empty then all addresses 4892 * on the association are affected. Note also that 4893 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4894 * exclusive. Enabling both will have undetermined 4895 * results. 4896 * 4897 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4898 * on delayed sack. The time specified in spp_sackdelay 4899 * is used to specify the sack delay for this address. Note 4900 * that if spp_address is empty then all addresses will 4901 * enable delayed sack and take on the sack delay 4902 * value specified in spp_sackdelay. 4903 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4904 * off delayed sack. If the spp_address field is blank then 4905 * delayed sack is disabled for the entire association. Note 4906 * also that this field is mutually exclusive to 4907 * SPP_SACKDELAY_ENABLE; setting both will have undefined 4908 * results. 4909 */ 4910 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4911 char __user *optval, int __user *optlen) 4912 { 4913 struct sctp_paddrparams params; 4914 struct sctp_transport *trans = NULL; 4915 struct sctp_association *asoc = NULL; 4916 struct sctp_sock *sp = sctp_sk(sk); 4917
4918 if (len < sizeof(struct sctp_paddrparams)) 4919 return -EINVAL; 4920 len = sizeof(struct sctp_paddrparams); 4921 if (copy_from_user(&params, optval, len)) 4922 return -EFAULT; 4923
4924 /* If an address other than INADDR_ANY is specified, and 4925 * no transport is found, then the request is invalid. 4926 */ 4927 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 4928 trans = sctp_addr_id2transport(sk, &params.spp_address, 4929 params.spp_assoc_id); 4930 if (!trans) { 4931 pr_debug("%s: failed no transport\n", __func__); 4932 return -EINVAL; 4933 } 4934 } 4935
4936 /* Get association, if assoc_id != 0 and the socket is a one 4937 * to many style socket, and an association was not found, then 4938 * the id was invalid. 4939 */ 4940 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4941 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4942 pr_debug("%s: failed no association\n", __func__); 4943 return -EINVAL; 4944 } 4945
4946 if (trans) { 4947 /* Fetch transport values. */ 4948 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4949 params.spp_pathmtu = trans->pathmtu; 4950 params.spp_pathmaxrxt = trans->pathmaxrxt; 4951 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4952 4953 /*draft-11 doesn't say what to return in spp_flags*/ 4954 params.spp_flags = trans->param_flags; 4955 } else if (asoc) { 4956 /* Fetch association values.
*/ 4957 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4958 params.spp_pathmtu = asoc->pathmtu; 4959 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4960 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4961 4962 /*draft-11 doesn't say what to return in spp_flags*/ 4963 params.spp_flags = asoc->param_flags; 4964 } else { 4965 /* Fetch socket values. */ 4966 params.spp_hbinterval = sp->hbinterval; 4967 params.spp_pathmtu = sp->pathmtu; 4968 params.spp_sackdelay = sp->sackdelay; 4969 params.spp_pathmaxrxt = sp->pathmaxrxt; 4970 4971 /*draft-11 doesn't say what to return in spp_flags*/ 4972 params.spp_flags = sp->param_flags; 4973 } 4974
4975 if (copy_to_user(optval, &params, len)) 4976 return -EFAULT; 4977 4978 if (put_user(len, optlen)) 4979 return -EFAULT; 4980 4981 return 0; 4982 } 4983
4984 /* 4985 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4986 * 4987 * This option will affect the way delayed acks are performed. This 4988 * option allows you to get or set the delayed ack time, in 4989 * milliseconds. It also allows changing the delayed ack frequency. 4990 * Changing the frequency to 1 disables the delayed sack algorithm. If 4991 * the assoc_id is 0, then this sets or gets the endpoint's default 4992 * values. If the assoc_id field is non-zero, then the set or get 4993 * affects the specified association for the one to many model (the 4994 * assoc_id field is ignored by the one to one model). Note that if 4995 * sack_delay or sack_freq are 0 when setting this option, then the 4996 * current values will remain unchanged. 4997 * 4998 * struct sctp_sack_info { 4999 * sctp_assoc_t sack_assoc_id; 5000 * uint32_t sack_delay; 5001 * uint32_t sack_freq; 5002 * }; 5003 * 5004 * sack_assoc_id - This parameter indicates which association the user 5005 * is performing an action upon. Note that if this field's value is 5006 * zero then the endpoint's default value is changed (affecting future 5007 * associations only). 5008 * 5009 * sack_delay - This parameter contains the number of milliseconds that 5010 * the user is requesting the delayed ACK timer be set to. Note that 5011 * this value is defined in the standard to be between 200 and 500 5012 * milliseconds. 5013 * 5014 * sack_freq - This parameter contains the number of packets that must 5015 * be received before a sack is sent without waiting for the delay 5016 * timer to expire. The default value for this is 2; setting this 5017 * value to 1 will disable the delayed sack algorithm. 5018 */ 5019 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 5020 char __user *optval, 5021 int __user *optlen) 5022 { 5023 struct sctp_sack_info params; 5024 struct sctp_association *asoc = NULL; 5025 struct sctp_sock *sp = sctp_sk(sk); 5026
5027 if (len >= sizeof(struct sctp_sack_info)) { 5028 len = sizeof(struct sctp_sack_info); 5029 5030 if (copy_from_user(&params, optval, len)) 5031 return -EFAULT; 5032 } else if (len == sizeof(struct sctp_assoc_value)) { 5033 pr_warn_ratelimited(DEPRECATED 5034 "%s (pid %d) " 5035 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 5036 "Use struct sctp_sack_info instead\n", 5037 current->comm, task_pid_nr(current)); 5038 if (copy_from_user(&params, optval, len)) 5039 return -EFAULT; 5040 } else 5041 return -EINVAL; 5042
5043 /* Get association, if sack_assoc_id != 0 and the socket is a one 5044 * to many style socket, and an association was not found, then 5045 * the id was invalid.
5046 */ 5047 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 5048 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 5049 return -EINVAL; 5050
5051 if (asoc) { 5052 /* Fetch association values. */ 5053 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 5054 params.sack_delay = jiffies_to_msecs( 5055 asoc->sackdelay); 5056 params.sack_freq = asoc->sackfreq; 5057 5058 } else { 5059 params.sack_delay = 0; 5060 params.sack_freq = 1; 5061 } 5062 } else { 5063 /* Fetch socket values. */ 5064 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 5065 params.sack_delay = sp->sackdelay; 5066 params.sack_freq = sp->sackfreq; 5067 } else { 5068 params.sack_delay = 0; 5069 params.sack_freq = 1; 5070 } 5071 } 5072
5073 if (copy_to_user(optval, &params, len)) 5074 return -EFAULT; 5075 5076 if (put_user(len, optlen)) 5077 return -EFAULT; 5078 5079 return 0; 5080 } 5081
5082 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 5083 * 5084 * Applications can specify protocol parameters for the default association 5085 * initialization. The option name argument to setsockopt() and getsockopt() 5086 * is SCTP_INITMSG. 5087 * 5088 * Setting initialization parameters is effective only on an unconnected 5089 * socket (for UDP-style sockets only future associations are affected 5090 * by the change). With TCP-style sockets, this option is inherited by 5091 * sockets derived from a listener socket. 5092 */ 5093 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 5094 { 5095 if (len < sizeof(struct sctp_initmsg)) 5096 return -EINVAL; 5097 len = sizeof(struct sctp_initmsg); 5098 if (put_user(len, optlen)) 5099 return -EFAULT; 5100 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 5101 return -EFAULT; 5102 return 0; 5103 } 5104 5105
5106 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 5107 char __user *optval, int __user *optlen) 5108 { 5109 struct sctp_association *asoc; 5110 int cnt = 0; 5111 struct sctp_getaddrs getaddrs; 5112 struct sctp_transport *from; 5113 void __user *to; 5114 union sctp_addr temp; 5115 struct sctp_sock *sp = sctp_sk(sk); 5116 int addrlen; 5117 size_t space_left; 5118 int bytes_copied; 5119
5120 if (len < sizeof(struct sctp_getaddrs)) 5121 return -EINVAL; 5122 5123 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 5124 return -EFAULT; 5125 5126 /* For UDP-style sockets, id specifies the association to query.
*/ 5127 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 5128 if (!asoc) 5129 return -EINVAL; 5130 5131 to = optval + offsetof(struct sctp_getaddrs, addrs); 5132 space_left = len - offsetof(struct sctp_getaddrs, addrs); 5133 5134 list_for_each_entry(from, &asoc->peer.transport_addr_list, 5135 transports) { 5136 memcpy(&temp, &from->ipaddr, sizeof(temp)); 5137 addrlen = sctp_get_pf_specific(sk->sk_family) 5138 ->addr_to_user(sp, &temp); 5139 if (space_left < addrlen) 5140 return -ENOMEM; 5141 if (copy_to_user(to, &temp, addrlen)) 5142 return -EFAULT; 5143 to += addrlen; 5144 cnt++; 5145 space_left -= addrlen; 5146 } 5147 5148 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 5149 return -EFAULT; 5150 bytes_copied = ((char __user *)to) - optval; 5151 if (put_user(bytes_copied, optlen)) 5152 return -EFAULT; 5153 5154 return 0; 5155 } 5156 5157 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 5158 size_t space_left, int *bytes_copied) 5159 { 5160 struct sctp_sockaddr_entry *addr; 5161 union sctp_addr temp; 5162 int cnt = 0; 5163 int addrlen; 5164 struct net *net = sock_net(sk); 5165 5166 rcu_read_lock(); 5167 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 5168 if (!addr->valid) 5169 continue; 5170 5171 if ((PF_INET == sk->sk_family) && 5172 (AF_INET6 == addr->a.sa.sa_family)) 5173 continue; 5174 if ((PF_INET6 == sk->sk_family) && 5175 inet_v6_ipv6only(sk) && 5176 (AF_INET == addr->a.sa.sa_family)) 5177 continue; 5178 memcpy(&temp, &addr->a, sizeof(temp)); 5179 if (!temp.v4.sin_port) 5180 temp.v4.sin_port = htons(port); 5181 5182 addrlen = sctp_get_pf_specific(sk->sk_family) 5183 ->addr_to_user(sctp_sk(sk), &temp); 5184 5185 if (space_left < addrlen) { 5186 cnt = -ENOMEM; 5187 break; 5188 } 5189 memcpy(to, &temp, addrlen); 5190 5191 to += addrlen; 5192 cnt++; 5193 space_left -= addrlen; 5194 *bytes_copied += addrlen; 5195 } 5196 rcu_read_unlock(); 5197 5198 return cnt; 5199 } 5200 5201 5202 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 5203 char __user *optval, int __user *optlen) 5204 { 5205 struct sctp_bind_addr *bp; 5206 struct sctp_association *asoc; 5207 int cnt = 0; 5208 struct sctp_getaddrs getaddrs; 5209 struct sctp_sockaddr_entry *addr; 5210 void __user *to; 5211 union sctp_addr temp; 5212 struct sctp_sock *sp = sctp_sk(sk); 5213 int addrlen; 5214 int err = 0; 5215 size_t space_left; 5216 int bytes_copied = 0; 5217 void *addrs; 5218 void *buf; 5219 5220 if (len < sizeof(struct sctp_getaddrs)) 5221 return -EINVAL; 5222 5223 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 5224 return -EFAULT; 5225 5226 /* 5227 * For UDP-style sockets, id specifies the association to query. 5228 * If the id field is set to the value '0' then the locally bound 5229 * addresses are returned without regard to any particular 5230 * association. 5231 */ 5232 if (0 == getaddrs.assoc_id) { 5233 bp = &sctp_sk(sk)->ep->base.bind_addr; 5234 } else { 5235 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 5236 if (!asoc) 5237 return -EINVAL; 5238 bp = &asoc->base.bind_addr; 5239 } 5240 5241 to = optval + offsetof(struct sctp_getaddrs, addrs); 5242 space_left = len - offsetof(struct sctp_getaddrs, addrs); 5243 5244 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); 5245 if (!addrs) 5246 return -ENOMEM; 5247 5248 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 5249 * addresses from the global local address list. 
5250 */ 5251 if (sctp_list_single_entry(&bp->address_list)) { 5252 addr = list_entry(bp->address_list.next, 5253 struct sctp_sockaddr_entry, list); 5254 if (sctp_is_any(sk, &addr->a)) { 5255 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 5256 space_left, &bytes_copied); 5257 if (cnt < 0) { 5258 err = cnt; 5259 goto out; 5260 } 5261 goto copy_getaddrs; 5262 } 5263 } 5264 5265 buf = addrs; 5266 /* Protection on the bound address list is not needed since 5267 * in the socket option context we hold a socket lock and 5268 * thus the bound address list can't change. 5269 */ 5270 list_for_each_entry(addr, &bp->address_list, list) { 5271 memcpy(&temp, &addr->a, sizeof(temp)); 5272 addrlen = sctp_get_pf_specific(sk->sk_family) 5273 ->addr_to_user(sp, &temp); 5274 if (space_left < addrlen) { 5275 err = -ENOMEM; /*fixme: right error?*/ 5276 goto out; 5277 } 5278 memcpy(buf, &temp, addrlen); 5279 buf += addrlen; 5280 bytes_copied += addrlen; 5281 cnt++; 5282 space_left -= addrlen; 5283 } 5284 5285 copy_getaddrs: 5286 if (copy_to_user(to, addrs, bytes_copied)) { 5287 err = -EFAULT; 5288 goto out; 5289 } 5290 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 5291 err = -EFAULT; 5292 goto out; 5293 } 5294 if (put_user(bytes_copied, optlen)) 5295 err = -EFAULT; 5296 out: 5297 kfree(addrs); 5298 return err; 5299 } 5300 5301 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 5302 * 5303 * Requests that the local SCTP stack use the enclosed peer address as 5304 * the association primary. The enclosed address must be one of the 5305 * association peer's addresses. 5306 */ 5307 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 5308 char __user *optval, int __user *optlen) 5309 { 5310 struct sctp_prim prim; 5311 struct sctp_association *asoc; 5312 struct sctp_sock *sp = sctp_sk(sk); 5313 5314 if (len < sizeof(struct sctp_prim)) 5315 return -EINVAL; 5316 5317 len = sizeof(struct sctp_prim); 5318 5319 if (copy_from_user(&prim, optval, len)) 5320 return -EFAULT; 5321 5322 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 5323 if (!asoc) 5324 return -EINVAL; 5325 5326 if (!asoc->peer.primary_path) 5327 return -ENOTCONN; 5328 5329 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 5330 asoc->peer.primary_path->af_specific->sockaddr_len); 5331 5332 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, 5333 (union sctp_addr *)&prim.ssp_addr); 5334 5335 if (put_user(len, optlen)) 5336 return -EFAULT; 5337 if (copy_to_user(optval, &prim, len)) 5338 return -EFAULT; 5339 5340 return 0; 5341 } 5342 5343 /* 5344 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 5345 * 5346 * Requests that the local endpoint set the specified Adaptation Layer 5347 * Indication parameter for all future INIT and INIT-ACK exchanges. 
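 *
 * A minimal user-space sketch, not part of the kernel build ('sd' is an
 * assumed existing SCTP socket, the indication value is illustrative):
 *
 *	struct sctp_setadaptation adapt = { 0 };
 *
 *	adapt.ssb_adaptation_ind = 0x01020304;
 *	setsockopt(sd, SOL_SCTP, SCTP_ADAPTATION_LAYER, &adapt, sizeof(adapt));
 *
 * The currently configured value can be read back with getsockopt() using
 * the same structure, as implemented below.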
5348 */ 5349 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 5350 char __user *optval, int __user *optlen) 5351 { 5352 struct sctp_setadaptation adaptation; 5353 5354 if (len < sizeof(struct sctp_setadaptation)) 5355 return -EINVAL; 5356 5357 len = sizeof(struct sctp_setadaptation); 5358 5359 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 5360 5361 if (put_user(len, optlen)) 5362 return -EFAULT; 5363 if (copy_to_user(optval, &adaptation, len)) 5364 return -EFAULT; 5365 5366 return 0; 5367 } 5368 5369 /* 5370 * 5371 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 5372 * 5373 * Applications that wish to use the sendto() system call may wish to 5374 * specify a default set of parameters that would normally be supplied 5375 * through the inclusion of ancillary data. This socket option allows 5376 * such an application to set the default sctp_sndrcvinfo structure. 5377 5378 5379 * The application that wishes to use this socket option simply passes 5380 * in to this call the sctp_sndrcvinfo structure defined in Section 5381 * 5.2.2) The input parameters accepted by this call include 5382 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 5383 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 5384 * to this call if the caller is using the UDP model. 5385 * 5386 * For getsockopt, it get the default sctp_sndrcvinfo structure. 5387 */ 5388 static int sctp_getsockopt_default_send_param(struct sock *sk, 5389 int len, char __user *optval, 5390 int __user *optlen) 5391 { 5392 struct sctp_sock *sp = sctp_sk(sk); 5393 struct sctp_association *asoc; 5394 struct sctp_sndrcvinfo info; 5395 5396 if (len < sizeof(info)) 5397 return -EINVAL; 5398 5399 len = sizeof(info); 5400 5401 if (copy_from_user(&info, optval, len)) 5402 return -EFAULT; 5403 5404 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 5405 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 5406 return -EINVAL; 5407 if (asoc) { 5408 info.sinfo_stream = asoc->default_stream; 5409 info.sinfo_flags = asoc->default_flags; 5410 info.sinfo_ppid = asoc->default_ppid; 5411 info.sinfo_context = asoc->default_context; 5412 info.sinfo_timetolive = asoc->default_timetolive; 5413 } else { 5414 info.sinfo_stream = sp->default_stream; 5415 info.sinfo_flags = sp->default_flags; 5416 info.sinfo_ppid = sp->default_ppid; 5417 info.sinfo_context = sp->default_context; 5418 info.sinfo_timetolive = sp->default_timetolive; 5419 } 5420 5421 if (put_user(len, optlen)) 5422 return -EFAULT; 5423 if (copy_to_user(optval, &info, len)) 5424 return -EFAULT; 5425 5426 return 0; 5427 } 5428 5429 /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters 5430 * (SCTP_DEFAULT_SNDINFO) 5431 */ 5432 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, 5433 char __user *optval, 5434 int __user *optlen) 5435 { 5436 struct sctp_sock *sp = sctp_sk(sk); 5437 struct sctp_association *asoc; 5438 struct sctp_sndinfo info; 5439 5440 if (len < sizeof(info)) 5441 return -EINVAL; 5442 5443 len = sizeof(info); 5444 5445 if (copy_from_user(&info, optval, len)) 5446 return -EFAULT; 5447 5448 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 5449 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 5450 return -EINVAL; 5451 if (asoc) { 5452 info.snd_sid = asoc->default_stream; 5453 info.snd_flags = asoc->default_flags; 5454 info.snd_ppid = asoc->default_ppid; 5455 info.snd_context = asoc->default_context; 5456 } else { 5457 info.snd_sid = sp->default_stream; 5458 info.snd_flags = sp->default_flags; 5459 info.snd_ppid = sp->default_ppid; 5460 info.snd_context = sp->default_context; 5461 } 5462 5463 if (put_user(len, optlen)) 5464 return -EFAULT; 5465 if (copy_to_user(optval, &info, len)) 5466 return -EFAULT; 5467 5468 return 0; 5469 } 5470 5471 /* 5472 * 5473 * 7.1.5 SCTP_NODELAY 5474 * 5475 * Turn on/off any Nagle-like algorithm. This means that packets are 5476 * generally sent as soon as possible and no unnecessary delays are 5477 * introduced, at the cost of more packets in the network. Expects an 5478 * integer boolean flag. 5479 */ 5480 5481 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5482 char __user *optval, int __user *optlen) 5483 { 5484 int val; 5485 5486 if (len < sizeof(int)) 5487 return -EINVAL; 5488 5489 len = sizeof(int); 5490 val = (sctp_sk(sk)->nodelay == 1); 5491 if (put_user(len, optlen)) 5492 return -EFAULT; 5493 if (copy_to_user(optval, &val, len)) 5494 return -EFAULT; 5495 return 0; 5496 } 5497 5498 /* 5499 * 5500 * 7.1.1 SCTP_RTOINFO 5501 * 5502 * The protocol parameters used to initialize and bound retransmission 5503 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5504 * and modify these parameters. 5505 * All parameters are time values, in milliseconds. A value of 0, when 5506 * modifying the parameters, indicates that the current value should not 5507 * be changed. 5508 * 5509 */ 5510 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5511 char __user *optval, 5512 int __user *optlen) { 5513 struct sctp_rtoinfo rtoinfo; 5514 struct sctp_association *asoc; 5515 5516 if (len < sizeof (struct sctp_rtoinfo)) 5517 return -EINVAL; 5518 5519 len = sizeof(struct sctp_rtoinfo); 5520 5521 if (copy_from_user(&rtoinfo, optval, len)) 5522 return -EFAULT; 5523 5524 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5525 5526 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5527 return -EINVAL; 5528 5529 /* Values corresponding to the specific association. */ 5530 if (asoc) { 5531 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5532 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5533 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5534 } else { 5535 /* Values corresponding to the endpoint. 
*/ 5536 struct sctp_sock *sp = sctp_sk(sk); 5537 5538 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5539 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5540 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5541 } 5542 5543 if (put_user(len, optlen)) 5544 return -EFAULT; 5545 5546 if (copy_to_user(optval, &rtoinfo, len)) 5547 return -EFAULT; 5548 5549 return 0; 5550 } 5551 5552 /* 5553 * 5554 * 7.1.2 SCTP_ASSOCINFO 5555 * 5556 * This option is used to tune the maximum retransmission attempts 5557 * of the association. 5558 * Returns an error if the new association retransmission value is 5559 * greater than the sum of the retransmission value of the peer. 5560 * See [SCTP] for more information. 5561 * 5562 */ 5563 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5564 char __user *optval, 5565 int __user *optlen) 5566 { 5567 5568 struct sctp_assocparams assocparams; 5569 struct sctp_association *asoc; 5570 struct list_head *pos; 5571 int cnt = 0; 5572 5573 if (len < sizeof (struct sctp_assocparams)) 5574 return -EINVAL; 5575 5576 len = sizeof(struct sctp_assocparams); 5577 5578 if (copy_from_user(&assocparams, optval, len)) 5579 return -EFAULT; 5580 5581 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5582 5583 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5584 return -EINVAL; 5585 5586 /* Values corresponding to the specific association */ 5587 if (asoc) { 5588 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5589 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5590 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5591 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5592 5593 list_for_each(pos, &asoc->peer.transport_addr_list) { 5594 cnt++; 5595 } 5596 5597 assocparams.sasoc_number_peer_destinations = cnt; 5598 } else { 5599 /* Values corresponding to the endpoint */ 5600 struct sctp_sock *sp = sctp_sk(sk); 5601 5602 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5603 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5604 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5605 assocparams.sasoc_cookie_life = 5606 sp->assocparams.sasoc_cookie_life; 5607 assocparams.sasoc_number_peer_destinations = 5608 sp->assocparams. 5609 sasoc_number_peer_destinations; 5610 } 5611 5612 if (put_user(len, optlen)) 5613 return -EFAULT; 5614 5615 if (copy_to_user(optval, &assocparams, len)) 5616 return -EFAULT; 5617 5618 return 0; 5619 } 5620 5621 /* 5622 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5623 * 5624 * This socket option is a boolean flag which turns on or off mapped V4 5625 * addresses. If this option is turned on and the socket is type 5626 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5627 * If this option is turned off, then no mapping will be done of V4 5628 * addresses and a user will receive both PF_INET6 and PF_INET type 5629 * addresses on the socket. 5630 */ 5631 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5632 char __user *optval, int __user *optlen) 5633 { 5634 int val; 5635 struct sctp_sock *sp = sctp_sk(sk); 5636 5637 if (len < sizeof(int)) 5638 return -EINVAL; 5639 5640 len = sizeof(int); 5641 val = sp->v4mapped; 5642 if (put_user(len, optlen)) 5643 return -EFAULT; 5644 if (copy_to_user(optval, &val, len)) 5645 return -EFAULT; 5646 5647 return 0; 5648 } 5649 5650 /* 5651 * 7.1.29.
Set or Get the default context (SCTP_CONTEXT) 5652 * (chapter and verse is quoted at sctp_setsockopt_context()) 5653 */ 5654 static int sctp_getsockopt_context(struct sock *sk, int len, 5655 char __user *optval, int __user *optlen) 5656 { 5657 struct sctp_assoc_value params; 5658 struct sctp_sock *sp; 5659 struct sctp_association *asoc; 5660 5661 if (len < sizeof(struct sctp_assoc_value)) 5662 return -EINVAL; 5663 5664 len = sizeof(struct sctp_assoc_value); 5665 5666 if (copy_from_user(&params, optval, len)) 5667 return -EFAULT; 5668 5669 sp = sctp_sk(sk); 5670 5671 if (params.assoc_id != 0) { 5672 asoc = sctp_id2assoc(sk, params.assoc_id); 5673 if (!asoc) 5674 return -EINVAL; 5675 params.assoc_value = asoc->default_rcv_context; 5676 } else { 5677 params.assoc_value = sp->default_rcv_context; 5678 } 5679 5680 if (put_user(len, optlen)) 5681 return -EFAULT; 5682 if (copy_to_user(optval, &params, len)) 5683 return -EFAULT; 5684 5685 return 0; 5686 } 5687 5688 /* 5689 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5690 * This option will get or set the maximum size to put in any outgoing 5691 * SCTP DATA chunk. If a message is larger than this size it will be 5692 * fragmented by SCTP into the specified size. Note that the underlying 5693 * SCTP implementation may fragment into smaller sized chunks when the 5694 * PMTU of the underlying association is smaller than the value set by 5695 * the user. The default value for this option is '0' which indicates 5696 * the user is NOT limiting fragmentation and only the PMTU will affect 5697 * SCTP's choice of DATA chunk size. Note also that values set larger 5698 * than the maximum size of an IP datagram will effectively let SCTP 5699 * control fragmentation (i.e. the same as setting this option to 0). 5700 * 5701 * The following structure is used to access and modify this parameter: 5702 * 5703 * struct sctp_assoc_value { 5704 * sctp_assoc_t assoc_id; 5705 * uint32_t assoc_value; 5706 * }; 5707 * 5708 * assoc_id: This parameter is ignored for one-to-one style sockets. 5709 * For one-to-many style sockets this parameter indicates which 5710 * association the user is performing an action upon. Note that if 5711 * this field's value is zero then the endpoint's default value is 5712 * changed (affecting future associations only). 5713 * assoc_value: This parameter specifies the maximum size in bytes.
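 *
 * A minimal user-space sketch (illustrative only; assumes 'fd' is an
 * open SCTP socket and <netinet/sctp.h> is included) of capping DATA
 * chunks at 1400 bytes and reading the value back:
 *
 *   struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1400 };
 *   socklen_t optlen = sizeof(av);
 *
 *   setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *   getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen);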
5714 */ 5715 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5716 char __user *optval, int __user *optlen) 5717 { 5718 struct sctp_assoc_value params; 5719 struct sctp_association *asoc; 5720 5721 if (len == sizeof(int)) { 5722 pr_warn_ratelimited(DEPRECATED 5723 "%s (pid %d) " 5724 "Use of int in maxseg socket option.\n" 5725 "Use struct sctp_assoc_value instead\n", 5726 current->comm, task_pid_nr(current)); 5727 params.assoc_id = 0; 5728 } else if (len >= sizeof(struct sctp_assoc_value)) { 5729 len = sizeof(struct sctp_assoc_value); 5730 if (copy_from_user(&params, optval, sizeof(params))) 5731 return -EFAULT; 5732 } else 5733 return -EINVAL; 5734 5735 asoc = sctp_id2assoc(sk, params.assoc_id); 5736 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5737 return -EINVAL; 5738 5739 if (asoc) 5740 params.assoc_value = asoc->frag_point; 5741 else 5742 params.assoc_value = sctp_sk(sk)->user_frag; 5743 5744 if (put_user(len, optlen)) 5745 return -EFAULT; 5746 if (len == sizeof(int)) { 5747 if (copy_to_user(optval, &params.assoc_value, len)) 5748 return -EFAULT; 5749 } else { 5750 if (copy_to_user(optval, &params, len)) 5751 return -EFAULT; 5752 } 5753 5754 return 0; 5755 } 5756 5757 /* 5758 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5759 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5760 */ 5761 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5762 char __user *optval, int __user *optlen) 5763 { 5764 int val; 5765 5766 if (len < sizeof(int)) 5767 return -EINVAL; 5768 5769 len = sizeof(int); 5770 5771 val = sctp_sk(sk)->frag_interleave; 5772 if (put_user(len, optlen)) 5773 return -EFAULT; 5774 if (copy_to_user(optval, &val, len)) 5775 return -EFAULT; 5776 5777 return 0; 5778 } 5779 5780 /* 5781 * 7.1.25. Set or Get the sctp partial delivery point 5782 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5783 */ 5784 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5785 char __user *optval, 5786 int __user *optlen) 5787 { 5788 u32 val; 5789 5790 if (len < sizeof(u32)) 5791 return -EINVAL; 5792 5793 len = sizeof(u32); 5794 5795 val = sctp_sk(sk)->pd_point; 5796 if (put_user(len, optlen)) 5797 return -EFAULT; 5798 if (copy_to_user(optval, &val, len)) 5799 return -EFAULT; 5800 5801 return 0; 5802 } 5803 5804 /* 5805 * 7.1.28.
Set or Get the maximum burst (SCTP_MAX_BURST) 5806 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5807 */ 5808 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5809 char __user *optval, 5810 int __user *optlen) 5811 { 5812 struct sctp_assoc_value params; 5813 struct sctp_sock *sp; 5814 struct sctp_association *asoc; 5815 5816 if (len == sizeof(int)) { 5817 pr_warn_ratelimited(DEPRECATED 5818 "%s (pid %d) " 5819 "Use of int in max_burst socket option.\n" 5820 "Use struct sctp_assoc_value instead\n", 5821 current->comm, task_pid_nr(current)); 5822 params.assoc_id = 0; 5823 } else if (len >= sizeof(struct sctp_assoc_value)) { 5824 len = sizeof(struct sctp_assoc_value); 5825 if (copy_from_user(&params, optval, len)) 5826 return -EFAULT; 5827 } else 5828 return -EINVAL; 5829 5830 sp = sctp_sk(sk); 5831 5832 if (params.assoc_id != 0) { 5833 asoc = sctp_id2assoc(sk, params.assoc_id); 5834 if (!asoc) 5835 return -EINVAL; 5836 params.assoc_value = asoc->max_burst; 5837 } else 5838 params.assoc_value = sp->max_burst; 5839 5840 if (len == sizeof(int)) { 5841 if (copy_to_user(optval, &params.assoc_value, len)) 5842 return -EFAULT; 5843 } else { 5844 if (copy_to_user(optval, &params, len)) 5845 return -EFAULT; 5846 } 5847 5848 return 0; 5849 5850 } 5851 5852 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5853 char __user *optval, int __user *optlen) 5854 { 5855 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5856 struct sctp_hmacalgo __user *p = (void __user *)optval; 5857 struct sctp_hmac_algo_param *hmacs; 5858 __u16 data_len = 0; 5859 u32 num_idents; 5860 int i; 5861 5862 if (!ep->auth_enable) 5863 return -EACCES; 5864 5865 hmacs = ep->auth_hmacs_list; 5866 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5867 5868 if (len < sizeof(struct sctp_hmacalgo) + data_len) 5869 return -EINVAL; 5870 5871 len = sizeof(struct sctp_hmacalgo) + data_len; 5872 num_idents = data_len / sizeof(u16); 5873 5874 if (put_user(len, optlen)) 5875 return -EFAULT; 5876 if (put_user(num_idents, &p->shmac_num_idents)) 5877 return -EFAULT; 5878 for (i = 0; i < num_idents; i++) { 5879 __u16 hmacid = ntohs(hmacs->hmac_ids[i]); 5880 5881 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16))) 5882 return -EFAULT; 5883 } 5884 return 0; 5885 } 5886 5887 static int sctp_getsockopt_active_key(struct sock *sk, int len, 5888 char __user *optval, int __user *optlen) 5889 { 5890 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5891 struct sctp_authkeyid val; 5892 struct sctp_association *asoc; 5893 5894 if (!ep->auth_enable) 5895 return -EACCES; 5896 5897 if (len < sizeof(struct sctp_authkeyid)) 5898 return -EINVAL; 5899 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5900 return -EFAULT; 5901 5902 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5903 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5904 return -EINVAL; 5905 5906 if (asoc) 5907 val.scact_keynumber = asoc->active_key_id; 5908 else 5909 val.scact_keynumber = ep->active_key_id; 5910 5911 len = sizeof(struct sctp_authkeyid); 5912 if (put_user(len, optlen)) 5913 return -EFAULT; 5914 if (copy_to_user(optval, &val, len)) 5915 return -EFAULT; 5916 5917 return 0; 5918 } 5919 5920 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5921 char __user *optval, int __user *optlen) 5922 { 5923 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5924 struct sctp_authchunks __user *p = (void __user *)optval; 5925 struct sctp_authchunks val; 5926 struct sctp_association *asoc; 5927 struct sctp_chunks_param
*ch; 5928 u32 num_chunks = 0; 5929 char __user *to; 5930 5931 if (!ep->auth_enable) 5932 return -EACCES; 5933 5934 if (len < sizeof(struct sctp_authchunks)) 5935 return -EINVAL; 5936 5937 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5938 return -EFAULT; 5939 5940 to = p->gauth_chunks; 5941 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5942 if (!asoc) 5943 return -EINVAL; 5944 5945 ch = asoc->peer.peer_chunks; 5946 if (!ch) 5947 goto num; 5948 5949 /* See if the user provided enough room for all the data */ 5950 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5951 if (len < num_chunks) 5952 return -EINVAL; 5953 5954 if (copy_to_user(to, ch->chunks, num_chunks)) 5955 return -EFAULT; 5956 num: 5957 len = sizeof(struct sctp_authchunks) + num_chunks; 5958 if (put_user(len, optlen)) 5959 return -EFAULT; 5960 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5961 return -EFAULT; 5962 return 0; 5963 } 5964 5965 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5966 char __user *optval, int __user *optlen) 5967 { 5968 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5969 struct sctp_authchunks __user *p = (void __user *)optval; 5970 struct sctp_authchunks val; 5971 struct sctp_association *asoc; 5972 struct sctp_chunks_param *ch; 5973 u32 num_chunks = 0; 5974 char __user *to; 5975 5976 if (!ep->auth_enable) 5977 return -EACCES; 5978 5979 if (len < sizeof(struct sctp_authchunks)) 5980 return -EINVAL; 5981 5982 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5983 return -EFAULT; 5984 5985 to = p->gauth_chunks; 5986 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5987 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5988 return -EINVAL; 5989 5990 if (asoc) 5991 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 5992 else 5993 ch = ep->auth_chunk_list; 5994 5995 if (!ch) 5996 goto num; 5997 5998 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5999 if (len < sizeof(struct sctp_authchunks) + num_chunks) 6000 return -EINVAL; 6001 6002 if (copy_to_user(to, ch->chunks, num_chunks)) 6003 return -EFAULT; 6004 num: 6005 len = sizeof(struct sctp_authchunks) + num_chunks; 6006 if (put_user(len, optlen)) 6007 return -EFAULT; 6008 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 6009 return -EFAULT; 6010 6011 return 0; 6012 } 6013 6014 /* 6015 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 6016 * This option gets the current number of associations that are attached 6017 * to a one-to-many style socket. The option value is an uint32_t. 
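 *
 * A user-space sketch (illustrative; assumes 'fd' is a one-to-many
 * SCTP socket, i.e. SOCK_SEQPACKET, and <stdio.h> plus
 * <netinet/sctp.h> are included):
 *
 *   uint32_t assoc_cnt = 0;
 *   socklen_t optlen = sizeof(assoc_cnt);
 *
 *   if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *                  &assoc_cnt, &optlen) == 0)
 *       printf("%u associations\n", assoc_cnt);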
6018 */ 6019 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 6020 char __user *optval, int __user *optlen) 6021 { 6022 struct sctp_sock *sp = sctp_sk(sk); 6023 struct sctp_association *asoc; 6024 u32 val = 0; 6025 6026 if (sctp_style(sk, TCP)) 6027 return -EOPNOTSUPP; 6028 6029 if (len < sizeof(u32)) 6030 return -EINVAL; 6031 6032 len = sizeof(u32); 6033 6034 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6035 val++; 6036 } 6037 6038 if (put_user(len, optlen)) 6039 return -EFAULT; 6040 if (copy_to_user(optval, &val, len)) 6041 return -EFAULT; 6042 6043 return 0; 6044 } 6045 6046 /* 6047 * 8.1.23 SCTP_AUTO_ASCONF 6048 * See the corresponding setsockopt entry as description 6049 */ 6050 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 6051 char __user *optval, int __user *optlen) 6052 { 6053 int val = 0; 6054 6055 if (len < sizeof(int)) 6056 return -EINVAL; 6057 6058 len = sizeof(int); 6059 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 6060 val = 1; 6061 if (put_user(len, optlen)) 6062 return -EFAULT; 6063 if (copy_to_user(optval, &val, len)) 6064 return -EFAULT; 6065 return 0; 6066 } 6067 6068 /* 6069 * 8.2.6. Get the Current Identifiers of Associations 6070 * (SCTP_GET_ASSOC_ID_LIST) 6071 * 6072 * This option gets the current list of SCTP association identifiers of 6073 * the SCTP associations handled by a one-to-many style socket. 6074 */ 6075 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 6076 char __user *optval, int __user *optlen) 6077 { 6078 struct sctp_sock *sp = sctp_sk(sk); 6079 struct sctp_association *asoc; 6080 struct sctp_assoc_ids *ids; 6081 u32 num = 0; 6082 6083 if (sctp_style(sk, TCP)) 6084 return -EOPNOTSUPP; 6085 6086 if (len < sizeof(struct sctp_assoc_ids)) 6087 return -EINVAL; 6088 6089 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6090 num++; 6091 } 6092 6093 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 6094 return -EINVAL; 6095 6096 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 6097 6098 ids = kmalloc(len, GFP_USER | __GFP_NOWARN); 6099 if (unlikely(!ids)) 6100 return -ENOMEM; 6101 6102 ids->gaids_number_of_ids = num; 6103 num = 0; 6104 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6105 ids->gaids_assoc_id[num++] = asoc->assoc_id; 6106 } 6107 6108 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 6109 kfree(ids); 6110 return -EFAULT; 6111 } 6112 6113 kfree(ids); 6114 return 0; 6115 } 6116 6117 /* 6118 * SCTP_PEER_ADDR_THLDS 6119 * 6120 * This option allows us to fetch the partially failed threshold for one or all 6121 * transports in an association. 
See Section 6.1 of: 6122 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 6123 */ 6124 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 6125 char __user *optval, 6126 int len, 6127 int __user *optlen) 6128 { 6129 struct sctp_paddrthlds val; 6130 struct sctp_transport *trans; 6131 struct sctp_association *asoc; 6132 6133 if (len < sizeof(struct sctp_paddrthlds)) 6134 return -EINVAL; 6135 len = sizeof(struct sctp_paddrthlds); 6136 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 6137 return -EFAULT; 6138 6139 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 6140 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 6141 if (!asoc) 6142 return -ENOENT; 6143 6144 val.spt_pathpfthld = asoc->pf_retrans; 6145 val.spt_pathmaxrxt = asoc->pathmaxrxt; 6146 } else { 6147 trans = sctp_addr_id2transport(sk, &val.spt_address, 6148 val.spt_assoc_id); 6149 if (!trans) 6150 return -ENOENT; 6151 6152 val.spt_pathmaxrxt = trans->pathmaxrxt; 6153 val.spt_pathpfthld = trans->pf_retrans; 6154 } 6155 6156 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 6157 return -EFAULT; 6158 6159 return 0; 6160 } 6161 6162 /* 6163 * SCTP_GET_ASSOC_STATS 6164 * 6165 * This option retrieves local per endpoint statistics. It is modeled 6166 * after OpenSolaris' implementation 6167 */ 6168 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 6169 char __user *optval, 6170 int __user *optlen) 6171 { 6172 struct sctp_assoc_stats sas; 6173 struct sctp_association *asoc = NULL; 6174 6175 /* User must provide at least the assoc id */ 6176 if (len < sizeof(sctp_assoc_t)) 6177 return -EINVAL; 6178 6179 /* Allow the struct to grow and fill in as much as possible */ 6180 len = min_t(size_t, len, sizeof(sas)); 6181 6182 if (copy_from_user(&sas, optval, len)) 6183 return -EFAULT; 6184 6185 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 6186 if (!asoc) 6187 return -EINVAL; 6188 6189 sas.sas_rtxchunks = asoc->stats.rtxchunks; 6190 sas.sas_gapcnt = asoc->stats.gapcnt; 6191 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 6192 sas.sas_osacks = asoc->stats.osacks; 6193 sas.sas_isacks = asoc->stats.isacks; 6194 sas.sas_octrlchunks = asoc->stats.octrlchunks; 6195 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 6196 sas.sas_oodchunks = asoc->stats.oodchunks; 6197 sas.sas_iodchunks = asoc->stats.iodchunks; 6198 sas.sas_ouodchunks = asoc->stats.ouodchunks; 6199 sas.sas_iuodchunks = asoc->stats.iuodchunks; 6200 sas.sas_idupchunks = asoc->stats.idupchunks; 6201 sas.sas_opackets = asoc->stats.opackets; 6202 sas.sas_ipackets = asoc->stats.ipackets; 6203 6204 /* New high max rto observed, will return 0 if not a single 6205 * RTO update took place. 
obs_rto_ipaddr will be bogus 6206 * in such a case 6207 */ 6208 sas.sas_maxrto = asoc->stats.max_obs_rto; 6209 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 6210 sizeof(struct sockaddr_storage)); 6211 6212 /* Mark beginning of a new observation period */ 6213 asoc->stats.max_obs_rto = asoc->rto_min; 6214 6215 if (put_user(len, optlen)) 6216 return -EFAULT; 6217 6218 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 6219 6220 if (copy_to_user(optval, &sas, len)) 6221 return -EFAULT; 6222 6223 return 0; 6224 } 6225 6226 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, 6227 char __user *optval, 6228 int __user *optlen) 6229 { 6230 int val = 0; 6231 6232 if (len < sizeof(int)) 6233 return -EINVAL; 6234 6235 len = sizeof(int); 6236 if (sctp_sk(sk)->recvrcvinfo) 6237 val = 1; 6238 if (put_user(len, optlen)) 6239 return -EFAULT; 6240 if (copy_to_user(optval, &val, len)) 6241 return -EFAULT; 6242 6243 return 0; 6244 } 6245 6246 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, 6247 char __user *optval, 6248 int __user *optlen) 6249 { 6250 int val = 0; 6251 6252 if (len < sizeof(int)) 6253 return -EINVAL; 6254 6255 len = sizeof(int); 6256 if (sctp_sk(sk)->recvnxtinfo) 6257 val = 1; 6258 if (put_user(len, optlen)) 6259 return -EFAULT; 6260 if (copy_to_user(optval, &val, len)) 6261 return -EFAULT; 6262 6263 return 0; 6264 } 6265 6266 static int sctp_getsockopt_pr_supported(struct sock *sk, int len, 6267 char __user *optval, 6268 int __user *optlen) 6269 { 6270 struct sctp_assoc_value params; 6271 struct sctp_association *asoc; 6272 int retval = -EFAULT; 6273 6274 if (len < sizeof(params)) { 6275 retval = -EINVAL; 6276 goto out; 6277 } 6278 6279 len = sizeof(params); 6280 if (copy_from_user(&params, optval, len)) 6281 goto out; 6282 6283 asoc = sctp_id2assoc(sk, params.assoc_id); 6284 if (asoc) { 6285 params.assoc_value = asoc->prsctp_enable; 6286 } else if (!params.assoc_id) { 6287 struct sctp_sock *sp = sctp_sk(sk); 6288 6289 params.assoc_value = sp->ep->prsctp_enable; 6290 } else { 6291 retval = -EINVAL; 6292 goto out; 6293 } 6294 6295 if (put_user(len, optlen)) 6296 goto out; 6297 6298 if (copy_to_user(optval, &params, len)) 6299 goto out; 6300 6301 retval = 0; 6302 6303 out: 6304 return retval; 6305 } 6306 6307 static int sctp_getsockopt_default_prinfo(struct sock *sk, int len, 6308 char __user *optval, 6309 int __user *optlen) 6310 { 6311 struct sctp_default_prinfo info; 6312 struct sctp_association *asoc; 6313 int retval = -EFAULT; 6314 6315 if (len < sizeof(info)) { 6316 retval = -EINVAL; 6317 goto out; 6318 } 6319 6320 len = sizeof(info); 6321 if (copy_from_user(&info, optval, len)) 6322 goto out; 6323 6324 asoc = sctp_id2assoc(sk, info.pr_assoc_id); 6325 if (asoc) { 6326 info.pr_policy = SCTP_PR_POLICY(asoc->default_flags); 6327 info.pr_value = asoc->default_timetolive; 6328 } else if (!info.pr_assoc_id) { 6329 struct sctp_sock *sp = sctp_sk(sk); 6330 6331 info.pr_policy = SCTP_PR_POLICY(sp->default_flags); 6332 info.pr_value = sp->default_timetolive; 6333 } else { 6334 retval = -EINVAL; 6335 goto out; 6336 } 6337 6338 if (put_user(len, optlen)) 6339 goto out; 6340 6341 if (copy_to_user(optval, &info, len)) 6342 goto out; 6343 6344 retval = 0; 6345 6346 out: 6347 return retval; 6348 } 6349 6350 static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, 6351 char __user *optval, 6352 int __user *optlen) 6353 { 6354 struct sctp_prstatus params; 6355 struct sctp_association *asoc; 6356 int policy; 6357 int retval = -EINVAL;
6358 6359 if (len < sizeof(params)) 6360 goto out; 6361 6362 len = sizeof(params); 6363 if (copy_from_user(&params, optval, len)) { 6364 retval = -EFAULT; 6365 goto out; 6366 } 6367 6368 policy = params.sprstat_policy; 6369 if (policy & ~SCTP_PR_SCTP_MASK) 6370 goto out; 6371 6372 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); 6373 if (!asoc) 6374 goto out; 6375 6376 if (policy == SCTP_PR_SCTP_NONE) { 6377 params.sprstat_abandoned_unsent = 0; 6378 params.sprstat_abandoned_sent = 0; 6379 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { 6380 params.sprstat_abandoned_unsent += 6381 asoc->abandoned_unsent[policy]; 6382 params.sprstat_abandoned_sent += 6383 asoc->abandoned_sent[policy]; 6384 } 6385 } else { 6386 params.sprstat_abandoned_unsent = 6387 asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)]; 6388 params.sprstat_abandoned_sent = 6389 asoc->abandoned_sent[__SCTP_PR_INDEX(policy)]; 6390 } 6391 6392 if (put_user(len, optlen)) { 6393 retval = -EFAULT; 6394 goto out; 6395 } 6396 6397 if (copy_to_user(optval, &params, len)) { 6398 retval = -EFAULT; 6399 goto out; 6400 } 6401 6402 retval = 0; 6403 6404 out: 6405 return retval; 6406 } 6407 6408 static int sctp_getsockopt(struct sock *sk, int level, int optname, 6409 char __user *optval, int __user *optlen) 6410 { 6411 int retval = 0; 6412 int len; 6413 6414 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 6415 6416 /* I can hardly begin to describe how wrong this is. This is 6417 * so broken as to be worse than useless. The API draft 6418 * REALLY is NOT helpful here... I am not convinced that the 6419 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 6420 * are at all well-founded. 6421 */ 6422 if (level != SOL_SCTP) { 6423 struct sctp_af *af = sctp_sk(sk)->pf->af; 6424 6425 retval = af->getsockopt(sk, level, optname, optval, optlen); 6426 return retval; 6427 } 6428 6429 if (get_user(len, optlen)) 6430 return -EFAULT; 6431 6432 if (len < 0) 6433 return -EINVAL; 6434 6435 lock_sock(sk); 6436 6437 switch (optname) { 6438 case SCTP_STATUS: 6439 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 6440 break; 6441 case SCTP_DISABLE_FRAGMENTS: 6442 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 6443 optlen); 6444 break; 6445 case SCTP_EVENTS: 6446 retval = sctp_getsockopt_events(sk, len, optval, optlen); 6447 break; 6448 case SCTP_AUTOCLOSE: 6449 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 6450 break; 6451 case SCTP_SOCKOPT_PEELOFF: 6452 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 6453 break; 6454 case SCTP_PEER_ADDR_PARAMS: 6455 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 6456 optlen); 6457 break; 6458 case SCTP_DELAYED_SACK: 6459 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 6460 optlen); 6461 break; 6462 case SCTP_INITMSG: 6463 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 6464 break; 6465 case SCTP_GET_PEER_ADDRS: 6466 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 6467 optlen); 6468 break; 6469 case SCTP_GET_LOCAL_ADDRS: 6470 retval = sctp_getsockopt_local_addrs(sk, len, optval, 6471 optlen); 6472 break; 6473 case SCTP_SOCKOPT_CONNECTX3: 6474 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 6475 break; 6476 case SCTP_DEFAULT_SEND_PARAM: 6477 retval = sctp_getsockopt_default_send_param(sk, len, 6478 optval, optlen); 6479 break; 6480 case SCTP_DEFAULT_SNDINFO: 6481 retval = sctp_getsockopt_default_sndinfo(sk, len, 6482 optval, optlen); 6483 break; 6484 case SCTP_PRIMARY_ADDR: 6485 retval =
sctp_getsockopt_primary_addr(sk, len, optval, optlen); 6486 break; 6487 case SCTP_NODELAY: 6488 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 6489 break; 6490 case SCTP_RTOINFO: 6491 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 6492 break; 6493 case SCTP_ASSOCINFO: 6494 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 6495 break; 6496 case SCTP_I_WANT_MAPPED_V4_ADDR: 6497 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 6498 break; 6499 case SCTP_MAXSEG: 6500 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 6501 break; 6502 case SCTP_GET_PEER_ADDR_INFO: 6503 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 6504 optlen); 6505 break; 6506 case SCTP_ADAPTATION_LAYER: 6507 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, 6508 optlen); 6509 break; 6510 case SCTP_CONTEXT: 6511 retval = sctp_getsockopt_context(sk, len, optval, optlen); 6512 break; 6513 case SCTP_FRAGMENT_INTERLEAVE: 6514 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 6515 optlen); 6516 break; 6517 case SCTP_PARTIAL_DELIVERY_POINT: 6518 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 6519 optlen); 6520 break; 6521 case SCTP_MAX_BURST: 6522 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 6523 break; 6524 case SCTP_AUTH_KEY: 6525 case SCTP_AUTH_CHUNK: 6526 case SCTP_AUTH_DELETE_KEY: 6527 retval = -EOPNOTSUPP; 6528 break; 6529 case SCTP_HMAC_IDENT: 6530 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 6531 break; 6532 case SCTP_AUTH_ACTIVE_KEY: 6533 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 6534 break; 6535 case SCTP_PEER_AUTH_CHUNKS: 6536 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 6537 optlen); 6538 break; 6539 case SCTP_LOCAL_AUTH_CHUNKS: 6540 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 6541 optlen); 6542 break; 6543 case SCTP_GET_ASSOC_NUMBER: 6544 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 6545 break; 6546 case SCTP_GET_ASSOC_ID_LIST: 6547 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 6548 break; 6549 case SCTP_AUTO_ASCONF: 6550 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 6551 break; 6552 case SCTP_PEER_ADDR_THLDS: 6553 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 6554 break; 6555 case SCTP_GET_ASSOC_STATS: 6556 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 6557 break; 6558 case SCTP_RECVRCVINFO: 6559 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); 6560 break; 6561 case SCTP_RECVNXTINFO: 6562 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); 6563 break; 6564 case SCTP_PR_SUPPORTED: 6565 retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen); 6566 break; 6567 case SCTP_DEFAULT_PRINFO: 6568 retval = sctp_getsockopt_default_prinfo(sk, len, optval, 6569 optlen); 6570 break; 6571 case SCTP_PR_ASSOC_STATUS: 6572 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, 6573 optlen); 6574 break; 6575 default: 6576 retval = -ENOPROTOOPT; 6577 break; 6578 } 6579 6580 release_sock(sk); 6581 return retval; 6582 } 6583 6584 static int sctp_hash(struct sock *sk) 6585 { 6586 /* STUB */ 6587 return 0; 6588 } 6589 6590 static void sctp_unhash(struct sock *sk) 6591 { 6592 /* STUB */ 6593 } 6594 6595 /* Check if port is acceptable. Possibly find first available port. 6596 * 6597 * The port hash table (contained in the 'global' SCTP protocol storage 6598 * returned by struct sctp_protocol *sctp_get_protocol()). 
The hash 6599 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 6600 * list (the list number is the port number hashed out, so as you 6601 * would expect from a hash function, all the ports in a given list have 6602 * such a number that hashes out to the same list number; you were 6603 * expecting that, right?); so each list has a set of ports, with a 6604 * link to the socket (struct sock) that uses it, the port number and 6605 * a fastreuse flag (FIXME: NPI ipg). 6606 */ 6607 static struct sctp_bind_bucket *sctp_bucket_create( 6608 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 6609 6610 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 6611 { 6612 struct sctp_bind_hashbucket *head; /* hash list */ 6613 struct sctp_bind_bucket *pp; 6614 unsigned short snum; 6615 int ret; 6616 6617 snum = ntohs(addr->v4.sin_port); 6618 6619 pr_debug("%s: begins, snum:%d\n", __func__, snum); 6620 6621 local_bh_disable(); 6622 6623 if (snum == 0) { 6624 /* Search for an available port. */ 6625 int low, high, remaining, index; 6626 unsigned int rover; 6627 struct net *net = sock_net(sk); 6628 6629 inet_get_local_port_range(net, &low, &high); 6630 remaining = (high - low) + 1; 6631 rover = prandom_u32() % remaining + low; 6632 6633 do { 6634 rover++; 6635 if ((rover < low) || (rover > high)) 6636 rover = low; 6637 if (inet_is_local_reserved_port(net, rover)) 6638 continue; 6639 index = sctp_phashfn(sock_net(sk), rover); 6640 head = &sctp_port_hashtable[index]; 6641 spin_lock(&head->lock); 6642 sctp_for_each_hentry(pp, &head->chain) 6643 if ((pp->port == rover) && 6644 net_eq(sock_net(sk), pp->net)) 6645 goto next; 6646 break; 6647 next: 6648 spin_unlock(&head->lock); 6649 } while (--remaining > 0); 6650 6651 /* Exhausted local port range during search? */ 6652 ret = 1; 6653 if (remaining <= 0) 6654 goto fail; 6655 6656 /* OK, here is the one we will use. HEAD (the port 6657 * hash table list entry) is non-NULL and we hold its 6658 * lock. 6659 */ 6660 snum = rover; 6661 } else { 6662 /* We are given a specific port number; we verify 6663 * that it is not being used. If it is used, we will 6664 * exhaust the search in the hash list corresponding 6665 * to the port number (snum) - we detect that with the 6666 * port iterator, pp being NULL. 6667 */ 6668 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 6669 spin_lock(&head->lock); 6670 sctp_for_each_hentry(pp, &head->chain) { 6671 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 6672 goto pp_found; 6673 } 6674 } 6675 pp = NULL; 6676 goto pp_not_found; 6677 pp_found: 6678 if (!hlist_empty(&pp->owner)) { 6679 /* We had a port hash table hit - there is an 6680 * available port (pp != NULL) and it is being 6681 * used by another socket (pp->owner not empty); that other 6682 * socket is going to be sk2. 6683 */ 6684 int reuse = sk->sk_reuse; 6685 struct sock *sk2; 6686 6687 pr_debug("%s: found a possible match\n", __func__); 6688 6689 if (pp->fastreuse && sk->sk_reuse && 6690 sk->sk_state != SCTP_SS_LISTENING) 6691 goto success; 6692 6693 /* Run through the list of sockets bound to the port 6694 * (pp->port) [via the pointers bind_next and 6695 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 6696 * we get the endpoint they describe and run through 6697 * the endpoint's list of IP (v4 or v6) addresses, 6698 * comparing each of the addresses with the address of 6699 * the socket sk.
If we find a match, then that means 6700 * that this port/socket (sk) combination are already 6701 * in an endpoint. 6702 */ 6703 sk_for_each_bound(sk2, &pp->owner) { 6704 struct sctp_endpoint *ep2; 6705 ep2 = sctp_sk(sk2)->ep; 6706 6707 if (sk == sk2 || 6708 (reuse && sk2->sk_reuse && 6709 sk2->sk_state != SCTP_SS_LISTENING)) 6710 continue; 6711 6712 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 6713 sctp_sk(sk2), sctp_sk(sk))) { 6714 ret = (long)sk2; 6715 goto fail_unlock; 6716 } 6717 } 6718 6719 pr_debug("%s: found a match\n", __func__); 6720 } 6721 pp_not_found: 6722 /* If there was a hash table miss, create a new port. */ 6723 ret = 1; 6724 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 6725 goto fail_unlock; 6726 6727 /* In either case (hit or miss), make sure fastreuse is 1 only 6728 * if sk->sk_reuse is too (that is, if the caller requested 6729 * SO_REUSEADDR on this socket -sk-). 6730 */ 6731 if (hlist_empty(&pp->owner)) { 6732 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6733 pp->fastreuse = 1; 6734 else 6735 pp->fastreuse = 0; 6736 } else if (pp->fastreuse && 6737 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6738 pp->fastreuse = 0; 6739 6740 /* We are set, so fill up all the data in the hash table 6741 * entry, tie the socket list information with the rest of the 6742 * sockets FIXME: Blurry, NPI (ipg). 6743 */ 6744 success: 6745 if (!sctp_sk(sk)->bind_hash) { 6746 inet_sk(sk)->inet_num = snum; 6747 sk_add_bind_node(sk, &pp->owner); 6748 sctp_sk(sk)->bind_hash = pp; 6749 } 6750 ret = 0; 6751 6752 fail_unlock: 6753 spin_unlock(&head->lock); 6754 6755 fail: 6756 local_bh_enable(); 6757 return ret; 6758 } 6759 6760 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6761 * port is requested. 6762 */ 6763 static int sctp_get_port(struct sock *sk, unsigned short snum) 6764 { 6765 union sctp_addr addr; 6766 struct sctp_af *af = sctp_sk(sk)->pf->af; 6767 6768 /* Set up a dummy address struct from the sk. */ 6769 af->from_sk(&addr, sk); 6770 addr.v4.sin_port = htons(snum); 6771 6772 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6773 return !!sctp_get_port_local(sk, &addr); 6774 } 6775 6776 /* 6777 * Move a socket to LISTENING state. 6778 */ 6779 static int sctp_listen_start(struct sock *sk, int backlog) 6780 { 6781 struct sctp_sock *sp = sctp_sk(sk); 6782 struct sctp_endpoint *ep = sp->ep; 6783 struct crypto_shash *tfm = NULL; 6784 char alg[32]; 6785 6786 /* Allocate HMAC for generating cookie. */ 6787 if (!sp->hmac && sp->sctp_hmac_alg) { 6788 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6789 tfm = crypto_alloc_shash(alg, 0, 0); 6790 if (IS_ERR(tfm)) { 6791 net_info_ratelimited("failed to load transform for %s: %ld\n", 6792 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6793 return -ENOSYS; 6794 } 6795 sctp_sk(sk)->hmac = tfm; 6796 } 6797 6798 /* 6799 * If a bind() or sctp_bindx() is not called prior to a listen() 6800 * call that allows new associations to be accepted, the system 6801 * picks an ephemeral port and will choose an address set equivalent 6802 * to binding with a wildcard address. 6803 * 6804 * This is not currently spelled out in the SCTP sockets 6805 * extensions draft, but follows the practice as seen in TCP 6806 * sockets. 
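 *
 * Illustrative user-space sketch of relying on that behaviour (an
 * assumption for the example, not a recommendation): calling listen()
 * on an unbound one-to-many socket and letting the kernel autobind to
 * an ephemeral port on the wildcard address:
 *
 *   int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 *   if (fd >= 0 && listen(fd, 5) == 0)
 *       ;	/* endpoint now accepts associations on an ephemeral port */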
6807 * 6808 */ 6809 sk->sk_state = SCTP_SS_LISTENING; 6810 if (!ep->base.bind_addr.port) { 6811 if (sctp_autobind(sk)) 6812 return -EAGAIN; 6813 } else { 6814 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6815 sk->sk_state = SCTP_SS_CLOSED; 6816 return -EADDRINUSE; 6817 } 6818 } 6819 6820 sk->sk_max_ack_backlog = backlog; 6821 sctp_hash_endpoint(ep); 6822 return 0; 6823 } 6824 6825 /* 6826 * 4.1.3 / 5.1.3 listen() 6827 * 6828 * By default, new associations are not accepted for UDP style sockets. 6829 * An application uses listen() to mark a socket as being able to 6830 * accept new associations. 6831 * 6832 * On TCP style sockets, applications use listen() to ready the SCTP 6833 * endpoint for accepting inbound associations. 6834 * 6835 * On both types of endpoints a backlog of '0' disables listening. 6836 * 6837 * Move a socket to LISTENING state. 6838 */ 6839 int sctp_inet_listen(struct socket *sock, int backlog) 6840 { 6841 struct sock *sk = sock->sk; 6842 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6843 int err = -EINVAL; 6844 6845 if (unlikely(backlog < 0)) 6846 return err; 6847 6848 lock_sock(sk); 6849 6850 /* Peeled-off sockets are not allowed to listen(). */ 6851 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6852 goto out; 6853 6854 if (sock->state != SS_UNCONNECTED) 6855 goto out; 6856 6857 /* If backlog is zero, disable listening. */ 6858 if (!backlog) { 6859 if (sctp_sstate(sk, CLOSED)) 6860 goto out; 6861 6862 err = 0; 6863 sctp_unhash_endpoint(ep); 6864 sk->sk_state = SCTP_SS_CLOSED; 6865 if (sk->sk_reuse) 6866 sctp_sk(sk)->bind_hash->fastreuse = 1; 6867 goto out; 6868 } 6869 6870 /* If we are already listening, just update the backlog */ 6871 if (sctp_sstate(sk, LISTENING)) 6872 sk->sk_max_ack_backlog = backlog; 6873 else { 6874 err = sctp_listen_start(sk, backlog); 6875 if (err) 6876 goto out; 6877 } 6878 6879 err = 0; 6880 out: 6881 release_sock(sk); 6882 return err; 6883 } 6884 6885 /* 6886 * This function is done by modeling the current datagram_poll() and the 6887 * tcp_poll(). Note that, based on these implementations, we don't 6888 * lock the socket in this function, even though it seems that, 6889 * ideally, locking or some other mechanisms can be used to ensure 6890 * the integrity of the counters (sndbuf and wmem_alloc) used 6891 * in this place. We assume that we don't need locks either until proven 6892 * otherwise. 6893 * 6894 * Another thing to note is that we include the Async I/O support 6895 * here, again, by modeling the current TCP/UDP code. We don't have 6896 * a good way to test with it yet. 6897 */ 6898 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6899 { 6900 struct sock *sk = sock->sk; 6901 struct sctp_sock *sp = sctp_sk(sk); 6902 unsigned int mask; 6903 6904 poll_wait(file, sk_sleep(sk), wait); 6905 6906 sock_rps_record_flow(sk); 6907 6908 /* A TCP-style listening socket becomes readable when the accept queue 6909 * is not empty. 6910 */ 6911 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6912 return (!list_empty(&sp->ep->asocs)) ? 6913 (POLLIN | POLLRDNORM) : 0; 6914 6915 mask = 0; 6916 6917 /* Is there any exceptional events? */ 6918 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6919 mask |= POLLERR | 6920 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 6921 if (sk->sk_shutdown & RCV_SHUTDOWN) 6922 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6923 if (sk->sk_shutdown == SHUTDOWN_MASK) 6924 mask |= POLLHUP; 6925 6926 /* Is it readable? Reconsider this code with TCP-style support. 
*/ 6927 if (!skb_queue_empty(&sk->sk_receive_queue)) 6928 mask |= POLLIN | POLLRDNORM; 6929 6930 /* The association is either gone or not ready. */ 6931 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6932 return mask; 6933 6934 /* Is it writable? */ 6935 if (sctp_writeable(sk)) { 6936 mask |= POLLOUT | POLLWRNORM; 6937 } else { 6938 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 6939 /* 6940 * Since the socket is not locked, the buffer 6941 * might be made available after the writeable check and 6942 * before the bit is set. This could cause a lost I/O 6943 * signal. tcp_poll() has a race breaker for this race 6944 * condition. Based on their implementation, we put 6945 * in the following code to cover it as well. 6946 */ 6947 if (sctp_writeable(sk)) 6948 mask |= POLLOUT | POLLWRNORM; 6949 } 6950 return mask; 6951 } 6952 6953 /******************************************************************** 6954 * 2nd Level Abstractions 6955 ********************************************************************/ 6956 6957 static struct sctp_bind_bucket *sctp_bucket_create( 6958 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6959 { 6960 struct sctp_bind_bucket *pp; 6961 6962 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6963 if (pp) { 6964 SCTP_DBG_OBJCNT_INC(bind_bucket); 6965 pp->port = snum; 6966 pp->fastreuse = 0; 6967 INIT_HLIST_HEAD(&pp->owner); 6968 pp->net = net; 6969 hlist_add_head(&pp->node, &head->chain); 6970 } 6971 return pp; 6972 } 6973 6974 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6975 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6976 { 6977 if (pp && hlist_empty(&pp->owner)) { 6978 __hlist_del(&pp->node); 6979 kmem_cache_free(sctp_bucket_cachep, pp); 6980 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6981 } 6982 } 6983 6984 /* Release this socket's reference to a local port. */ 6985 static inline void __sctp_put_port(struct sock *sk) 6986 { 6987 struct sctp_bind_hashbucket *head = 6988 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6989 inet_sk(sk)->inet_num)]; 6990 struct sctp_bind_bucket *pp; 6991 6992 spin_lock(&head->lock); 6993 pp = sctp_sk(sk)->bind_hash; 6994 __sk_del_bind_node(sk); 6995 sctp_sk(sk)->bind_hash = NULL; 6996 inet_sk(sk)->inet_num = 0; 6997 sctp_bucket_destroy(pp); 6998 spin_unlock(&head->lock); 6999 } 7000 7001 void sctp_put_port(struct sock *sk) 7002 { 7003 local_bh_disable(); 7004 __sctp_put_port(sk); 7005 local_bh_enable(); 7006 } 7007 7008 /* 7009 * The system picks an ephemeral port and choose an address set equivalent 7010 * to binding with a wildcard address. 7011 * One of those addresses will be the primary address for the association. 7012 * This automatically enables the multihoming capability of SCTP. 7013 */ 7014 static int sctp_autobind(struct sock *sk) 7015 { 7016 union sctp_addr autoaddr; 7017 struct sctp_af *af; 7018 __be16 port; 7019 7020 /* Initialize a local sockaddr structure to INADDR_ANY. */ 7021 af = sctp_sk(sk)->pf->af; 7022 7023 port = htons(inet_sk(sk)->inet_num); 7024 af->inaddr_any(&autoaddr, port); 7025 7026 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 7027 } 7028 7029 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 
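 *
 * A user-space sketch (illustrative only; assumes 'fd' is a connected
 * SCTP socket and <string.h>, <sys/socket.h>, <netinet/sctp.h> are
 * included) of attaching one SCTP_SNDINFO ancillary data object to a
 * sendmsg() call, which is what this parser consumes on the kernel side:
 *
 *   char data[] = "hello";
 *   char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *   struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) - 1 };
 *   struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                         .msg_control = cbuf,
 *                         .msg_controllen = sizeof(cbuf) };
 *   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *   struct sctp_sndinfo *si = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *
 *   cmsg->cmsg_level = IPPROTO_SCTP;
 *   cmsg->cmsg_type = SCTP_SNDINFO;
 *   cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *   memset(si, 0, sizeof(*si));
 *   si->snd_sid = 1;	/* send on stream 1 */
 *   sendmsg(fd, &msg, 0);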
7030 * 7031 * From RFC 2292 7032 * 4.2 The cmsghdr Structure * 7033 * 7034 * When ancillary data is sent or received, any number of ancillary data 7035 * objects can be specified by the msg_control and msg_controllen members of 7036 * the msghdr structure, because each object is preceded by 7037 * a cmsghdr structure defining the object's length (the cmsg_len member). 7038 * Historically Berkeley-derived implementations have passed only one object 7039 * at a time, but this API allows multiple objects to be 7040 * passed in a single call to sendmsg() or recvmsg(). The following example 7041 * shows two ancillary data objects in a control buffer. 7042 * 7043 * |<--------------------------- msg_controllen -------------------------->| 7044 * | | 7045 * 7046 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 7047 * 7048 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 7049 * | | | 7050 * 7051 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 7052 * 7053 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 7054 * | | | | | 7055 * 7056 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 7057 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 7058 * 7059 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 7060 * 7061 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 7062 * ^ 7063 * | 7064 * 7065 * msg_control 7066 * points here 7067 */ 7068 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 7069 { 7070 struct cmsghdr *cmsg; 7071 struct msghdr *my_msg = (struct msghdr *)msg; 7072 7073 for_each_cmsghdr(cmsg, my_msg) { 7074 if (!CMSG_OK(my_msg, cmsg)) 7075 return -EINVAL; 7076 7077 /* Should we parse this header or ignore? */ 7078 if (cmsg->cmsg_level != IPPROTO_SCTP) 7079 continue; 7080 7081 /* Strictly check lengths following example in SCM code. */ 7082 switch (cmsg->cmsg_type) { 7083 case SCTP_INIT: 7084 /* SCTP Socket API Extension 7085 * 5.3.1 SCTP Initiation Structure (SCTP_INIT) 7086 * 7087 * This cmsghdr structure provides information for 7088 * initializing new SCTP associations with sendmsg(). 7089 * The SCTP_INITMSG socket option uses this same data 7090 * structure. This structure is not used for 7091 * recvmsg(). 7092 * 7093 * cmsg_level cmsg_type cmsg_data[] 7094 * ------------ ------------ ---------------------- 7095 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 7096 */ 7097 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) 7098 return -EINVAL; 7099 7100 cmsgs->init = CMSG_DATA(cmsg); 7101 break; 7102 7103 case SCTP_SNDRCV: 7104 /* SCTP Socket API Extension 7105 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) 7106 * 7107 * This cmsghdr structure specifies SCTP options for 7108 * sendmsg() and describes SCTP header information 7109 * about a received message through recvmsg(). 
7110 * 7111 * cmsg_level cmsg_type cmsg_data[] 7112 * ------------ ------------ ---------------------- 7113 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 7114 */ 7115 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 7116 return -EINVAL; 7117 7118 cmsgs->srinfo = CMSG_DATA(cmsg); 7119 7120 if (cmsgs->srinfo->sinfo_flags & 7121 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 7122 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | 7123 SCTP_ABORT | SCTP_EOF)) 7124 return -EINVAL; 7125 break; 7126 7127 case SCTP_SNDINFO: 7128 /* SCTP Socket API Extension 7129 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) 7130 * 7131 * This cmsghdr structure specifies SCTP options for 7132 * sendmsg(). This structure and SCTP_RCVINFO replaces 7133 * SCTP_SNDRCV which has been deprecated. 7134 * 7135 * cmsg_level cmsg_type cmsg_data[] 7136 * ------------ ------------ --------------------- 7137 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo 7138 */ 7139 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) 7140 return -EINVAL; 7141 7142 cmsgs->sinfo = CMSG_DATA(cmsg); 7143 7144 if (cmsgs->sinfo->snd_flags & 7145 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 7146 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | 7147 SCTP_ABORT | SCTP_EOF)) 7148 return -EINVAL; 7149 break; 7150 default: 7151 return -EINVAL; 7152 } 7153 } 7154 7155 return 0; 7156 } 7157 7158 /* 7159 * Wait for a packet.. 7160 * Note: This function is the same function as in core/datagram.c 7161 * with a few modifications to make lksctp work. 7162 */ 7163 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) 7164 { 7165 int error; 7166 DEFINE_WAIT(wait); 7167 7168 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 7169 7170 /* Socket errors? */ 7171 error = sock_error(sk); 7172 if (error) 7173 goto out; 7174 7175 if (!skb_queue_empty(&sk->sk_receive_queue)) 7176 goto ready; 7177 7178 /* Socket shut down? */ 7179 if (sk->sk_shutdown & RCV_SHUTDOWN) 7180 goto out; 7181 7182 /* Sequenced packets can come disconnected. If so we report the 7183 * problem. 7184 */ 7185 error = -ENOTCONN; 7186 7187 /* Is there a good reason to think that we may receive some data? */ 7188 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 7189 goto out; 7190 7191 /* Handle signals. */ 7192 if (signal_pending(current)) 7193 goto interrupted; 7194 7195 /* Let another process have a go. Since we are going to sleep 7196 * anyway. Note: This may cause odd behaviors if the message 7197 * does not fit in the user's buffer, but this seems to be the 7198 * only way to honor MSG_DONTWAIT realistically. 7199 */ 7200 release_sock(sk); 7201 *timeo_p = schedule_timeout(*timeo_p); 7202 lock_sock(sk); 7203 7204 ready: 7205 finish_wait(sk_sleep(sk), &wait); 7206 return 0; 7207 7208 interrupted: 7209 error = sock_intr_errno(*timeo_p); 7210 7211 out: 7212 finish_wait(sk_sleep(sk), &wait); 7213 *err = error; 7214 return error; 7215 } 7216 7217 /* Receive a datagram. 7218 * Note: This is pretty much the same routine as in core/datagram.c 7219 * with a few changes to make lksctp work. 7220 */ 7221 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 7222 int noblock, int *err) 7223 { 7224 int error; 7225 struct sk_buff *skb; 7226 long timeo; 7227 7228 timeo = sock_rcvtimeo(sk, noblock); 7229 7230 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 7231 MAX_SCHEDULE_TIMEOUT); 7232 7233 do { 7234 /* Again only user level code calls this function, 7235 * so nothing interrupt level 7236 * will suddenly eat the receive_queue. 
7237 * 7238 * Look at current nfs client by the way... 7239 * However, this function was correct in any case. 8) 7240 */ 7241 if (flags & MSG_PEEK) { 7242 skb = skb_peek(&sk->sk_receive_queue); 7243 if (skb) 7244 atomic_inc(&skb->users); 7245 } else { 7246 skb = __skb_dequeue(&sk->sk_receive_queue); 7247 } 7248 7249 if (skb) 7250 return skb; 7251 7252 /* Caller is allowed not to check sk->sk_err before calling. */ 7253 error = sock_error(sk); 7254 if (error) 7255 goto no_packet; 7256 7257 if (sk->sk_shutdown & RCV_SHUTDOWN) 7258 break; 7259 7260 if (sk_can_busy_loop(sk) && 7261 sk_busy_loop(sk, noblock)) 7262 continue; 7263 7264 /* User doesn't want to wait. */ 7265 error = -EAGAIN; 7266 if (!timeo) 7267 goto no_packet; 7268 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 7269 7270 return NULL; 7271 7272 no_packet: 7273 *err = error; 7274 return NULL; 7275 } 7276 7277 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 7278 static void __sctp_write_space(struct sctp_association *asoc) 7279 { 7280 struct sock *sk = asoc->base.sk; 7281 7282 if (sctp_wspace(asoc) <= 0) 7283 return; 7284 7285 if (waitqueue_active(&asoc->wait)) 7286 wake_up_interruptible(&asoc->wait); 7287 7288 if (sctp_writeable(sk)) { 7289 struct socket_wq *wq; 7290 7291 rcu_read_lock(); 7292 wq = rcu_dereference(sk->sk_wq); 7293 if (wq) { 7294 if (waitqueue_active(&wq->wait)) 7295 wake_up_interruptible(&wq->wait); 7296 7297 /* Note that we try to include the Async I/O support 7298 * here by modeling from the current TCP/UDP code. 7299 * We have not tested with it yet. 7300 */ 7301 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 7302 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); 7303 } 7304 rcu_read_unlock(); 7305 } 7306 } 7307 7308 static void sctp_wake_up_waiters(struct sock *sk, 7309 struct sctp_association *asoc) 7310 { 7311 struct sctp_association *tmp = asoc; 7312 7313 /* We do accounting for the sndbuf space per association, 7314 * so we only need to wake our own association. 7315 */ 7316 if (asoc->ep->sndbuf_policy) 7317 return __sctp_write_space(asoc); 7318 7319 /* If association goes down and is just flushing its 7320 * outq, then just normally notify others. 7321 */ 7322 if (asoc->base.dead) 7323 return sctp_write_space(sk); 7324 7325 /* Accounting for the sndbuf space is per socket, so we 7326 * need to wake up others, try to be fair and in case of 7327 * other associations, let them have a go first instead 7328 * of just doing a sctp_write_space() call. 7329 * 7330 * Note that we reach sctp_wake_up_waiters() only when 7331 * associations free up queued chunks, thus we are under 7332 * lock and the list of associations on a socket is 7333 * guaranteed not to change. 7334 */ 7335 for (tmp = list_next_entry(tmp, asocs); 1; 7336 tmp = list_next_entry(tmp, asocs)) { 7337 /* Manually skip the head element. */ 7338 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) 7339 continue; 7340 /* Wake up association. */ 7341 __sctp_write_space(tmp); 7342 /* We've reached the end. */ 7343 if (tmp == asoc) 7344 break; 7345 } 7346 } 7347 7348 /* Do accounting for the sndbuf space. 7349 * Decrement the used sndbuf space of the corresponding association by the 7350 * data size which was just transmitted(freed). 
7351 */ 7352 static void sctp_wfree(struct sk_buff *skb) 7353 { 7354 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; 7355 struct sctp_association *asoc = chunk->asoc; 7356 struct sock *sk = asoc->base.sk; 7357 7358 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 7359 sizeof(struct sk_buff) + 7360 sizeof(struct sctp_chunk); 7361 7362 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 7363 7364 /* 7365 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 7366 */ 7367 sk->sk_wmem_queued -= skb->truesize; 7368 sk_mem_uncharge(sk, skb->truesize); 7369 7370 sock_wfree(skb); 7371 sctp_wake_up_waiters(sk, asoc); 7372 7373 sctp_association_put(asoc); 7374 } 7375 7376 /* Do accounting for the receive space on the socket. 7377 * Accounting for the association is done in ulpevent.c 7378 * We set this as a destructor for the cloned data skbs so that 7379 * accounting is done at the correct time. 7380 */ 7381 void sctp_sock_rfree(struct sk_buff *skb) 7382 { 7383 struct sock *sk = skb->sk; 7384 struct sctp_ulpevent *event = sctp_skb2event(skb); 7385 7386 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 7387 7388 /* 7389 * Mimic the behavior of sock_rfree 7390 */ 7391 sk_mem_uncharge(sk, event->rmem_len); 7392 } 7393 7394 7395 /* Helper function to wait for space in the sndbuf. */ 7396 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 7397 size_t msg_len) 7398 { 7399 struct sock *sk = asoc->base.sk; 7400 int err = 0; 7401 long current_timeo = *timeo_p; 7402 DEFINE_WAIT(wait); 7403 7404 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 7405 *timeo_p, msg_len); 7406 7407 /* Increment the association's refcnt. */ 7408 sctp_association_hold(asoc); 7409 7410 /* Wait on the association specific sndbuf space. */ 7411 for (;;) { 7412 prepare_to_wait_exclusive(&asoc->wait, &wait, 7413 TASK_INTERRUPTIBLE); 7414 if (!*timeo_p) 7415 goto do_nonblock; 7416 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 7417 asoc->base.dead) 7418 goto do_error; 7419 if (signal_pending(current)) 7420 goto do_interrupted; 7421 if (msg_len <= sctp_wspace(asoc)) 7422 break; 7423 7424 /* Let another process have a go. Since we are going 7425 * to sleep anyway. 7426 */ 7427 release_sock(sk); 7428 current_timeo = schedule_timeout(current_timeo); 7429 BUG_ON(sk != asoc->base.sk); 7430 lock_sock(sk); 7431 7432 *timeo_p = current_timeo; 7433 } 7434 7435 out: 7436 finish_wait(&asoc->wait, &wait); 7437 7438 /* Release the association's refcnt. */ 7439 sctp_association_put(asoc); 7440 7441 return err; 7442 7443 do_error: 7444 err = -EPIPE; 7445 goto out; 7446 7447 do_interrupted: 7448 err = sock_intr_errno(*timeo_p); 7449 goto out; 7450 7451 do_nonblock: 7452 err = -EAGAIN; 7453 goto out; 7454 } 7455 7456 void sctp_data_ready(struct sock *sk) 7457 { 7458 struct socket_wq *wq; 7459 7460 rcu_read_lock(); 7461 wq = rcu_dereference(sk->sk_wq); 7462 if (skwq_has_sleeper(wq)) 7463 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 7464 POLLRDNORM | POLLRDBAND); 7465 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 7466 rcu_read_unlock(); 7467 } 7468 7469 /* If socket sndbuf has changed, wake up all per association waiters. */ 7470 void sctp_write_space(struct sock *sk) 7471 { 7472 struct sctp_association *asoc; 7473 7474 /* Wake up the tasks in each wait queue. */ 7475 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 7476 __sctp_write_space(asoc); 7477 } 7478 } 7479 7480 /* Is there any sndbuf space available on the socket? 
7481 * 7482 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 7483 * associations on the same socket. For a UDP-style socket with 7484 * multiple associations, it is possible for it to be "unwriteable" 7485 * prematurely. I assume that this is acceptable because 7486 * a premature "unwriteable" is better than an accidental "writeable" which 7487 * would cause an unwanted block under certain circumstances. For the 1-1 7488 * UDP-style sockets or TCP-style sockets, this code should work. 7489 * - Daisy 7490 */ 7491 static int sctp_writeable(struct sock *sk) 7492 { 7493 int amt = 0; 7494 7495 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 7496 if (amt < 0) 7497 amt = 0; 7498 return amt; 7499 } 7500 7501 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 7502 * returns immediately with EINPROGRESS. 7503 */ 7504 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 7505 { 7506 struct sock *sk = asoc->base.sk; 7507 int err = 0; 7508 long current_timeo = *timeo_p; 7509 DEFINE_WAIT(wait); 7510 7511 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 7512 7513 /* Increment the association's refcnt. */ 7514 sctp_association_hold(asoc); 7515 7516 for (;;) { 7517 prepare_to_wait_exclusive(&asoc->wait, &wait, 7518 TASK_INTERRUPTIBLE); 7519 if (!*timeo_p) 7520 goto do_nonblock; 7521 if (sk->sk_shutdown & RCV_SHUTDOWN) 7522 break; 7523 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 7524 asoc->base.dead) 7525 goto do_error; 7526 if (signal_pending(current)) 7527 goto do_interrupted; 7528 7529 if (sctp_state(asoc, ESTABLISHED)) 7530 break; 7531 7532 /* Let another process have a go. Since we are going 7533 * to sleep anyway. 7534 */ 7535 release_sock(sk); 7536 current_timeo = schedule_timeout(current_timeo); 7537 lock_sock(sk); 7538 7539 *timeo_p = current_timeo; 7540 } 7541 7542 out: 7543 finish_wait(&asoc->wait, &wait); 7544 7545 /* Release the association's refcnt. 
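 * This balances the sctp_association_hold() taken before the wait loop.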
*/ 7546 sctp_association_put(asoc); 7547 7548 return err; 7549 7550 do_error: 7551 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 7552 err = -ETIMEDOUT; 7553 else 7554 err = -ECONNREFUSED; 7555 goto out; 7556 7557 do_interrupted: 7558 err = sock_intr_errno(*timeo_p); 7559 goto out; 7560 7561 do_nonblock: 7562 err = -EINPROGRESS; 7563 goto out; 7564 } 7565 7566 static int sctp_wait_for_accept(struct sock *sk, long timeo) 7567 { 7568 struct sctp_endpoint *ep; 7569 int err = 0; 7570 DEFINE_WAIT(wait); 7571 7572 ep = sctp_sk(sk)->ep; 7573 7574 7575 for (;;) { 7576 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 7577 TASK_INTERRUPTIBLE); 7578 7579 if (list_empty(&ep->asocs)) { 7580 release_sock(sk); 7581 timeo = schedule_timeout(timeo); 7582 lock_sock(sk); 7583 } 7584 7585 err = -EINVAL; 7586 if (!sctp_sstate(sk, LISTENING)) 7587 break; 7588 7589 err = 0; 7590 if (!list_empty(&ep->asocs)) 7591 break; 7592 7593 err = sock_intr_errno(timeo); 7594 if (signal_pending(current)) 7595 break; 7596 7597 err = -EAGAIN; 7598 if (!timeo) 7599 break; 7600 } 7601 7602 finish_wait(sk_sleep(sk), &wait); 7603 7604 return err; 7605 } 7606 7607 static void sctp_wait_for_close(struct sock *sk, long timeout) 7608 { 7609 DEFINE_WAIT(wait); 7610 7611 do { 7612 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 7613 if (list_empty(&sctp_sk(sk)->ep->asocs)) 7614 break; 7615 release_sock(sk); 7616 timeout = schedule_timeout(timeout); 7617 lock_sock(sk); 7618 } while (!signal_pending(current) && timeout); 7619 7620 finish_wait(sk_sleep(sk), &wait); 7621 } 7622 7623 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 7624 { 7625 struct sk_buff *frag; 7626 7627 if (!skb->data_len) 7628 goto done; 7629 7630 /* Don't forget the fragments. */ 7631 skb_walk_frags(skb, frag) 7632 sctp_skb_set_owner_r_frag(frag, sk); 7633 7634 done: 7635 sctp_skb_set_owner_r(skb, sk); 7636 } 7637 7638 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 7639 struct sctp_association *asoc) 7640 { 7641 struct inet_sock *inet = inet_sk(sk); 7642 struct inet_sock *newinet; 7643 7644 newsk->sk_type = sk->sk_type; 7645 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 7646 newsk->sk_flags = sk->sk_flags; 7647 newsk->sk_tsflags = sk->sk_tsflags; 7648 newsk->sk_no_check_tx = sk->sk_no_check_tx; 7649 newsk->sk_no_check_rx = sk->sk_no_check_rx; 7650 newsk->sk_reuse = sk->sk_reuse; 7651 7652 newsk->sk_shutdown = sk->sk_shutdown; 7653 newsk->sk_destruct = sctp_destruct_sock; 7654 newsk->sk_family = sk->sk_family; 7655 newsk->sk_protocol = IPPROTO_SCTP; 7656 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 7657 newsk->sk_sndbuf = sk->sk_sndbuf; 7658 newsk->sk_rcvbuf = sk->sk_rcvbuf; 7659 newsk->sk_lingertime = sk->sk_lingertime; 7660 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 7661 newsk->sk_sndtimeo = sk->sk_sndtimeo; 7662 newsk->sk_rxhash = sk->sk_rxhash; 7663 7664 newinet = inet_sk(newsk); 7665 7666 /* Initialize sk's sport, dport, rcv_saddr and daddr for 7667 * getsockname() and getpeername() 7668 */ 7669 newinet->inet_sport = inet->inet_sport; 7670 newinet->inet_saddr = inet->inet_saddr; 7671 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 7672 newinet->inet_dport = htons(asoc->peer.port); 7673 newinet->pmtudisc = inet->pmtudisc; 7674 newinet->inet_id = asoc->next_tsn ^ jiffies; 7675 7676 newinet->uc_ttl = inet->uc_ttl; 7677 newinet->mc_loop = 1; 7678 newinet->mc_ttl = 1; 7679 newinet->mc_index = 0; 7680 newinet->mc_list = NULL; 7681 7682 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) 7683 net_enable_timestamp(); 7684 7685 
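	/* Let the LSM copy the parent socket's security state to the child. */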
security_sk_clone(sk, newsk); 7686 } 7687 7688 static inline void sctp_copy_descendant(struct sock *sk_to, 7689 const struct sock *sk_from) 7690 { 7691 int ancestor_size = sizeof(struct inet_sock) + 7692 sizeof(struct sctp_sock) - 7693 offsetof(struct sctp_sock, auto_asconf_list); 7694 7695 if (sk_from->sk_family == PF_INET6) 7696 ancestor_size += sizeof(struct ipv6_pinfo); 7697 7698 __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); 7699 } 7700 7701 /* Populate the fields of the newsk from the oldsk and migrate the assoc 7702 * and its messages to the newsk. 7703 */ 7704 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 7705 struct sctp_association *assoc, 7706 sctp_socket_type_t type) 7707 { 7708 struct sctp_sock *oldsp = sctp_sk(oldsk); 7709 struct sctp_sock *newsp = sctp_sk(newsk); 7710 struct sctp_bind_bucket *pp; /* hash list port iterator */ 7711 struct sctp_endpoint *newep = newsp->ep; 7712 struct sk_buff *skb, *tmp; 7713 struct sctp_ulpevent *event; 7714 struct sctp_bind_hashbucket *head; 7715 7716 /* Migrate socket buffer sizes and all the socket level options to the 7717 * new socket. 7718 */ 7719 newsk->sk_sndbuf = oldsk->sk_sndbuf; 7720 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 7721 /* Brute force copy old sctp opt. */ 7722 sctp_copy_descendant(newsk, oldsk); 7723 7724 /* Restore the ep value that was overwritten with the above structure 7725 * copy. 7726 */ 7727 newsp->ep = newep; 7728 newsp->hmac = NULL; 7729 7730 /* Hook this new socket in to the bind_hash list. */ 7731 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 7732 inet_sk(oldsk)->inet_num)]; 7733 spin_lock_bh(&head->lock); 7734 pp = sctp_sk(oldsk)->bind_hash; 7735 sk_add_bind_node(newsk, &pp->owner); 7736 sctp_sk(newsk)->bind_hash = pp; 7737 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 7738 spin_unlock_bh(&head->lock); 7739 7740 /* Copy the bind_addr list from the original endpoint to the new 7741 * endpoint so that we can handle restarts properly 7742 */ 7743 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 7744 &oldsp->ep->base.bind_addr, GFP_KERNEL); 7745 7746 /* Move any messages in the old socket's receive queue that are for the 7747 * peeled off association to the new socket's receive queue. 7748 */ 7749 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 7750 event = sctp_skb2event(skb); 7751 if (event->asoc == assoc) { 7752 __skb_unlink(skb, &oldsk->sk_receive_queue); 7753 __skb_queue_tail(&newsk->sk_receive_queue, skb); 7754 sctp_skb_set_owner_r_frag(skb, newsk); 7755 } 7756 } 7757 7758 /* Clean up any messages pending delivery due to partial 7759 * delivery. Three cases: 7760 * 1) No partial deliver; no work. 7761 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 7762 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 7763 */ 7764 skb_queue_head_init(&newsp->pd_lobby); 7765 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 7766 7767 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 7768 struct sk_buff_head *queue; 7769 7770 /* Decide which queue to move pd_lobby skbs to. */ 7771 if (assoc->ulpq.pd_mode) { 7772 queue = &newsp->pd_lobby; 7773 } else 7774 queue = &newsk->sk_receive_queue; 7775 7776 /* Walk through the pd_lobby, looking for skbs that 7777 * need moved to the new socket. 
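 * Only events belonging to the association being migrated are moved; any
 * other queued events stay behind on the old socket.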
7778 */ 7779 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 7780 event = sctp_skb2event(skb); 7781 if (event->asoc == assoc) { 7782 __skb_unlink(skb, &oldsp->pd_lobby); 7783 __skb_queue_tail(queue, skb); 7784 sctp_skb_set_owner_r_frag(skb, newsk); 7785 } 7786 } 7787 7788 /* Clear up any skbs waiting for the partial 7789 * delivery to finish. 7790 */ 7791 if (assoc->ulpq.pd_mode) 7792 sctp_clear_pd(oldsk, NULL); 7793 7794 } 7795 7796 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 7797 sctp_skb_set_owner_r_frag(skb, newsk); 7798 7799 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 7800 sctp_skb_set_owner_r_frag(skb, newsk); 7801 7802 /* Set the type of socket to indicate that it is peeled off from the 7803 * original UDP-style socket or created with the accept() call on a 7804 * TCP-style socket.. 7805 */ 7806 newsp->type = type; 7807 7808 /* Mark the new socket "in-use" by the user so that any packets 7809 * that may arrive on the association after we've moved it are 7810 * queued to the backlog. This prevents a potential race between 7811 * backlog processing on the old socket and new-packet processing 7812 * on the new socket. 7813 * 7814 * The caller has just allocated newsk so we can guarantee that other 7815 * paths won't try to lock it and then oldsk. 7816 */ 7817 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7818 sctp_assoc_migrate(assoc, newsk); 7819 7820 /* If the association on the newsk is already closed before accept() 7821 * is called, set RCV_SHUTDOWN flag. 7822 */ 7823 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) { 7824 newsk->sk_state = SCTP_SS_CLOSED; 7825 newsk->sk_shutdown |= RCV_SHUTDOWN; 7826 } else { 7827 newsk->sk_state = SCTP_SS_ESTABLISHED; 7828 } 7829 7830 release_sock(newsk); 7831 } 7832 7833 7834 /* This proto struct describes the ULP interface for SCTP. 
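 * Each handler below is invoked by the common socket layer when the
 * corresponding socket call is made on an IPPROTO_SCTP socket; for
 * example, connect(2) ends up in sctp_connect() and sendmsg(2) in
 * sctp_sendmsg().  See the usage sketch at the end of this file.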
*/ 7835 struct proto sctp_prot = { 7836 .name = "SCTP", 7837 .owner = THIS_MODULE, 7838 .close = sctp_close, 7839 .connect = sctp_connect, 7840 .disconnect = sctp_disconnect, 7841 .accept = sctp_accept, 7842 .ioctl = sctp_ioctl, 7843 .init = sctp_init_sock, 7844 .destroy = sctp_destroy_sock, 7845 .shutdown = sctp_shutdown, 7846 .setsockopt = sctp_setsockopt, 7847 .getsockopt = sctp_getsockopt, 7848 .sendmsg = sctp_sendmsg, 7849 .recvmsg = sctp_recvmsg, 7850 .bind = sctp_bind, 7851 .backlog_rcv = sctp_backlog_rcv, 7852 .hash = sctp_hash, 7853 .unhash = sctp_unhash, 7854 .get_port = sctp_get_port, 7855 .obj_size = sizeof(struct sctp_sock), 7856 .sysctl_mem = sysctl_sctp_mem, 7857 .sysctl_rmem = sysctl_sctp_rmem, 7858 .sysctl_wmem = sysctl_sctp_wmem, 7859 .memory_pressure = &sctp_memory_pressure, 7860 .enter_memory_pressure = sctp_enter_memory_pressure, 7861 .memory_allocated = &sctp_memory_allocated, 7862 .sockets_allocated = &sctp_sockets_allocated, 7863 }; 7864 7865 #if IS_ENABLED(CONFIG_IPV6) 7866 7867 #include <net/transp_v6.h> 7868 static void sctp_v6_destroy_sock(struct sock *sk) 7869 { 7870 sctp_destroy_sock(sk); 7871 inet6_destroy_sock(sk); 7872 } 7873 7874 struct proto sctpv6_prot = { 7875 .name = "SCTPv6", 7876 .owner = THIS_MODULE, 7877 .close = sctp_close, 7878 .connect = sctp_connect, 7879 .disconnect = sctp_disconnect, 7880 .accept = sctp_accept, 7881 .ioctl = sctp_ioctl, 7882 .init = sctp_init_sock, 7883 .destroy = sctp_v6_destroy_sock, 7884 .shutdown = sctp_shutdown, 7885 .setsockopt = sctp_setsockopt, 7886 .getsockopt = sctp_getsockopt, 7887 .sendmsg = sctp_sendmsg, 7888 .recvmsg = sctp_recvmsg, 7889 .bind = sctp_bind, 7890 .backlog_rcv = sctp_backlog_rcv, 7891 .hash = sctp_hash, 7892 .unhash = sctp_unhash, 7893 .get_port = sctp_get_port, 7894 .obj_size = sizeof(struct sctp6_sock), 7895 .sysctl_mem = sysctl_sctp_mem, 7896 .sysctl_rmem = sysctl_sctp_rmem, 7897 .sysctl_wmem = sysctl_sctp_wmem, 7898 .memory_pressure = &sctp_memory_pressure, 7899 .enter_memory_pressure = sctp_enter_memory_pressure, 7900 .memory_allocated = &sctp_memory_allocated, 7901 .sockets_allocated = &sctp_sockets_allocated, 7902 }; 7903 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7904
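/*
 * Illustrative userspace sketch (kept under #if 0 so it is never built as
 * part of the kernel): a minimal one-to-one (TCP-style) SCTP server that
 * exercises the proto operations registered above.  socket() reaches
 * sctp_init_sock(), bind() reaches sctp_bind(), accept() reaches
 * sctp_accept() -- which, in this file, waits in sctp_wait_for_accept()
 * and sets up the new socket via sctp_sock_migrate() -- and
 * recv()/send()/close() reach sctp_recvmsg(), sctp_sendmsg() and
 * sctp_close().  All error handling is omitted and the function name is
 * purely illustrative.
 */
#if 0
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void sctp_one_to_one_server_once(unsigned short port)
{
	struct sockaddr_in addr;
	char buf[1024];
	ssize_t n;
	int lfd, cfd;

	lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 5);

	/* Blocks until an association is available on the listener. */
	cfd = accept(lfd, NULL, NULL);

	/* Echo a single message back to the peer. */
	n = recv(cfd, buf, sizeof(buf), 0);
	if (n > 0)
		send(cfd, buf, n, 0);

	close(cfd);
	close(lfd);
}
#endif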