/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/file.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
			      struct sctp_association *, sctp_socket_type_t);

extern struct kmem_cache *sctp_bucket_cachep;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];

static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}
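
/* Illustration (not built; the 100-byte size below is hypothetical): per the
 * comment above, each queued data chunk charges the association's sndbuf for
 * roughly the user payload plus the fixed skb and chunk overheads.  A single
 * 100-byte user message carried in one chunk would thus account for
 *
 *	sndbuf_used += 100 + sizeof(struct sk_buff) + sizeof(struct sctp_chunk);
 *
 * and the matching destructor (sctp_wfree) is expected to give the same
 * amount back when the skb is freed, making the space writable again.
 */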

/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					struct sockaddr_storage *addr,
					sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_transport *transport;
	union sctp_addr *laddr = (union sctp_addr *)addr;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd      - the socket descriptor returned by socket().
 *   addr    - the address structure (struct sockaddr_in or struct
 *             sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	sctp_lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	sctp_release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);
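
/* Example (userspace, illustrative only; not part of this file): a typical
 * UDP-style bind as described in API 3.1.2 above.  The port and wildcard
 * address are placeholders; sd would come from
 * socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP).
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int bind_any(int sd)
 *	{
 *		struct sockaddr_in addr;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sin_family = AF_INET;
 *		addr.sin_port = htons(5000);
 *		addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *
 *		return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 *	}
 *
 * Binding a second time on the same socket fails with EINVAL, as enforced
 * in sctp_bind() above.
 */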

/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < PROT_SOCK &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		af->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF.
 * Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				dst_release(trans->dst);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}

/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind_addr() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			dst_release(transport->dst);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * addressing, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause an
 * endpoint to send the appropriate message to the peer to change the
 * peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel.
 * Then we do the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}
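
/* Example (userspace, illustrative only; not part of this file): adding two
 * IPv4 addresses to a one-to-many socket via the lksctp-tools sctp_bindx()
 * wrapper, which ends up in sctp_setsockopt_bindx() above.  Addresses and
 * port are placeholders; both entries must carry the same port, and mixed
 * sockaddr_in/sockaddr_in6 entries would simply be packed back to back.
 *
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/sctp.h>
 *
 *	int add_two_addrs(int sd)
 *	{
 *		struct sockaddr_in addrs[2];
 *		int i;
 *
 *		memset(addrs, 0, sizeof(addrs));
 *		for (i = 0; i < 2; i++) {
 *			addrs[i].sin_family = AF_INET;
 *			addrs[i].sin_port = htons(5000);
 *		}
 *		inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
 *		inet_pton(AF_INET, "198.51.100.1", &addrs[1].sin_addr);
 *
 *		return sctp_bindx(sd, (struct sockaddr *)addrs, 2,
 *				  SCTP_BINDX_ADD_ADDR);
 *	}
 */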

/* __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	struct sctp_af *af;
	sctp_scope_t scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port < PROT_SOCK &&
				    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	af = sctp_get_af_specific(sa_addr->sa.sa_family);
	af->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	err = sctp_wait_for_connect(asoc, &timeo);
	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
		*assoc_id = asoc->assoc_id;

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table.  Try to unhash it, just in case; it's a
		 * noop if it wasn't hashed, so we're safe.
		 */
		sctp_unhash_established(asoc);
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke __sctp_connect() on the sk.  This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id
 * is always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations.  The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member.  That way we can re-use the existing
 * code.
 */
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	if (len < sizeof(param))
		return -EINVAL;

	if (copy_from_user(&param, optval, sizeof(param)))
		return -EFAULT;

	err = __sctp_setsockopt_connectx(sk,
			(struct sockaddr __user *)param.addrs,
			param.addr_num, &assoc_id);

	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}
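
/* Example (userspace, illustrative only; not part of this file): connecting
 * to a multi-homed peer with the lksctp-tools sctp_connectx() wrapper, which
 * is tunneled through the setsockopt()/getsockopt() handlers above.  The
 * peer addresses and port are placeholders.
 *
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/sctp.h>
 *
 *	int connect_multihomed(int sd, sctp_assoc_t *assoc_id)
 *	{
 *		struct sockaddr_in peers[2];
 *		int i;
 *
 *		memset(peers, 0, sizeof(peers));
 *		for (i = 0; i < 2; i++) {
 *			peers[i].sin_family = AF_INET;
 *			peers[i].sin_port = htons(5000);
 *		}
 *		inet_pton(AF_INET, "192.0.2.10", &peers[0].sin_addr);
 *		inet_pton(AF_INET, "198.51.100.10", &peers[1].sin_addr);
 *
 *		return sctp_connectx(sd, (struct sockaddr *)peers, 2, assoc_id);
 *	}
 */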

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 * };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	sctp_lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_unhash_established(asoc);
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			if (chunk)
				sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	sctp_release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 */
	sctp_local_bh_disable();
	sctp_bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will do a sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	sctp_bh_unlock_sock(sk);
	sctp_local_bh_enable();

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}

/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
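
/* Example (userspace, illustrative only; not part of this file): the
 * SO_LINGER behaviour documented above sctp_close().  With l_onoff set and
 * l_linger zero, close() aborts the association instead of performing a
 * graceful shutdown; unread data queued on the socket has the same effect.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *
 *	int abortive_close(int sd)
 *	{
 *		struct linger lin;
 *
 *		memset(&lin, 0, sizeof(lin));
 *		lin.l_onoff = 1;
 *		lin.l_linger = 0;
 *		if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)) < 0)
 *			return -1;
 *
 *		return close(sd);	// sends ABORT rather than SHUTDOWN
 *	}
 */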

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);

static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	int err;
	sctp_scope_t scope;
	long timeo;
	__u16 sinfo_flags = 0;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs.  */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinfo = cmsgs.info;
	sinit = cmsgs.init;

	/* Did the user specify SNDRCVINFO?  */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	sctp_lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (!asoc) {
			/* If we could not find a matching association on the
			 * endpoint, make sure that it is not a TCP-style
			 * socket that already has an association or there is
			 * no peeled-off association on another socket.
			 */
			if ((sctp_style(sk, TCP) &&
			     sctp_sstate(sk, ESTABLISHED)) ||
			    sctp_endpoint_is_peeled_off(ep, &to)) {
				err = -EADDRNOTAVAIL;
				goto out_unlock;
			}
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || (sinit && !sinit->sinit_num_ostreams)) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
		if (!ep->base.bind_addr.port) {
			if (sctp_autobind(sk)) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			/*
			 * If an unprivileged user inherits a one-to-many
			 * style socket with open associations on a privileged
			 * port, it MAY be permitted to accept new associations,
			 * but it SHOULD NOT be permitted to open new
			 * associations.
			 */
			if (ep->base.bind_addr.port < PROT_SOCK &&
			    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
				err = -EACCES;
				goto out_unlock;
			}
		}

		scope = sctp_scope(&to);
		new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
		if (!new_asoc) {
			err = -ENOMEM;
			goto out_unlock;
		}
		asoc = new_asoc;
		err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
		if (err < 0) {
			err = -ENOMEM;
			goto out_free;
		}

		/* If the SCTP_INIT ancillary data is specified, set all
		 * the association init values accordingly.
		 */
		if (sinit) {
			if (sinit->sinit_num_ostreams) {
				asoc->c.sinit_num_ostreams =
					sinit->sinit_num_ostreams;
			}
			if (sinit->sinit_max_instreams) {
				asoc->c.sinit_max_instreams =
					sinit->sinit_max_instreams;
			}
			if (sinit->sinit_max_attempts) {
				asoc->max_init_attempts
					= sinit->sinit_max_attempts;
			}
			if (sinit->sinit_max_init_timeo) {
				asoc->max_init_timeo =
				 msecs_to_jiffies(sinit->sinit_max_init_timeo);
			}
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	/* ASSERT: we have a valid association at this point.  */
	pr_debug("%s: we have a valid association\n", __func__);

	if (!sinfo) {
		/* If the user didn't specify SNDRCVINFO, make up one with
		 * some defaults.
		 */
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = asoc->default_stream;
		default_sinfo.sinfo_flags = asoc->default_flags;
		default_sinfo.sinfo_ppid = asoc->default_ppid;
		default_sinfo.sinfo_context = asoc->default_context;
		default_sinfo.sinfo_timetolive = asoc->default_timetolive;
		default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
		sinfo = &default_sinfo;
	}

	/* API 7.1.7, the sndbuf size per association bounds the
	 * maximum size of data that can be sent in a single send call.
	 */
	if (msg_len > sk->sk_sndbuf) {
		err = -EMSGSIZE;
		goto out_free;
	}

	if (asoc->pmtu_pending)
		sctp_assoc_pending_pmtu(sk, asoc);

	/* If fragmentation is disabled and the message length exceeds the
	 * association fragmentation point, return EMSGSIZE.  The I-D
	 * does not specify what this error is, but this looks like
	 * a great fit.
	 */
	if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	/* Check for invalid stream. */
	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
		err = -EINVAL;
		goto out_free;
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	if (!sctp_wspace(asoc)) {
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err)
			goto out_free;
	}

	/* If an address is passed with the sendto/sendmsg call, it is used
	 * to override the primary destination address in the TCP model, or
	 * when SCTP_ADDR_OVER flag is set in the UDP model.
	 */
	if ((sctp_style(sk, TCP) && msg_name) ||
	    (sinfo_flags & SCTP_ADDR_OVER)) {
		chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
		if (!chunk_tp) {
			err = -EINVAL;
			goto out_free;
		}
	} else
		chunk_tp = NULL;

	/* Auto-connect, if we aren't connected already. */
*/ 1903 if (sctp_state(asoc, CLOSED)) { 1904 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1905 if (err < 0) 1906 goto out_free; 1907 1908 pr_debug("%s: we associated primitively\n", __func__); 1909 } 1910 1911 /* Break the message into multiple chunks of maximum size. */ 1912 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); 1913 if (IS_ERR(datamsg)) { 1914 err = PTR_ERR(datamsg); 1915 goto out_free; 1916 } 1917 1918 /* Now send the (possibly) fragmented message. */ 1919 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1920 sctp_chunk_hold(chunk); 1921 1922 /* Do accounting for the write space. */ 1923 sctp_set_owner_w(chunk); 1924 1925 chunk->transport = chunk_tp; 1926 } 1927 1928 /* Send it to the lower layers. Note: all chunks 1929 * must either fail or succeed. The lower layer 1930 * works that way today. Keep it that way or this 1931 * breaks. 1932 */ 1933 err = sctp_primitive_SEND(net, asoc, datamsg); 1934 /* Did the lower layer accept the chunk? */ 1935 if (err) { 1936 sctp_datamsg_free(datamsg); 1937 goto out_free; 1938 } 1939 1940 pr_debug("%s: we sent primitively\n", __func__); 1941 1942 sctp_datamsg_put(datamsg); 1943 err = msg_len; 1944 1945 /* If we are already past ASSOCIATE, the lower 1946 * layers are responsible for association cleanup. 1947 */ 1948 goto out_unlock; 1949 1950 out_free: 1951 if (new_asoc) { 1952 sctp_unhash_established(asoc); 1953 sctp_association_free(asoc); 1954 } 1955 out_unlock: 1956 sctp_release_sock(sk); 1957 1958 out_nounlock: 1959 return sctp_error(sk, msg_flags, err); 1960 1961 #if 0 1962 do_sock_err: 1963 if (msg_len) 1964 err = msg_len; 1965 else 1966 err = sock_error(sk); 1967 goto out; 1968 1969 do_interrupted: 1970 if (msg_len) 1971 err = msg_len; 1972 goto out; 1973 #endif /* 0 */ 1974 } 1975 1976 /* This is an extended version of skb_pull() that removes the data from the 1977 * start of a skb even when data is spread across the list of skb's in the 1978 * frag_list. len specifies the total amount of data that needs to be removed. 1979 * when 'len' bytes could be removed from the skb, it returns 0. 1980 * If 'len' exceeds the total skb length, it returns the no. of bytes that 1981 * could not be removed. 1982 */ 1983 static int sctp_skb_pull(struct sk_buff *skb, int len) 1984 { 1985 struct sk_buff *list; 1986 int skb_len = skb_headlen(skb); 1987 int rlen; 1988 1989 if (len <= skb_len) { 1990 __skb_pull(skb, len); 1991 return 0; 1992 } 1993 len -= skb_len; 1994 __skb_pull(skb, skb_len); 1995 1996 skb_walk_frags(skb, list) { 1997 rlen = sctp_skb_pull(list, len); 1998 skb->len -= (len-rlen); 1999 skb->data_len -= (len-rlen); 2000 2001 if (!rlen) 2002 return 0; 2003 2004 len = rlen; 2005 } 2006 2007 return len; 2008 } 2009 2010 /* API 3.1.3 recvmsg() - UDP Style Syntax 2011 * 2012 * ssize_t recvmsg(int socket, struct msghdr *message, 2013 * int flags); 2014 * 2015 * socket - the socket descriptor of the endpoint. 2016 * message - pointer to the msghdr structure which contains a single 2017 * user message and possibly some ancillary data. 2018 * 2019 * See Section 5 for complete description of the data 2020 * structures. 2021 * 2022 * flags - flags sent or received with the user message, see Section 2023 * 5 for complete description of the flags. 
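 *
 * A hedged userspace sketch (assumes the lksctp-tools helper
 * sctp_recvmsg(); sd is an existing SCTP socket; error handling
 * omitted):
 *
 *	char buf[2048];
 *	struct sockaddr_in from;
 *	socklen_t fromlen = sizeof(from);
 *	struct sctp_sndrcvinfo sinfo;
 *	int flags = 0;
 *	int n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf), (struct sockaddr *)&from,
 *			 &fromlen, &sinfo, &flags);
 *
 * On return, MSG_NOTIFICATION in flags marks an event rather than user
 * data, and a cleared MSG_EOR means only part of a message fit in buf.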
2024 */ 2025 static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); 2026 2027 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, 2028 struct msghdr *msg, size_t len, int noblock, 2029 int flags, int *addr_len) 2030 { 2031 struct sctp_ulpevent *event = NULL; 2032 struct sctp_sock *sp = sctp_sk(sk); 2033 struct sk_buff *skb; 2034 int copied; 2035 int err = 0; 2036 int skb_len; 2037 2038 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2039 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2040 addr_len); 2041 2042 sctp_lock_sock(sk); 2043 2044 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2045 err = -ENOTCONN; 2046 goto out; 2047 } 2048 2049 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2050 if (!skb) 2051 goto out; 2052 2053 /* Get the total length of the skb including any skb's in the 2054 * frag_list. 2055 */ 2056 skb_len = skb->len; 2057 2058 copied = skb_len; 2059 if (copied > len) 2060 copied = len; 2061 2062 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 2063 2064 event = sctp_skb2event(skb); 2065 2066 if (err) 2067 goto out_free; 2068 2069 sock_recv_ts_and_drops(msg, sk, skb); 2070 if (sctp_ulpevent_is_notification(event)) { 2071 msg->msg_flags |= MSG_NOTIFICATION; 2072 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2073 } else { 2074 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2075 } 2076 2077 /* Check if we allow SCTP_SNDRCVINFO. */ 2078 if (sp->subscribe.sctp_data_io_event) 2079 sctp_ulpevent_read_sndrcvinfo(event, msg); 2080 #if 0 2081 /* FIXME: we should be calling IP/IPv6 layers. */ 2082 if (sk->sk_protinfo.af_inet.cmsg_flags) 2083 ip_cmsg_recv(msg, skb); 2084 #endif 2085 2086 err = copied; 2087 2088 /* If skb's length exceeds the user's buffer, update the skb and 2089 * push it back to the receive_queue so that the next call to 2090 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2091 */ 2092 if (skb_len > copied) { 2093 msg->msg_flags &= ~MSG_EOR; 2094 if (flags & MSG_PEEK) 2095 goto out_free; 2096 sctp_skb_pull(skb, copied); 2097 skb_queue_head(&sk->sk_receive_queue, skb); 2098 2099 /* When only partial message is copied to the user, increase 2100 * rwnd by that amount. If all the data in the skb is read, 2101 * rwnd is updated when the event is freed. 2102 */ 2103 if (!sctp_ulpevent_is_notification(event)) 2104 sctp_assoc_rwnd_increase(event->asoc, copied); 2105 goto out; 2106 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2107 (event->msg_flags & MSG_EOR)) 2108 msg->msg_flags |= MSG_EOR; 2109 else 2110 msg->msg_flags &= ~MSG_EOR; 2111 2112 out_free: 2113 if (flags & MSG_PEEK) { 2114 /* Release the skb reference acquired after peeking the skb in 2115 * sctp_skb_recv_datagram(). 2116 */ 2117 kfree_skb(skb); 2118 } else { 2119 /* Free the event which includes releasing the reference to 2120 * the owner of the skb, freeing the skb and updating the 2121 * rwnd. 2122 */ 2123 sctp_ulpevent_free(event); 2124 } 2125 out: 2126 sctp_release_sock(sk); 2127 return err; 2128 } 2129 2130 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2131 * 2132 * This option is a on/off flag. If enabled no SCTP message 2133 * fragmentation will be performed. Instead if a message being sent 2134 * exceeds the current PMTU size, the message will NOT be sent and 2135 * instead a error will be indicated to the user. 
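 *
 * A hedged userspace sketch (sd is an existing SCTP socket; error
 * handling omitted):
 *
 *	int on = 1;
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *		   &on, sizeof(on));
 *
 * With the flag set, a send larger than the association's fragmentation
 * point fails with EMSGSIZE instead of being fragmented.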
2136 */ 2137 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2138 char __user *optval, 2139 unsigned int optlen) 2140 { 2141 int val; 2142 2143 if (optlen < sizeof(int)) 2144 return -EINVAL; 2145 2146 if (get_user(val, (int __user *)optval)) 2147 return -EFAULT; 2148 2149 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2150 2151 return 0; 2152 } 2153 2154 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2155 unsigned int optlen) 2156 { 2157 struct sctp_association *asoc; 2158 struct sctp_ulpevent *event; 2159 2160 if (optlen > sizeof(struct sctp_event_subscribe)) 2161 return -EINVAL; 2162 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2163 return -EFAULT; 2164 2165 /* 2166 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2167 * if there is no data to be sent or retransmit, the stack will 2168 * immediately send up this notification. 2169 */ 2170 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2171 &sctp_sk(sk)->subscribe)) { 2172 asoc = sctp_id2assoc(sk, 0); 2173 2174 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2175 event = sctp_ulpevent_make_sender_dry_event(asoc, 2176 GFP_ATOMIC); 2177 if (!event) 2178 return -ENOMEM; 2179 2180 sctp_ulpq_tail_event(&asoc->ulpq, event); 2181 } 2182 } 2183 2184 return 0; 2185 } 2186 2187 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2188 * 2189 * This socket option is applicable to the UDP-style socket only. When 2190 * set it will cause associations that are idle for more than the 2191 * specified number of seconds to automatically close. An association 2192 * being idle is defined an association that has NOT sent or received 2193 * user data. The special value of '0' indicates that no automatic 2194 * close of any associations should be performed. The option expects an 2195 * integer defining the number of seconds of idle time before an 2196 * association is closed. 2197 */ 2198 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2199 unsigned int optlen) 2200 { 2201 struct sctp_sock *sp = sctp_sk(sk); 2202 2203 /* Applicable to UDP-style socket only */ 2204 if (sctp_style(sk, TCP)) 2205 return -EOPNOTSUPP; 2206 if (optlen != sizeof(int)) 2207 return -EINVAL; 2208 if (copy_from_user(&sp->autoclose, optval, optlen)) 2209 return -EFAULT; 2210 2211 return 0; 2212 } 2213 2214 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2215 * 2216 * Applications can enable or disable heartbeats for any peer address of 2217 * an association, modify an address's heartbeat interval, force a 2218 * heartbeat to be sent immediately, and adjust the address's maximum 2219 * number of retransmissions sent before an address is considered 2220 * unreachable. The following structure is used to access and modify an 2221 * address's parameters: 2222 * 2223 * struct sctp_paddrparams { 2224 * sctp_assoc_t spp_assoc_id; 2225 * struct sockaddr_storage spp_address; 2226 * uint32_t spp_hbinterval; 2227 * uint16_t spp_pathmaxrxt; 2228 * uint32_t spp_pathmtu; 2229 * uint32_t spp_sackdelay; 2230 * uint32_t spp_flags; 2231 * }; 2232 * 2233 * spp_assoc_id - (one-to-many style socket) This is filled in the 2234 * application, and identifies the association for 2235 * this query. 2236 * spp_address - This specifies which address is of interest. 2237 * spp_hbinterval - This contains the value of the heartbeat interval, 2238 * in milliseconds. If a value of zero 2239 * is present in this field then no changes are to 2240 * be made to this parameter. 
2241 * spp_pathmaxrxt - This contains the maximum number of
2242 * retransmissions before this address shall be
2243 * considered unreachable. If a value of zero
2244 * is present in this field then no changes are to
2245 * be made to this parameter.
2246 * spp_pathmtu - When Path MTU discovery is disabled the value
2247 * specified here will be the "fixed" path mtu.
2248 * Note that if the spp_address field is empty
2249 * then all associations on this address will
2250 * have this fixed path mtu set upon them.
2251 *
2252 * spp_sackdelay - When delayed sack is enabled, this value specifies
2253 * the number of milliseconds that sacks will be delayed
2254 * for. This value will apply to all addresses of an
2255 * association if the spp_address field is empty. Note
2256 * also, that if delayed sack is enabled and this
2257 * value is set to 0, no change is made to the last
2258 * recorded delayed sack timer value.
2259 *
2260 * spp_flags - These flags are used to control various features
2261 * on an association. The flag field may contain
2262 * zero or more of the following options.
2263 *
2264 * SPP_HB_ENABLE - Enable heartbeats on the
2265 * specified address. Note that if the address
2266 * field is empty all addresses for the association
2267 * have heartbeats enabled upon them.
2268 *
2269 * SPP_HB_DISABLE - Disable heartbeats on the
2270 * specified address. Note that if the address
2271 * field is empty all addresses for the association
2272 * will have their heartbeats disabled. Note also
2273 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
2274 * mutually exclusive; only one of these two should
2275 * be specified. Enabling both flags will have
2276 * undetermined results.
2277 *
2278 * SPP_HB_DEMAND - Request a user initiated heartbeat
2279 * to be made immediately.
2280 *
2281 * SPP_HB_TIME_IS_ZERO - Specifies that the time for
2282 * heartbeat delay is to be set to the value of 0
2283 * milliseconds.
2284 *
2285 * SPP_PMTUD_ENABLE - This field will enable PMTU
2286 * discovery upon the specified address. Note that
2287 * if the address field is empty then all addresses
2288 * on the association are affected.
2289 *
2290 * SPP_PMTUD_DISABLE - This field will disable PMTU
2291 * discovery upon the specified address. Note that
2292 * if the address field is empty then all addresses
2293 * on the association are affected. Note also that
2294 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
2295 * exclusive. Enabling both will have undetermined
2296 * results.
2297 *
2298 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2299 * on delayed sack. The time specified in spp_sackdelay
2300 * is used to specify the sack delay for this address. Note
2301 * that if spp_address is empty then all addresses will
2302 * enable delayed sack and take on the sack delay
2303 * value specified in spp_sackdelay.
2304 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2305 * off delayed sack. If the spp_address field is blank then
2306 * delayed sack is disabled for the entire association. Note
2307 * also that this field is mutually exclusive to
2308 * SPP_SACKDELAY_ENABLE; setting both will have undefined
2309 * results.
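 *
 * A hedged userspace sketch that enables heartbeats with a 5 second
 * interval on all peer addresses of one association (assoc_id is
 * assumed to have been learned elsewhere, e.g. from an
 * SCTP_ASSOC_CHANGE notification; error handling omitted):
 *
 *	struct sctp_paddrparams pp;
 *
 *	memset(&pp, 0, sizeof(pp));	// empty spp_address => whole assoc
 *	pp.spp_assoc_id = assoc_id;
 *	pp.spp_hbinterval = 5000;	// milliseconds
 *	pp.spp_flags = SPP_HB_ENABLE;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		   &pp, sizeof(pp));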
2310 */ 2311 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2312 struct sctp_transport *trans, 2313 struct sctp_association *asoc, 2314 struct sctp_sock *sp, 2315 int hb_change, 2316 int pmtud_change, 2317 int sackdelay_change) 2318 { 2319 int error; 2320 2321 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2322 struct net *net = sock_net(trans->asoc->base.sk); 2323 2324 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2325 if (error) 2326 return error; 2327 } 2328 2329 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2330 * this field is ignored. Note also that a value of zero indicates 2331 * the current setting should be left unchanged. 2332 */ 2333 if (params->spp_flags & SPP_HB_ENABLE) { 2334 2335 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2336 * set. This lets us use 0 value when this flag 2337 * is set. 2338 */ 2339 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2340 params->spp_hbinterval = 0; 2341 2342 if (params->spp_hbinterval || 2343 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2344 if (trans) { 2345 trans->hbinterval = 2346 msecs_to_jiffies(params->spp_hbinterval); 2347 } else if (asoc) { 2348 asoc->hbinterval = 2349 msecs_to_jiffies(params->spp_hbinterval); 2350 } else { 2351 sp->hbinterval = params->spp_hbinterval; 2352 } 2353 } 2354 } 2355 2356 if (hb_change) { 2357 if (trans) { 2358 trans->param_flags = 2359 (trans->param_flags & ~SPP_HB) | hb_change; 2360 } else if (asoc) { 2361 asoc->param_flags = 2362 (asoc->param_flags & ~SPP_HB) | hb_change; 2363 } else { 2364 sp->param_flags = 2365 (sp->param_flags & ~SPP_HB) | hb_change; 2366 } 2367 } 2368 2369 /* When Path MTU discovery is disabled the value specified here will 2370 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2371 * include the flag SPP_PMTUD_DISABLE for this field to have any 2372 * effect). 2373 */ 2374 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2375 if (trans) { 2376 trans->pathmtu = params->spp_pathmtu; 2377 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2378 } else if (asoc) { 2379 asoc->pathmtu = params->spp_pathmtu; 2380 sctp_frag_point(asoc, params->spp_pathmtu); 2381 } else { 2382 sp->pathmtu = params->spp_pathmtu; 2383 } 2384 } 2385 2386 if (pmtud_change) { 2387 if (trans) { 2388 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2389 (params->spp_flags & SPP_PMTUD_ENABLE); 2390 trans->param_flags = 2391 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2392 if (update) { 2393 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2394 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2395 } 2396 } else if (asoc) { 2397 asoc->param_flags = 2398 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2399 } else { 2400 sp->param_flags = 2401 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2402 } 2403 } 2404 2405 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2406 * value of this field is ignored. Note also that a value of zero 2407 * indicates the current setting should be left unchanged. 
2408 */
2409 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
2410 if (trans) {
2411 trans->sackdelay =
2412 msecs_to_jiffies(params->spp_sackdelay);
2413 } else if (asoc) {
2414 asoc->sackdelay =
2415 msecs_to_jiffies(params->spp_sackdelay);
2416 } else {
2417 sp->sackdelay = params->spp_sackdelay;
2418 }
2419 }
2420
2421 if (sackdelay_change) {
2422 if (trans) {
2423 trans->param_flags =
2424 (trans->param_flags & ~SPP_SACKDELAY) |
2425 sackdelay_change;
2426 } else if (asoc) {
2427 asoc->param_flags =
2428 (asoc->param_flags & ~SPP_SACKDELAY) |
2429 sackdelay_change;
2430 } else {
2431 sp->param_flags =
2432 (sp->param_flags & ~SPP_SACKDELAY) |
2433 sackdelay_change;
2434 }
2435 }
2436
2437 /* Note that a value of zero indicates the current setting should be
2438 * left unchanged.
2439 */
2440 if (params->spp_pathmaxrxt) {
2441 if (trans) {
2442 trans->pathmaxrxt = params->spp_pathmaxrxt;
2443 } else if (asoc) {
2444 asoc->pathmaxrxt = params->spp_pathmaxrxt;
2445 } else {
2446 sp->pathmaxrxt = params->spp_pathmaxrxt;
2447 }
2448 }
2449
2450 return 0;
2451 }
2452
2453 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2454 char __user *optval,
2455 unsigned int optlen)
2456 {
2457 struct sctp_paddrparams params;
2458 struct sctp_transport *trans = NULL;
2459 struct sctp_association *asoc = NULL;
2460 struct sctp_sock *sp = sctp_sk(sk);
2461 int error;
2462 int hb_change, pmtud_change, sackdelay_change;
2463
2464 if (optlen != sizeof(struct sctp_paddrparams))
2465 return -EINVAL;
2466
2467 if (copy_from_user(&params, optval, optlen))
2468 return -EFAULT;
2469
2470 /* Validate flags and value parameters. */
2471 hb_change = params.spp_flags & SPP_HB;
2472 pmtud_change = params.spp_flags & SPP_PMTUD;
2473 sackdelay_change = params.spp_flags & SPP_SACKDELAY;
2474
2475 if (hb_change == SPP_HB ||
2476 pmtud_change == SPP_PMTUD ||
2477 sackdelay_change == SPP_SACKDELAY ||
2478 params.spp_sackdelay > 500 ||
2479 (params.spp_pathmtu &&
2480 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2481 return -EINVAL;
2482
2483 /* If an address other than INADDR_ANY is specified, and
2484 * no transport is found, then the request is invalid.
2485 */
2486 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
2487 trans = sctp_addr_id2transport(sk, &params.spp_address,
2488 params.spp_assoc_id);
2489 if (!trans)
2490 return -EINVAL;
2491 }
2492
2493 /* Get association, if assoc_id != 0 and the socket is a one
2494 * to many style socket, and an association was not found, then
2495 * the id was invalid.
2496 */
2497 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
2498 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
2499 return -EINVAL;
2500
2501 /* Heartbeat demand can only be sent on a transport or
2502 * association, but not a socket.
2503 */
2504 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
2505 return -EINVAL;
2506
2507 /* Process parameters. */
2508 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2509 hb_change, pmtud_change,
2510 sackdelay_change);
2511
2512 if (error)
2513 return error;
2514
2515 /* If changes are for association, also apply parameters to each
2516 * transport.
2517 */
2518 if (!trans && asoc) {
2519 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2520 transports) {
2521 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2522 hb_change, pmtud_change,
2523 sackdelay_change);
2524 }
2525 }
2526
2527 return 0;
2528 }
2529
2530 /*
2531 * 7.1.23.
Get or set delayed ack timer (SCTP_DELAYED_SACK)
2532 *
2533 * This option will affect the way delayed acks are performed. This
2534 * option allows you to get or set the delayed ack time, in
2535 * milliseconds. It also allows changing the delayed ack frequency.
2536 * Changing the frequency to 1 disables the delayed sack algorithm. If
2537 * the assoc_id is 0, then this sets or gets the endpoint's default
2538 * values. If the assoc_id field is non-zero, then the set or get
2539 * affects the specified association for the one to many model (the
2540 * assoc_id field is ignored by the one to one model). Note that if
2541 * sack_delay or sack_freq are 0 when setting this option, then the
2542 * current values will remain unchanged.
2543 *
2544 * struct sctp_sack_info {
2545 * sctp_assoc_t sack_assoc_id;
2546 * uint32_t sack_delay;
2547 * uint32_t sack_freq;
2548 * };
2549 *
2550 * sack_assoc_id - This parameter indicates which association the user
2551 * is performing an action upon. Note that if this field's value is
2552 * zero then the endpoint's default value is changed (affecting future
2553 * associations only).
2554 *
2555 * sack_delay - This parameter contains the number of milliseconds that
2556 * the user is requesting the delayed ACK timer be set to. Note that
2557 * this value is defined in the standard to be between 200 and 500
2558 * milliseconds.
2559 *
2560 * sack_freq - This parameter contains the number of packets that must
2561 * be received before a sack is sent without waiting for the delay
2562 * timer to expire. The default value for this is 2, setting this
2563 * value to 1 will disable the delayed sack algorithm.
2564 */
2565
2566 static int sctp_setsockopt_delayed_ack(struct sock *sk,
2567 char __user *optval, unsigned int optlen)
2568 {
2569 struct sctp_sack_info params;
2570 struct sctp_transport *trans = NULL;
2571 struct sctp_association *asoc = NULL;
2572 struct sctp_sock *sp = sctp_sk(sk);
2573
2574 if (optlen == sizeof(struct sctp_sack_info)) {
2575 if (copy_from_user(&params, optval, optlen))
2576 return -EFAULT;
2577
2578 if (params.sack_delay == 0 && params.sack_freq == 0)
2579 return 0;
2580 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2581 pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
2582 pr_warn("Use struct sctp_sack_info instead\n");
2583 if (copy_from_user(&params, optval, optlen))
2584 return -EFAULT;
2585
2586 if (params.sack_delay == 0)
2587 params.sack_freq = 1;
2588 else
2589 params.sack_freq = 0;
2590 } else
2591 return -EINVAL;
2592
2593 /* Validate value parameter. */
2594 if (params.sack_delay > 500)
2595 return -EINVAL;
2596
2597 /* Get association, if sack_assoc_id != 0 and the socket is a one
2598 * to many style socket, and an association was not found, then
2599 * the id was invalid.
2600 */ 2601 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2602 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2603 return -EINVAL; 2604 2605 if (params.sack_delay) { 2606 if (asoc) { 2607 asoc->sackdelay = 2608 msecs_to_jiffies(params.sack_delay); 2609 asoc->param_flags = 2610 (asoc->param_flags & ~SPP_SACKDELAY) | 2611 SPP_SACKDELAY_ENABLE; 2612 } else { 2613 sp->sackdelay = params.sack_delay; 2614 sp->param_flags = 2615 (sp->param_flags & ~SPP_SACKDELAY) | 2616 SPP_SACKDELAY_ENABLE; 2617 } 2618 } 2619 2620 if (params.sack_freq == 1) { 2621 if (asoc) { 2622 asoc->param_flags = 2623 (asoc->param_flags & ~SPP_SACKDELAY) | 2624 SPP_SACKDELAY_DISABLE; 2625 } else { 2626 sp->param_flags = 2627 (sp->param_flags & ~SPP_SACKDELAY) | 2628 SPP_SACKDELAY_DISABLE; 2629 } 2630 } else if (params.sack_freq > 1) { 2631 if (asoc) { 2632 asoc->sackfreq = params.sack_freq; 2633 asoc->param_flags = 2634 (asoc->param_flags & ~SPP_SACKDELAY) | 2635 SPP_SACKDELAY_ENABLE; 2636 } else { 2637 sp->sackfreq = params.sack_freq; 2638 sp->param_flags = 2639 (sp->param_flags & ~SPP_SACKDELAY) | 2640 SPP_SACKDELAY_ENABLE; 2641 } 2642 } 2643 2644 /* If change is for association, also apply to each transport. */ 2645 if (asoc) { 2646 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2647 transports) { 2648 if (params.sack_delay) { 2649 trans->sackdelay = 2650 msecs_to_jiffies(params.sack_delay); 2651 trans->param_flags = 2652 (trans->param_flags & ~SPP_SACKDELAY) | 2653 SPP_SACKDELAY_ENABLE; 2654 } 2655 if (params.sack_freq == 1) { 2656 trans->param_flags = 2657 (trans->param_flags & ~SPP_SACKDELAY) | 2658 SPP_SACKDELAY_DISABLE; 2659 } else if (params.sack_freq > 1) { 2660 trans->sackfreq = params.sack_freq; 2661 trans->param_flags = 2662 (trans->param_flags & ~SPP_SACKDELAY) | 2663 SPP_SACKDELAY_ENABLE; 2664 } 2665 } 2666 } 2667 2668 return 0; 2669 } 2670 2671 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2672 * 2673 * Applications can specify protocol parameters for the default association 2674 * initialization. The option name argument to setsockopt() and getsockopt() 2675 * is SCTP_INITMSG. 2676 * 2677 * Setting initialization parameters is effective only on an unconnected 2678 * socket (for UDP-style sockets only future associations are effected 2679 * by the change). With TCP-style sockets, this option is inherited by 2680 * sockets derived from a listener socket. 2681 */ 2682 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2683 { 2684 struct sctp_initmsg sinit; 2685 struct sctp_sock *sp = sctp_sk(sk); 2686 2687 if (optlen != sizeof(struct sctp_initmsg)) 2688 return -EINVAL; 2689 if (copy_from_user(&sinit, optval, optlen)) 2690 return -EFAULT; 2691 2692 if (sinit.sinit_num_ostreams) 2693 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2694 if (sinit.sinit_max_instreams) 2695 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2696 if (sinit.sinit_max_attempts) 2697 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2698 if (sinit.sinit_max_init_timeo) 2699 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2700 2701 return 0; 2702 } 2703 2704 /* 2705 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2706 * 2707 * Applications that wish to use the sendto() system call may wish to 2708 * specify a default set of parameters that would normally be supplied 2709 * through the inclusion of ancillary data. 
This socket option allows 2710 * such an application to set the default sctp_sndrcvinfo structure. 2711 * The application that wishes to use this socket option simply passes 2712 * in to this call the sctp_sndrcvinfo structure defined in Section 2713 * 5.2.2) The input parameters accepted by this call include 2714 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2715 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2716 * to this call if the caller is using the UDP model. 2717 */ 2718 static int sctp_setsockopt_default_send_param(struct sock *sk, 2719 char __user *optval, 2720 unsigned int optlen) 2721 { 2722 struct sctp_sndrcvinfo info; 2723 struct sctp_association *asoc; 2724 struct sctp_sock *sp = sctp_sk(sk); 2725 2726 if (optlen != sizeof(struct sctp_sndrcvinfo)) 2727 return -EINVAL; 2728 if (copy_from_user(&info, optval, optlen)) 2729 return -EFAULT; 2730 2731 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2732 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2733 return -EINVAL; 2734 2735 if (asoc) { 2736 asoc->default_stream = info.sinfo_stream; 2737 asoc->default_flags = info.sinfo_flags; 2738 asoc->default_ppid = info.sinfo_ppid; 2739 asoc->default_context = info.sinfo_context; 2740 asoc->default_timetolive = info.sinfo_timetolive; 2741 } else { 2742 sp->default_stream = info.sinfo_stream; 2743 sp->default_flags = info.sinfo_flags; 2744 sp->default_ppid = info.sinfo_ppid; 2745 sp->default_context = info.sinfo_context; 2746 sp->default_timetolive = info.sinfo_timetolive; 2747 } 2748 2749 return 0; 2750 } 2751 2752 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2753 * 2754 * Requests that the local SCTP stack use the enclosed peer address as 2755 * the association primary. The enclosed address must be one of the 2756 * association peer's addresses. 2757 */ 2758 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2759 unsigned int optlen) 2760 { 2761 struct sctp_prim prim; 2762 struct sctp_transport *trans; 2763 2764 if (optlen != sizeof(struct sctp_prim)) 2765 return -EINVAL; 2766 2767 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2768 return -EFAULT; 2769 2770 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2771 if (!trans) 2772 return -EINVAL; 2773 2774 sctp_assoc_set_primary(trans->asoc, trans); 2775 2776 return 0; 2777 } 2778 2779 /* 2780 * 7.1.5 SCTP_NODELAY 2781 * 2782 * Turn on/off any Nagle-like algorithm. This means that packets are 2783 * generally sent as soon as possible and no unnecessary delays are 2784 * introduced, at the cost of more packets in the network. Expects an 2785 * integer boolean flag. 2786 */ 2787 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2788 unsigned int optlen) 2789 { 2790 int val; 2791 2792 if (optlen < sizeof(int)) 2793 return -EINVAL; 2794 if (get_user(val, (int __user *)optval)) 2795 return -EFAULT; 2796 2797 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2798 return 0; 2799 } 2800 2801 /* 2802 * 2803 * 7.1.1 SCTP_RTOINFO 2804 * 2805 * The protocol parameters used to initialize and bound retransmission 2806 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2807 * and modify these parameters. 2808 * All parameters are time values, in milliseconds. A value of 0, when 2809 * modifying the parameters, indicates that the current value should not 2810 * be changed. 
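 *
 * A hedged userspace sketch that widens the RTO bounds for the whole
 * endpoint (srto_assoc_id left at 0; values in milliseconds; error
 * handling omitted):
 *
 *	struct sctp_rtoinfo rto;
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_initial = 3000;
 *	rto.srto_min = 1000;
 *	rto.srto_max = 10000;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));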
2811 * 2812 */ 2813 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2814 { 2815 struct sctp_rtoinfo rtoinfo; 2816 struct sctp_association *asoc; 2817 2818 if (optlen != sizeof (struct sctp_rtoinfo)) 2819 return -EINVAL; 2820 2821 if (copy_from_user(&rtoinfo, optval, optlen)) 2822 return -EFAULT; 2823 2824 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2825 2826 /* Set the values to the specific association */ 2827 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2828 return -EINVAL; 2829 2830 if (asoc) { 2831 if (rtoinfo.srto_initial != 0) 2832 asoc->rto_initial = 2833 msecs_to_jiffies(rtoinfo.srto_initial); 2834 if (rtoinfo.srto_max != 0) 2835 asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max); 2836 if (rtoinfo.srto_min != 0) 2837 asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min); 2838 } else { 2839 /* If there is no association or the association-id = 0 2840 * set the values to the endpoint. 2841 */ 2842 struct sctp_sock *sp = sctp_sk(sk); 2843 2844 if (rtoinfo.srto_initial != 0) 2845 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2846 if (rtoinfo.srto_max != 0) 2847 sp->rtoinfo.srto_max = rtoinfo.srto_max; 2848 if (rtoinfo.srto_min != 0) 2849 sp->rtoinfo.srto_min = rtoinfo.srto_min; 2850 } 2851 2852 return 0; 2853 } 2854 2855 /* 2856 * 2857 * 7.1.2 SCTP_ASSOCINFO 2858 * 2859 * This option is used to tune the maximum retransmission attempts 2860 * of the association. 2861 * Returns an error if the new association retransmission value is 2862 * greater than the sum of the retransmission value of the peer. 2863 * See [SCTP] for more information. 2864 * 2865 */ 2866 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2867 { 2868 2869 struct sctp_assocparams assocparams; 2870 struct sctp_association *asoc; 2871 2872 if (optlen != sizeof(struct sctp_assocparams)) 2873 return -EINVAL; 2874 if (copy_from_user(&assocparams, optval, optlen)) 2875 return -EFAULT; 2876 2877 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2878 2879 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2880 return -EINVAL; 2881 2882 /* Set the values to the specific association */ 2883 if (asoc) { 2884 if (assocparams.sasoc_asocmaxrxt != 0) { 2885 __u32 path_sum = 0; 2886 int paths = 0; 2887 struct sctp_transport *peer_addr; 2888 2889 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 2890 transports) { 2891 path_sum += peer_addr->pathmaxrxt; 2892 paths++; 2893 } 2894 2895 /* Only validate asocmaxrxt if we have more than 2896 * one path/transport. We do this because path 2897 * retransmissions are only counted when we have more 2898 * then one path. 2899 */ 2900 if (paths > 1 && 2901 assocparams.sasoc_asocmaxrxt > path_sum) 2902 return -EINVAL; 2903 2904 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 2905 } 2906 2907 if (assocparams.sasoc_cookie_life != 0) 2908 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 2909 } else { 2910 /* Set the values to the endpoint */ 2911 struct sctp_sock *sp = sctp_sk(sk); 2912 2913 if (assocparams.sasoc_asocmaxrxt != 0) 2914 sp->assocparams.sasoc_asocmaxrxt = 2915 assocparams.sasoc_asocmaxrxt; 2916 if (assocparams.sasoc_cookie_life != 0) 2917 sp->assocparams.sasoc_cookie_life = 2918 assocparams.sasoc_cookie_life; 2919 } 2920 return 0; 2921 } 2922 2923 /* 2924 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 2925 * 2926 * This socket option is a boolean flag which turns on or off mapped V4 2927 * addresses. 
If this option is turned on and the socket is type 2928 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 2929 * If this option is turned off, then no mapping will be done of V4 2930 * addresses and a user will receive both PF_INET6 and PF_INET type 2931 * addresses on the socket. 2932 */ 2933 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 2934 { 2935 int val; 2936 struct sctp_sock *sp = sctp_sk(sk); 2937 2938 if (optlen < sizeof(int)) 2939 return -EINVAL; 2940 if (get_user(val, (int __user *)optval)) 2941 return -EFAULT; 2942 if (val) 2943 sp->v4mapped = 1; 2944 else 2945 sp->v4mapped = 0; 2946 2947 return 0; 2948 } 2949 2950 /* 2951 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 2952 * This option will get or set the maximum size to put in any outgoing 2953 * SCTP DATA chunk. If a message is larger than this size it will be 2954 * fragmented by SCTP into the specified size. Note that the underlying 2955 * SCTP implementation may fragment into smaller sized chunks when the 2956 * PMTU of the underlying association is smaller than the value set by 2957 * the user. The default value for this option is '0' which indicates 2958 * the user is NOT limiting fragmentation and only the PMTU will effect 2959 * SCTP's choice of DATA chunk size. Note also that values set larger 2960 * than the maximum size of an IP datagram will effectively let SCTP 2961 * control fragmentation (i.e. the same as setting this option to 0). 2962 * 2963 * The following structure is used to access and modify this parameter: 2964 * 2965 * struct sctp_assoc_value { 2966 * sctp_assoc_t assoc_id; 2967 * uint32_t assoc_value; 2968 * }; 2969 * 2970 * assoc_id: This parameter is ignored for one-to-one style sockets. 2971 * For one-to-many style sockets this parameter indicates which 2972 * association the user is performing an action upon. Note that if 2973 * this field's value is zero then the endpoints default value is 2974 * changed (effecting future associations only). 2975 * assoc_value: This parameter specifies the maximum size in bytes. 
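 *
 * A hedged userspace sketch that caps outgoing DATA chunks at 1200
 * bytes for the endpoint (assoc_id 0), using the structure form rather
 * than the deprecated bare int (error handling omitted):
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_value = 1200;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));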
2976 */ 2977 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 2978 { 2979 struct sctp_assoc_value params; 2980 struct sctp_association *asoc; 2981 struct sctp_sock *sp = sctp_sk(sk); 2982 int val; 2983 2984 if (optlen == sizeof(int)) { 2985 pr_warn("Use of int in maxseg socket option deprecated\n"); 2986 pr_warn("Use struct sctp_assoc_value instead\n"); 2987 if (copy_from_user(&val, optval, optlen)) 2988 return -EFAULT; 2989 params.assoc_id = 0; 2990 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2991 if (copy_from_user(¶ms, optval, optlen)) 2992 return -EFAULT; 2993 val = params.assoc_value; 2994 } else 2995 return -EINVAL; 2996 2997 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 2998 return -EINVAL; 2999 3000 asoc = sctp_id2assoc(sk, params.assoc_id); 3001 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3002 return -EINVAL; 3003 3004 if (asoc) { 3005 if (val == 0) { 3006 val = asoc->pathmtu; 3007 val -= sp->pf->af->net_header_len; 3008 val -= sizeof(struct sctphdr) + 3009 sizeof(struct sctp_data_chunk); 3010 } 3011 asoc->user_frag = val; 3012 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3013 } else { 3014 sp->user_frag = val; 3015 } 3016 3017 return 0; 3018 } 3019 3020 3021 /* 3022 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3023 * 3024 * Requests that the peer mark the enclosed address as the association 3025 * primary. The enclosed address must be one of the association's 3026 * locally bound addresses. The following structure is used to make a 3027 * set primary request: 3028 */ 3029 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3030 unsigned int optlen) 3031 { 3032 struct net *net = sock_net(sk); 3033 struct sctp_sock *sp; 3034 struct sctp_association *asoc = NULL; 3035 struct sctp_setpeerprim prim; 3036 struct sctp_chunk *chunk; 3037 struct sctp_af *af; 3038 int err; 3039 3040 sp = sctp_sk(sk); 3041 3042 if (!net->sctp.addip_enable) 3043 return -EPERM; 3044 3045 if (optlen != sizeof(struct sctp_setpeerprim)) 3046 return -EINVAL; 3047 3048 if (copy_from_user(&prim, optval, optlen)) 3049 return -EFAULT; 3050 3051 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3052 if (!asoc) 3053 return -EINVAL; 3054 3055 if (!asoc->peer.asconf_capable) 3056 return -EPERM; 3057 3058 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3059 return -EPERM; 3060 3061 if (!sctp_state(asoc, ESTABLISHED)) 3062 return -ENOTCONN; 3063 3064 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3065 if (!af) 3066 return -EINVAL; 3067 3068 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3069 return -EADDRNOTAVAIL; 3070 3071 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3072 return -EADDRNOTAVAIL; 3073 3074 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3075 chunk = sctp_make_asconf_set_prim(asoc, 3076 (union sctp_addr *)&prim.sspp_addr); 3077 if (!chunk) 3078 return -ENOMEM; 3079 3080 err = sctp_send_asconf(asoc, chunk); 3081 3082 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3083 3084 return err; 3085 } 3086 3087 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3088 unsigned int optlen) 3089 { 3090 struct sctp_setadaptation adaptation; 3091 3092 if (optlen != sizeof(struct sctp_setadaptation)) 3093 return -EINVAL; 3094 if (copy_from_user(&adaptation, optval, optlen)) 3095 return -EFAULT; 3096 3097 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3098 
3099 return 0; 3100 } 3101 3102 /* 3103 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3104 * 3105 * The context field in the sctp_sndrcvinfo structure is normally only 3106 * used when a failed message is retrieved holding the value that was 3107 * sent down on the actual send call. This option allows the setting of 3108 * a default context on an association basis that will be received on 3109 * reading messages from the peer. This is especially helpful in the 3110 * one-2-many model for an application to keep some reference to an 3111 * internal state machine that is processing messages on the 3112 * association. Note that the setting of this value only effects 3113 * received messages from the peer and does not effect the value that is 3114 * saved with outbound messages. 3115 */ 3116 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3117 unsigned int optlen) 3118 { 3119 struct sctp_assoc_value params; 3120 struct sctp_sock *sp; 3121 struct sctp_association *asoc; 3122 3123 if (optlen != sizeof(struct sctp_assoc_value)) 3124 return -EINVAL; 3125 if (copy_from_user(¶ms, optval, optlen)) 3126 return -EFAULT; 3127 3128 sp = sctp_sk(sk); 3129 3130 if (params.assoc_id != 0) { 3131 asoc = sctp_id2assoc(sk, params.assoc_id); 3132 if (!asoc) 3133 return -EINVAL; 3134 asoc->default_rcv_context = params.assoc_value; 3135 } else { 3136 sp->default_rcv_context = params.assoc_value; 3137 } 3138 3139 return 0; 3140 } 3141 3142 /* 3143 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3144 * 3145 * This options will at a minimum specify if the implementation is doing 3146 * fragmented interleave. Fragmented interleave, for a one to many 3147 * socket, is when subsequent calls to receive a message may return 3148 * parts of messages from different associations. Some implementations 3149 * may allow you to turn this value on or off. If so, when turned off, 3150 * no fragment interleave will occur (which will cause a head of line 3151 * blocking amongst multiple associations sharing the same one to many 3152 * socket). When this option is turned on, then each receive call may 3153 * come from a different association (thus the user must receive data 3154 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3155 * association each receive belongs to. 3156 * 3157 * This option takes a boolean value. A non-zero value indicates that 3158 * fragmented interleave is on. A value of zero indicates that 3159 * fragmented interleave is off. 3160 * 3161 * Note that it is important that an implementation that allows this 3162 * option to be turned on, have it off by default. Otherwise an unaware 3163 * application using the one to many model may become confused and act 3164 * incorrectly. 3165 */ 3166 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3167 char __user *optval, 3168 unsigned int optlen) 3169 { 3170 int val; 3171 3172 if (optlen != sizeof(int)) 3173 return -EINVAL; 3174 if (get_user(val, (int __user *)optval)) 3175 return -EFAULT; 3176 3177 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3178 3179 return 0; 3180 } 3181 3182 /* 3183 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3184 * (SCTP_PARTIAL_DELIVERY_POINT) 3185 * 3186 * This option will set or get the SCTP partial delivery point. This 3187 * point is the size of a message where the partial delivery API will be 3188 * invoked to help free up rwnd space for the peer. Setting this to a 3189 * lower value will cause partial deliveries to happen more often. 
The 3190 * calls argument is an integer that sets or gets the partial delivery 3191 * point. Note also that the call will fail if the user attempts to set 3192 * this value larger than the socket receive buffer size. 3193 * 3194 * Note that any single message having a length smaller than or equal to 3195 * the SCTP partial delivery point will be delivered in one single read 3196 * call as long as the user provided buffer is large enough to hold the 3197 * message. 3198 */ 3199 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3200 char __user *optval, 3201 unsigned int optlen) 3202 { 3203 u32 val; 3204 3205 if (optlen != sizeof(u32)) 3206 return -EINVAL; 3207 if (get_user(val, (int __user *)optval)) 3208 return -EFAULT; 3209 3210 /* Note: We double the receive buffer from what the user sets 3211 * it to be, also initial rwnd is based on rcvbuf/2. 3212 */ 3213 if (val > (sk->sk_rcvbuf >> 1)) 3214 return -EINVAL; 3215 3216 sctp_sk(sk)->pd_point = val; 3217 3218 return 0; /* is this the right error code? */ 3219 } 3220 3221 /* 3222 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3223 * 3224 * This option will allow a user to change the maximum burst of packets 3225 * that can be emitted by this association. Note that the default value 3226 * is 4, and some implementations may restrict this setting so that it 3227 * can only be lowered. 3228 * 3229 * NOTE: This text doesn't seem right. Do this on a socket basis with 3230 * future associations inheriting the socket value. 3231 */ 3232 static int sctp_setsockopt_maxburst(struct sock *sk, 3233 char __user *optval, 3234 unsigned int optlen) 3235 { 3236 struct sctp_assoc_value params; 3237 struct sctp_sock *sp; 3238 struct sctp_association *asoc; 3239 int val; 3240 int assoc_id = 0; 3241 3242 if (optlen == sizeof(int)) { 3243 pr_warn("Use of int in max_burst socket option deprecated\n"); 3244 pr_warn("Use struct sctp_assoc_value instead\n"); 3245 if (copy_from_user(&val, optval, optlen)) 3246 return -EFAULT; 3247 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3248 if (copy_from_user(¶ms, optval, optlen)) 3249 return -EFAULT; 3250 val = params.assoc_value; 3251 assoc_id = params.assoc_id; 3252 } else 3253 return -EINVAL; 3254 3255 sp = sctp_sk(sk); 3256 3257 if (assoc_id != 0) { 3258 asoc = sctp_id2assoc(sk, assoc_id); 3259 if (!asoc) 3260 return -EINVAL; 3261 asoc->max_burst = val; 3262 } else 3263 sp->max_burst = val; 3264 3265 return 0; 3266 } 3267 3268 /* 3269 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3270 * 3271 * This set option adds a chunk type that the user is requesting to be 3272 * received only in an authenticated way. Changes to the list of chunks 3273 * will only effect future associations on the socket. 3274 */ 3275 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3276 char __user *optval, 3277 unsigned int optlen) 3278 { 3279 struct net *net = sock_net(sk); 3280 struct sctp_authchunk val; 3281 3282 if (!net->sctp.auth_enable) 3283 return -EACCES; 3284 3285 if (optlen != sizeof(struct sctp_authchunk)) 3286 return -EINVAL; 3287 if (copy_from_user(&val, optval, optlen)) 3288 return -EFAULT; 3289 3290 switch (val.sauth_chunk) { 3291 case SCTP_CID_INIT: 3292 case SCTP_CID_INIT_ACK: 3293 case SCTP_CID_SHUTDOWN_COMPLETE: 3294 case SCTP_CID_AUTH: 3295 return -EINVAL; 3296 } 3297 3298 /* add this chunk id to the endpoint */ 3299 return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk); 3300 } 3301 3302 /* 3303 * 7.1.19. 
Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3304 * 3305 * This option gets or sets the list of HMAC algorithms that the local 3306 * endpoint requires the peer to use. 3307 */ 3308 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3309 char __user *optval, 3310 unsigned int optlen) 3311 { 3312 struct net *net = sock_net(sk); 3313 struct sctp_hmacalgo *hmacs; 3314 u32 idents; 3315 int err; 3316 3317 if (!net->sctp.auth_enable) 3318 return -EACCES; 3319 3320 if (optlen < sizeof(struct sctp_hmacalgo)) 3321 return -EINVAL; 3322 3323 hmacs= memdup_user(optval, optlen); 3324 if (IS_ERR(hmacs)) 3325 return PTR_ERR(hmacs); 3326 3327 idents = hmacs->shmac_num_idents; 3328 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3329 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3330 err = -EINVAL; 3331 goto out; 3332 } 3333 3334 err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs); 3335 out: 3336 kfree(hmacs); 3337 return err; 3338 } 3339 3340 /* 3341 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3342 * 3343 * This option will set a shared secret key which is used to build an 3344 * association shared key. 3345 */ 3346 static int sctp_setsockopt_auth_key(struct sock *sk, 3347 char __user *optval, 3348 unsigned int optlen) 3349 { 3350 struct net *net = sock_net(sk); 3351 struct sctp_authkey *authkey; 3352 struct sctp_association *asoc; 3353 int ret; 3354 3355 if (!net->sctp.auth_enable) 3356 return -EACCES; 3357 3358 if (optlen <= sizeof(struct sctp_authkey)) 3359 return -EINVAL; 3360 3361 authkey= memdup_user(optval, optlen); 3362 if (IS_ERR(authkey)) 3363 return PTR_ERR(authkey); 3364 3365 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3366 ret = -EINVAL; 3367 goto out; 3368 } 3369 3370 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3371 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3372 ret = -EINVAL; 3373 goto out; 3374 } 3375 3376 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3377 out: 3378 kzfree(authkey); 3379 return ret; 3380 } 3381 3382 /* 3383 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3384 * 3385 * This option will get or set the active shared key to be used to build 3386 * the association shared key. 3387 */ 3388 static int sctp_setsockopt_active_key(struct sock *sk, 3389 char __user *optval, 3390 unsigned int optlen) 3391 { 3392 struct net *net = sock_net(sk); 3393 struct sctp_authkeyid val; 3394 struct sctp_association *asoc; 3395 3396 if (!net->sctp.auth_enable) 3397 return -EACCES; 3398 3399 if (optlen != sizeof(struct sctp_authkeyid)) 3400 return -EINVAL; 3401 if (copy_from_user(&val, optval, optlen)) 3402 return -EFAULT; 3403 3404 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3405 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3406 return -EINVAL; 3407 3408 return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc, 3409 val.scact_keynumber); 3410 } 3411 3412 /* 3413 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3414 * 3415 * This set option will delete a shared secret key from use. 
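 *
 * A hedged userspace sketch that deletes endpoint key number 1
 * (scact_assoc_id 0; note that SCTP AUTH support must be enabled or
 * the call fails with EACCES; error handling omitted):
 *
 *	struct sctp_authkeyid kid;
 *
 *	memset(&kid, 0, sizeof(kid));
 *	kid.scact_keynumber = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_DELETE_KEY,
 *		   &kid, sizeof(kid));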
3416 */ 3417 static int sctp_setsockopt_del_key(struct sock *sk, 3418 char __user *optval, 3419 unsigned int optlen) 3420 { 3421 struct net *net = sock_net(sk); 3422 struct sctp_authkeyid val; 3423 struct sctp_association *asoc; 3424 3425 if (!net->sctp.auth_enable) 3426 return -EACCES; 3427 3428 if (optlen != sizeof(struct sctp_authkeyid)) 3429 return -EINVAL; 3430 if (copy_from_user(&val, optval, optlen)) 3431 return -EFAULT; 3432 3433 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3434 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3435 return -EINVAL; 3436 3437 return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc, 3438 val.scact_keynumber); 3439 3440 } 3441 3442 /* 3443 * 8.1.23 SCTP_AUTO_ASCONF 3444 * 3445 * This option will enable or disable the use of the automatic generation of 3446 * ASCONF chunks to add and delete addresses to an existing association. Note 3447 * that this option has two caveats namely: a) it only affects sockets that 3448 * are bound to all addresses available to the SCTP stack, and b) the system 3449 * administrator may have an overriding control that turns the ASCONF feature 3450 * off no matter what setting the socket option may have. 3451 * This option expects an integer boolean flag, where a non-zero value turns on 3452 * the option, and a zero value turns off the option. 3453 * Note. In this implementation, socket operation overrides default parameter 3454 * being set by sysctl as well as FreeBSD implementation 3455 */ 3456 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3457 unsigned int optlen) 3458 { 3459 int val; 3460 struct sctp_sock *sp = sctp_sk(sk); 3461 3462 if (optlen < sizeof(int)) 3463 return -EINVAL; 3464 if (get_user(val, (int __user *)optval)) 3465 return -EFAULT; 3466 if (!sctp_is_ep_boundall(sk) && val) 3467 return -EINVAL; 3468 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3469 return 0; 3470 3471 if (val == 0 && sp->do_auto_asconf) { 3472 list_del(&sp->auto_asconf_list); 3473 sp->do_auto_asconf = 0; 3474 } else if (val && !sp->do_auto_asconf) { 3475 list_add_tail(&sp->auto_asconf_list, 3476 &sock_net(sk)->sctp.auto_asconf_splist); 3477 sp->do_auto_asconf = 1; 3478 } 3479 return 0; 3480 } 3481 3482 3483 /* 3484 * SCTP_PEER_ADDR_THLDS 3485 * 3486 * This option allows us to alter the partially failed threshold for one or all 3487 * transports in an association. 
See Section 6.1 of: 3488 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3489 */ 3490 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3491 char __user *optval, 3492 unsigned int optlen) 3493 { 3494 struct sctp_paddrthlds val; 3495 struct sctp_transport *trans; 3496 struct sctp_association *asoc; 3497 3498 if (optlen < sizeof(struct sctp_paddrthlds)) 3499 return -EINVAL; 3500 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3501 sizeof(struct sctp_paddrthlds))) 3502 return -EFAULT; 3503 3504 3505 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3506 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3507 if (!asoc) 3508 return -ENOENT; 3509 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3510 transports) { 3511 if (val.spt_pathmaxrxt) 3512 trans->pathmaxrxt = val.spt_pathmaxrxt; 3513 trans->pf_retrans = val.spt_pathpfthld; 3514 } 3515 3516 if (val.spt_pathmaxrxt) 3517 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3518 asoc->pf_retrans = val.spt_pathpfthld; 3519 } else { 3520 trans = sctp_addr_id2transport(sk, &val.spt_address, 3521 val.spt_assoc_id); 3522 if (!trans) 3523 return -ENOENT; 3524 3525 if (val.spt_pathmaxrxt) 3526 trans->pathmaxrxt = val.spt_pathmaxrxt; 3527 trans->pf_retrans = val.spt_pathpfthld; 3528 } 3529 3530 return 0; 3531 } 3532 3533 /* API 6.2 setsockopt(), getsockopt() 3534 * 3535 * Applications use setsockopt() and getsockopt() to set or retrieve 3536 * socket options. Socket options are used to change the default 3537 * behavior of sockets calls. They are described in Section 7. 3538 * 3539 * The syntax is: 3540 * 3541 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3542 * int __user *optlen); 3543 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3544 * int optlen); 3545 * 3546 * sd - the socket descript. 3547 * level - set to IPPROTO_SCTP for all SCTP options. 3548 * optname - the option name. 3549 * optval - the buffer to store the value of the option. 3550 * optlen - the size of the buffer. 3551 */ 3552 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3553 char __user *optval, unsigned int optlen) 3554 { 3555 int retval = 0; 3556 3557 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3558 3559 /* I can hardly begin to describe how wrong this is. This is 3560 * so broken as to be worse than useless. The API draft 3561 * REALLY is NOT helpful here... I am not convinced that the 3562 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3563 * are at all well-founded. 3564 */ 3565 if (level != SOL_SCTP) { 3566 struct sctp_af *af = sctp_sk(sk)->pf->af; 3567 retval = af->setsockopt(sk, level, optname, optval, optlen); 3568 goto out_nounlock; 3569 } 3570 3571 sctp_lock_sock(sk); 3572 3573 switch (optname) { 3574 case SCTP_SOCKOPT_BINDX_ADD: 3575 /* 'optlen' is the size of the addresses buffer. */ 3576 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3577 optlen, SCTP_BINDX_ADD_ADDR); 3578 break; 3579 3580 case SCTP_SOCKOPT_BINDX_REM: 3581 /* 'optlen' is the size of the addresses buffer. */ 3582 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3583 optlen, SCTP_BINDX_REM_ADDR); 3584 break; 3585 3586 case SCTP_SOCKOPT_CONNECTX_OLD: 3587 /* 'optlen' is the size of the addresses buffer. 
*/ 3588 retval = sctp_setsockopt_connectx_old(sk, 3589 (struct sockaddr __user *)optval, 3590 optlen); 3591 break; 3592 3593 case SCTP_SOCKOPT_CONNECTX: 3594 /* 'optlen' is the size of the addresses buffer. */ 3595 retval = sctp_setsockopt_connectx(sk, 3596 (struct sockaddr __user *)optval, 3597 optlen); 3598 break; 3599 3600 case SCTP_DISABLE_FRAGMENTS: 3601 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3602 break; 3603 3604 case SCTP_EVENTS: 3605 retval = sctp_setsockopt_events(sk, optval, optlen); 3606 break; 3607 3608 case SCTP_AUTOCLOSE: 3609 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3610 break; 3611 3612 case SCTP_PEER_ADDR_PARAMS: 3613 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3614 break; 3615 3616 case SCTP_DELAYED_SACK: 3617 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3618 break; 3619 case SCTP_PARTIAL_DELIVERY_POINT: 3620 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3621 break; 3622 3623 case SCTP_INITMSG: 3624 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3625 break; 3626 case SCTP_DEFAULT_SEND_PARAM: 3627 retval = sctp_setsockopt_default_send_param(sk, optval, 3628 optlen); 3629 break; 3630 case SCTP_PRIMARY_ADDR: 3631 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3632 break; 3633 case SCTP_SET_PEER_PRIMARY_ADDR: 3634 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3635 break; 3636 case SCTP_NODELAY: 3637 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3638 break; 3639 case SCTP_RTOINFO: 3640 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3641 break; 3642 case SCTP_ASSOCINFO: 3643 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3644 break; 3645 case SCTP_I_WANT_MAPPED_V4_ADDR: 3646 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3647 break; 3648 case SCTP_MAXSEG: 3649 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3650 break; 3651 case SCTP_ADAPTATION_LAYER: 3652 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3653 break; 3654 case SCTP_CONTEXT: 3655 retval = sctp_setsockopt_context(sk, optval, optlen); 3656 break; 3657 case SCTP_FRAGMENT_INTERLEAVE: 3658 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3659 break; 3660 case SCTP_MAX_BURST: 3661 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3662 break; 3663 case SCTP_AUTH_CHUNK: 3664 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3665 break; 3666 case SCTP_HMAC_IDENT: 3667 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3668 break; 3669 case SCTP_AUTH_KEY: 3670 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3671 break; 3672 case SCTP_AUTH_ACTIVE_KEY: 3673 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3674 break; 3675 case SCTP_AUTH_DELETE_KEY: 3676 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3677 break; 3678 case SCTP_AUTO_ASCONF: 3679 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3680 break; 3681 case SCTP_PEER_ADDR_THLDS: 3682 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3683 break; 3684 default: 3685 retval = -ENOPROTOOPT; 3686 break; 3687 } 3688 3689 sctp_release_sock(sk); 3690 3691 out_nounlock: 3692 return retval; 3693 } 3694 3695 /* API 3.1.6 connect() - UDP Style Syntax 3696 * 3697 * An application may use the connect() call in the UDP model to initiate an 3698 * association without sending data. 
3699 * 3700 * The syntax is: 3701 * 3702 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3703 * 3704 * sd: the socket descriptor to have a new association added to. 3705 * 3706 * nam: the address structure (either struct sockaddr_in or struct 3707 * sockaddr_in6 defined in RFC2553 [7]). 3708 * 3709 * len: the size of the address. 3710 */ 3711 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3712 int addr_len) 3713 { 3714 int err = 0; 3715 struct sctp_af *af; 3716 3717 sctp_lock_sock(sk); 3718 3719 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3720 addr, addr_len); 3721 3722 /* Validate addr_len before calling common connect/connectx routine. */ 3723 af = sctp_get_af_specific(addr->sa_family); 3724 if (!af || addr_len < af->sockaddr_len) { 3725 err = -EINVAL; 3726 } else { 3727 /* Pass correct addr len to common routine (so it knows there 3728 * is only one address being passed. 3729 */ 3730 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3731 } 3732 3733 sctp_release_sock(sk); 3734 return err; 3735 } 3736 3737 /* FIXME: Write comments. */ 3738 static int sctp_disconnect(struct sock *sk, int flags) 3739 { 3740 return -EOPNOTSUPP; /* STUB */ 3741 } 3742 3743 /* 4.1.4 accept() - TCP Style Syntax 3744 * 3745 * Applications use accept() call to remove an established SCTP 3746 * association from the accept queue of the endpoint. A new socket 3747 * descriptor will be returned from accept() to represent the newly 3748 * formed association. 3749 */ 3750 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3751 { 3752 struct sctp_sock *sp; 3753 struct sctp_endpoint *ep; 3754 struct sock *newsk = NULL; 3755 struct sctp_association *asoc; 3756 long timeo; 3757 int error = 0; 3758 3759 sctp_lock_sock(sk); 3760 3761 sp = sctp_sk(sk); 3762 ep = sp->ep; 3763 3764 if (!sctp_style(sk, TCP)) { 3765 error = -EOPNOTSUPP; 3766 goto out; 3767 } 3768 3769 if (!sctp_sstate(sk, LISTENING)) { 3770 error = -EINVAL; 3771 goto out; 3772 } 3773 3774 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3775 3776 error = sctp_wait_for_accept(sk, timeo); 3777 if (error) 3778 goto out; 3779 3780 /* We treat the list of associations on the endpoint as the accept 3781 * queue and pick the first association on the list. 3782 */ 3783 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3784 3785 newsk = sp->pf->create_accept_sk(sk, asoc); 3786 if (!newsk) { 3787 error = -ENOMEM; 3788 goto out; 3789 } 3790 3791 /* Populate the fields of the newsk from the oldsk and migrate the 3792 * asoc to the newsk. 3793 */ 3794 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3795 3796 out: 3797 sctp_release_sock(sk); 3798 *err = error; 3799 return newsk; 3800 } 3801 3802 /* The SCTP ioctl handler. */ 3803 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3804 { 3805 int rc = -ENOTCONN; 3806 3807 sctp_lock_sock(sk); 3808 3809 /* 3810 * SEQPACKET-style sockets in LISTENING state are valid, for 3811 * SCTP, so only discard TCP-style sockets in LISTENING state. 3812 */ 3813 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3814 goto out; 3815 3816 switch (cmd) { 3817 case SIOCINQ: { 3818 struct sk_buff *skb; 3819 unsigned int amount = 0; 3820 3821 skb = skb_peek(&sk->sk_receive_queue); 3822 if (skb != NULL) { 3823 /* 3824 * We will only return the amount of this packet since 3825 * that is all that will be read. 
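 *
 * User space typically checks this with the generic SIOCINQ/FIONREAD
 * ioctl before reading, e.g. (illustrative sketch only):
 *
 *	int pending = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0)
 *		printf("next message is %d bytes\n", pending);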
3826 */ 3827 amount = skb->len; 3828 } 3829 rc = put_user(amount, (int __user *)arg); 3830 break; 3831 } 3832 default: 3833 rc = -ENOIOCTLCMD; 3834 break; 3835 } 3836 out: 3837 sctp_release_sock(sk); 3838 return rc; 3839 } 3840 3841 /* This is the function which gets called during socket creation to 3842 * initialized the SCTP-specific portion of the sock. 3843 * The sock structure should already be zero-filled memory. 3844 */ 3845 static int sctp_init_sock(struct sock *sk) 3846 { 3847 struct net *net = sock_net(sk); 3848 struct sctp_sock *sp; 3849 3850 pr_debug("%s: sk:%p\n", __func__, sk); 3851 3852 sp = sctp_sk(sk); 3853 3854 /* Initialize the SCTP per socket area. */ 3855 switch (sk->sk_type) { 3856 case SOCK_SEQPACKET: 3857 sp->type = SCTP_SOCKET_UDP; 3858 break; 3859 case SOCK_STREAM: 3860 sp->type = SCTP_SOCKET_TCP; 3861 break; 3862 default: 3863 return -ESOCKTNOSUPPORT; 3864 } 3865 3866 /* Initialize default send parameters. These parameters can be 3867 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 3868 */ 3869 sp->default_stream = 0; 3870 sp->default_ppid = 0; 3871 sp->default_flags = 0; 3872 sp->default_context = 0; 3873 sp->default_timetolive = 0; 3874 3875 sp->default_rcv_context = 0; 3876 sp->max_burst = net->sctp.max_burst; 3877 3878 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 3879 3880 /* Initialize default setup parameters. These parameters 3881 * can be modified with the SCTP_INITMSG socket option or 3882 * overridden by the SCTP_INIT CMSG. 3883 */ 3884 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 3885 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 3886 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 3887 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 3888 3889 /* Initialize default RTO related parameters. These parameters can 3890 * be modified for with the SCTP_RTOINFO socket option. 3891 */ 3892 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 3893 sp->rtoinfo.srto_max = net->sctp.rto_max; 3894 sp->rtoinfo.srto_min = net->sctp.rto_min; 3895 3896 /* Initialize default association related parameters. These parameters 3897 * can be modified with the SCTP_ASSOCINFO socket option. 3898 */ 3899 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 3900 sp->assocparams.sasoc_number_peer_destinations = 0; 3901 sp->assocparams.sasoc_peer_rwnd = 0; 3902 sp->assocparams.sasoc_local_rwnd = 0; 3903 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 3904 3905 /* Initialize default event subscriptions. By default, all the 3906 * options are off. 3907 */ 3908 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 3909 3910 /* Default Peer Address Parameters. These defaults can 3911 * be modified via SCTP_PEER_ADDR_PARAMS 3912 */ 3913 sp->hbinterval = net->sctp.hb_interval; 3914 sp->pathmaxrxt = net->sctp.max_retrans_path; 3915 sp->pathmtu = 0; // allow default discovery 3916 sp->sackdelay = net->sctp.sack_timeout; 3917 sp->sackfreq = 2; 3918 sp->param_flags = SPP_HB_ENABLE | 3919 SPP_PMTUD_ENABLE | 3920 SPP_SACKDELAY_ENABLE; 3921 3922 /* If enabled no SCTP message fragmentation will be performed. 3923 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 3924 */ 3925 sp->disable_fragments = 0; 3926 3927 /* Enable Nagle algorithm by default. */ 3928 sp->nodelay = 0; 3929 3930 /* Enable by default. */ 3931 sp->v4mapped = 1; 3932 3933 /* Auto-close idle associations after the configured 3934 * number of seconds. A value of 0 disables this 3935 * feature. 
Configure through the SCTP_AUTOCLOSE socket option, 3936 * for UDP-style sockets only. 3937 */ 3938 sp->autoclose = 0; 3939 3940 /* User specified fragmentation limit. */ 3941 sp->user_frag = 0; 3942 3943 sp->adaptation_ind = 0; 3944 3945 sp->pf = sctp_get_pf_specific(sk->sk_family); 3946 3947 /* Control variables for partial data delivery. */ 3948 atomic_set(&sp->pd_mode, 0); 3949 skb_queue_head_init(&sp->pd_lobby); 3950 sp->frag_interleave = 0; 3951 3952 /* Create a per socket endpoint structure. Even if we 3953 * change the data structure relationships, this may still 3954 * be useful for storing pre-connect address information. 3955 */ 3956 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 3957 if (!sp->ep) 3958 return -ENOMEM; 3959 3960 sp->hmac = NULL; 3961 3962 sk->sk_destruct = sctp_destruct_sock; 3963 3964 SCTP_DBG_OBJCNT_INC(sock); 3965 3966 local_bh_disable(); 3967 percpu_counter_inc(&sctp_sockets_allocated); 3968 sock_prot_inuse_add(net, sk->sk_prot, 1); 3969 if (net->sctp.default_auto_asconf) { 3970 list_add_tail(&sp->auto_asconf_list, 3971 &net->sctp.auto_asconf_splist); 3972 sp->do_auto_asconf = 1; 3973 } else 3974 sp->do_auto_asconf = 0; 3975 local_bh_enable(); 3976 3977 return 0; 3978 } 3979 3980 /* Cleanup any SCTP per socket resources. */ 3981 static void sctp_destroy_sock(struct sock *sk) 3982 { 3983 struct sctp_sock *sp; 3984 3985 pr_debug("%s: sk:%p\n", __func__, sk); 3986 3987 /* Release our hold on the endpoint. */ 3988 sp = sctp_sk(sk); 3989 /* This could happen during socket init, thus we bail out 3990 * early, since the rest of the below is not setup either. 3991 */ 3992 if (sp->ep == NULL) 3993 return; 3994 3995 if (sp->do_auto_asconf) { 3996 sp->do_auto_asconf = 0; 3997 list_del(&sp->auto_asconf_list); 3998 } 3999 sctp_endpoint_free(sp->ep); 4000 local_bh_disable(); 4001 percpu_counter_dec(&sctp_sockets_allocated); 4002 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4003 local_bh_enable(); 4004 } 4005 4006 /* Triggered when there are no references on the socket anymore */ 4007 static void sctp_destruct_sock(struct sock *sk) 4008 { 4009 struct sctp_sock *sp = sctp_sk(sk); 4010 4011 /* Free up the HMAC transform. */ 4012 crypto_free_hash(sp->hmac); 4013 4014 inet_sock_destruct(sk); 4015 } 4016 4017 /* API 4.1.7 shutdown() - TCP Style Syntax 4018 * int shutdown(int socket, int how); 4019 * 4020 * sd - the socket descriptor of the association to be closed. 4021 * how - Specifies the type of shutdown. The values are 4022 * as follows: 4023 * SHUT_RD 4024 * Disables further receive operations. No SCTP 4025 * protocol action is taken. 4026 * SHUT_WR 4027 * Disables further send operations, and initiates 4028 * the SCTP shutdown sequence. 4029 * SHUT_RDWR 4030 * Disables further send and receive operations 4031 * and initiates the SCTP shutdown sequence. 
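 *
 * A minimal sketch of the usual one-to-one (TCP-style) teardown,
 * assuming 'fd' is a connected SOCK_STREAM SCTP socket (illustrative
 * only): SHUT_WR starts the SHUTDOWN sequence, then the peer's
 * remaining data is drained before the descriptor is closed.
 *
 *	shutdown(fd, SHUT_WR);
 *	while (recv(fd, buf, sizeof(buf), 0) > 0)
 *		;
 *	close(fd);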
4032 */ 4033 static void sctp_shutdown(struct sock *sk, int how) 4034 { 4035 struct net *net = sock_net(sk); 4036 struct sctp_endpoint *ep; 4037 struct sctp_association *asoc; 4038 4039 if (!sctp_style(sk, TCP)) 4040 return; 4041 4042 if (how & SEND_SHUTDOWN) { 4043 ep = sctp_sk(sk)->ep; 4044 if (!list_empty(&ep->asocs)) { 4045 asoc = list_entry(ep->asocs.next, 4046 struct sctp_association, asocs); 4047 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4048 } 4049 } 4050 } 4051 4052 /* 7.2.1 Association Status (SCTP_STATUS) 4053 4054 * Applications can retrieve current status information about an 4055 * association, including association state, peer receiver window size, 4056 * number of unacked data chunks, and number of data chunks pending 4057 * receipt. This information is read-only. 4058 */ 4059 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4060 char __user *optval, 4061 int __user *optlen) 4062 { 4063 struct sctp_status status; 4064 struct sctp_association *asoc = NULL; 4065 struct sctp_transport *transport; 4066 sctp_assoc_t associd; 4067 int retval = 0; 4068 4069 if (len < sizeof(status)) { 4070 retval = -EINVAL; 4071 goto out; 4072 } 4073 4074 len = sizeof(status); 4075 if (copy_from_user(&status, optval, len)) { 4076 retval = -EFAULT; 4077 goto out; 4078 } 4079 4080 associd = status.sstat_assoc_id; 4081 asoc = sctp_id2assoc(sk, associd); 4082 if (!asoc) { 4083 retval = -EINVAL; 4084 goto out; 4085 } 4086 4087 transport = asoc->peer.primary_path; 4088 4089 status.sstat_assoc_id = sctp_assoc2id(asoc); 4090 status.sstat_state = asoc->state; 4091 status.sstat_rwnd = asoc->peer.rwnd; 4092 status.sstat_unackdata = asoc->unack_data; 4093 4094 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4095 status.sstat_instrms = asoc->c.sinit_max_instreams; 4096 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4097 status.sstat_fragmentation_point = asoc->frag_point; 4098 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4099 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4100 transport->af_specific->sockaddr_len); 4101 /* Map ipv4 address into v4-mapped-on-v6 address. */ 4102 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4103 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4104 status.sstat_primary.spinfo_state = transport->state; 4105 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4106 status.sstat_primary.spinfo_srtt = transport->srtt; 4107 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4108 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4109 4110 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4111 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4112 4113 if (put_user(len, optlen)) { 4114 retval = -EFAULT; 4115 goto out; 4116 } 4117 4118 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4119 __func__, len, status.sstat_state, status.sstat_rwnd, 4120 status.sstat_assoc_id); 4121 4122 if (copy_to_user(optval, &status, len)) { 4123 retval = -EFAULT; 4124 goto out; 4125 } 4126 4127 out: 4128 return retval; 4129 } 4130 4131 4132 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4133 * 4134 * Applications can retrieve information about a specific peer address 4135 * of an association, including its reachability state, congestion 4136 * window, and retransmission timer values. This information is 4137 * read-only. 
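 *
 * A minimal query sketch (illustrative only; 'peer' is assumed to be
 * a struct sockaddr_storage holding one of the peer's addresses and
 * 'assoc_id' the association of interest):
 *
 *	struct sctp_paddrinfo pinfo;
 *	socklen_t len = sizeof(pinfo);
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.spinfo_assoc_id = assoc_id;
 *	memcpy(&pinfo.spinfo_address, &peer, sizeof(peer));
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
 *			&pinfo, &len))
 *		printf("srtt %u cwnd %u\n", pinfo.spinfo_srtt,
 *		       pinfo.spinfo_cwnd);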
4138 */ 4139 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4140 char __user *optval, 4141 int __user *optlen) 4142 { 4143 struct sctp_paddrinfo pinfo; 4144 struct sctp_transport *transport; 4145 int retval = 0; 4146 4147 if (len < sizeof(pinfo)) { 4148 retval = -EINVAL; 4149 goto out; 4150 } 4151 4152 len = sizeof(pinfo); 4153 if (copy_from_user(&pinfo, optval, len)) { 4154 retval = -EFAULT; 4155 goto out; 4156 } 4157 4158 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4159 pinfo.spinfo_assoc_id); 4160 if (!transport) 4161 return -EINVAL; 4162 4163 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4164 pinfo.spinfo_state = transport->state; 4165 pinfo.spinfo_cwnd = transport->cwnd; 4166 pinfo.spinfo_srtt = transport->srtt; 4167 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4168 pinfo.spinfo_mtu = transport->pathmtu; 4169 4170 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4171 pinfo.spinfo_state = SCTP_ACTIVE; 4172 4173 if (put_user(len, optlen)) { 4174 retval = -EFAULT; 4175 goto out; 4176 } 4177 4178 if (copy_to_user(optval, &pinfo, len)) { 4179 retval = -EFAULT; 4180 goto out; 4181 } 4182 4183 out: 4184 return retval; 4185 } 4186 4187 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4188 * 4189 * This option is a on/off flag. If enabled no SCTP message 4190 * fragmentation will be performed. Instead if a message being sent 4191 * exceeds the current PMTU size, the message will NOT be sent and 4192 * instead a error will be indicated to the user. 4193 */ 4194 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4195 char __user *optval, int __user *optlen) 4196 { 4197 int val; 4198 4199 if (len < sizeof(int)) 4200 return -EINVAL; 4201 4202 len = sizeof(int); 4203 val = (sctp_sk(sk)->disable_fragments == 1); 4204 if (put_user(len, optlen)) 4205 return -EFAULT; 4206 if (copy_to_user(optval, &val, len)) 4207 return -EFAULT; 4208 return 0; 4209 } 4210 4211 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4212 * 4213 * This socket option is used to specify various notifications and 4214 * ancillary data the user wishes to receive. 4215 */ 4216 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4217 int __user *optlen) 4218 { 4219 if (len <= 0) 4220 return -EINVAL; 4221 if (len > sizeof(struct sctp_event_subscribe)) 4222 len = sizeof(struct sctp_event_subscribe); 4223 if (put_user(len, optlen)) 4224 return -EFAULT; 4225 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4226 return -EFAULT; 4227 return 0; 4228 } 4229 4230 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4231 * 4232 * This socket option is applicable to the UDP-style socket only. When 4233 * set it will cause associations that are idle for more than the 4234 * specified number of seconds to automatically close. An association 4235 * being idle is defined an association that has NOT sent or received 4236 * user data. The special value of '0' indicates that no automatic 4237 * close of any associations should be performed. The option expects an 4238 * integer defining the number of seconds of idle time before an 4239 * association is closed. 
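 *
 * For instance, closing associations that stay idle for two minutes
 * on a one-to-many socket (illustrative sketch only):
 *
 *	int secs = 120;
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));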
4240 */ 4241 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4242 { 4243 /* Applicable to UDP-style socket only */ 4244 if (sctp_style(sk, TCP)) 4245 return -EOPNOTSUPP; 4246 if (len < sizeof(int)) 4247 return -EINVAL; 4248 len = sizeof(int); 4249 if (put_user(len, optlen)) 4250 return -EFAULT; 4251 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4252 return -EFAULT; 4253 return 0; 4254 } 4255 4256 /* Helper routine to branch off an association to a new socket. */ 4257 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4258 { 4259 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4260 struct socket *sock; 4261 struct sctp_af *af; 4262 int err = 0; 4263 4264 if (!asoc) 4265 return -EINVAL; 4266 4267 /* An association cannot be branched off from an already peeled-off 4268 * socket, nor is this supported for tcp style sockets. 4269 */ 4270 if (!sctp_style(sk, UDP)) 4271 return -EINVAL; 4272 4273 /* Create a new socket. */ 4274 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4275 if (err < 0) 4276 return err; 4277 4278 sctp_copy_sock(sock->sk, sk, asoc); 4279 4280 /* Make peeled-off sockets more like 1-1 accepted sockets. 4281 * Set the daddr and initialize id to something more random 4282 */ 4283 af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family); 4284 af->to_sk_daddr(&asoc->peer.primary_addr, sk); 4285 4286 /* Populate the fields of the newsk from the oldsk and migrate the 4287 * asoc to the newsk. 4288 */ 4289 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4290 4291 *sockp = sock; 4292 4293 return err; 4294 } 4295 EXPORT_SYMBOL(sctp_do_peeloff); 4296 4297 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4298 { 4299 sctp_peeloff_arg_t peeloff; 4300 struct socket *newsock; 4301 struct file *newfile; 4302 int retval = 0; 4303 4304 if (len < sizeof(sctp_peeloff_arg_t)) 4305 return -EINVAL; 4306 len = sizeof(sctp_peeloff_arg_t); 4307 if (copy_from_user(&peeloff, optval, len)) 4308 return -EFAULT; 4309 4310 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4311 if (retval < 0) 4312 goto out; 4313 4314 /* Map the socket to an unused fd that can be returned to the user. */ 4315 retval = get_unused_fd_flags(0); 4316 if (retval < 0) { 4317 sock_release(newsock); 4318 goto out; 4319 } 4320 4321 newfile = sock_alloc_file(newsock, 0, NULL); 4322 if (unlikely(IS_ERR(newfile))) { 4323 put_unused_fd(retval); 4324 sock_release(newsock); 4325 return PTR_ERR(newfile); 4326 } 4327 4328 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4329 retval); 4330 4331 /* Return the fd mapped to the new socket. */ 4332 if (put_user(len, optlen)) { 4333 fput(newfile); 4334 put_unused_fd(retval); 4335 return -EFAULT; 4336 } 4337 peeloff.sd = retval; 4338 if (copy_to_user(optval, &peeloff, len)) { 4339 fput(newfile); 4340 put_unused_fd(retval); 4341 return -EFAULT; 4342 } 4343 fd_install(retval, newfile); 4344 out: 4345 return retval; 4346 } 4347 4348 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4349 * 4350 * Applications can enable or disable heartbeats for any peer address of 4351 * an association, modify an address's heartbeat interval, force a 4352 * heartbeat to be sent immediately, and adjust the address's maximum 4353 * number of retransmissions sent before an address is considered 4354 * unreachable. 
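 *
 * As an illustrative sketch (using the sctp_paddrparams structure and
 * the SPP_HB_DEMAND flag described below), an immediate heartbeat on
 * one known peer address could be requested with:
 *
 *	struct sctp_paddrparams p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.spp_assoc_id = assoc_id;
 *	memcpy(&p.spp_address, &peer, sizeof(peer));
 *	p.spp_flags = SPP_HB_DEMAND;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &p, sizeof(p));
 *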
The following structure is used to access and modify an 4355 * address's parameters: 4356 * 4357 * struct sctp_paddrparams { 4358 * sctp_assoc_t spp_assoc_id; 4359 * struct sockaddr_storage spp_address; 4360 * uint32_t spp_hbinterval; 4361 * uint16_t spp_pathmaxrxt; 4362 * uint32_t spp_pathmtu; 4363 * uint32_t spp_sackdelay; 4364 * uint32_t spp_flags; 4365 * }; 4366 * 4367 * spp_assoc_id - (one-to-many style socket) This is filled in the 4368 * application, and identifies the association for 4369 * this query. 4370 * spp_address - This specifies which address is of interest. 4371 * spp_hbinterval - This contains the value of the heartbeat interval, 4372 * in milliseconds. If a value of zero 4373 * is present in this field then no changes are to 4374 * be made to this parameter. 4375 * spp_pathmaxrxt - This contains the maximum number of 4376 * retransmissions before this address shall be 4377 * considered unreachable. If a value of zero 4378 * is present in this field then no changes are to 4379 * be made to this parameter. 4380 * spp_pathmtu - When Path MTU discovery is disabled the value 4381 * specified here will be the "fixed" path mtu. 4382 * Note that if the spp_address field is empty 4383 * then all associations on this address will 4384 * have this fixed path mtu set upon them. 4385 * 4386 * spp_sackdelay - When delayed sack is enabled, this value specifies 4387 * the number of milliseconds that sacks will be delayed 4388 * for. This value will apply to all addresses of an 4389 * association if the spp_address field is empty. Note 4390 * also, that if delayed sack is enabled and this 4391 * value is set to 0, no change is made to the last 4392 * recorded delayed sack timer value. 4393 * 4394 * spp_flags - These flags are used to control various features 4395 * on an association. The flag field may contain 4396 * zero or more of the following options. 4397 * 4398 * SPP_HB_ENABLE - Enable heartbeats on the 4399 * specified address. Note that if the address 4400 * field is empty all addresses for the association 4401 * have heartbeats enabled upon them. 4402 * 4403 * SPP_HB_DISABLE - Disable heartbeats on the 4404 * speicifed address. Note that if the address 4405 * field is empty all addresses for the association 4406 * will have their heartbeats disabled. Note also 4407 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4408 * mutually exclusive, only one of these two should 4409 * be specified. Enabling both fields will have 4410 * undetermined results. 4411 * 4412 * SPP_HB_DEMAND - Request a user initiated heartbeat 4413 * to be made immediately. 4414 * 4415 * SPP_PMTUD_ENABLE - This field will enable PMTU 4416 * discovery upon the specified address. Note that 4417 * if the address feild is empty then all addresses 4418 * on the association are effected. 4419 * 4420 * SPP_PMTUD_DISABLE - This field will disable PMTU 4421 * discovery upon the specified address. Note that 4422 * if the address feild is empty then all addresses 4423 * on the association are effected. Not also that 4424 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4425 * exclusive. Enabling both will have undetermined 4426 * results. 4427 * 4428 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4429 * on delayed sack. The time specified in spp_sackdelay 4430 * is used to specify the sack delay for this address. Note 4431 * that if spp_address is empty then all addresses will 4432 * enable delayed sack and take on the sack delay 4433 * value specified in spp_sackdelay. 
4434 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4435 * off delayed sack. If the spp_address field is blank then 4436 * delayed sack is disabled for the entire association. Note 4437 * also that this field is mutually exclusive to 4438 * SPP_SACKDELAY_ENABLE, setting both will have undefined 4439 * results. 4440 */ 4441 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4442 char __user *optval, int __user *optlen) 4443 { 4444 struct sctp_paddrparams params; 4445 struct sctp_transport *trans = NULL; 4446 struct sctp_association *asoc = NULL; 4447 struct sctp_sock *sp = sctp_sk(sk); 4448 4449 if (len < sizeof(struct sctp_paddrparams)) 4450 return -EINVAL; 4451 len = sizeof(struct sctp_paddrparams); 4452 if (copy_from_user(¶ms, optval, len)) 4453 return -EFAULT; 4454 4455 /* If an address other than INADDR_ANY is specified, and 4456 * no transport is found, then the request is invalid. 4457 */ 4458 if (!sctp_is_any(sk, ( union sctp_addr *)¶ms.spp_address)) { 4459 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 4460 params.spp_assoc_id); 4461 if (!trans) { 4462 pr_debug("%s: failed no transport\n", __func__); 4463 return -EINVAL; 4464 } 4465 } 4466 4467 /* Get association, if assoc_id != 0 and the socket is a one 4468 * to many style socket, and an association was not found, then 4469 * the id was invalid. 4470 */ 4471 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4472 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4473 pr_debug("%s: failed no association\n", __func__); 4474 return -EINVAL; 4475 } 4476 4477 if (trans) { 4478 /* Fetch transport values. */ 4479 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4480 params.spp_pathmtu = trans->pathmtu; 4481 params.spp_pathmaxrxt = trans->pathmaxrxt; 4482 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4483 4484 /*draft-11 doesn't say what to return in spp_flags*/ 4485 params.spp_flags = trans->param_flags; 4486 } else if (asoc) { 4487 /* Fetch association values. */ 4488 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4489 params.spp_pathmtu = asoc->pathmtu; 4490 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4491 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4492 4493 /*draft-11 doesn't say what to return in spp_flags*/ 4494 params.spp_flags = asoc->param_flags; 4495 } else { 4496 /* Fetch socket values. */ 4497 params.spp_hbinterval = sp->hbinterval; 4498 params.spp_pathmtu = sp->pathmtu; 4499 params.spp_sackdelay = sp->sackdelay; 4500 params.spp_pathmaxrxt = sp->pathmaxrxt; 4501 4502 /*draft-11 doesn't say what to return in spp_flags*/ 4503 params.spp_flags = sp->param_flags; 4504 } 4505 4506 if (copy_to_user(optval, ¶ms, len)) 4507 return -EFAULT; 4508 4509 if (put_user(len, optlen)) 4510 return -EFAULT; 4511 4512 return 0; 4513 } 4514 4515 /* 4516 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4517 * 4518 * This option will effect the way delayed acks are performed. This 4519 * option allows you to get or set the delayed ack time, in 4520 * milliseconds. It also allows changing the delayed ack frequency. 4521 * Changing the frequency to 1 disables the delayed sack algorithm. If 4522 * the assoc_id is 0, then this sets or gets the endpoints default 4523 * values. If the assoc_id field is non-zero, then the set or get 4524 * effects the specified association for the one to many model (the 4525 * assoc_id field is ignored by the one to one model). 
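 *
 * As an illustrative sketch (using the sctp_sack_info structure shown
 * below), an endpoint-wide delay of 100 ms with the default frequency
 * could be requested on the set side with:
 *
 *	struct sctp_sack_info si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.sack_delay = 100;
 *	si.sack_freq  = 2;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 *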
Note that if 4526 * sack_delay or sack_freq are 0 when setting this option, then the 4527 * current values will remain unchanged. 4528 * 4529 * struct sctp_sack_info { 4530 * sctp_assoc_t sack_assoc_id; 4531 * uint32_t sack_delay; 4532 * uint32_t sack_freq; 4533 * }; 4534 * 4535 * sack_assoc_id - This parameter, indicates which association the user 4536 * is performing an action upon. Note that if this field's value is 4537 * zero then the endpoints default value is changed (effecting future 4538 * associations only). 4539 * 4540 * sack_delay - This parameter contains the number of milliseconds that 4541 * the user is requesting the delayed ACK timer be set to. Note that 4542 * this value is defined in the standard to be between 200 and 500 4543 * milliseconds. 4544 * 4545 * sack_freq - This parameter contains the number of packets that must 4546 * be received before a sack is sent without waiting for the delay 4547 * timer to expire. The default value for this is 2, setting this 4548 * value to 1 will disable the delayed sack algorithm. 4549 */ 4550 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 4551 char __user *optval, 4552 int __user *optlen) 4553 { 4554 struct sctp_sack_info params; 4555 struct sctp_association *asoc = NULL; 4556 struct sctp_sock *sp = sctp_sk(sk); 4557 4558 if (len >= sizeof(struct sctp_sack_info)) { 4559 len = sizeof(struct sctp_sack_info); 4560 4561 if (copy_from_user(¶ms, optval, len)) 4562 return -EFAULT; 4563 } else if (len == sizeof(struct sctp_assoc_value)) { 4564 pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n"); 4565 pr_warn("Use struct sctp_sack_info instead\n"); 4566 if (copy_from_user(¶ms, optval, len)) 4567 return -EFAULT; 4568 } else 4569 return - EINVAL; 4570 4571 /* Get association, if sack_assoc_id != 0 and the socket is a one 4572 * to many style socket, and an association was not found, then 4573 * the id was invalid. 4574 */ 4575 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 4576 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 4577 return -EINVAL; 4578 4579 if (asoc) { 4580 /* Fetch association values. */ 4581 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 4582 params.sack_delay = jiffies_to_msecs( 4583 asoc->sackdelay); 4584 params.sack_freq = asoc->sackfreq; 4585 4586 } else { 4587 params.sack_delay = 0; 4588 params.sack_freq = 1; 4589 } 4590 } else { 4591 /* Fetch socket values. */ 4592 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 4593 params.sack_delay = sp->sackdelay; 4594 params.sack_freq = sp->sackfreq; 4595 } else { 4596 params.sack_delay = 0; 4597 params.sack_freq = 1; 4598 } 4599 } 4600 4601 if (copy_to_user(optval, ¶ms, len)) 4602 return -EFAULT; 4603 4604 if (put_user(len, optlen)) 4605 return -EFAULT; 4606 4607 return 0; 4608 } 4609 4610 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 4611 * 4612 * Applications can specify protocol parameters for the default association 4613 * initialization. The option name argument to setsockopt() and getsockopt() 4614 * is SCTP_INITMSG. 4615 * 4616 * Setting initialization parameters is effective only on an unconnected 4617 * socket (for UDP-style sockets only future associations are effected 4618 * by the change). With TCP-style sockets, this option is inherited by 4619 * sockets derived from a listener socket. 
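 *
 * A minimal sketch of raising the stream limits before associating
 * (illustrative only; the values are arbitrary):
 *
 *	struct sctp_initmsg im;
 *
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams  = 10;
 *	im.sinit_max_instreams = 10;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));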
4620 */ 4621 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4622 { 4623 if (len < sizeof(struct sctp_initmsg)) 4624 return -EINVAL; 4625 len = sizeof(struct sctp_initmsg); 4626 if (put_user(len, optlen)) 4627 return -EFAULT; 4628 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4629 return -EFAULT; 4630 return 0; 4631 } 4632 4633 4634 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4635 char __user *optval, int __user *optlen) 4636 { 4637 struct sctp_association *asoc; 4638 int cnt = 0; 4639 struct sctp_getaddrs getaddrs; 4640 struct sctp_transport *from; 4641 void __user *to; 4642 union sctp_addr temp; 4643 struct sctp_sock *sp = sctp_sk(sk); 4644 int addrlen; 4645 size_t space_left; 4646 int bytes_copied; 4647 4648 if (len < sizeof(struct sctp_getaddrs)) 4649 return -EINVAL; 4650 4651 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4652 return -EFAULT; 4653 4654 /* For UDP-style sockets, id specifies the association to query. */ 4655 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4656 if (!asoc) 4657 return -EINVAL; 4658 4659 to = optval + offsetof(struct sctp_getaddrs,addrs); 4660 space_left = len - offsetof(struct sctp_getaddrs,addrs); 4661 4662 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4663 transports) { 4664 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4665 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4666 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4667 if (space_left < addrlen) 4668 return -ENOMEM; 4669 if (copy_to_user(to, &temp, addrlen)) 4670 return -EFAULT; 4671 to += addrlen; 4672 cnt++; 4673 space_left -= addrlen; 4674 } 4675 4676 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4677 return -EFAULT; 4678 bytes_copied = ((char __user *)to) - optval; 4679 if (put_user(bytes_copied, optlen)) 4680 return -EFAULT; 4681 4682 return 0; 4683 } 4684 4685 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4686 size_t space_left, int *bytes_copied) 4687 { 4688 struct sctp_sockaddr_entry *addr; 4689 union sctp_addr temp; 4690 int cnt = 0; 4691 int addrlen; 4692 struct net *net = sock_net(sk); 4693 4694 rcu_read_lock(); 4695 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4696 if (!addr->valid) 4697 continue; 4698 4699 if ((PF_INET == sk->sk_family) && 4700 (AF_INET6 == addr->a.sa.sa_family)) 4701 continue; 4702 if ((PF_INET6 == sk->sk_family) && 4703 inet_v6_ipv6only(sk) && 4704 (AF_INET == addr->a.sa.sa_family)) 4705 continue; 4706 memcpy(&temp, &addr->a, sizeof(temp)); 4707 if (!temp.v4.sin_port) 4708 temp.v4.sin_port = htons(port); 4709 4710 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4711 &temp); 4712 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4713 if (space_left < addrlen) { 4714 cnt = -ENOMEM; 4715 break; 4716 } 4717 memcpy(to, &temp, addrlen); 4718 4719 to += addrlen; 4720 cnt ++; 4721 space_left -= addrlen; 4722 *bytes_copied += addrlen; 4723 } 4724 rcu_read_unlock(); 4725 4726 return cnt; 4727 } 4728 4729 4730 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4731 char __user *optval, int __user *optlen) 4732 { 4733 struct sctp_bind_addr *bp; 4734 struct sctp_association *asoc; 4735 int cnt = 0; 4736 struct sctp_getaddrs getaddrs; 4737 struct sctp_sockaddr_entry *addr; 4738 void __user *to; 4739 union sctp_addr temp; 4740 struct sctp_sock *sp = sctp_sk(sk); 4741 int addrlen; 4742 int err = 0; 4743 size_t space_left; 4744 int 
bytes_copied = 0; 4745 void *addrs; 4746 void *buf; 4747 4748 if (len < sizeof(struct sctp_getaddrs)) 4749 return -EINVAL; 4750 4751 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4752 return -EFAULT; 4753 4754 /* 4755 * For UDP-style sockets, id specifies the association to query. 4756 * If the id field is set to the value '0' then the locally bound 4757 * addresses are returned without regard to any particular 4758 * association. 4759 */ 4760 if (0 == getaddrs.assoc_id) { 4761 bp = &sctp_sk(sk)->ep->base.bind_addr; 4762 } else { 4763 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4764 if (!asoc) 4765 return -EINVAL; 4766 bp = &asoc->base.bind_addr; 4767 } 4768 4769 to = optval + offsetof(struct sctp_getaddrs,addrs); 4770 space_left = len - offsetof(struct sctp_getaddrs,addrs); 4771 4772 addrs = kmalloc(space_left, GFP_KERNEL); 4773 if (!addrs) 4774 return -ENOMEM; 4775 4776 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4777 * addresses from the global local address list. 4778 */ 4779 if (sctp_list_single_entry(&bp->address_list)) { 4780 addr = list_entry(bp->address_list.next, 4781 struct sctp_sockaddr_entry, list); 4782 if (sctp_is_any(sk, &addr->a)) { 4783 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4784 space_left, &bytes_copied); 4785 if (cnt < 0) { 4786 err = cnt; 4787 goto out; 4788 } 4789 goto copy_getaddrs; 4790 } 4791 } 4792 4793 buf = addrs; 4794 /* Protection on the bound address list is not needed since 4795 * in the socket option context we hold a socket lock and 4796 * thus the bound address list can't change. 4797 */ 4798 list_for_each_entry(addr, &bp->address_list, list) { 4799 memcpy(&temp, &addr->a, sizeof(temp)); 4800 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4801 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4802 if (space_left < addrlen) { 4803 err = -ENOMEM; /*fixme: right error?*/ 4804 goto out; 4805 } 4806 memcpy(buf, &temp, addrlen); 4807 buf += addrlen; 4808 bytes_copied += addrlen; 4809 cnt ++; 4810 space_left -= addrlen; 4811 } 4812 4813 copy_getaddrs: 4814 if (copy_to_user(to, addrs, bytes_copied)) { 4815 err = -EFAULT; 4816 goto out; 4817 } 4818 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4819 err = -EFAULT; 4820 goto out; 4821 } 4822 if (put_user(bytes_copied, optlen)) 4823 err = -EFAULT; 4824 out: 4825 kfree(addrs); 4826 return err; 4827 } 4828 4829 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 4830 * 4831 * Requests that the local SCTP stack use the enclosed peer address as 4832 * the association primary. The enclosed address must be one of the 4833 * association peer's addresses. 
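 *
 * Roughly (illustrative sketch; 'peer' is assumed to hold one of the
 * peer's addresses for association 'assoc_id'):
 *
 *	struct sctp_prim prim;
 *
 *	memset(&prim, 0, sizeof(prim));
 *	prim.ssp_assoc_id = assoc_id;
 *	memcpy(&prim.ssp_addr, &peer, sizeof(peer));
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, sizeof(prim));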
4834 */ 4835 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 4836 char __user *optval, int __user *optlen) 4837 { 4838 struct sctp_prim prim; 4839 struct sctp_association *asoc; 4840 struct sctp_sock *sp = sctp_sk(sk); 4841 4842 if (len < sizeof(struct sctp_prim)) 4843 return -EINVAL; 4844 4845 len = sizeof(struct sctp_prim); 4846 4847 if (copy_from_user(&prim, optval, len)) 4848 return -EFAULT; 4849 4850 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 4851 if (!asoc) 4852 return -EINVAL; 4853 4854 if (!asoc->peer.primary_path) 4855 return -ENOTCONN; 4856 4857 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 4858 asoc->peer.primary_path->af_specific->sockaddr_len); 4859 4860 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, 4861 (union sctp_addr *)&prim.ssp_addr); 4862 4863 if (put_user(len, optlen)) 4864 return -EFAULT; 4865 if (copy_to_user(optval, &prim, len)) 4866 return -EFAULT; 4867 4868 return 0; 4869 } 4870 4871 /* 4872 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 4873 * 4874 * Requests that the local endpoint set the specified Adaptation Layer 4875 * Indication parameter for all future INIT and INIT-ACK exchanges. 4876 */ 4877 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 4878 char __user *optval, int __user *optlen) 4879 { 4880 struct sctp_setadaptation adaptation; 4881 4882 if (len < sizeof(struct sctp_setadaptation)) 4883 return -EINVAL; 4884 4885 len = sizeof(struct sctp_setadaptation); 4886 4887 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 4888 4889 if (put_user(len, optlen)) 4890 return -EFAULT; 4891 if (copy_to_user(optval, &adaptation, len)) 4892 return -EFAULT; 4893 4894 return 0; 4895 } 4896 4897 /* 4898 * 4899 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 4900 * 4901 * Applications that wish to use the sendto() system call may wish to 4902 * specify a default set of parameters that would normally be supplied 4903 * through the inclusion of ancillary data. This socket option allows 4904 * such an application to set the default sctp_sndrcvinfo structure. 4905 4906 4907 * The application that wishes to use this socket option simply passes 4908 * in to this call the sctp_sndrcvinfo structure defined in Section 4909 * 5.2.2) The input parameters accepted by this call include 4910 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 4911 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 4912 * to this call if the caller is using the UDP model. 4913 * 4914 * For getsockopt, it get the default sctp_sndrcvinfo structure. 
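 *
 * For instance, defaulting all sends on an association to stream 1
 * with a 5 second lifetime (illustrative sketch only):
 *
 *	struct sctp_sndrcvinfo info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.sinfo_assoc_id   = assoc_id;
 *	info.sinfo_stream     = 1;
 *	info.sinfo_timetolive = 5000;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		   &info, sizeof(info));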
4915 */ 4916 static int sctp_getsockopt_default_send_param(struct sock *sk, 4917 int len, char __user *optval, 4918 int __user *optlen) 4919 { 4920 struct sctp_sndrcvinfo info; 4921 struct sctp_association *asoc; 4922 struct sctp_sock *sp = sctp_sk(sk); 4923 4924 if (len < sizeof(struct sctp_sndrcvinfo)) 4925 return -EINVAL; 4926 4927 len = sizeof(struct sctp_sndrcvinfo); 4928 4929 if (copy_from_user(&info, optval, len)) 4930 return -EFAULT; 4931 4932 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 4933 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 4934 return -EINVAL; 4935 4936 if (asoc) { 4937 info.sinfo_stream = asoc->default_stream; 4938 info.sinfo_flags = asoc->default_flags; 4939 info.sinfo_ppid = asoc->default_ppid; 4940 info.sinfo_context = asoc->default_context; 4941 info.sinfo_timetolive = asoc->default_timetolive; 4942 } else { 4943 info.sinfo_stream = sp->default_stream; 4944 info.sinfo_flags = sp->default_flags; 4945 info.sinfo_ppid = sp->default_ppid; 4946 info.sinfo_context = sp->default_context; 4947 info.sinfo_timetolive = sp->default_timetolive; 4948 } 4949 4950 if (put_user(len, optlen)) 4951 return -EFAULT; 4952 if (copy_to_user(optval, &info, len)) 4953 return -EFAULT; 4954 4955 return 0; 4956 } 4957 4958 /* 4959 * 4960 * 7.1.5 SCTP_NODELAY 4961 * 4962 * Turn on/off any Nagle-like algorithm. This means that packets are 4963 * generally sent as soon as possible and no unnecessary delays are 4964 * introduced, at the cost of more packets in the network. Expects an 4965 * integer boolean flag. 4966 */ 4967 4968 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 4969 char __user *optval, int __user *optlen) 4970 { 4971 int val; 4972 4973 if (len < sizeof(int)) 4974 return -EINVAL; 4975 4976 len = sizeof(int); 4977 val = (sctp_sk(sk)->nodelay == 1); 4978 if (put_user(len, optlen)) 4979 return -EFAULT; 4980 if (copy_to_user(optval, &val, len)) 4981 return -EFAULT; 4982 return 0; 4983 } 4984 4985 /* 4986 * 4987 * 7.1.1 SCTP_RTOINFO 4988 * 4989 * The protocol parameters used to initialize and bound retransmission 4990 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 4991 * and modify these parameters. 4992 * All parameters are time values, in milliseconds. A value of 0, when 4993 * modifying the parameters, indicates that the current value should not 4994 * be changed. 4995 * 4996 */ 4997 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 4998 char __user *optval, 4999 int __user *optlen) { 5000 struct sctp_rtoinfo rtoinfo; 5001 struct sctp_association *asoc; 5002 5003 if (len < sizeof (struct sctp_rtoinfo)) 5004 return -EINVAL; 5005 5006 len = sizeof(struct sctp_rtoinfo); 5007 5008 if (copy_from_user(&rtoinfo, optval, len)) 5009 return -EFAULT; 5010 5011 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5012 5013 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5014 return -EINVAL; 5015 5016 /* Values corresponding to the specific association. */ 5017 if (asoc) { 5018 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5019 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5020 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5021 } else { 5022 /* Values corresponding to the endpoint. 
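		 * These are the per-socket defaults seeded in
		 * sctp_init_sock() from net->sctp.rto_initial/rto_max/
		 * rto_min, possibly changed since via the SCTP_RTOINFO
		 * setsockopt.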
*/ 5023 struct sctp_sock *sp = sctp_sk(sk); 5024 5025 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5026 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5027 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5028 } 5029 5030 if (put_user(len, optlen)) 5031 return -EFAULT; 5032 5033 if (copy_to_user(optval, &rtoinfo, len)) 5034 return -EFAULT; 5035 5036 return 0; 5037 } 5038 5039 /* 5040 * 5041 * 7.1.2 SCTP_ASSOCINFO 5042 * 5043 * This option is used to tune the maximum retransmission attempts 5044 * of the association. 5045 * Returns an error if the new association retransmission value is 5046 * greater than the sum of the retransmission value of the peer. 5047 * See [SCTP] for more information. 5048 * 5049 */ 5050 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5051 char __user *optval, 5052 int __user *optlen) 5053 { 5054 5055 struct sctp_assocparams assocparams; 5056 struct sctp_association *asoc; 5057 struct list_head *pos; 5058 int cnt = 0; 5059 5060 if (len < sizeof (struct sctp_assocparams)) 5061 return -EINVAL; 5062 5063 len = sizeof(struct sctp_assocparams); 5064 5065 if (copy_from_user(&assocparams, optval, len)) 5066 return -EFAULT; 5067 5068 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5069 5070 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5071 return -EINVAL; 5072 5073 /* Values correspoinding to the specific association */ 5074 if (asoc) { 5075 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5076 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5077 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5078 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5079 5080 list_for_each(pos, &asoc->peer.transport_addr_list) { 5081 cnt ++; 5082 } 5083 5084 assocparams.sasoc_number_peer_destinations = cnt; 5085 } else { 5086 /* Values corresponding to the endpoint */ 5087 struct sctp_sock *sp = sctp_sk(sk); 5088 5089 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5090 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5091 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5092 assocparams.sasoc_cookie_life = 5093 sp->assocparams.sasoc_cookie_life; 5094 assocparams.sasoc_number_peer_destinations = 5095 sp->assocparams. 5096 sasoc_number_peer_destinations; 5097 } 5098 5099 if (put_user(len, optlen)) 5100 return -EFAULT; 5101 5102 if (copy_to_user(optval, &assocparams, len)) 5103 return -EFAULT; 5104 5105 return 0; 5106 } 5107 5108 /* 5109 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5110 * 5111 * This socket option is a boolean flag which turns on or off mapped V4 5112 * addresses. If this option is turned on and the socket is type 5113 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5114 * If this option is turned off, then no mapping will be done of V4 5115 * addresses and a user will receive both PF_INET6 and PF_INET type 5116 * addresses on the socket. 5117 */ 5118 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5119 char __user *optval, int __user *optlen) 5120 { 5121 int val; 5122 struct sctp_sock *sp = sctp_sk(sk); 5123 5124 if (len < sizeof(int)) 5125 return -EINVAL; 5126 5127 len = sizeof(int); 5128 val = sp->v4mapped; 5129 if (put_user(len, optlen)) 5130 return -EFAULT; 5131 if (copy_to_user(optval, &val, len)) 5132 return -EFAULT; 5133 5134 return 0; 5135 } 5136 5137 /* 5138 * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) 5139 * (chapter and verse is quoted at sctp_setsockopt_context()) 5140 */ 5141 static int sctp_getsockopt_context(struct sock *sk, int len, 5142 char __user *optval, int __user *optlen) 5143 { 5144 struct sctp_assoc_value params; 5145 struct sctp_sock *sp; 5146 struct sctp_association *asoc; 5147 5148 if (len < sizeof(struct sctp_assoc_value)) 5149 return -EINVAL; 5150 5151 len = sizeof(struct sctp_assoc_value); 5152 5153 if (copy_from_user(¶ms, optval, len)) 5154 return -EFAULT; 5155 5156 sp = sctp_sk(sk); 5157 5158 if (params.assoc_id != 0) { 5159 asoc = sctp_id2assoc(sk, params.assoc_id); 5160 if (!asoc) 5161 return -EINVAL; 5162 params.assoc_value = asoc->default_rcv_context; 5163 } else { 5164 params.assoc_value = sp->default_rcv_context; 5165 } 5166 5167 if (put_user(len, optlen)) 5168 return -EFAULT; 5169 if (copy_to_user(optval, ¶ms, len)) 5170 return -EFAULT; 5171 5172 return 0; 5173 } 5174 5175 /* 5176 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5177 * This option will get or set the maximum size to put in any outgoing 5178 * SCTP DATA chunk. If a message is larger than this size it will be 5179 * fragmented by SCTP into the specified size. Note that the underlying 5180 * SCTP implementation may fragment into smaller sized chunks when the 5181 * PMTU of the underlying association is smaller than the value set by 5182 * the user. The default value for this option is '0' which indicates 5183 * the user is NOT limiting fragmentation and only the PMTU will effect 5184 * SCTP's choice of DATA chunk size. Note also that values set larger 5185 * than the maximum size of an IP datagram will effectively let SCTP 5186 * control fragmentation (i.e. the same as setting this option to 0). 5187 * 5188 * The following structure is used to access and modify this parameter: 5189 * 5190 * struct sctp_assoc_value { 5191 * sctp_assoc_t assoc_id; 5192 * uint32_t assoc_value; 5193 * }; 5194 * 5195 * assoc_id: This parameter is ignored for one-to-one style sockets. 5196 * For one-to-many style sockets this parameter indicates which 5197 * association the user is performing an action upon. Note that if 5198 * this field's value is zero then the endpoints default value is 5199 * changed (effecting future associations only). 5200 * assoc_value: This parameter specifies the maximum size in bytes. 5201 */ 5202 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5203 char __user *optval, int __user *optlen) 5204 { 5205 struct sctp_assoc_value params; 5206 struct sctp_association *asoc; 5207 5208 if (len == sizeof(int)) { 5209 pr_warn("Use of int in maxseg socket option deprecated\n"); 5210 pr_warn("Use struct sctp_assoc_value instead\n"); 5211 params.assoc_id = 0; 5212 } else if (len >= sizeof(struct sctp_assoc_value)) { 5213 len = sizeof(struct sctp_assoc_value); 5214 if (copy_from_user(¶ms, optval, sizeof(params))) 5215 return -EFAULT; 5216 } else 5217 return -EINVAL; 5218 5219 asoc = sctp_id2assoc(sk, params.assoc_id); 5220 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5221 return -EINVAL; 5222 5223 if (asoc) 5224 params.assoc_value = asoc->frag_point; 5225 else 5226 params.assoc_value = sctp_sk(sk)->user_frag; 5227 5228 if (put_user(len, optlen)) 5229 return -EFAULT; 5230 if (len == sizeof(int)) { 5231 if (copy_to_user(optval, ¶ms.assoc_value, len)) 5232 return -EFAULT; 5233 } else { 5234 if (copy_to_user(optval, ¶ms, len)) 5235 return -EFAULT; 5236 } 5237 5238 return 0; 5239 } 5240 5241 /* 5242 * 7.1.24. 
Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5243 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5244 */ 5245 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5246 char __user *optval, int __user *optlen) 5247 { 5248 int val; 5249 5250 if (len < sizeof(int)) 5251 return -EINVAL; 5252 5253 len = sizeof(int); 5254 5255 val = sctp_sk(sk)->frag_interleave; 5256 if (put_user(len, optlen)) 5257 return -EFAULT; 5258 if (copy_to_user(optval, &val, len)) 5259 return -EFAULT; 5260 5261 return 0; 5262 } 5263 5264 /* 5265 * 7.1.25. Set or Get the sctp partial delivery point 5266 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5267 */ 5268 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5269 char __user *optval, 5270 int __user *optlen) 5271 { 5272 u32 val; 5273 5274 if (len < sizeof(u32)) 5275 return -EINVAL; 5276 5277 len = sizeof(u32); 5278 5279 val = sctp_sk(sk)->pd_point; 5280 if (put_user(len, optlen)) 5281 return -EFAULT; 5282 if (copy_to_user(optval, &val, len)) 5283 return -EFAULT; 5284 5285 return 0; 5286 } 5287 5288 /* 5289 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 5290 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5291 */ 5292 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5293 char __user *optval, 5294 int __user *optlen) 5295 { 5296 struct sctp_assoc_value params; 5297 struct sctp_sock *sp; 5298 struct sctp_association *asoc; 5299 5300 if (len == sizeof(int)) { 5301 pr_warn("Use of int in max_burst socket option deprecated\n"); 5302 pr_warn("Use struct sctp_assoc_value instead\n"); 5303 params.assoc_id = 0; 5304 } else if (len >= sizeof(struct sctp_assoc_value)) { 5305 len = sizeof(struct sctp_assoc_value); 5306 if (copy_from_user(¶ms, optval, len)) 5307 return -EFAULT; 5308 } else 5309 return -EINVAL; 5310 5311 sp = sctp_sk(sk); 5312 5313 if (params.assoc_id != 0) { 5314 asoc = sctp_id2assoc(sk, params.assoc_id); 5315 if (!asoc) 5316 return -EINVAL; 5317 params.assoc_value = asoc->max_burst; 5318 } else 5319 params.assoc_value = sp->max_burst; 5320 5321 if (len == sizeof(int)) { 5322 if (copy_to_user(optval, ¶ms.assoc_value, len)) 5323 return -EFAULT; 5324 } else { 5325 if (copy_to_user(optval, ¶ms, len)) 5326 return -EFAULT; 5327 } 5328 5329 return 0; 5330 5331 } 5332 5333 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5334 char __user *optval, int __user *optlen) 5335 { 5336 struct net *net = sock_net(sk); 5337 struct sctp_hmacalgo __user *p = (void __user *)optval; 5338 struct sctp_hmac_algo_param *hmacs; 5339 __u16 data_len = 0; 5340 u32 num_idents; 5341 5342 if (!net->sctp.auth_enable) 5343 return -EACCES; 5344 5345 hmacs = sctp_sk(sk)->ep->auth_hmacs_list; 5346 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5347 5348 if (len < sizeof(struct sctp_hmacalgo) + data_len) 5349 return -EINVAL; 5350 5351 len = sizeof(struct sctp_hmacalgo) + data_len; 5352 num_idents = data_len / sizeof(u16); 5353 5354 if (put_user(len, optlen)) 5355 return -EFAULT; 5356 if (put_user(num_idents, &p->shmac_num_idents)) 5357 return -EFAULT; 5358 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5359 return -EFAULT; 5360 return 0; 5361 } 5362 5363 static int sctp_getsockopt_active_key(struct sock *sk, int len, 5364 char __user *optval, int __user *optlen) 5365 { 5366 struct net *net = sock_net(sk); 5367 struct sctp_authkeyid val; 5368 struct sctp_association *asoc; 5369 5370 if 
(!net->sctp.auth_enable) 5371 return -EACCES; 5372 5373 if (len < sizeof(struct sctp_authkeyid)) 5374 return -EINVAL; 5375 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5376 return -EFAULT; 5377 5378 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5379 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5380 return -EINVAL; 5381 5382 if (asoc) 5383 val.scact_keynumber = asoc->active_key_id; 5384 else 5385 val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; 5386 5387 len = sizeof(struct sctp_authkeyid); 5388 if (put_user(len, optlen)) 5389 return -EFAULT; 5390 if (copy_to_user(optval, &val, len)) 5391 return -EFAULT; 5392 5393 return 0; 5394 } 5395 5396 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5397 char __user *optval, int __user *optlen) 5398 { 5399 struct net *net = sock_net(sk); 5400 struct sctp_authchunks __user *p = (void __user *)optval; 5401 struct sctp_authchunks val; 5402 struct sctp_association *asoc; 5403 struct sctp_chunks_param *ch; 5404 u32 num_chunks = 0; 5405 char __user *to; 5406 5407 if (!net->sctp.auth_enable) 5408 return -EACCES; 5409 5410 if (len < sizeof(struct sctp_authchunks)) 5411 return -EINVAL; 5412 5413 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5414 return -EFAULT; 5415 5416 to = p->gauth_chunks; 5417 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5418 if (!asoc) 5419 return -EINVAL; 5420 5421 ch = asoc->peer.peer_chunks; 5422 if (!ch) 5423 goto num; 5424 5425 /* See if the user provided enough room for all the data */ 5426 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5427 if (len < num_chunks) 5428 return -EINVAL; 5429 5430 if (copy_to_user(to, ch->chunks, num_chunks)) 5431 return -EFAULT; 5432 num: 5433 len = sizeof(struct sctp_authchunks) + num_chunks; 5434 if (put_user(len, optlen)) return -EFAULT; 5435 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5436 return -EFAULT; 5437 return 0; 5438 } 5439 5440 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5441 char __user *optval, int __user *optlen) 5442 { 5443 struct net *net = sock_net(sk); 5444 struct sctp_authchunks __user *p = (void __user *)optval; 5445 struct sctp_authchunks val; 5446 struct sctp_association *asoc; 5447 struct sctp_chunks_param *ch; 5448 u32 num_chunks = 0; 5449 char __user *to; 5450 5451 if (!net->sctp.auth_enable) 5452 return -EACCES; 5453 5454 if (len < sizeof(struct sctp_authchunks)) 5455 return -EINVAL; 5456 5457 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5458 return -EFAULT; 5459 5460 to = p->gauth_chunks; 5461 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5462 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5463 return -EINVAL; 5464 5465 if (asoc) 5466 ch = (struct sctp_chunks_param*)asoc->c.auth_chunks; 5467 else 5468 ch = sctp_sk(sk)->ep->auth_chunk_list; 5469 5470 if (!ch) 5471 goto num; 5472 5473 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5474 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5475 return -EINVAL; 5476 5477 if (copy_to_user(to, ch->chunks, num_chunks)) 5478 return -EFAULT; 5479 num: 5480 len = sizeof(struct sctp_authchunks) + num_chunks; 5481 if (put_user(len, optlen)) 5482 return -EFAULT; 5483 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5484 return -EFAULT; 5485 5486 return 0; 5487 } 5488 5489 /* 5490 * 8.2.5. 
Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5491 * This option gets the current number of associations that are attached 5492 * to a one-to-many style socket. The option value is an uint32_t. 5493 */ 5494 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5495 char __user *optval, int __user *optlen) 5496 { 5497 struct sctp_sock *sp = sctp_sk(sk); 5498 struct sctp_association *asoc; 5499 u32 val = 0; 5500 5501 if (sctp_style(sk, TCP)) 5502 return -EOPNOTSUPP; 5503 5504 if (len < sizeof(u32)) 5505 return -EINVAL; 5506 5507 len = sizeof(u32); 5508 5509 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5510 val++; 5511 } 5512 5513 if (put_user(len, optlen)) 5514 return -EFAULT; 5515 if (copy_to_user(optval, &val, len)) 5516 return -EFAULT; 5517 5518 return 0; 5519 } 5520 5521 /* 5522 * 8.1.23 SCTP_AUTO_ASCONF 5523 * See the corresponding setsockopt entry as description 5524 */ 5525 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5526 char __user *optval, int __user *optlen) 5527 { 5528 int val = 0; 5529 5530 if (len < sizeof(int)) 5531 return -EINVAL; 5532 5533 len = sizeof(int); 5534 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5535 val = 1; 5536 if (put_user(len, optlen)) 5537 return -EFAULT; 5538 if (copy_to_user(optval, &val, len)) 5539 return -EFAULT; 5540 return 0; 5541 } 5542 5543 /* 5544 * 8.2.6. Get the Current Identifiers of Associations 5545 * (SCTP_GET_ASSOC_ID_LIST) 5546 * 5547 * This option gets the current list of SCTP association identifiers of 5548 * the SCTP associations handled by a one-to-many style socket. 5549 */ 5550 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5551 char __user *optval, int __user *optlen) 5552 { 5553 struct sctp_sock *sp = sctp_sk(sk); 5554 struct sctp_association *asoc; 5555 struct sctp_assoc_ids *ids; 5556 u32 num = 0; 5557 5558 if (sctp_style(sk, TCP)) 5559 return -EOPNOTSUPP; 5560 5561 if (len < sizeof(struct sctp_assoc_ids)) 5562 return -EINVAL; 5563 5564 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5565 num++; 5566 } 5567 5568 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5569 return -EINVAL; 5570 5571 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5572 5573 ids = kmalloc(len, GFP_KERNEL); 5574 if (unlikely(!ids)) 5575 return -ENOMEM; 5576 5577 ids->gaids_number_of_ids = num; 5578 num = 0; 5579 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5580 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5581 } 5582 5583 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5584 kfree(ids); 5585 return -EFAULT; 5586 } 5587 5588 kfree(ids); 5589 return 0; 5590 } 5591 5592 /* 5593 * SCTP_PEER_ADDR_THLDS 5594 * 5595 * This option allows us to fetch the partially failed threshold for one or all 5596 * transports in an association. 
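 *
 * An illustrative single-transport query (sketch only; 'peer' and
 * 'assoc_id' are assumed to be known):
 *
 *	struct sctp_paddrthlds th;
 *	socklen_t len = sizeof(th);
 *
 *	memset(&th, 0, sizeof(th));
 *	th.spt_assoc_id = assoc_id;
 *	memcpy(&th.spt_address, &peer, sizeof(peer));
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &th, &len))
 *		printf("pf threshold %u, path max rxt %u\n",
 *		       th.spt_pathpfthld, th.spt_pathmaxrxt);
 *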
See Section 6.1 of: 5597 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5598 */ 5599 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5600 char __user *optval, 5601 int len, 5602 int __user *optlen) 5603 { 5604 struct sctp_paddrthlds val; 5605 struct sctp_transport *trans; 5606 struct sctp_association *asoc; 5607 5608 if (len < sizeof(struct sctp_paddrthlds)) 5609 return -EINVAL; 5610 len = sizeof(struct sctp_paddrthlds); 5611 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5612 return -EFAULT; 5613 5614 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5615 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5616 if (!asoc) 5617 return -ENOENT; 5618 5619 val.spt_pathpfthld = asoc->pf_retrans; 5620 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5621 } else { 5622 trans = sctp_addr_id2transport(sk, &val.spt_address, 5623 val.spt_assoc_id); 5624 if (!trans) 5625 return -ENOENT; 5626 5627 val.spt_pathmaxrxt = trans->pathmaxrxt; 5628 val.spt_pathpfthld = trans->pf_retrans; 5629 } 5630 5631 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5632 return -EFAULT; 5633 5634 return 0; 5635 } 5636 5637 /* 5638 * SCTP_GET_ASSOC_STATS 5639 * 5640 * This option retrieves local per endpoint statistics. It is modeled 5641 * after OpenSolaris' implementation 5642 */ 5643 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5644 char __user *optval, 5645 int __user *optlen) 5646 { 5647 struct sctp_assoc_stats sas; 5648 struct sctp_association *asoc = NULL; 5649 5650 /* User must provide at least the assoc id */ 5651 if (len < sizeof(sctp_assoc_t)) 5652 return -EINVAL; 5653 5654 /* Allow the struct to grow and fill in as much as possible */ 5655 len = min_t(size_t, len, sizeof(sas)); 5656 5657 if (copy_from_user(&sas, optval, len)) 5658 return -EFAULT; 5659 5660 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5661 if (!asoc) 5662 return -EINVAL; 5663 5664 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5665 sas.sas_gapcnt = asoc->stats.gapcnt; 5666 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5667 sas.sas_osacks = asoc->stats.osacks; 5668 sas.sas_isacks = asoc->stats.isacks; 5669 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5670 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5671 sas.sas_oodchunks = asoc->stats.oodchunks; 5672 sas.sas_iodchunks = asoc->stats.iodchunks; 5673 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5674 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5675 sas.sas_idupchunks = asoc->stats.idupchunks; 5676 sas.sas_opackets = asoc->stats.opackets; 5677 sas.sas_ipackets = asoc->stats.ipackets; 5678 5679 /* New high max rto observed, will return 0 if not a single 5680 * RTO update took place. 
obs_rto_ipaddr will be bogus 5681 * in such a case 5682 */ 5683 sas.sas_maxrto = asoc->stats.max_obs_rto; 5684 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5685 sizeof(struct sockaddr_storage)); 5686 5687 /* Mark beginning of a new observation period */ 5688 asoc->stats.max_obs_rto = asoc->rto_min; 5689 5690 if (put_user(len, optlen)) 5691 return -EFAULT; 5692 5693 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5694 5695 if (copy_to_user(optval, &sas, len)) 5696 return -EFAULT; 5697 5698 return 0; 5699 } 5700 5701 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5702 char __user *optval, int __user *optlen) 5703 { 5704 int retval = 0; 5705 int len; 5706 5707 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5708 5709 /* I can hardly begin to describe how wrong this is. This is 5710 * so broken as to be worse than useless. The API draft 5711 * REALLY is NOT helpful here... I am not convinced that the 5712 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5713 * are at all well-founded. 5714 */ 5715 if (level != SOL_SCTP) { 5716 struct sctp_af *af = sctp_sk(sk)->pf->af; 5717 5718 retval = af->getsockopt(sk, level, optname, optval, optlen); 5719 return retval; 5720 } 5721 5722 if (get_user(len, optlen)) 5723 return -EFAULT; 5724 5725 sctp_lock_sock(sk); 5726 5727 switch (optname) { 5728 case SCTP_STATUS: 5729 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5730 break; 5731 case SCTP_DISABLE_FRAGMENTS: 5732 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5733 optlen); 5734 break; 5735 case SCTP_EVENTS: 5736 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5737 break; 5738 case SCTP_AUTOCLOSE: 5739 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5740 break; 5741 case SCTP_SOCKOPT_PEELOFF: 5742 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 5743 break; 5744 case SCTP_PEER_ADDR_PARAMS: 5745 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5746 optlen); 5747 break; 5748 case SCTP_DELAYED_SACK: 5749 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 5750 optlen); 5751 break; 5752 case SCTP_INITMSG: 5753 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5754 break; 5755 case SCTP_GET_PEER_ADDRS: 5756 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 5757 optlen); 5758 break; 5759 case SCTP_GET_LOCAL_ADDRS: 5760 retval = sctp_getsockopt_local_addrs(sk, len, optval, 5761 optlen); 5762 break; 5763 case SCTP_SOCKOPT_CONNECTX3: 5764 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 5765 break; 5766 case SCTP_DEFAULT_SEND_PARAM: 5767 retval = sctp_getsockopt_default_send_param(sk, len, 5768 optval, optlen); 5769 break; 5770 case SCTP_PRIMARY_ADDR: 5771 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 5772 break; 5773 case SCTP_NODELAY: 5774 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 5775 break; 5776 case SCTP_RTOINFO: 5777 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 5778 break; 5779 case SCTP_ASSOCINFO: 5780 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 5781 break; 5782 case SCTP_I_WANT_MAPPED_V4_ADDR: 5783 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 5784 break; 5785 case SCTP_MAXSEG: 5786 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 5787 break; 5788 case SCTP_GET_PEER_ADDR_INFO: 5789 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 5790 optlen); 5791 break; 5792 case SCTP_ADAPTATION_LAYER: 5793 retval = 
sctp_getsockopt_adaptation_layer(sk, len, optval, 5794 optlen); 5795 break; 5796 case SCTP_CONTEXT: 5797 retval = sctp_getsockopt_context(sk, len, optval, optlen); 5798 break; 5799 case SCTP_FRAGMENT_INTERLEAVE: 5800 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 5801 optlen); 5802 break; 5803 case SCTP_PARTIAL_DELIVERY_POINT: 5804 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 5805 optlen); 5806 break; 5807 case SCTP_MAX_BURST: 5808 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 5809 break; 5810 case SCTP_AUTH_KEY: 5811 case SCTP_AUTH_CHUNK: 5812 case SCTP_AUTH_DELETE_KEY: 5813 retval = -EOPNOTSUPP; 5814 break; 5815 case SCTP_HMAC_IDENT: 5816 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 5817 break; 5818 case SCTP_AUTH_ACTIVE_KEY: 5819 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 5820 break; 5821 case SCTP_PEER_AUTH_CHUNKS: 5822 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 5823 optlen); 5824 break; 5825 case SCTP_LOCAL_AUTH_CHUNKS: 5826 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 5827 optlen); 5828 break; 5829 case SCTP_GET_ASSOC_NUMBER: 5830 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 5831 break; 5832 case SCTP_GET_ASSOC_ID_LIST: 5833 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 5834 break; 5835 case SCTP_AUTO_ASCONF: 5836 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 5837 break; 5838 case SCTP_PEER_ADDR_THLDS: 5839 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 5840 break; 5841 case SCTP_GET_ASSOC_STATS: 5842 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 5843 break; 5844 default: 5845 retval = -ENOPROTOOPT; 5846 break; 5847 } 5848 5849 sctp_release_sock(sk); 5850 return retval; 5851 } 5852 5853 static void sctp_hash(struct sock *sk) 5854 { 5855 /* STUB */ 5856 } 5857 5858 static void sctp_unhash(struct sock *sk) 5859 { 5860 /* STUB */ 5861 } 5862 5863 /* Check if port is acceptable. Possibly find first available port. 5864 * 5865 * The port hash table (contained in the 'global' SCTP protocol storage 5866 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 5867 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 5868 * list (the list number is the port number hashed out, so as you 5869 * would expect from a hash function, all the ports in a given list have 5870 * such a number that hashes out to the same list number; you were 5871 * expecting that, right?); so each list has a set of ports, with a 5872 * link to the socket (struct sock) that uses it, the port number and 5873 * a fastreuse flag (FIXME: NPI ipg). 5874 */ 5875 static struct sctp_bind_bucket *sctp_bucket_create( 5876 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 5877 5878 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 5879 { 5880 struct sctp_bind_hashbucket *head; /* hash list */ 5881 struct sctp_bind_bucket *pp; 5882 unsigned short snum; 5883 int ret; 5884 5885 snum = ntohs(addr->v4.sin_port); 5886 5887 pr_debug("%s: begins, snum:%d\n", __func__, snum); 5888 5889 sctp_local_bh_disable(); 5890 5891 if (snum == 0) { 5892 /* Search for an available port. 
*/ 5893 int low, high, remaining, index; 5894 unsigned int rover; 5895 5896 inet_get_local_port_range(&low, &high); 5897 remaining = (high - low) + 1; 5898 rover = net_random() % remaining + low; 5899 5900 do { 5901 rover++; 5902 if ((rover < low) || (rover > high)) 5903 rover = low; 5904 if (inet_is_reserved_local_port(rover)) 5905 continue; 5906 index = sctp_phashfn(sock_net(sk), rover); 5907 head = &sctp_port_hashtable[index]; 5908 sctp_spin_lock(&head->lock); 5909 sctp_for_each_hentry(pp, &head->chain) 5910 if ((pp->port == rover) && 5911 net_eq(sock_net(sk), pp->net)) 5912 goto next; 5913 break; 5914 next: 5915 sctp_spin_unlock(&head->lock); 5916 } while (--remaining > 0); 5917 5918 /* Exhausted local port range during search? */ 5919 ret = 1; 5920 if (remaining <= 0) 5921 goto fail; 5922 5923 /* OK, here is the one we will use. HEAD (the port 5924 * hash table list entry) is non-NULL and we hold its 5925 * lock. 5926 */ 5927 snum = rover; 5928 } else { 5929 /* We are given a specific port number; we verify 5930 * that it is not being used. If it is used, we will 5931 * exhaust the search in the hash list corresponding 5932 * to the port number (snum) - we detect that with the 5933 * port iterator, pp being NULL. 5934 */ 5935 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 5936 sctp_spin_lock(&head->lock); 5937 sctp_for_each_hentry(pp, &head->chain) { 5938 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 5939 goto pp_found; 5940 } 5941 } 5942 pp = NULL; 5943 goto pp_not_found; 5944 pp_found: 5945 if (!hlist_empty(&pp->owner)) { 5946 /* We had a port hash table hit - there is an 5947 * available port (pp != NULL) and it is being 5948 * used by another socket (pp->owner not empty); that other 5949 * socket is going to be sk2. 5950 */ 5951 int reuse = sk->sk_reuse; 5952 struct sock *sk2; 5953 5954 pr_debug("%s: found a possible match\n", __func__); 5955 5956 if (pp->fastreuse && sk->sk_reuse && 5957 sk->sk_state != SCTP_SS_LISTENING) 5958 goto success; 5959 5960 /* Run through the list of sockets bound to the port 5961 * (pp->port) [via the pointers bind_next and 5962 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 5963 * we get the endpoint they describe and run through 5964 * the endpoint's list of IP (v4 or v6) addresses, 5965 * comparing each of the addresses with the address of 5966 * the socket sk. If we find a match, then that means 5967 * that this port/socket (sk) combination is already 5968 * in an endpoint. 5969 */ 5970 sk_for_each_bound(sk2, &pp->owner) { 5971 struct sctp_endpoint *ep2; 5972 ep2 = sctp_sk(sk2)->ep; 5973 5974 if (sk == sk2 || 5975 (reuse && sk2->sk_reuse && 5976 sk2->sk_state != SCTP_SS_LISTENING)) 5977 continue; 5978 5979 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 5980 sctp_sk(sk2), sctp_sk(sk))) { 5981 ret = (long)sk2; 5982 goto fail_unlock; 5983 } 5984 } 5985 5986 pr_debug("%s: found a match\n", __func__); 5987 } 5988 pp_not_found: 5989 /* If there was a hash table miss, create a new port. */ 5990 ret = 1; 5991 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 5992 goto fail_unlock; 5993 5994 /* In either case (hit or miss), make sure fastreuse is 1 only 5995 * if sk->sk_reuse is too (that is, if the caller requested 5996 * SO_REUSEADDR on this socket -sk-).
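 *
 * For illustration only - a hypothetical user-space sequence that
 * results in sk->sk_reuse being set before the bind (fd and addr are
 * assumptions, not part of this file):
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));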
5997 */ 5998 if (hlist_empty(&pp->owner)) { 5999 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6000 pp->fastreuse = 1; 6001 else 6002 pp->fastreuse = 0; 6003 } else if (pp->fastreuse && 6004 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6005 pp->fastreuse = 0; 6006 6007 /* We are set, so fill up all the data in the hash table 6008 * entry, tie the socket list information with the rest of the 6009 * sockets FIXME: Blurry, NPI (ipg). 6010 */ 6011 success: 6012 if (!sctp_sk(sk)->bind_hash) { 6013 inet_sk(sk)->inet_num = snum; 6014 sk_add_bind_node(sk, &pp->owner); 6015 sctp_sk(sk)->bind_hash = pp; 6016 } 6017 ret = 0; 6018 6019 fail_unlock: 6020 sctp_spin_unlock(&head->lock); 6021 6022 fail: 6023 sctp_local_bh_enable(); 6024 return ret; 6025 } 6026 6027 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6028 * port is requested. 6029 */ 6030 static int sctp_get_port(struct sock *sk, unsigned short snum) 6031 { 6032 union sctp_addr addr; 6033 struct sctp_af *af = sctp_sk(sk)->pf->af; 6034 6035 /* Set up a dummy address struct from the sk. */ 6036 af->from_sk(&addr, sk); 6037 addr.v4.sin_port = htons(snum); 6038 6039 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6040 return !!sctp_get_port_local(sk, &addr); 6041 } 6042 6043 /* 6044 * Move a socket to LISTENING state. 6045 */ 6046 static int sctp_listen_start(struct sock *sk, int backlog) 6047 { 6048 struct sctp_sock *sp = sctp_sk(sk); 6049 struct sctp_endpoint *ep = sp->ep; 6050 struct crypto_hash *tfm = NULL; 6051 char alg[32]; 6052 6053 /* Allocate HMAC for generating cookie. */ 6054 if (!sp->hmac && sp->sctp_hmac_alg) { 6055 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6056 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6057 if (IS_ERR(tfm)) { 6058 net_info_ratelimited("failed to load transform for %s: %ld\n", 6059 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6060 return -ENOSYS; 6061 } 6062 sctp_sk(sk)->hmac = tfm; 6063 } 6064 6065 /* 6066 * If a bind() or sctp_bindx() is not called prior to a listen() 6067 * call that allows new associations to be accepted, the system 6068 * picks an ephemeral port and will choose an address set equivalent 6069 * to binding with a wildcard address. 6070 * 6071 * This is not currently spelled out in the SCTP sockets 6072 * extensions draft, but follows the practice as seen in TCP 6073 * sockets. 6074 * 6075 */ 6076 sk->sk_state = SCTP_SS_LISTENING; 6077 if (!ep->base.bind_addr.port) { 6078 if (sctp_autobind(sk)) 6079 return -EAGAIN; 6080 } else { 6081 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6082 sk->sk_state = SCTP_SS_CLOSED; 6083 return -EADDRINUSE; 6084 } 6085 } 6086 6087 sk->sk_max_ack_backlog = backlog; 6088 sctp_hash_endpoint(ep); 6089 return 0; 6090 } 6091 6092 /* 6093 * 4.1.3 / 5.1.3 listen() 6094 * 6095 * By default, new associations are not accepted for UDP style sockets. 6096 * An application uses listen() to mark a socket as being able to 6097 * accept new associations. 6098 * 6099 * On TCP style sockets, applications use listen() to ready the SCTP 6100 * endpoint for accepting inbound associations. 6101 * 6102 * On both types of endpoints a backlog of '0' disables listening. 6103 * 6104 * Move a socket to LISTENING state. 6105 */ 6106 int sctp_inet_listen(struct socket *sock, int backlog) 6107 { 6108 struct sock *sk = sock->sk; 6109 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6110 int err = -EINVAL; 6111 6112 if (unlikely(backlog < 0)) 6113 return err; 6114 6115 sctp_lock_sock(sk); 6116 6117 /* Peeled-off sockets are not allowed to listen(). 
*/ 6118 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6119 goto out; 6120 6121 if (sock->state != SS_UNCONNECTED) 6122 goto out; 6123 6124 /* If backlog is zero, disable listening. */ 6125 if (!backlog) { 6126 if (sctp_sstate(sk, CLOSED)) 6127 goto out; 6128 6129 err = 0; 6130 sctp_unhash_endpoint(ep); 6131 sk->sk_state = SCTP_SS_CLOSED; 6132 if (sk->sk_reuse) 6133 sctp_sk(sk)->bind_hash->fastreuse = 1; 6134 goto out; 6135 } 6136 6137 /* If we are already listening, just update the backlog. */ 6138 if (sctp_sstate(sk, LISTENING)) 6139 sk->sk_max_ack_backlog = backlog; 6140 else { 6141 err = sctp_listen_start(sk, backlog); 6142 if (err) 6143 goto out; 6144 } 6145 6146 err = 0; 6147 out: 6148 sctp_release_sock(sk); 6149 return err; 6150 } 6151 6152 /* 6153 * This function is modeled on the current datagram_poll() and 6154 * tcp_poll(). Note that, based on those implementations, we don't 6155 * lock the socket in this function, even though, 6156 * ideally, locking or some other mechanism could be used to ensure 6157 * the integrity of the counters (sndbuf and wmem_alloc) used 6158 * here. We assume that we don't need locks until proven 6159 * otherwise. 6160 * 6161 * Another thing to note is that we include the Async I/O support 6162 * here, again, by modeling the current TCP/UDP code. We don't have 6163 * a good way to test with it yet. 6164 */ 6165 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6166 { 6167 struct sock *sk = sock->sk; 6168 struct sctp_sock *sp = sctp_sk(sk); 6169 unsigned int mask; 6170 6171 poll_wait(file, sk_sleep(sk), wait); 6172 6173 /* A TCP-style listening socket becomes readable when the accept queue 6174 * is not empty. 6175 */ 6176 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6177 return (!list_empty(&sp->ep->asocs)) ? 6178 (POLLIN | POLLRDNORM) : 0; 6179 6180 mask = 0; 6181 6182 /* Are there any exceptional events? */ 6183 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6184 mask |= POLLERR | 6185 sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0; 6186 if (sk->sk_shutdown & RCV_SHUTDOWN) 6187 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6188 if (sk->sk_shutdown == SHUTDOWN_MASK) 6189 mask |= POLLHUP; 6190 6191 /* Is it readable? Reconsider this code with TCP-style support. */ 6192 if (!skb_queue_empty(&sk->sk_receive_queue)) 6193 mask |= POLLIN | POLLRDNORM; 6194 6195 /* The association is either gone or not ready. */ 6196 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6197 return mask; 6198 6199 /* Is it writable? */ 6200 if (sctp_writeable(sk)) { 6201 mask |= POLLOUT | POLLWRNORM; 6202 } else { 6203 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6204 /* 6205 * Since the socket is not locked, the buffer 6206 * might be made available after the writeable check and 6207 * before the bit is set. This could cause a lost I/O 6208 * signal. tcp_poll() has a race breaker for this race 6209 * condition. Based on their implementation, we put 6210 * in the following code to cover it as well.
6211 */ 6212 if (sctp_writeable(sk)) 6213 mask |= POLLOUT | POLLWRNORM; 6214 } 6215 return mask; 6216 } 6217 6218 /******************************************************************** 6219 * 2nd Level Abstractions 6220 ********************************************************************/ 6221 6222 static struct sctp_bind_bucket *sctp_bucket_create( 6223 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6224 { 6225 struct sctp_bind_bucket *pp; 6226 6227 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6228 if (pp) { 6229 SCTP_DBG_OBJCNT_INC(bind_bucket); 6230 pp->port = snum; 6231 pp->fastreuse = 0; 6232 INIT_HLIST_HEAD(&pp->owner); 6233 pp->net = net; 6234 hlist_add_head(&pp->node, &head->chain); 6235 } 6236 return pp; 6237 } 6238 6239 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6240 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6241 { 6242 if (pp && hlist_empty(&pp->owner)) { 6243 __hlist_del(&pp->node); 6244 kmem_cache_free(sctp_bucket_cachep, pp); 6245 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6246 } 6247 } 6248 6249 /* Release this socket's reference to a local port. */ 6250 static inline void __sctp_put_port(struct sock *sk) 6251 { 6252 struct sctp_bind_hashbucket *head = 6253 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6254 inet_sk(sk)->inet_num)]; 6255 struct sctp_bind_bucket *pp; 6256 6257 sctp_spin_lock(&head->lock); 6258 pp = sctp_sk(sk)->bind_hash; 6259 __sk_del_bind_node(sk); 6260 sctp_sk(sk)->bind_hash = NULL; 6261 inet_sk(sk)->inet_num = 0; 6262 sctp_bucket_destroy(pp); 6263 sctp_spin_unlock(&head->lock); 6264 } 6265 6266 void sctp_put_port(struct sock *sk) 6267 { 6268 sctp_local_bh_disable(); 6269 __sctp_put_port(sk); 6270 sctp_local_bh_enable(); 6271 } 6272 6273 /* 6274 * The system picks an ephemeral port and choose an address set equivalent 6275 * to binding with a wildcard address. 6276 * One of those addresses will be the primary address for the association. 6277 * This automatically enables the multihoming capability of SCTP. 6278 */ 6279 static int sctp_autobind(struct sock *sk) 6280 { 6281 union sctp_addr autoaddr; 6282 struct sctp_af *af; 6283 __be16 port; 6284 6285 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6286 af = sctp_sk(sk)->pf->af; 6287 6288 port = htons(inet_sk(sk)->inet_num); 6289 af->inaddr_any(&autoaddr, port); 6290 6291 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6292 } 6293 6294 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6295 * 6296 * From RFC 2292 6297 * 4.2 The cmsghdr Structure * 6298 * 6299 * When ancillary data is sent or received, any number of ancillary data 6300 * objects can be specified by the msg_control and msg_controllen members of 6301 * the msghdr structure, because each object is preceded by 6302 * a cmsghdr structure defining the object's length (the cmsg_len member). 6303 * Historically Berkeley-derived implementations have passed only one object 6304 * at a time, but this API allows multiple objects to be 6305 * passed in a single call to sendmsg() or recvmsg(). The following example 6306 * shows two ancillary data objects in a control buffer. 
6307 * 6308 * |<--------------------------- msg_controllen -------------------------->| 6309 * | | 6310 * 6311 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6312 * 6313 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6314 * | | | 6315 * 6316 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6317 * 6318 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6319 * | | | | | 6320 * 6321 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6322 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6323 * 6324 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6325 * 6326 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6327 * ^ 6328 * | 6329 * 6330 * msg_control 6331 * points here 6332 */ 6333 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6334 { 6335 struct cmsghdr *cmsg; 6336 struct msghdr *my_msg = (struct msghdr *)msg; 6337 6338 for (cmsg = CMSG_FIRSTHDR(msg); 6339 cmsg != NULL; 6340 cmsg = CMSG_NXTHDR(my_msg, cmsg)) { 6341 if (!CMSG_OK(my_msg, cmsg)) 6342 return -EINVAL; 6343 6344 /* Should we parse this header or ignore? */ 6345 if (cmsg->cmsg_level != IPPROTO_SCTP) 6346 continue; 6347 6348 /* Strictly check lengths following example in SCM code. */ 6349 switch (cmsg->cmsg_type) { 6350 case SCTP_INIT: 6351 /* SCTP Socket API Extension 6352 * 5.2.1 SCTP Initiation Structure (SCTP_INIT) 6353 * 6354 * This cmsghdr structure provides information for 6355 * initializing new SCTP associations with sendmsg(). 6356 * The SCTP_INITMSG socket option uses this same data 6357 * structure. This structure is not used for 6358 * recvmsg(). 6359 * 6360 * cmsg_level cmsg_type cmsg_data[] 6361 * ------------ ------------ ---------------------- 6362 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6363 */ 6364 if (cmsg->cmsg_len != 6365 CMSG_LEN(sizeof(struct sctp_initmsg))) 6366 return -EINVAL; 6367 cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg); 6368 break; 6369 6370 case SCTP_SNDRCV: 6371 /* SCTP Socket API Extension 6372 * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV) 6373 * 6374 * This cmsghdr structure specifies SCTP options for 6375 * sendmsg() and describes SCTP header information 6376 * about a received message through recvmsg(). 6377 * 6378 * cmsg_level cmsg_type cmsg_data[] 6379 * ------------ ------------ ---------------------- 6380 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6381 */ 6382 if (cmsg->cmsg_len != 6383 CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6384 return -EINVAL; 6385 6386 cmsgs->info = 6387 (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 6388 6389 /* Minimally, validate the sinfo_flags. */ 6390 if (cmsgs->info->sinfo_flags & 6391 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6392 SCTP_ABORT | SCTP_EOF)) 6393 return -EINVAL; 6394 break; 6395 6396 default: 6397 return -EINVAL; 6398 } 6399 } 6400 return 0; 6401 } 6402 6403 /* 6404 * Wait for a packet.. 6405 * Note: This function is the same function as in core/datagram.c 6406 * with a few modifications to make lksctp work. 6407 */ 6408 static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) 6409 { 6410 int error; 6411 DEFINE_WAIT(wait); 6412 6413 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6414 6415 /* Socket errors? */ 6416 error = sock_error(sk); 6417 if (error) 6418 goto out; 6419 6420 if (!skb_queue_empty(&sk->sk_receive_queue)) 6421 goto ready; 6422 6423 /* Socket shut down? 
*/ 6424 if (sk->sk_shutdown & RCV_SHUTDOWN) 6425 goto out; 6426 6427 /* Sequenced packets can come disconnected. If so we report the 6428 * problem. 6429 */ 6430 error = -ENOTCONN; 6431 6432 /* Is there a good reason to think that we may receive some data? */ 6433 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6434 goto out; 6435 6436 /* Handle signals. */ 6437 if (signal_pending(current)) 6438 goto interrupted; 6439 6440 /* Let another process have a go. Since we are going to sleep 6441 * anyway. Note: This may cause odd behaviors if the message 6442 * does not fit in the user's buffer, but this seems to be the 6443 * only way to honor MSG_DONTWAIT realistically. 6444 */ 6445 sctp_release_sock(sk); 6446 *timeo_p = schedule_timeout(*timeo_p); 6447 sctp_lock_sock(sk); 6448 6449 ready: 6450 finish_wait(sk_sleep(sk), &wait); 6451 return 0; 6452 6453 interrupted: 6454 error = sock_intr_errno(*timeo_p); 6455 6456 out: 6457 finish_wait(sk_sleep(sk), &wait); 6458 *err = error; 6459 return error; 6460 } 6461 6462 /* Receive a datagram. 6463 * Note: This is pretty much the same routine as in core/datagram.c 6464 * with a few changes to make lksctp work. 6465 */ 6466 static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6467 int noblock, int *err) 6468 { 6469 int error; 6470 struct sk_buff *skb; 6471 long timeo; 6472 6473 timeo = sock_rcvtimeo(sk, noblock); 6474 6475 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6476 MAX_SCHEDULE_TIMEOUT); 6477 6478 do { 6479 /* Again only user level code calls this function, 6480 * so nothing interrupt level 6481 * will suddenly eat the receive_queue. 6482 * 6483 * Look at current nfs client by the way... 6484 * However, this function was correct in any case. 8) 6485 */ 6486 if (flags & MSG_PEEK) { 6487 spin_lock_bh(&sk->sk_receive_queue.lock); 6488 skb = skb_peek(&sk->sk_receive_queue); 6489 if (skb) 6490 atomic_inc(&skb->users); 6491 spin_unlock_bh(&sk->sk_receive_queue.lock); 6492 } else { 6493 skb = skb_dequeue(&sk->sk_receive_queue); 6494 } 6495 6496 if (skb) 6497 return skb; 6498 6499 /* Caller is allowed not to check sk->sk_err before calling. */ 6500 error = sock_error(sk); 6501 if (error) 6502 goto no_packet; 6503 6504 if (sk->sk_shutdown & RCV_SHUTDOWN) 6505 break; 6506 6507 /* User doesn't want to wait. */ 6508 error = -EAGAIN; 6509 if (!timeo) 6510 goto no_packet; 6511 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6512 6513 return NULL; 6514 6515 no_packet: 6516 *err = error; 6517 return NULL; 6518 } 6519 6520 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6521 static void __sctp_write_space(struct sctp_association *asoc) 6522 { 6523 struct sock *sk = asoc->base.sk; 6524 struct socket *sock = sk->sk_socket; 6525 6526 if ((sctp_wspace(asoc) > 0) && sock) { 6527 if (waitqueue_active(&asoc->wait)) 6528 wake_up_interruptible(&asoc->wait); 6529 6530 if (sctp_writeable(sk)) { 6531 wait_queue_head_t *wq = sk_sleep(sk); 6532 6533 if (wq && waitqueue_active(wq)) 6534 wake_up_interruptible(wq); 6535 6536 /* Note that we try to include the Async I/O support 6537 * here by modeling from the current TCP/UDP code. 6538 * We have not tested with it yet. 6539 */ 6540 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6541 sock_wake_async(sock, 6542 SOCK_WAKE_SPACE, POLL_OUT); 6543 } 6544 } 6545 } 6546 6547 /* Do accounting for the sndbuf space. 6548 * Decrement the used sndbuf space of the corresponding association by the 6549 * data size which was just transmitted(freed). 
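 *
 * The amounts released here mirror what sctp_set_owner_w() charged:
 * SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) +
 * sizeof(struct sctp_chunk) against asoc->sndbuf_used,
 * sizeof(struct sctp_chunk) against sk->sk_wmem_alloc, and the skb
 * truesize against sk_wmem_queued and the forward allocated memory.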
6550 */ 6551 static void sctp_wfree(struct sk_buff *skb) 6552 { 6553 struct sctp_association *asoc; 6554 struct sctp_chunk *chunk; 6555 struct sock *sk; 6556 6557 /* Get the saved chunk pointer. */ 6558 chunk = *((struct sctp_chunk **)(skb->cb)); 6559 asoc = chunk->asoc; 6560 sk = asoc->base.sk; 6561 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6562 sizeof(struct sk_buff) + 6563 sizeof(struct sctp_chunk); 6564 6565 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6566 6567 /* 6568 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6569 */ 6570 sk->sk_wmem_queued -= skb->truesize; 6571 sk_mem_uncharge(sk, skb->truesize); 6572 6573 sock_wfree(skb); 6574 __sctp_write_space(asoc); 6575 6576 sctp_association_put(asoc); 6577 } 6578 6579 /* Do accounting for the receive space on the socket. 6580 * Accounting for the association is done in ulpevent.c 6581 * We set this as a destructor for the cloned data skbs so that 6582 * accounting is done at the correct time. 6583 */ 6584 void sctp_sock_rfree(struct sk_buff *skb) 6585 { 6586 struct sock *sk = skb->sk; 6587 struct sctp_ulpevent *event = sctp_skb2event(skb); 6588 6589 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6590 6591 /* 6592 * Mimic the behavior of sock_rfree 6593 */ 6594 sk_mem_uncharge(sk, event->rmem_len); 6595 } 6596 6597 6598 /* Helper function to wait for space in the sndbuf. */ 6599 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6600 size_t msg_len) 6601 { 6602 struct sock *sk = asoc->base.sk; 6603 int err = 0; 6604 long current_timeo = *timeo_p; 6605 DEFINE_WAIT(wait); 6606 6607 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6608 *timeo_p, msg_len); 6609 6610 /* Increment the association's refcnt. */ 6611 sctp_association_hold(asoc); 6612 6613 /* Wait on the association specific sndbuf space. */ 6614 for (;;) { 6615 prepare_to_wait_exclusive(&asoc->wait, &wait, 6616 TASK_INTERRUPTIBLE); 6617 if (!*timeo_p) 6618 goto do_nonblock; 6619 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6620 asoc->base.dead) 6621 goto do_error; 6622 if (signal_pending(current)) 6623 goto do_interrupted; 6624 if (msg_len <= sctp_wspace(asoc)) 6625 break; 6626 6627 /* Let another process have a go. Since we are going 6628 * to sleep anyway. 6629 */ 6630 sctp_release_sock(sk); 6631 current_timeo = schedule_timeout(current_timeo); 6632 BUG_ON(sk != asoc->base.sk); 6633 sctp_lock_sock(sk); 6634 6635 *timeo_p = current_timeo; 6636 } 6637 6638 out: 6639 finish_wait(&asoc->wait, &wait); 6640 6641 /* Release the association's refcnt. */ 6642 sctp_association_put(asoc); 6643 6644 return err; 6645 6646 do_error: 6647 err = -EPIPE; 6648 goto out; 6649 6650 do_interrupted: 6651 err = sock_intr_errno(*timeo_p); 6652 goto out; 6653 6654 do_nonblock: 6655 err = -EAGAIN; 6656 goto out; 6657 } 6658 6659 void sctp_data_ready(struct sock *sk, int len) 6660 { 6661 struct socket_wq *wq; 6662 6663 rcu_read_lock(); 6664 wq = rcu_dereference(sk->sk_wq); 6665 if (wq_has_sleeper(wq)) 6666 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6667 POLLRDNORM | POLLRDBAND); 6668 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6669 rcu_read_unlock(); 6670 } 6671 6672 /* If socket sndbuf has changed, wake up all per association waiters. */ 6673 void sctp_write_space(struct sock *sk) 6674 { 6675 struct sctp_association *asoc; 6676 6677 /* Wake up the tasks in each wait queue. 
*/ 6678 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 6679 __sctp_write_space(asoc); 6680 } 6681 } 6682 6683 /* Is there any sndbuf space available on the socket? 6684 * 6685 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 6686 * associations on the same socket. For a UDP-style socket with 6687 * multiple associations, it is possible for it to be "unwriteable" 6688 * prematurely. I assume that this is acceptable because 6689 * a premature "unwriteable" is better than an accidental "writeable" which 6690 * would cause an unwanted block under certain circumstances. For the 1-1 6691 * UDP-style sockets or TCP-style sockets, this code should work. 6692 * - Daisy 6693 */ 6694 static int sctp_writeable(struct sock *sk) 6695 { 6696 int amt = 0; 6697 6698 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 6699 if (amt < 0) 6700 amt = 0; 6701 return amt; 6702 } 6703 6704 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 6705 * returns immediately with EINPROGRESS. 6706 */ 6707 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 6708 { 6709 struct sock *sk = asoc->base.sk; 6710 int err = 0; 6711 long current_timeo = *timeo_p; 6712 DEFINE_WAIT(wait); 6713 6714 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 6715 6716 /* Increment the association's refcnt. */ 6717 sctp_association_hold(asoc); 6718 6719 for (;;) { 6720 prepare_to_wait_exclusive(&asoc->wait, &wait, 6721 TASK_INTERRUPTIBLE); 6722 if (!*timeo_p) 6723 goto do_nonblock; 6724 if (sk->sk_shutdown & RCV_SHUTDOWN) 6725 break; 6726 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6727 asoc->base.dead) 6728 goto do_error; 6729 if (signal_pending(current)) 6730 goto do_interrupted; 6731 6732 if (sctp_state(asoc, ESTABLISHED)) 6733 break; 6734 6735 /* Let another process have a go. Since we are going 6736 * to sleep anyway. 6737 */ 6738 sctp_release_sock(sk); 6739 current_timeo = schedule_timeout(current_timeo); 6740 sctp_lock_sock(sk); 6741 6742 *timeo_p = current_timeo; 6743 } 6744 6745 out: 6746 finish_wait(&asoc->wait, &wait); 6747 6748 /* Release the association's refcnt. 
*/ 6749 sctp_association_put(asoc); 6750 6751 return err; 6752 6753 do_error: 6754 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 6755 err = -ETIMEDOUT; 6756 else 6757 err = -ECONNREFUSED; 6758 goto out; 6759 6760 do_interrupted: 6761 err = sock_intr_errno(*timeo_p); 6762 goto out; 6763 6764 do_nonblock: 6765 err = -EINPROGRESS; 6766 goto out; 6767 } 6768 6769 static int sctp_wait_for_accept(struct sock *sk, long timeo) 6770 { 6771 struct sctp_endpoint *ep; 6772 int err = 0; 6773 DEFINE_WAIT(wait); 6774 6775 ep = sctp_sk(sk)->ep; 6776 6777 6778 for (;;) { 6779 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 6780 TASK_INTERRUPTIBLE); 6781 6782 if (list_empty(&ep->asocs)) { 6783 sctp_release_sock(sk); 6784 timeo = schedule_timeout(timeo); 6785 sctp_lock_sock(sk); 6786 } 6787 6788 err = -EINVAL; 6789 if (!sctp_sstate(sk, LISTENING)) 6790 break; 6791 6792 err = 0; 6793 if (!list_empty(&ep->asocs)) 6794 break; 6795 6796 err = sock_intr_errno(timeo); 6797 if (signal_pending(current)) 6798 break; 6799 6800 err = -EAGAIN; 6801 if (!timeo) 6802 break; 6803 } 6804 6805 finish_wait(sk_sleep(sk), &wait); 6806 6807 return err; 6808 } 6809 6810 static void sctp_wait_for_close(struct sock *sk, long timeout) 6811 { 6812 DEFINE_WAIT(wait); 6813 6814 do { 6815 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6816 if (list_empty(&sctp_sk(sk)->ep->asocs)) 6817 break; 6818 sctp_release_sock(sk); 6819 timeout = schedule_timeout(timeout); 6820 sctp_lock_sock(sk); 6821 } while (!signal_pending(current) && timeout); 6822 6823 finish_wait(sk_sleep(sk), &wait); 6824 } 6825 6826 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 6827 { 6828 struct sk_buff *frag; 6829 6830 if (!skb->data_len) 6831 goto done; 6832 6833 /* Don't forget the fragments. */ 6834 skb_walk_frags(skb, frag) 6835 sctp_skb_set_owner_r_frag(frag, sk); 6836 6837 done: 6838 sctp_skb_set_owner_r(skb, sk); 6839 } 6840 6841 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 6842 struct sctp_association *asoc) 6843 { 6844 struct inet_sock *inet = inet_sk(sk); 6845 struct inet_sock *newinet; 6846 6847 newsk->sk_type = sk->sk_type; 6848 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6849 newsk->sk_flags = sk->sk_flags; 6850 newsk->sk_no_check = sk->sk_no_check; 6851 newsk->sk_reuse = sk->sk_reuse; 6852 6853 newsk->sk_shutdown = sk->sk_shutdown; 6854 newsk->sk_destruct = sctp_destruct_sock; 6855 newsk->sk_family = sk->sk_family; 6856 newsk->sk_protocol = IPPROTO_SCTP; 6857 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 6858 newsk->sk_sndbuf = sk->sk_sndbuf; 6859 newsk->sk_rcvbuf = sk->sk_rcvbuf; 6860 newsk->sk_lingertime = sk->sk_lingertime; 6861 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 6862 newsk->sk_sndtimeo = sk->sk_sndtimeo; 6863 6864 newinet = inet_sk(newsk); 6865 6866 /* Initialize sk's sport, dport, rcv_saddr and daddr for 6867 * getsockname() and getpeername() 6868 */ 6869 newinet->inet_sport = inet->inet_sport; 6870 newinet->inet_saddr = inet->inet_saddr; 6871 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 6872 newinet->inet_dport = htons(asoc->peer.port); 6873 newinet->pmtudisc = inet->pmtudisc; 6874 newinet->inet_id = asoc->next_tsn ^ jiffies; 6875 6876 newinet->uc_ttl = inet->uc_ttl; 6877 newinet->mc_loop = 1; 6878 newinet->mc_ttl = 1; 6879 newinet->mc_index = 0; 6880 newinet->mc_list = NULL; 6881 } 6882 6883 /* Populate the fields of the newsk from the oldsk and migrate the assoc 6884 * and its messages to the newsk. 
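 *
 * In outline: copy the socket buffer sizes and socket level options,
 * hook newsk into the same bind hash bucket as oldsk, duplicate the
 * bind address list, move the receive queue and partial delivery skbs
 * that belong to this association over to newsk, and finally migrate
 * the association itself with newsk locked.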
6885 */ 6886 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 6887 struct sctp_association *assoc, 6888 sctp_socket_type_t type) 6889 { 6890 struct sctp_sock *oldsp = sctp_sk(oldsk); 6891 struct sctp_sock *newsp = sctp_sk(newsk); 6892 struct sctp_bind_bucket *pp; /* hash list port iterator */ 6893 struct sctp_endpoint *newep = newsp->ep; 6894 struct sk_buff *skb, *tmp; 6895 struct sctp_ulpevent *event; 6896 struct sctp_bind_hashbucket *head; 6897 struct list_head tmplist; 6898 6899 /* Migrate socket buffer sizes and all the socket level options to the 6900 * new socket. 6901 */ 6902 newsk->sk_sndbuf = oldsk->sk_sndbuf; 6903 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 6904 /* Brute force copy old sctp opt. */ 6905 if (oldsp->do_auto_asconf) { 6906 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); 6907 inet_sk_copy_descendant(newsk, oldsk); 6908 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); 6909 } else 6910 inet_sk_copy_descendant(newsk, oldsk); 6911 6912 /* Restore the ep value that was overwritten with the above structure 6913 * copy. 6914 */ 6915 newsp->ep = newep; 6916 newsp->hmac = NULL; 6917 6918 /* Hook this new socket in to the bind_hash list. */ 6919 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 6920 inet_sk(oldsk)->inet_num)]; 6921 sctp_local_bh_disable(); 6922 sctp_spin_lock(&head->lock); 6923 pp = sctp_sk(oldsk)->bind_hash; 6924 sk_add_bind_node(newsk, &pp->owner); 6925 sctp_sk(newsk)->bind_hash = pp; 6926 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 6927 sctp_spin_unlock(&head->lock); 6928 sctp_local_bh_enable(); 6929 6930 /* Copy the bind_addr list from the original endpoint to the new 6931 * endpoint so that we can handle restarts properly 6932 */ 6933 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 6934 &oldsp->ep->base.bind_addr, GFP_KERNEL); 6935 6936 /* Move any messages in the old socket's receive queue that are for the 6937 * peeled off association to the new socket's receive queue. 6938 */ 6939 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 6940 event = sctp_skb2event(skb); 6941 if (event->asoc == assoc) { 6942 __skb_unlink(skb, &oldsk->sk_receive_queue); 6943 __skb_queue_tail(&newsk->sk_receive_queue, skb); 6944 sctp_skb_set_owner_r_frag(skb, newsk); 6945 } 6946 } 6947 6948 /* Clean up any messages pending delivery due to partial 6949 * delivery. Three cases: 6950 * 1) No partial deliver; no work. 6951 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 6952 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 6953 */ 6954 skb_queue_head_init(&newsp->pd_lobby); 6955 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 6956 6957 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 6958 struct sk_buff_head *queue; 6959 6960 /* Decide which queue to move pd_lobby skbs to. */ 6961 if (assoc->ulpq.pd_mode) { 6962 queue = &newsp->pd_lobby; 6963 } else 6964 queue = &newsk->sk_receive_queue; 6965 6966 /* Walk through the pd_lobby, looking for skbs that 6967 * need moved to the new socket. 6968 */ 6969 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 6970 event = sctp_skb2event(skb); 6971 if (event->asoc == assoc) { 6972 __skb_unlink(skb, &oldsp->pd_lobby); 6973 __skb_queue_tail(queue, skb); 6974 sctp_skb_set_owner_r_frag(skb, newsk); 6975 } 6976 } 6977 6978 /* Clear up any skbs waiting for the partial 6979 * delivery to finish. 
6980 */ 6981 if (assoc->ulpq.pd_mode) 6982 sctp_clear_pd(oldsk, NULL); 6983 6984 } 6985 6986 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 6987 sctp_skb_set_owner_r_frag(skb, newsk); 6988 6989 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 6990 sctp_skb_set_owner_r_frag(skb, newsk); 6991 6992 /* Set the type of socket to indicate that it is peeled off from the 6993 * original UDP-style socket or created with the accept() call on a 6994 * TCP-style socket.. 6995 */ 6996 newsp->type = type; 6997 6998 /* Mark the new socket "in-use" by the user so that any packets 6999 * that may arrive on the association after we've moved it are 7000 * queued to the backlog. This prevents a potential race between 7001 * backlog processing on the old socket and new-packet processing 7002 * on the new socket. 7003 * 7004 * The caller has just allocated newsk so we can guarantee that other 7005 * paths won't try to lock it and then oldsk. 7006 */ 7007 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7008 sctp_assoc_migrate(assoc, newsk); 7009 7010 /* If the association on the newsk is already closed before accept() 7011 * is called, set RCV_SHUTDOWN flag. 7012 */ 7013 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7014 newsk->sk_shutdown |= RCV_SHUTDOWN; 7015 7016 newsk->sk_state = SCTP_SS_ESTABLISHED; 7017 sctp_release_sock(newsk); 7018 } 7019 7020 7021 /* This proto struct describes the ULP interface for SCTP. */ 7022 struct proto sctp_prot = { 7023 .name = "SCTP", 7024 .owner = THIS_MODULE, 7025 .close = sctp_close, 7026 .connect = sctp_connect, 7027 .disconnect = sctp_disconnect, 7028 .accept = sctp_accept, 7029 .ioctl = sctp_ioctl, 7030 .init = sctp_init_sock, 7031 .destroy = sctp_destroy_sock, 7032 .shutdown = sctp_shutdown, 7033 .setsockopt = sctp_setsockopt, 7034 .getsockopt = sctp_getsockopt, 7035 .sendmsg = sctp_sendmsg, 7036 .recvmsg = sctp_recvmsg, 7037 .bind = sctp_bind, 7038 .backlog_rcv = sctp_backlog_rcv, 7039 .hash = sctp_hash, 7040 .unhash = sctp_unhash, 7041 .get_port = sctp_get_port, 7042 .obj_size = sizeof(struct sctp_sock), 7043 .sysctl_mem = sysctl_sctp_mem, 7044 .sysctl_rmem = sysctl_sctp_rmem, 7045 .sysctl_wmem = sysctl_sctp_wmem, 7046 .memory_pressure = &sctp_memory_pressure, 7047 .enter_memory_pressure = sctp_enter_memory_pressure, 7048 .memory_allocated = &sctp_memory_allocated, 7049 .sockets_allocated = &sctp_sockets_allocated, 7050 }; 7051 7052 #if IS_ENABLED(CONFIG_IPV6) 7053 7054 struct proto sctpv6_prot = { 7055 .name = "SCTPv6", 7056 .owner = THIS_MODULE, 7057 .close = sctp_close, 7058 .connect = sctp_connect, 7059 .disconnect = sctp_disconnect, 7060 .accept = sctp_accept, 7061 .ioctl = sctp_ioctl, 7062 .init = sctp_init_sock, 7063 .destroy = sctp_destroy_sock, 7064 .shutdown = sctp_shutdown, 7065 .setsockopt = sctp_setsockopt, 7066 .getsockopt = sctp_getsockopt, 7067 .sendmsg = sctp_sendmsg, 7068 .recvmsg = sctp_recvmsg, 7069 .bind = sctp_bind, 7070 .backlog_rcv = sctp_backlog_rcv, 7071 .hash = sctp_hash, 7072 .unhash = sctp_unhash, 7073 .get_port = sctp_get_port, 7074 .obj_size = sizeof(struct sctp6_sock), 7075 .sysctl_mem = sysctl_sctp_mem, 7076 .sysctl_rmem = sysctl_sctp_rmem, 7077 .sysctl_wmem = sysctl_sctp_wmem, 7078 .memory_pressure = &sctp_memory_pressure, 7079 .enter_memory_pressure = sctp_enter_memory_pressure, 7080 .memory_allocated = &sctp_memory_allocated, 7081 .sockets_allocated = &sctp_sockets_allocated, 7082 }; 7083 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7084
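
/* A minimal, hypothetical user-space sketch of the SCTP_GET_ASSOC_NUMBER
 * getsockopt implemented above, valid for a one-to-many style socket;
 * the socket fd and the error handling are assumptions, not part of
 * this file:
 *
 *	uint32_t assocs;
 *	socklen_t len = sizeof(assocs);
 *
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *			&assocs, &len))
 *		printf("%u associations\n", assocs);
 */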