1 /* SCTP kernel implementation 2 * (C) Copyright IBM Corp. 2001, 2004 3 * Copyright (c) 1999-2000 Cisco, Inc. 4 * Copyright (c) 1999-2001 Motorola, Inc. 5 * Copyright (c) 2001-2003 Intel Corp. 6 * Copyright (c) 2001-2002 Nokia, Inc. 7 * Copyright (c) 2001 La Monte H.P. Yarroll 8 * 9 * This file is part of the SCTP kernel implementation 10 * 11 * These functions interface with the sockets layer to implement the 12 * SCTP Extensions for the Sockets API. 13 * 14 * Note that the descriptions from the specification are USER level 15 * functions--this file is the functions which populate the struct proto 16 * for SCTP which is the BOTTOM of the sockets interface. 17 * 18 * This SCTP implementation is free software; 19 * you can redistribute it and/or modify it under the terms of 20 * the GNU General Public License as published by 21 * the Free Software Foundation; either version 2, or (at your option) 22 * any later version. 23 * 24 * This SCTP implementation is distributed in the hope that it 25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied 26 * ************************ 27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 28 * See the GNU General Public License for more details. 29 * 30 * You should have received a copy of the GNU General Public License 31 * along with GNU CC; see the file COPYING. If not, write to 32 * the Free Software Foundation, 59 Temple Place - Suite 330, 33 * Boston, MA 02111-1307, USA. 34 * 35 * Please send any bug reports or fixes you make to the 36 * email address(es): 37 * lksctp developers <linux-sctp@vger.kernel.org> 38 * 39 * Written or modified by: 40 * La Monte H.P. Yarroll <piggy@acm.org> 41 * Narasimha Budihal <narsi@refcode.org> 42 * Karl Knutson <karl@athena.chicago.il.us> 43 * Jon Grimm <jgrimm@us.ibm.com> 44 * Xingang Guo <xingang.guo@intel.com> 45 * Daisy Chang <daisyc@us.ibm.com> 46 * Sridhar Samudrala <samudrala@us.ibm.com> 47 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> 48 * Ardelle Fan <ardelle.fan@intel.com> 49 * Ryan Layer <rmlayer@us.ibm.com> 50 * Anup Pemmaiah <pemmaiah@cc.usu.edu> 51 * Kevin Gao <kevin.gao@intel.com> 52 */ 53 54 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 55 56 #include <linux/types.h> 57 #include <linux/kernel.h> 58 #include <linux/wait.h> 59 #include <linux/time.h> 60 #include <linux/ip.h> 61 #include <linux/capability.h> 62 #include <linux/fcntl.h> 63 #include <linux/poll.h> 64 #include <linux/init.h> 65 #include <linux/crypto.h> 66 #include <linux/slab.h> 67 #include <linux/file.h> 68 69 #include <net/ip.h> 70 #include <net/icmp.h> 71 #include <net/route.h> 72 #include <net/ipv6.h> 73 #include <net/inet_common.h> 74 75 #include <linux/socket.h> /* for sa_family_t */ 76 #include <linux/export.h> 77 #include <net/sock.h> 78 #include <net/sctp/sctp.h> 79 #include <net/sctp/sm.h> 80 81 /* Forward declarations for internal helper functions. 
*/ 82 static int sctp_writeable(struct sock *sk); 83 static void sctp_wfree(struct sk_buff *skb); 84 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, 85 size_t msg_len); 86 static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p); 87 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); 88 static int sctp_wait_for_accept(struct sock *sk, long timeo); 89 static void sctp_wait_for_close(struct sock *sk, long timeo); 90 static void sctp_destruct_sock(struct sock *sk); 91 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 92 union sctp_addr *addr, int len); 93 static int sctp_bindx_add(struct sock *, struct sockaddr *, int); 94 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); 95 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); 96 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); 97 static int sctp_send_asconf(struct sctp_association *asoc, 98 struct sctp_chunk *chunk); 99 static int sctp_do_bind(struct sock *, union sctp_addr *, int); 100 static int sctp_autobind(struct sock *sk); 101 static void sctp_sock_migrate(struct sock *, struct sock *, 102 struct sctp_association *, sctp_socket_type_t); 103 104 extern struct kmem_cache *sctp_bucket_cachep; 105 extern long sysctl_sctp_mem[3]; 106 extern int sysctl_sctp_rmem[3]; 107 extern int sysctl_sctp_wmem[3]; 108 109 static int sctp_memory_pressure; 110 static atomic_long_t sctp_memory_allocated; 111 struct percpu_counter sctp_sockets_allocated; 112 113 static void sctp_enter_memory_pressure(struct sock *sk) 114 { 115 sctp_memory_pressure = 1; 116 } 117 118 119 /* Get the sndbuf space available at the time on the association. */ 120 static inline int sctp_wspace(struct sctp_association *asoc) 121 { 122 int amt; 123 124 if (asoc->ep->sndbuf_policy) 125 amt = asoc->sndbuf_used; 126 else 127 amt = sk_wmem_alloc_get(asoc->base.sk); 128 129 if (amt >= asoc->base.sk->sk_sndbuf) { 130 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) 131 amt = 0; 132 else { 133 amt = sk_stream_wspace(asoc->base.sk); 134 if (amt < 0) 135 amt = 0; 136 } 137 } else { 138 amt = asoc->base.sk->sk_sndbuf - amt; 139 } 140 return amt; 141 } 142 143 /* Increment the used sndbuf space count of the corresponding association by 144 * the size of the outgoing data chunk. 145 * Also, set the skb destructor for sndbuf accounting later. 146 * 147 * Since it is always 1-1 between chunk and skb, and also a new skb is always 148 * allocated for chunk bundling in sctp_packet_transmit(), we can use the 149 * destructor in the data chunk skb for the purpose of the sndbuf space 150 * tracking. 151 */ 152 static inline void sctp_set_owner_w(struct sctp_chunk *chunk) 153 { 154 struct sctp_association *asoc = chunk->asoc; 155 struct sock *sk = asoc->base.sk; 156 157 /* The sndbuf space is tracked per association. */ 158 sctp_association_hold(asoc); 159 160 skb_set_owner_w(chunk->skb, sk); 161 162 chunk->skb->destructor = sctp_wfree; 163 /* Save the chunk pointer in skb for sctp_wfree to use later. */ 164 *((struct sctp_chunk **)(chunk->skb->cb)) = chunk; 165 166 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + 167 sizeof(struct sk_buff) + 168 sizeof(struct sctp_chunk); 169 170 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 171 sk->sk_wmem_queued += chunk->skb->truesize; 172 sk_mem_charge(sk, chunk->skb->truesize); 173 } 174 175 /* Verify that this is a valid address. 
*/ 176 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, 177 int len) 178 { 179 struct sctp_af *af; 180 181 /* Verify basic sockaddr. */ 182 af = sctp_sockaddr_af(sctp_sk(sk), addr, len); 183 if (!af) 184 return -EINVAL; 185 186 /* Is this a valid SCTP address? */ 187 if (!af->addr_valid(addr, sctp_sk(sk), NULL)) 188 return -EINVAL; 189 190 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) 191 return -EINVAL; 192 193 return 0; 194 } 195 196 /* Look up the association by its id. If this is not a UDP-style 197 * socket, the ID field is always ignored. 198 */ 199 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) 200 { 201 struct sctp_association *asoc = NULL; 202 203 /* If this is not a UDP-style socket, assoc id should be ignored. */ 204 if (!sctp_style(sk, UDP)) { 205 /* Return NULL if the socket state is not ESTABLISHED. It 206 * could be a TCP-style listening socket or a socket which 207 * hasn't yet called connect() to establish an association. 208 */ 209 if (!sctp_sstate(sk, ESTABLISHED)) 210 return NULL; 211 212 /* Get the first and the only association from the list. */ 213 if (!list_empty(&sctp_sk(sk)->ep->asocs)) 214 asoc = list_entry(sctp_sk(sk)->ep->asocs.next, 215 struct sctp_association, asocs); 216 return asoc; 217 } 218 219 /* Otherwise this is a UDP-style socket. */ 220 if (!id || (id == (sctp_assoc_t)-1)) 221 return NULL; 222 223 spin_lock_bh(&sctp_assocs_id_lock); 224 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); 225 spin_unlock_bh(&sctp_assocs_id_lock); 226 227 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) 228 return NULL; 229 230 return asoc; 231 } 232 233 /* Look up the transport from an address and an assoc id. If both address and 234 * id are specified, the associations matching the address and the id should be 235 * the same. 236 */ 237 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, 238 struct sockaddr_storage *addr, 239 sctp_assoc_t id) 240 { 241 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 242 struct sctp_transport *transport; 243 union sctp_addr *laddr = (union sctp_addr *)addr; 244 245 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 246 laddr, 247 &transport); 248 249 if (!addr_asoc) 250 return NULL; 251 252 id_asoc = sctp_id2assoc(sk, id); 253 if (id_asoc && (id_asoc != addr_asoc)) 254 return NULL; 255 256 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 257 (union sctp_addr *)addr); 258 259 return transport; 260 } 261 262 /* API 3.1.2 bind() - UDP Style Syntax 263 * The syntax of bind() is, 264 * 265 * ret = bind(int sd, struct sockaddr *addr, int addrlen); 266 * 267 * sd - the socket descriptor returned by socket(). 268 * addr - the address structure (struct sockaddr_in or struct 269 * sockaddr_in6 [RFC 2553]), 270 * addr_len - the size of the address structure. 271 */ 272 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) 273 { 274 int retval = 0; 275 276 sctp_lock_sock(sk); 277 278 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, 279 addr, addr_len); 280 281 /* Disallow binding twice. */ 282 if (!sctp_sk(sk)->ep->base.bind_addr.port) 283 retval = sctp_do_bind(sk, (union sctp_addr *)addr, 284 addr_len); 285 else 286 retval = -EINVAL; 287 288 sctp_release_sock(sk); 289 290 return retval; 291 } 292 293 static long sctp_get_port_local(struct sock *, union sctp_addr *); 294 295 /* Verify this is a valid sockaddr. 
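 *
 * Illustrative user-space counterpart (not part of this file; address
 * and port are examples): a sockaddr that passes these checks is a
 * fully specified address of a family the socket supports, e.g.
 *
 *        int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *        struct sockaddr_in a = { 0 };
 *
 *        a.sin_family      = AF_INET;
 *        a.sin_port        = htons(5000);
 *        a.sin_addr.s_addr = htonl(INADDR_ANY);
 *        if (bind(sd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *                perror("bind");
 *
 * A sockaddr shorter than the family's sockaddr_len, or of a family the
 * socket's protocol family does not support, is rejected here.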
*/ 296 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 297 union sctp_addr *addr, int len) 298 { 299 struct sctp_af *af; 300 301 /* Check minimum size. */ 302 if (len < sizeof (struct sockaddr)) 303 return NULL; 304 305 /* V4 mapped address are really of AF_INET family */ 306 if (addr->sa.sa_family == AF_INET6 && 307 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { 308 if (!opt->pf->af_supported(AF_INET, opt)) 309 return NULL; 310 } else { 311 /* Does this PF support this AF? */ 312 if (!opt->pf->af_supported(addr->sa.sa_family, opt)) 313 return NULL; 314 } 315 316 /* If we get this far, af is valid. */ 317 af = sctp_get_af_specific(addr->sa.sa_family); 318 319 if (len < af->sockaddr_len) 320 return NULL; 321 322 return af; 323 } 324 325 /* Bind a local address either to an endpoint or to an association. */ 326 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) 327 { 328 struct net *net = sock_net(sk); 329 struct sctp_sock *sp = sctp_sk(sk); 330 struct sctp_endpoint *ep = sp->ep; 331 struct sctp_bind_addr *bp = &ep->base.bind_addr; 332 struct sctp_af *af; 333 unsigned short snum; 334 int ret = 0; 335 336 /* Common sockaddr verification. */ 337 af = sctp_sockaddr_af(sp, addr, len); 338 if (!af) { 339 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", 340 __func__, sk, addr, len); 341 return -EINVAL; 342 } 343 344 snum = ntohs(addr->v4.sin_port); 345 346 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", 347 __func__, sk, &addr->sa, bp->port, snum, len); 348 349 /* PF specific bind() address verification. */ 350 if (!sp->pf->bind_verify(sp, addr)) 351 return -EADDRNOTAVAIL; 352 353 /* We must either be unbound, or bind to the same port. 354 * It's OK to allow 0 ports if we are already bound. 355 * We'll just inhert an already bound port in this case 356 */ 357 if (bp->port) { 358 if (!snum) 359 snum = bp->port; 360 else if (snum != bp->port) { 361 pr_debug("%s: new port %d doesn't match existing port " 362 "%d\n", __func__, snum, bp->port); 363 return -EINVAL; 364 } 365 } 366 367 if (snum && snum < PROT_SOCK && 368 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) 369 return -EACCES; 370 371 /* See if the address matches any of the addresses we may have 372 * already bound before checking against other endpoints. 373 */ 374 if (sctp_bind_addr_match(bp, addr, sp)) 375 return -EINVAL; 376 377 /* Make sure we are allowed to bind here. 378 * The function sctp_get_port_local() does duplicate address 379 * detection. 380 */ 381 addr->v4.sin_port = htons(snum); 382 if ((ret = sctp_get_port_local(sk, addr))) { 383 return -EADDRINUSE; 384 } 385 386 /* Refresh ephemeral port. */ 387 if (!bp->port) 388 bp->port = inet_sk(sk)->inet_num; 389 390 /* Add the address to the bind address list. 391 * Use GFP_ATOMIC since BHs will be disabled. 392 */ 393 ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC); 394 395 /* Copy back into socket for getsockname() use. */ 396 if (!ret) { 397 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); 398 af->to_sk_saddr(addr, sk); 399 } 400 401 return ret; 402 } 403 404 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks 405 * 406 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged 407 * at any one time. If a sender, after sending an ASCONF chunk, decides 408 * it needs to transfer another ASCONF Chunk, it MUST wait until the 409 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a 410 * subsequent ASCONF. 
Note this restriction binds each side, so at any 411 * time two ASCONF may be in-transit on any given association (one sent 412 * from each endpoint). 413 */ 414 static int sctp_send_asconf(struct sctp_association *asoc, 415 struct sctp_chunk *chunk) 416 { 417 struct net *net = sock_net(asoc->base.sk); 418 int retval = 0; 419 420 /* If there is an outstanding ASCONF chunk, queue it for later 421 * transmission. 422 */ 423 if (asoc->addip_last_asconf) { 424 list_add_tail(&chunk->list, &asoc->addip_chunk_list); 425 goto out; 426 } 427 428 /* Hold the chunk until an ASCONF_ACK is received. */ 429 sctp_chunk_hold(chunk); 430 retval = sctp_primitive_ASCONF(net, asoc, chunk); 431 if (retval) 432 sctp_chunk_free(chunk); 433 else 434 asoc->addip_last_asconf = chunk; 435 436 out: 437 return retval; 438 } 439 440 /* Add a list of addresses as bind addresses to local endpoint or 441 * association. 442 * 443 * Basically run through each address specified in the addrs/addrcnt 444 * array/length pair, determine if it is IPv6 or IPv4 and call 445 * sctp_do_bind() on it. 446 * 447 * If any of them fails, then the operation will be reversed and the 448 * ones that were added will be removed. 449 * 450 * Only sctp_setsockopt_bindx() is supposed to call this function. 451 */ 452 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) 453 { 454 int cnt; 455 int retval = 0; 456 void *addr_buf; 457 struct sockaddr *sa_addr; 458 struct sctp_af *af; 459 460 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, 461 addrs, addrcnt); 462 463 addr_buf = addrs; 464 for (cnt = 0; cnt < addrcnt; cnt++) { 465 /* The list may contain either IPv4 or IPv6 address; 466 * determine the address length for walking thru the list. 467 */ 468 sa_addr = addr_buf; 469 af = sctp_get_af_specific(sa_addr->sa_family); 470 if (!af) { 471 retval = -EINVAL; 472 goto err_bindx_add; 473 } 474 475 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, 476 af->sockaddr_len); 477 478 addr_buf += af->sockaddr_len; 479 480 err_bindx_add: 481 if (retval < 0) { 482 /* Failed. Cleanup the ones that have been added */ 483 if (cnt > 0) 484 sctp_bindx_rem(sk, addrs, cnt); 485 return retval; 486 } 487 } 488 489 return retval; 490 } 491 492 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the 493 * associations that are part of the endpoint indicating that a list of local 494 * addresses are added to the endpoint. 495 * 496 * If any of the addresses is already in the bind address list of the 497 * association, we do not send the chunk for that association. But it will not 498 * affect other associations. 499 * 500 * Only sctp_setsockopt_bindx() is supposed to call this function. 
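 *
 * For context, an illustrative user-space sequence that can end up here
 * (assuming the lksctp-tools sctp_bindx() wrapper, the addip_enable
 * sysctl turned on locally, and an ASCONF-capable peer; the address is
 * an example):
 *
 *        struct sockaddr_in a = { 0 };
 *
 *        a.sin_family = AF_INET;
 *        a.sin_port   = htons(5000);      (the port the socket is bound to)
 *        inet_pton(AF_INET, "192.0.2.10", &a.sin_addr);
 *        if (sctp_bindx(sd, (struct sockaddr *)&a, 1, SCTP_BINDX_ADD_ADDR) < 0)
 *                perror("sctp_bindx");
 *
 * The new address is added to the local bind address list and, for each
 * established ASCONF-capable association on the endpoint, an ASCONF
 * Add IP chunk is sent to the peer.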
501 */ 502 static int sctp_send_asconf_add_ip(struct sock *sk, 503 struct sockaddr *addrs, 504 int addrcnt) 505 { 506 struct net *net = sock_net(sk); 507 struct sctp_sock *sp; 508 struct sctp_endpoint *ep; 509 struct sctp_association *asoc; 510 struct sctp_bind_addr *bp; 511 struct sctp_chunk *chunk; 512 struct sctp_sockaddr_entry *laddr; 513 union sctp_addr *addr; 514 union sctp_addr saveaddr; 515 void *addr_buf; 516 struct sctp_af *af; 517 struct list_head *p; 518 int i; 519 int retval = 0; 520 521 if (!net->sctp.addip_enable) 522 return retval; 523 524 sp = sctp_sk(sk); 525 ep = sp->ep; 526 527 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 528 __func__, sk, addrs, addrcnt); 529 530 list_for_each_entry(asoc, &ep->asocs, asocs) { 531 if (!asoc->peer.asconf_capable) 532 continue; 533 534 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) 535 continue; 536 537 if (!sctp_state(asoc, ESTABLISHED)) 538 continue; 539 540 /* Check if any address in the packed array of addresses is 541 * in the bind address list of the association. If so, 542 * do not send the asconf chunk to its peer, but continue with 543 * other associations. 544 */ 545 addr_buf = addrs; 546 for (i = 0; i < addrcnt; i++) { 547 addr = addr_buf; 548 af = sctp_get_af_specific(addr->v4.sin_family); 549 if (!af) { 550 retval = -EINVAL; 551 goto out; 552 } 553 554 if (sctp_assoc_lookup_laddr(asoc, addr)) 555 break; 556 557 addr_buf += af->sockaddr_len; 558 } 559 if (i < addrcnt) 560 continue; 561 562 /* Use the first valid address in bind addr list of 563 * association as Address Parameter of ASCONF CHUNK. 564 */ 565 bp = &asoc->base.bind_addr; 566 p = bp->address_list.next; 567 laddr = list_entry(p, struct sctp_sockaddr_entry, list); 568 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, 569 addrcnt, SCTP_PARAM_ADD_IP); 570 if (!chunk) { 571 retval = -ENOMEM; 572 goto out; 573 } 574 575 /* Add the new addresses to the bind address list with 576 * use_as_src set to 0. 577 */ 578 addr_buf = addrs; 579 for (i = 0; i < addrcnt; i++) { 580 addr = addr_buf; 581 af = sctp_get_af_specific(addr->v4.sin_family); 582 memcpy(&saveaddr, addr, af->sockaddr_len); 583 retval = sctp_add_bind_addr(bp, &saveaddr, 584 SCTP_ADDR_NEW, GFP_ATOMIC); 585 addr_buf += af->sockaddr_len; 586 } 587 if (asoc->src_out_of_asoc_ok) { 588 struct sctp_transport *trans; 589 590 list_for_each_entry(trans, 591 &asoc->peer.transport_addr_list, transports) { 592 /* Clear the source and route cache */ 593 dst_release(trans->dst); 594 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 595 2*asoc->pathmtu, 4380)); 596 trans->ssthresh = asoc->peer.i.a_rwnd; 597 trans->rto = asoc->rto_initial; 598 sctp_max_rto(asoc, trans); 599 trans->rtt = trans->srtt = trans->rttvar = 0; 600 sctp_transport_route(trans, NULL, 601 sctp_sk(asoc->base.sk)); 602 } 603 } 604 retval = sctp_send_asconf(asoc, chunk); 605 } 606 607 out: 608 return retval; 609 } 610 611 /* Remove a list of addresses from bind addresses list. Do not remove the 612 * last address. 613 * 614 * Basically run through each address specified in the addrs/addrcnt 615 * array/length pair, determine if it is IPv6 or IPv4 and call 616 * sctp_del_bind() on it. 617 * 618 * If any of them fails, then the operation will be reversed and the 619 * ones that were removed will be added back. 620 * 621 * At least one address has to be left; if only one address is 622 * available, the operation will return -EBUSY. 623 * 624 * Only sctp_setsockopt_bindx() is supposed to call this function. 
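 *
 * Illustrative user-space counterpart (again assuming the lksctp-tools
 * sctp_bindx() wrapper; the address is an example):
 *
 *        struct sockaddr_in a = { 0 };
 *
 *        a.sin_family = AF_INET;
 *        a.sin_port   = htons(5000);
 *        inet_pton(AF_INET, "192.0.2.10", &a.sin_addr);
 *        if (sctp_bindx(sd, (struct sockaddr *)&a, 1, SCTP_BINDX_REM_ADDR) < 0)
 *                perror("sctp_bindx");
 *
 * This fails with EBUSY if it would leave the endpoint with no bound
 * address, and with EINVAL if the sockaddr carries a port other than
 * the one the socket is bound to.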
625 */ 626 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) 627 { 628 struct sctp_sock *sp = sctp_sk(sk); 629 struct sctp_endpoint *ep = sp->ep; 630 int cnt; 631 struct sctp_bind_addr *bp = &ep->base.bind_addr; 632 int retval = 0; 633 void *addr_buf; 634 union sctp_addr *sa_addr; 635 struct sctp_af *af; 636 637 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 638 __func__, sk, addrs, addrcnt); 639 640 addr_buf = addrs; 641 for (cnt = 0; cnt < addrcnt; cnt++) { 642 /* If the bind address list is empty or if there is only one 643 * bind address, there is nothing more to be removed (we need 644 * at least one address here). 645 */ 646 if (list_empty(&bp->address_list) || 647 (sctp_list_single_entry(&bp->address_list))) { 648 retval = -EBUSY; 649 goto err_bindx_rem; 650 } 651 652 sa_addr = addr_buf; 653 af = sctp_get_af_specific(sa_addr->sa.sa_family); 654 if (!af) { 655 retval = -EINVAL; 656 goto err_bindx_rem; 657 } 658 659 if (!af->addr_valid(sa_addr, sp, NULL)) { 660 retval = -EADDRNOTAVAIL; 661 goto err_bindx_rem; 662 } 663 664 if (sa_addr->v4.sin_port && 665 sa_addr->v4.sin_port != htons(bp->port)) { 666 retval = -EINVAL; 667 goto err_bindx_rem; 668 } 669 670 if (!sa_addr->v4.sin_port) 671 sa_addr->v4.sin_port = htons(bp->port); 672 673 /* FIXME - There is probably a need to check if sk->sk_saddr and 674 * sk->sk_rcv_addr are currently set to one of the addresses to 675 * be removed. This is something which needs to be looked into 676 * when we are fixing the outstanding issues with multi-homing 677 * socket routing and failover schemes. Refer to comments in 678 * sctp_do_bind(). -daisy 679 */ 680 retval = sctp_del_bind_addr(bp, sa_addr); 681 682 addr_buf += af->sockaddr_len; 683 err_bindx_rem: 684 if (retval < 0) { 685 /* Failed. Add the ones that has been removed back */ 686 if (cnt > 0) 687 sctp_bindx_add(sk, addrs, cnt); 688 return retval; 689 } 690 } 691 692 return retval; 693 } 694 695 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of 696 * the associations that are part of the endpoint indicating that a list of 697 * local addresses are removed from the endpoint. 698 * 699 * If any of the addresses is already in the bind address list of the 700 * association, we do not send the chunk for that association. But it will not 701 * affect other associations. 702 * 703 * Only sctp_setsockopt_bindx() is supposed to call this function. 704 */ 705 static int sctp_send_asconf_del_ip(struct sock *sk, 706 struct sockaddr *addrs, 707 int addrcnt) 708 { 709 struct net *net = sock_net(sk); 710 struct sctp_sock *sp; 711 struct sctp_endpoint *ep; 712 struct sctp_association *asoc; 713 struct sctp_transport *transport; 714 struct sctp_bind_addr *bp; 715 struct sctp_chunk *chunk; 716 union sctp_addr *laddr; 717 void *addr_buf; 718 struct sctp_af *af; 719 struct sctp_sockaddr_entry *saddr; 720 int i; 721 int retval = 0; 722 int stored = 0; 723 724 chunk = NULL; 725 if (!net->sctp.addip_enable) 726 return retval; 727 728 sp = sctp_sk(sk); 729 ep = sp->ep; 730 731 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 732 __func__, sk, addrs, addrcnt); 733 734 list_for_each_entry(asoc, &ep->asocs, asocs) { 735 736 if (!asoc->peer.asconf_capable) 737 continue; 738 739 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) 740 continue; 741 742 if (!sctp_state(asoc, ESTABLISHED)) 743 continue; 744 745 /* Check if any address in the packed array of addresses is 746 * not present in the bind address list of the association. 
747 * If so, do not send the asconf chunk to its peer, but 748 * continue with other associations. 749 */ 750 addr_buf = addrs; 751 for (i = 0; i < addrcnt; i++) { 752 laddr = addr_buf; 753 af = sctp_get_af_specific(laddr->v4.sin_family); 754 if (!af) { 755 retval = -EINVAL; 756 goto out; 757 } 758 759 if (!sctp_assoc_lookup_laddr(asoc, laddr)) 760 break; 761 762 addr_buf += af->sockaddr_len; 763 } 764 if (i < addrcnt) 765 continue; 766 767 /* Find one address in the association's bind address list 768 * that is not in the packed array of addresses. This is to 769 * make sure that we do not delete all the addresses in the 770 * association. 771 */ 772 bp = &asoc->base.bind_addr; 773 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, 774 addrcnt, sp); 775 if ((laddr == NULL) && (addrcnt == 1)) { 776 if (asoc->asconf_addr_del_pending) 777 continue; 778 asoc->asconf_addr_del_pending = 779 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); 780 if (asoc->asconf_addr_del_pending == NULL) { 781 retval = -ENOMEM; 782 goto out; 783 } 784 asoc->asconf_addr_del_pending->sa.sa_family = 785 addrs->sa_family; 786 asoc->asconf_addr_del_pending->v4.sin_port = 787 htons(bp->port); 788 if (addrs->sa_family == AF_INET) { 789 struct sockaddr_in *sin; 790 791 sin = (struct sockaddr_in *)addrs; 792 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; 793 } else if (addrs->sa_family == AF_INET6) { 794 struct sockaddr_in6 *sin6; 795 796 sin6 = (struct sockaddr_in6 *)addrs; 797 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; 798 } 799 800 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", 801 __func__, asoc, &asoc->asconf_addr_del_pending->sa, 802 asoc->asconf_addr_del_pending); 803 804 asoc->src_out_of_asoc_ok = 1; 805 stored = 1; 806 goto skip_mkasconf; 807 } 808 809 /* We do not need RCU protection throughout this loop 810 * because this is done under a socket lock from the 811 * setsockopt call. 812 */ 813 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, 814 SCTP_PARAM_DEL_IP); 815 if (!chunk) { 816 retval = -ENOMEM; 817 goto out; 818 } 819 820 skip_mkasconf: 821 /* Reset use_as_src flag for the addresses in the bind address 822 * list that are to be deleted. 823 */ 824 addr_buf = addrs; 825 for (i = 0; i < addrcnt; i++) { 826 laddr = addr_buf; 827 af = sctp_get_af_specific(laddr->v4.sin_family); 828 list_for_each_entry(saddr, &bp->address_list, list) { 829 if (sctp_cmp_addr_exact(&saddr->a, laddr)) 830 saddr->state = SCTP_ADDR_DEL; 831 } 832 addr_buf += af->sockaddr_len; 833 } 834 835 /* Update the route and saddr entries for all the transports 836 * as some of the addresses in the bind address list are 837 * about to be deleted and cannot be used as source addresses. 838 */ 839 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 840 transports) { 841 dst_release(transport->dst); 842 sctp_transport_route(transport, NULL, 843 sctp_sk(asoc->base.sk)); 844 } 845 846 if (stored) 847 /* We don't need to transmit ASCONF */ 848 continue; 849 retval = sctp_send_asconf(asoc, chunk); 850 } 851 out: 852 return retval; 853 } 854 855 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ 856 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) 857 { 858 struct sock *sk = sctp_opt2sk(sp); 859 union sctp_addr *addr; 860 struct sctp_af *af; 861 862 /* It is safe to write port space in caller. 
*/ 863 addr = &addrw->a; 864 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); 865 af = sctp_get_af_specific(addr->sa.sa_family); 866 if (!af) 867 return -EINVAL; 868 if (sctp_verify_addr(sk, addr, af->sockaddr_len)) 869 return -EINVAL; 870 871 if (addrw->state == SCTP_ADDR_NEW) 872 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); 873 else 874 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); 875 } 876 877 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() 878 * 879 * API 8.1 880 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, 881 * int flags); 882 * 883 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 884 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 885 * or IPv6 addresses. 886 * 887 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see 888 * Section 3.1.2 for this usage. 889 * 890 * addrs is a pointer to an array of one or more socket addresses. Each 891 * address is contained in its appropriate structure (i.e. struct 892 * sockaddr_in or struct sockaddr_in6) the family of the address type 893 * must be used to distinguish the address length (note that this 894 * representation is termed a "packed array" of addresses). The caller 895 * specifies the number of addresses in the array with addrcnt. 896 * 897 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns 898 * -1, and sets errno to the appropriate error code. 899 * 900 * For SCTP, the port given in each socket address must be the same, or 901 * sctp_bindx() will fail, setting errno to EINVAL. 902 * 903 * The flags parameter is formed from the bitwise OR of zero or more of 904 * the following currently defined flags: 905 * 906 * SCTP_BINDX_ADD_ADDR 907 * 908 * SCTP_BINDX_REM_ADDR 909 * 910 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the 911 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given 912 * addresses from the association. The two flags are mutually exclusive; 913 * if both are given, sctp_bindx() will fail with EINVAL. A caller may 914 * not remove all addresses from an association; sctp_bindx() will 915 * reject such an attempt with EINVAL. 916 * 917 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate 918 * additional addresses with an endpoint after calling bind(). Or use 919 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening 920 * socket is associated with so that no new association accepted will be 921 * associated with those addresses. If the endpoint supports dynamic 922 * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a 923 * endpoint to send the appropriate message to the peer to change the 924 * peers address lists. 925 * 926 * Adding and removing addresses from a connected association is 927 * optional functionality. Implementations that do not support this 928 * functionality should return EOPNOTSUPP. 929 * 930 * Basically do nothing but copying the addresses from user to kernel 931 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. 932 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() 933 * from userspace. 934 * 935 * We don't use copy_from_user() for optimization: we first do the 936 * sanity checks (buffer size -fast- and access check-healthy 937 * pointer); if all of those succeed, then we can alloc the memory 938 * (expensive operation) needed to copy the data to kernel. 
Then we do 939 * the copying without checking the user space area 940 * (__copy_from_user()). 941 * 942 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 943 * it. 944 * 945 * sk The sk of the socket 946 * addrs The pointer to the addresses in user land 947 * addrssize Size of the addrs buffer 948 * op Operation to perform (add or remove, see the flags of 949 * sctp_bindx) 950 * 951 * Returns 0 if ok, <0 errno code on error. 952 */ 953 static int sctp_setsockopt_bindx(struct sock* sk, 954 struct sockaddr __user *addrs, 955 int addrs_size, int op) 956 { 957 struct sockaddr *kaddrs; 958 int err; 959 int addrcnt = 0; 960 int walk_size = 0; 961 struct sockaddr *sa_addr; 962 void *addr_buf; 963 struct sctp_af *af; 964 965 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", 966 __func__, sk, addrs, addrs_size, op); 967 968 if (unlikely(addrs_size <= 0)) 969 return -EINVAL; 970 971 /* Check the user passed a healthy pointer. */ 972 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 973 return -EFAULT; 974 975 /* Alloc space for the address array in kernel memory. */ 976 kaddrs = kmalloc(addrs_size, GFP_KERNEL); 977 if (unlikely(!kaddrs)) 978 return -ENOMEM; 979 980 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 981 kfree(kaddrs); 982 return -EFAULT; 983 } 984 985 /* Walk through the addrs buffer and count the number of addresses. */ 986 addr_buf = kaddrs; 987 while (walk_size < addrs_size) { 988 if (walk_size + sizeof(sa_family_t) > addrs_size) { 989 kfree(kaddrs); 990 return -EINVAL; 991 } 992 993 sa_addr = addr_buf; 994 af = sctp_get_af_specific(sa_addr->sa_family); 995 996 /* If the address family is not supported or if this address 997 * causes the address buffer to overflow return EINVAL. 998 */ 999 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1000 kfree(kaddrs); 1001 return -EINVAL; 1002 } 1003 addrcnt++; 1004 addr_buf += af->sockaddr_len; 1005 walk_size += af->sockaddr_len; 1006 } 1007 1008 /* Do the work. */ 1009 switch (op) { 1010 case SCTP_BINDX_ADD_ADDR: 1011 err = sctp_bindx_add(sk, kaddrs, addrcnt); 1012 if (err) 1013 goto out; 1014 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); 1015 break; 1016 1017 case SCTP_BINDX_REM_ADDR: 1018 err = sctp_bindx_rem(sk, kaddrs, addrcnt); 1019 if (err) 1020 goto out; 1021 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); 1022 break; 1023 1024 default: 1025 err = -EINVAL; 1026 break; 1027 } 1028 1029 out: 1030 kfree(kaddrs); 1031 1032 return err; 1033 } 1034 1035 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) 1036 * 1037 * Common routine for handling connect() and sctp_connectx(). 1038 * Connect will come in with just a single address. 
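 *
 * Both entry points look roughly like this from user space (an
 * illustrative sketch; the peer addresses and the lksctp-tools
 * sctp_connectx() wrapper are assumptions, not part of this file):
 *
 *        struct sockaddr_in peers[2];    (both filled in with the peer's
 *                                         addresses, same port in each)
 *        sctp_assoc_t id;
 *
 *        Either:
 *                connect(sd, (struct sockaddr *)&peers[0], sizeof(peers[0]));
 *        or, for a multi-homed peer:
 *                sctp_connectx(sd, (struct sockaddr *)peers, 2, &id);
 *
 * Either way the code below walks the packed address list, creates the
 * association if none exists yet, and primes one transport per address.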
1039 */ 1040 static int __sctp_connect(struct sock* sk, 1041 struct sockaddr *kaddrs, 1042 int addrs_size, 1043 sctp_assoc_t *assoc_id) 1044 { 1045 struct net *net = sock_net(sk); 1046 struct sctp_sock *sp; 1047 struct sctp_endpoint *ep; 1048 struct sctp_association *asoc = NULL; 1049 struct sctp_association *asoc2; 1050 struct sctp_transport *transport; 1051 union sctp_addr to; 1052 struct sctp_af *af; 1053 sctp_scope_t scope; 1054 long timeo; 1055 int err = 0; 1056 int addrcnt = 0; 1057 int walk_size = 0; 1058 union sctp_addr *sa_addr = NULL; 1059 void *addr_buf; 1060 unsigned short port; 1061 unsigned int f_flags = 0; 1062 1063 sp = sctp_sk(sk); 1064 ep = sp->ep; 1065 1066 /* connect() cannot be done on a socket that is already in ESTABLISHED 1067 * state - UDP-style peeled off socket or a TCP-style socket that 1068 * is already connected. 1069 * It cannot be done even on a TCP-style listening socket. 1070 */ 1071 if (sctp_sstate(sk, ESTABLISHED) || 1072 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { 1073 err = -EISCONN; 1074 goto out_free; 1075 } 1076 1077 /* Walk through the addrs buffer and count the number of addresses. */ 1078 addr_buf = kaddrs; 1079 while (walk_size < addrs_size) { 1080 if (walk_size + sizeof(sa_family_t) > addrs_size) { 1081 err = -EINVAL; 1082 goto out_free; 1083 } 1084 1085 sa_addr = addr_buf; 1086 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1087 1088 /* If the address family is not supported or if this address 1089 * causes the address buffer to overflow return EINVAL. 1090 */ 1091 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1092 err = -EINVAL; 1093 goto out_free; 1094 } 1095 1096 port = ntohs(sa_addr->v4.sin_port); 1097 1098 /* Save current address so we can work with it */ 1099 memcpy(&to, sa_addr, af->sockaddr_len); 1100 1101 err = sctp_verify_addr(sk, &to, af->sockaddr_len); 1102 if (err) 1103 goto out_free; 1104 1105 /* Make sure the destination port is correctly set 1106 * in all addresses. 1107 */ 1108 if (asoc && asoc->peer.port && asoc->peer.port != port) { 1109 err = -EINVAL; 1110 goto out_free; 1111 } 1112 1113 /* Check if there already is a matching association on the 1114 * endpoint (other than the one created here). 1115 */ 1116 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1117 if (asoc2 && asoc2 != asoc) { 1118 if (asoc2->state >= SCTP_STATE_ESTABLISHED) 1119 err = -EISCONN; 1120 else 1121 err = -EALREADY; 1122 goto out_free; 1123 } 1124 1125 /* If we could not find a matching association on the endpoint, 1126 * make sure that there is no peeled-off association matching 1127 * the peer address even on another socket. 1128 */ 1129 if (sctp_endpoint_is_peeled_off(ep, &to)) { 1130 err = -EADDRNOTAVAIL; 1131 goto out_free; 1132 } 1133 1134 if (!asoc) { 1135 /* If a bind() or sctp_bindx() is not called prior to 1136 * an sctp_connectx() call, the system picks an 1137 * ephemeral port and will choose an address set 1138 * equivalent to binding with a wildcard address. 1139 */ 1140 if (!ep->base.bind_addr.port) { 1141 if (sctp_autobind(sk)) { 1142 err = -EAGAIN; 1143 goto out_free; 1144 } 1145 } else { 1146 /* 1147 * If an unprivileged user inherits a 1-many 1148 * style socket with open associations on a 1149 * privileged port, it MAY be permitted to 1150 * accept new associations, but it SHOULD NOT 1151 * be permitted to open new associations. 
1152 */ 1153 if (ep->base.bind_addr.port < PROT_SOCK && 1154 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1155 err = -EACCES; 1156 goto out_free; 1157 } 1158 } 1159 1160 scope = sctp_scope(&to); 1161 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1162 if (!asoc) { 1163 err = -ENOMEM; 1164 goto out_free; 1165 } 1166 1167 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, 1168 GFP_KERNEL); 1169 if (err < 0) { 1170 goto out_free; 1171 } 1172 1173 } 1174 1175 /* Prime the peer's transport structures. */ 1176 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, 1177 SCTP_UNKNOWN); 1178 if (!transport) { 1179 err = -ENOMEM; 1180 goto out_free; 1181 } 1182 1183 addrcnt++; 1184 addr_buf += af->sockaddr_len; 1185 walk_size += af->sockaddr_len; 1186 } 1187 1188 /* In case the user of sctp_connectx() wants an association 1189 * id back, assign one now. 1190 */ 1191 if (assoc_id) { 1192 err = sctp_assoc_set_id(asoc, GFP_KERNEL); 1193 if (err < 0) 1194 goto out_free; 1195 } 1196 1197 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1198 if (err < 0) { 1199 goto out_free; 1200 } 1201 1202 /* Initialize sk's dport and daddr for getpeername() */ 1203 inet_sk(sk)->inet_dport = htons(asoc->peer.port); 1204 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1205 af->to_sk_daddr(sa_addr, sk); 1206 sk->sk_err = 0; 1207 1208 /* in-kernel sockets don't generally have a file allocated to them 1209 * if all they do is call sock_create_kern(). 1210 */ 1211 if (sk->sk_socket->file) 1212 f_flags = sk->sk_socket->file->f_flags; 1213 1214 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1215 1216 err = sctp_wait_for_connect(asoc, &timeo); 1217 if ((err == 0 || err == -EINPROGRESS) && assoc_id) 1218 *assoc_id = asoc->assoc_id; 1219 1220 /* Don't free association on exit. */ 1221 asoc = NULL; 1222 1223 out_free: 1224 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", 1225 __func__, asoc, kaddrs, err); 1226 1227 if (asoc) { 1228 /* sctp_primitive_ASSOCIATE may have added this association 1229 * To the hash table, try to unhash it, just in case, its a noop 1230 * if it wasn't hashed so we're safe 1231 */ 1232 sctp_unhash_established(asoc); 1233 sctp_association_free(asoc); 1234 } 1235 return err; 1236 } 1237 1238 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() 1239 * 1240 * API 8.9 1241 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt, 1242 * sctp_assoc_t *asoc); 1243 * 1244 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 1245 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 1246 * or IPv6 addresses. 1247 * 1248 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see 1249 * Section 3.1.2 for this usage. 1250 * 1251 * addrs is a pointer to an array of one or more socket addresses. Each 1252 * address is contained in its appropriate structure (i.e. struct 1253 * sockaddr_in or struct sockaddr_in6) the family of the address type 1254 * must be used to distengish the address length (note that this 1255 * representation is termed a "packed array" of addresses). The caller 1256 * specifies the number of addresses in the array with addrcnt. 1257 * 1258 * On success, sctp_connectx() returns 0. It also sets the assoc_id to 1259 * the association id of the new association. On failure, sctp_connectx() 1260 * returns -1, and sets errno to the appropriate error code. The assoc_id 1261 * is not touched by the kernel. 
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user to kernel land
 * and invoke __sctp_connect().  This is used for tunneling the
 * sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (a fast buffer-size check and an access_ok() check that
 * the pointer is healthy); if all of those succeed, then we can
 * allocate the memory (expensive operation) needed to copy the data to
 * kernel.  Then we do the copying without checking the user space area
 * again (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk          The sk of the socket
 * addrs       The pointer to the addresses in user land
 * addrs_size  Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
                                      struct sockaddr __user *addrs,
                                      int addrs_size,
                                      sctp_assoc_t *assoc_id)
{
        int err = 0;
        struct sockaddr *kaddrs;

        pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
                 __func__, sk, addrs, addrs_size);

        if (unlikely(addrs_size <= 0))
                return -EINVAL;

        /* Check the user passed a healthy pointer. */
        if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
                return -EFAULT;

        /* Alloc space for the address array in kernel memory. */
        kaddrs = kmalloc(addrs_size, GFP_KERNEL);
        if (unlikely(!kaddrs))
                return -ENOMEM;

        if (__copy_from_user(kaddrs, addrs, addrs_size)) {
                err = -EFAULT;
        } else {
                err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
        }

        kfree(kaddrs);

        return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * with the option that doesn't provide an association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
                                        struct sockaddr __user *addrs,
                                        int addrs_size)
{
        return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is implemented as a socket
 * option, to keep it simple we feed the association id back as the
 * return indication to the call.  Errors are always negative and
 * association ids are always positive.
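 *
 * Illustrative sketch of how a caller of this helper disambiguates that
 * multiplexed return value (hypothetical local variable names):
 *
 *        int ret = sctp_setsockopt_connectx(sk, addrs, addrs_size);
 *
 *        if (ret < 0)
 *                return ret;              (an errno such as -EINPROGRESS)
 *        assoc_id = (sctp_assoc_t)ret;    (otherwise the new association id)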
 */
static int sctp_setsockopt_connectx(struct sock *sk,
                                    struct sockaddr __user *addrs,
                                    int addrs_size)
{
        sctp_assoc_t assoc_id = 0;
        int err = 0;

        err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

        if (err)
                return err;
        else
                return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations.  The only different part is
 * that we store the actual length of the address buffer into the
 * addrs_num structure member.  That way we can re-use the existing
 * code.
 */
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
                                     char __user *optval,
                                     int __user *optlen)
{
        struct sctp_getaddrs_old param;
        sctp_assoc_t assoc_id = 0;
        int err = 0;

        if (len < sizeof(param))
                return -EINVAL;

        if (copy_from_user(&param, optval, sizeof(param)))
                return -EFAULT;

        err = __sctp_setsockopt_connectx(sk,
                                         (struct sockaddr __user *)param.addrs,
                                         param.addr_num, &assoc_id);

        if (err == 0 || err == -EINPROGRESS) {
                if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
                        return -EFAULT;
                if (put_user(sizeof(assoc_id), optlen))
                        return -EFAULT;
        }

        return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 * };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
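 *
 * Illustrative user-space sketch of the ABORT-on-close behaviour
 * described above (a TCP-style socket is assumed):
 *
 *        struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *
 *        setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *        close(sd);                     (sends ABORT instead of SHUTDOWN)
 *
 * With l_onoff set and a positive l_linger, close() instead blocks for
 * up to the configured linger time while the graceful shutdown runs.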
1451 */ 1452 static void sctp_close(struct sock *sk, long timeout) 1453 { 1454 struct net *net = sock_net(sk); 1455 struct sctp_endpoint *ep; 1456 struct sctp_association *asoc; 1457 struct list_head *pos, *temp; 1458 unsigned int data_was_unread; 1459 1460 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); 1461 1462 sctp_lock_sock(sk); 1463 sk->sk_shutdown = SHUTDOWN_MASK; 1464 sk->sk_state = SCTP_SS_CLOSING; 1465 1466 ep = sctp_sk(sk)->ep; 1467 1468 /* Clean up any skbs sitting on the receive queue. */ 1469 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); 1470 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); 1471 1472 /* Walk all associations on an endpoint. */ 1473 list_for_each_safe(pos, temp, &ep->asocs) { 1474 asoc = list_entry(pos, struct sctp_association, asocs); 1475 1476 if (sctp_style(sk, TCP)) { 1477 /* A closed association can still be in the list if 1478 * it belongs to a TCP-style listening socket that is 1479 * not yet accepted. If so, free it. If not, send an 1480 * ABORT or SHUTDOWN based on the linger options. 1481 */ 1482 if (sctp_state(asoc, CLOSED)) { 1483 sctp_unhash_established(asoc); 1484 sctp_association_free(asoc); 1485 continue; 1486 } 1487 } 1488 1489 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || 1490 !skb_queue_empty(&asoc->ulpq.reasm) || 1491 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { 1492 struct sctp_chunk *chunk; 1493 1494 chunk = sctp_make_abort_user(asoc, NULL, 0); 1495 if (chunk) 1496 sctp_primitive_ABORT(net, asoc, chunk); 1497 } else 1498 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1499 } 1500 1501 /* On a TCP-style socket, block for at most linger_time if set. */ 1502 if (sctp_style(sk, TCP) && timeout) 1503 sctp_wait_for_close(sk, timeout); 1504 1505 /* This will run the backlog queue. */ 1506 sctp_release_sock(sk); 1507 1508 /* Supposedly, no process has access to the socket, but 1509 * the net layers still may. 1510 */ 1511 sctp_local_bh_disable(); 1512 sctp_bh_lock_sock(sk); 1513 1514 /* Hold the sock, since sk_common_release() will put sock_put() 1515 * and we have just a little more cleanup. 1516 */ 1517 sock_hold(sk); 1518 sk_common_release(sk); 1519 1520 sctp_bh_unlock_sock(sk); 1521 sctp_local_bh_enable(); 1522 1523 sock_put(sk); 1524 1525 SCTP_DBG_OBJCNT_DEC(sock); 1526 } 1527 1528 /* Handle EPIPE error. */ 1529 static int sctp_error(struct sock *sk, int flags, int err) 1530 { 1531 if (err == -EPIPE) 1532 err = sock_error(sk) ? : -EPIPE; 1533 if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) 1534 send_sig(SIGPIPE, current, 0); 1535 return err; 1536 } 1537 1538 /* API 3.1.3 sendmsg() - UDP Style Syntax 1539 * 1540 * An application uses sendmsg() and recvmsg() calls to transmit data to 1541 * and receive data from its peer. 1542 * 1543 * ssize_t sendmsg(int socket, const struct msghdr *message, 1544 * int flags); 1545 * 1546 * socket - the socket descriptor of the endpoint. 1547 * message - pointer to the msghdr structure which contains a single 1548 * user message and possibly some ancillary data. 1549 * 1550 * See Section 5 for complete description of the data 1551 * structures. 1552 * 1553 * flags - flags sent or received with the user message, see Section 1554 * 5 for complete description of the flags. 1555 * 1556 * Note: This function could use a rewrite especially when explicit 1557 * connect support comes in. 1558 */ 1559 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). 
*/ 1560 1561 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); 1562 1563 static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, 1564 struct msghdr *msg, size_t msg_len) 1565 { 1566 struct net *net = sock_net(sk); 1567 struct sctp_sock *sp; 1568 struct sctp_endpoint *ep; 1569 struct sctp_association *new_asoc=NULL, *asoc=NULL; 1570 struct sctp_transport *transport, *chunk_tp; 1571 struct sctp_chunk *chunk; 1572 union sctp_addr to; 1573 struct sockaddr *msg_name = NULL; 1574 struct sctp_sndrcvinfo default_sinfo; 1575 struct sctp_sndrcvinfo *sinfo; 1576 struct sctp_initmsg *sinit; 1577 sctp_assoc_t associd = 0; 1578 sctp_cmsgs_t cmsgs = { NULL }; 1579 int err; 1580 sctp_scope_t scope; 1581 long timeo; 1582 __u16 sinfo_flags = 0; 1583 struct sctp_datamsg *datamsg; 1584 int msg_flags = msg->msg_flags; 1585 1586 err = 0; 1587 sp = sctp_sk(sk); 1588 ep = sp->ep; 1589 1590 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, 1591 msg, msg_len, ep); 1592 1593 /* We cannot send a message over a TCP-style listening socket. */ 1594 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { 1595 err = -EPIPE; 1596 goto out_nounlock; 1597 } 1598 1599 /* Parse out the SCTP CMSGs. */ 1600 err = sctp_msghdr_parse(msg, &cmsgs); 1601 if (err) { 1602 pr_debug("%s: msghdr parse err:%x\n", __func__, err); 1603 goto out_nounlock; 1604 } 1605 1606 /* Fetch the destination address for this packet. This 1607 * address only selects the association--it is not necessarily 1608 * the address we will send to. 1609 * For a peeled-off socket, msg_name is ignored. 1610 */ 1611 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { 1612 int msg_namelen = msg->msg_namelen; 1613 1614 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, 1615 msg_namelen); 1616 if (err) 1617 return err; 1618 1619 if (msg_namelen > sizeof(to)) 1620 msg_namelen = sizeof(to); 1621 memcpy(&to, msg->msg_name, msg_namelen); 1622 msg_name = msg->msg_name; 1623 } 1624 1625 sinfo = cmsgs.info; 1626 sinit = cmsgs.init; 1627 1628 /* Did the user specify SNDRCVINFO? */ 1629 if (sinfo) { 1630 sinfo_flags = sinfo->sinfo_flags; 1631 associd = sinfo->sinfo_assoc_id; 1632 } 1633 1634 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, 1635 msg_len, sinfo_flags); 1636 1637 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ 1638 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { 1639 err = -EINVAL; 1640 goto out_nounlock; 1641 } 1642 1643 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero 1644 * length messages when SCTP_EOF|SCTP_ABORT is not set. 1645 * If SCTP_ABORT is set, the message length could be non zero with 1646 * the msg_iov set to the user abort reason. 1647 */ 1648 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || 1649 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { 1650 err = -EINVAL; 1651 goto out_nounlock; 1652 } 1653 1654 /* If SCTP_ADDR_OVER is set, there must be an address 1655 * specified in msg_name. 1656 */ 1657 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { 1658 err = -EINVAL; 1659 goto out_nounlock; 1660 } 1661 1662 transport = NULL; 1663 1664 pr_debug("%s: about to look up association\n", __func__); 1665 1666 sctp_lock_sock(sk); 1667 1668 /* If a msg_name has been specified, assume this is to be used. */ 1669 if (msg_name) { 1670 /* Look for a matching association on the endpoint. 
*/ 1671 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1672 if (!asoc) { 1673 /* If we could not find a matching association on the 1674 * endpoint, make sure that it is not a TCP-style 1675 * socket that already has an association or there is 1676 * no peeled-off association on another socket. 1677 */ 1678 if ((sctp_style(sk, TCP) && 1679 sctp_sstate(sk, ESTABLISHED)) || 1680 sctp_endpoint_is_peeled_off(ep, &to)) { 1681 err = -EADDRNOTAVAIL; 1682 goto out_unlock; 1683 } 1684 } 1685 } else { 1686 asoc = sctp_id2assoc(sk, associd); 1687 if (!asoc) { 1688 err = -EPIPE; 1689 goto out_unlock; 1690 } 1691 } 1692 1693 if (asoc) { 1694 pr_debug("%s: just looked up association:%p\n", __func__, asoc); 1695 1696 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED 1697 * socket that has an association in CLOSED state. This can 1698 * happen when an accepted socket has an association that is 1699 * already CLOSED. 1700 */ 1701 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { 1702 err = -EPIPE; 1703 goto out_unlock; 1704 } 1705 1706 if (sinfo_flags & SCTP_EOF) { 1707 pr_debug("%s: shutting down association:%p\n", 1708 __func__, asoc); 1709 1710 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1711 err = 0; 1712 goto out_unlock; 1713 } 1714 if (sinfo_flags & SCTP_ABORT) { 1715 1716 chunk = sctp_make_abort_user(asoc, msg, msg_len); 1717 if (!chunk) { 1718 err = -ENOMEM; 1719 goto out_unlock; 1720 } 1721 1722 pr_debug("%s: aborting association:%p\n", 1723 __func__, asoc); 1724 1725 sctp_primitive_ABORT(net, asoc, chunk); 1726 err = 0; 1727 goto out_unlock; 1728 } 1729 } 1730 1731 /* Do we need to create the association? */ 1732 if (!asoc) { 1733 pr_debug("%s: there is no association yet\n", __func__); 1734 1735 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { 1736 err = -EINVAL; 1737 goto out_unlock; 1738 } 1739 1740 /* Check for invalid stream against the stream counts, 1741 * either the default or the user specified stream counts. 1742 */ 1743 if (sinfo) { 1744 if (!sinit || (sinit && !sinit->sinit_num_ostreams)) { 1745 /* Check against the defaults. */ 1746 if (sinfo->sinfo_stream >= 1747 sp->initmsg.sinit_num_ostreams) { 1748 err = -EINVAL; 1749 goto out_unlock; 1750 } 1751 } else { 1752 /* Check against the requested. */ 1753 if (sinfo->sinfo_stream >= 1754 sinit->sinit_num_ostreams) { 1755 err = -EINVAL; 1756 goto out_unlock; 1757 } 1758 } 1759 } 1760 1761 /* 1762 * API 3.1.2 bind() - UDP Style Syntax 1763 * If a bind() or sctp_bindx() is not called prior to a 1764 * sendmsg() call that initiates a new association, the 1765 * system picks an ephemeral port and will choose an address 1766 * set equivalent to binding with a wildcard address. 1767 */ 1768 if (!ep->base.bind_addr.port) { 1769 if (sctp_autobind(sk)) { 1770 err = -EAGAIN; 1771 goto out_unlock; 1772 } 1773 } else { 1774 /* 1775 * If an unprivileged user inherits a one-to-many 1776 * style socket with open associations on a privileged 1777 * port, it MAY be permitted to accept new associations, 1778 * but it SHOULD NOT be permitted to open new 1779 * associations. 
1780 */ 1781 if (ep->base.bind_addr.port < PROT_SOCK && 1782 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1783 err = -EACCES; 1784 goto out_unlock; 1785 } 1786 } 1787 1788 scope = sctp_scope(&to); 1789 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1790 if (!new_asoc) { 1791 err = -ENOMEM; 1792 goto out_unlock; 1793 } 1794 asoc = new_asoc; 1795 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1796 if (err < 0) { 1797 err = -ENOMEM; 1798 goto out_free; 1799 } 1800 1801 /* If the SCTP_INIT ancillary data is specified, set all 1802 * the association init values accordingly. 1803 */ 1804 if (sinit) { 1805 if (sinit->sinit_num_ostreams) { 1806 asoc->c.sinit_num_ostreams = 1807 sinit->sinit_num_ostreams; 1808 } 1809 if (sinit->sinit_max_instreams) { 1810 asoc->c.sinit_max_instreams = 1811 sinit->sinit_max_instreams; 1812 } 1813 if (sinit->sinit_max_attempts) { 1814 asoc->max_init_attempts 1815 = sinit->sinit_max_attempts; 1816 } 1817 if (sinit->sinit_max_init_timeo) { 1818 asoc->max_init_timeo = 1819 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1820 } 1821 } 1822 1823 /* Prime the peer's transport structures. */ 1824 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1825 if (!transport) { 1826 err = -ENOMEM; 1827 goto out_free; 1828 } 1829 } 1830 1831 /* ASSERT: we have a valid association at this point. */ 1832 pr_debug("%s: we have a valid association\n", __func__); 1833 1834 if (!sinfo) { 1835 /* If the user didn't specify SNDRCVINFO, make up one with 1836 * some defaults. 1837 */ 1838 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1839 default_sinfo.sinfo_stream = asoc->default_stream; 1840 default_sinfo.sinfo_flags = asoc->default_flags; 1841 default_sinfo.sinfo_ppid = asoc->default_ppid; 1842 default_sinfo.sinfo_context = asoc->default_context; 1843 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1844 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1845 sinfo = &default_sinfo; 1846 } 1847 1848 /* API 7.1.7, the sndbuf size per association bounds the 1849 * maximum size of data that can be sent in a single send call. 1850 */ 1851 if (msg_len > sk->sk_sndbuf) { 1852 err = -EMSGSIZE; 1853 goto out_free; 1854 } 1855 1856 if (asoc->pmtu_pending) 1857 sctp_assoc_pending_pmtu(sk, asoc); 1858 1859 /* If fragmentation is disabled and the message length exceeds the 1860 * association fragmentation point, return EMSGSIZE. The I-D 1861 * does not specify what this error is, but this looks like 1862 * a great fit. 1863 */ 1864 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1865 err = -EMSGSIZE; 1866 goto out_free; 1867 } 1868 1869 /* Check for invalid stream. */ 1870 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1871 err = -EINVAL; 1872 goto out_free; 1873 } 1874 1875 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1876 if (!sctp_wspace(asoc)) { 1877 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1878 if (err) 1879 goto out_free; 1880 } 1881 1882 /* If an address is passed with the sendto/sendmsg call, it is used 1883 * to override the primary destination address in the TCP model, or 1884 * when SCTP_ADDR_OVER flag is set in the UDP model. 1885 */ 1886 if ((sctp_style(sk, TCP) && msg_name) || 1887 (sinfo_flags & SCTP_ADDR_OVER)) { 1888 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1889 if (!chunk_tp) { 1890 err = -EINVAL; 1891 goto out_free; 1892 } 1893 } else 1894 chunk_tp = NULL; 1895 1896 /* Auto-connect, if we aren't connected already. 
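 *
 * (Illustrative user-space trigger for this path, not part of this
 * file: on a one-to-many socket, a plain sendto() to a peer we have
 * never connected to ends up here and sets up the association
 * implicitly,
 *
 *        sendto(sd, buf, len, 0, (struct sockaddr *)&peer, sizeof(peer));
 *
 * with the SCTP_COMM_UP notification, if subscribed, reporting the new
 * association id.)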
*/ 1897 if (sctp_state(asoc, CLOSED)) { 1898 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1899 if (err < 0) 1900 goto out_free; 1901 1902 pr_debug("%s: we associated primitively\n", __func__); 1903 } 1904 1905 /* Break the message into multiple chunks of maximum size. */ 1906 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); 1907 if (IS_ERR(datamsg)) { 1908 err = PTR_ERR(datamsg); 1909 goto out_free; 1910 } 1911 1912 /* Now send the (possibly) fragmented message. */ 1913 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1914 sctp_chunk_hold(chunk); 1915 1916 /* Do accounting for the write space. */ 1917 sctp_set_owner_w(chunk); 1918 1919 chunk->transport = chunk_tp; 1920 } 1921 1922 /* Send it to the lower layers. Note: all chunks 1923 * must either fail or succeed. The lower layer 1924 * works that way today. Keep it that way or this 1925 * breaks. 1926 */ 1927 err = sctp_primitive_SEND(net, asoc, datamsg); 1928 /* Did the lower layer accept the chunk? */ 1929 if (err) { 1930 sctp_datamsg_free(datamsg); 1931 goto out_free; 1932 } 1933 1934 pr_debug("%s: we sent primitively\n", __func__); 1935 1936 sctp_datamsg_put(datamsg); 1937 err = msg_len; 1938 1939 /* If we are already past ASSOCIATE, the lower 1940 * layers are responsible for association cleanup. 1941 */ 1942 goto out_unlock; 1943 1944 out_free: 1945 if (new_asoc) { 1946 sctp_unhash_established(asoc); 1947 sctp_association_free(asoc); 1948 } 1949 out_unlock: 1950 sctp_release_sock(sk); 1951 1952 out_nounlock: 1953 return sctp_error(sk, msg_flags, err); 1954 1955 #if 0 1956 do_sock_err: 1957 if (msg_len) 1958 err = msg_len; 1959 else 1960 err = sock_error(sk); 1961 goto out; 1962 1963 do_interrupted: 1964 if (msg_len) 1965 err = msg_len; 1966 goto out; 1967 #endif /* 0 */ 1968 } 1969 1970 /* This is an extended version of skb_pull() that removes the data from the 1971 * start of a skb even when data is spread across the list of skb's in the 1972 * frag_list. len specifies the total amount of data that needs to be removed. 1973 * when 'len' bytes could be removed from the skb, it returns 0. 1974 * If 'len' exceeds the total skb length, it returns the no. of bytes that 1975 * could not be removed. 1976 */ 1977 static int sctp_skb_pull(struct sk_buff *skb, int len) 1978 { 1979 struct sk_buff *list; 1980 int skb_len = skb_headlen(skb); 1981 int rlen; 1982 1983 if (len <= skb_len) { 1984 __skb_pull(skb, len); 1985 return 0; 1986 } 1987 len -= skb_len; 1988 __skb_pull(skb, skb_len); 1989 1990 skb_walk_frags(skb, list) { 1991 rlen = sctp_skb_pull(list, len); 1992 skb->len -= (len-rlen); 1993 skb->data_len -= (len-rlen); 1994 1995 if (!rlen) 1996 return 0; 1997 1998 len = rlen; 1999 } 2000 2001 return len; 2002 } 2003 2004 /* API 3.1.3 recvmsg() - UDP Style Syntax 2005 * 2006 * ssize_t recvmsg(int socket, struct msghdr *message, 2007 * int flags); 2008 * 2009 * socket - the socket descriptor of the endpoint. 2010 * message - pointer to the msghdr structure which contains a single 2011 * user message and possibly some ancillary data. 2012 * 2013 * See Section 5 for complete description of the data 2014 * structures. 2015 * 2016 * flags - flags sent or received with the user message, see Section 2017 * 5 for complete description of the flags. 
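 *
 * The sketch below is illustrative only and is not part of the API text.
 * It assumes "sd" is an already bound or connected SCTP socket and that
 * the user program includes <netinet/sctp.h>.
 *
 *   char buf[2048];
 *   char cmsgbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *   struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *   struct msghdr mh = { 0 };
 *   ssize_t n;
 *
 *   mh.msg_iov        = &iov;
 *   mh.msg_iovlen     = 1;
 *   mh.msg_control    = cmsgbuf;
 *   mh.msg_controllen = sizeof(cmsgbuf);
 *
 *   n = recvmsg(sd, &mh, 0);
 *
 * If MSG_NOTIFICATION is set in mh.msg_flags, buf holds an SCTP event
 * rather than user data; if MSG_EOR is clear, only part of the message
 * was returned and another recvmsg() call retrieves the remainder. An
 * SCTP_SNDRCV ancillary message is attached only when the
 * sctp_data_io_event flag has been subscribed via SCTP_EVENTS.
 *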
2018 */ 2019 static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); 2020 2021 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, 2022 struct msghdr *msg, size_t len, int noblock, 2023 int flags, int *addr_len) 2024 { 2025 struct sctp_ulpevent *event = NULL; 2026 struct sctp_sock *sp = sctp_sk(sk); 2027 struct sk_buff *skb; 2028 int copied; 2029 int err = 0; 2030 int skb_len; 2031 2032 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2033 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2034 addr_len); 2035 2036 sctp_lock_sock(sk); 2037 2038 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2039 err = -ENOTCONN; 2040 goto out; 2041 } 2042 2043 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2044 if (!skb) 2045 goto out; 2046 2047 /* Get the total length of the skb including any skb's in the 2048 * frag_list. 2049 */ 2050 skb_len = skb->len; 2051 2052 copied = skb_len; 2053 if (copied > len) 2054 copied = len; 2055 2056 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 2057 2058 event = sctp_skb2event(skb); 2059 2060 if (err) 2061 goto out_free; 2062 2063 sock_recv_ts_and_drops(msg, sk, skb); 2064 if (sctp_ulpevent_is_notification(event)) { 2065 msg->msg_flags |= MSG_NOTIFICATION; 2066 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2067 } else { 2068 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2069 } 2070 2071 /* Check if we allow SCTP_SNDRCVINFO. */ 2072 if (sp->subscribe.sctp_data_io_event) 2073 sctp_ulpevent_read_sndrcvinfo(event, msg); 2074 #if 0 2075 /* FIXME: we should be calling IP/IPv6 layers. */ 2076 if (sk->sk_protinfo.af_inet.cmsg_flags) 2077 ip_cmsg_recv(msg, skb); 2078 #endif 2079 2080 err = copied; 2081 2082 /* If skb's length exceeds the user's buffer, update the skb and 2083 * push it back to the receive_queue so that the next call to 2084 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2085 */ 2086 if (skb_len > copied) { 2087 msg->msg_flags &= ~MSG_EOR; 2088 if (flags & MSG_PEEK) 2089 goto out_free; 2090 sctp_skb_pull(skb, copied); 2091 skb_queue_head(&sk->sk_receive_queue, skb); 2092 2093 /* When only partial message is copied to the user, increase 2094 * rwnd by that amount. If all the data in the skb is read, 2095 * rwnd is updated when the event is freed. 2096 */ 2097 if (!sctp_ulpevent_is_notification(event)) 2098 sctp_assoc_rwnd_increase(event->asoc, copied); 2099 goto out; 2100 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2101 (event->msg_flags & MSG_EOR)) 2102 msg->msg_flags |= MSG_EOR; 2103 else 2104 msg->msg_flags &= ~MSG_EOR; 2105 2106 out_free: 2107 if (flags & MSG_PEEK) { 2108 /* Release the skb reference acquired after peeking the skb in 2109 * sctp_skb_recv_datagram(). 2110 */ 2111 kfree_skb(skb); 2112 } else { 2113 /* Free the event which includes releasing the reference to 2114 * the owner of the skb, freeing the skb and updating the 2115 * rwnd. 2116 */ 2117 sctp_ulpevent_free(event); 2118 } 2119 out: 2120 sctp_release_sock(sk); 2121 return err; 2122 } 2123 2124 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2125 * 2126 * This option is a on/off flag. If enabled no SCTP message 2127 * fragmentation will be performed. Instead if a message being sent 2128 * exceeds the current PMTU size, the message will NOT be sent and 2129 * instead a error will be indicated to the user. 
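 *
 * A minimal illustration, assuming "sd" is an SCTP socket descriptor
 * and <netinet/sctp.h> is included:
 *
 *   int on = 1;
 *
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *                  &on, sizeof(on)) < 0)
 *           perror("SCTP_DISABLE_FRAGMENTS");
 *
 * With the flag set, a later send that exceeds the association's
 * fragmentation point fails with EMSGSIZE instead of being fragmented.
 *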
2130 */ 2131 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2132 char __user *optval, 2133 unsigned int optlen) 2134 { 2135 int val; 2136 2137 if (optlen < sizeof(int)) 2138 return -EINVAL; 2139 2140 if (get_user(val, (int __user *)optval)) 2141 return -EFAULT; 2142 2143 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2144 2145 return 0; 2146 } 2147 2148 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2149 unsigned int optlen) 2150 { 2151 struct sctp_association *asoc; 2152 struct sctp_ulpevent *event; 2153 2154 if (optlen > sizeof(struct sctp_event_subscribe)) 2155 return -EINVAL; 2156 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2157 return -EFAULT; 2158 2159 /* 2160 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2161 * if there is no data to be sent or retransmit, the stack will 2162 * immediately send up this notification. 2163 */ 2164 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2165 &sctp_sk(sk)->subscribe)) { 2166 asoc = sctp_id2assoc(sk, 0); 2167 2168 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2169 event = sctp_ulpevent_make_sender_dry_event(asoc, 2170 GFP_ATOMIC); 2171 if (!event) 2172 return -ENOMEM; 2173 2174 sctp_ulpq_tail_event(&asoc->ulpq, event); 2175 } 2176 } 2177 2178 return 0; 2179 } 2180 2181 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2182 * 2183 * This socket option is applicable to the UDP-style socket only. When 2184 * set it will cause associations that are idle for more than the 2185 * specified number of seconds to automatically close. An association 2186 * being idle is defined an association that has NOT sent or received 2187 * user data. The special value of '0' indicates that no automatic 2188 * close of any associations should be performed. The option expects an 2189 * integer defining the number of seconds of idle time before an 2190 * association is closed. 2191 */ 2192 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2193 unsigned int optlen) 2194 { 2195 struct sctp_sock *sp = sctp_sk(sk); 2196 2197 /* Applicable to UDP-style socket only */ 2198 if (sctp_style(sk, TCP)) 2199 return -EOPNOTSUPP; 2200 if (optlen != sizeof(int)) 2201 return -EINVAL; 2202 if (copy_from_user(&sp->autoclose, optval, optlen)) 2203 return -EFAULT; 2204 2205 return 0; 2206 } 2207 2208 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2209 * 2210 * Applications can enable or disable heartbeats for any peer address of 2211 * an association, modify an address's heartbeat interval, force a 2212 * heartbeat to be sent immediately, and adjust the address's maximum 2213 * number of retransmissions sent before an address is considered 2214 * unreachable. The following structure is used to access and modify an 2215 * address's parameters: 2216 * 2217 * struct sctp_paddrparams { 2218 * sctp_assoc_t spp_assoc_id; 2219 * struct sockaddr_storage spp_address; 2220 * uint32_t spp_hbinterval; 2221 * uint16_t spp_pathmaxrxt; 2222 * uint32_t spp_pathmtu; 2223 * uint32_t spp_sackdelay; 2224 * uint32_t spp_flags; 2225 * }; 2226 * 2227 * spp_assoc_id - (one-to-many style socket) This is filled in the 2228 * application, and identifies the association for 2229 * this query. 2230 * spp_address - This specifies which address is of interest. 2231 * spp_hbinterval - This contains the value of the heartbeat interval, 2232 * in milliseconds. If a value of zero 2233 * is present in this field then no changes are to 2234 * be made to this parameter. 
 * spp_pathmaxrxt  - This contains the maximum number of
 *                   retransmissions before this address shall be
 *                   considered unreachable. If a value of zero
 *                   is present in this field then no changes are to
 *                   be made to this parameter.
 * spp_pathmtu     - When Path MTU discovery is disabled the value
 *                   specified here will be the "fixed" path mtu.
 *                   Note that if the spp_address field is empty
 *                   then all associations on this address will
 *                   have this fixed path mtu set upon them.
 *
 * spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                   the number of milliseconds that sacks will be delayed
 *                   for. This value will apply to all addresses of an
 *                   association if the spp_address field is empty. Note
 *                   also that if delayed sack is enabled and this
 *                   value is set to 0, no change is made to the last
 *                   recorded delayed sack timer value.
 *
 * spp_flags       - These flags are used to control various features
 *                   on an association. The flag field may contain
 *                   zero or more of the following options.
 *
 *                   SPP_HB_ENABLE - Enable heartbeats on the
 *                   specified address. Note that if the address
 *                   field is empty all addresses for the association
 *                   have heartbeats enabled upon them.
 *
 *                   SPP_HB_DISABLE - Disable heartbeats on the
 *                   specified address. Note that if the address
 *                   field is empty all addresses for the association
 *                   will have their heartbeats disabled. Note also
 *                   that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                   mutually exclusive, only one of these two should
 *                   be specified. Enabling both fields will have
 *                   undetermined results.
 *
 *                   SPP_HB_DEMAND - Request a user initiated heartbeat
 *                   to be made immediately.
 *
 *                   SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                   heartbeat delay is to be set to the value of 0
 *                   milliseconds.
 *
 *                   SPP_PMTUD_ENABLE - This field will enable PMTU
 *                   discovery upon the specified address. Note that
 *                   if the address field is empty then all addresses
 *                   on the association are affected.
 *
 *                   SPP_PMTUD_DISABLE - This field will disable PMTU
 *                   discovery upon the specified address. Note that
 *                   if the address field is empty then all addresses
 *                   on the association are affected. Note also that
 *                   SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                   exclusive. Enabling both will have undetermined
 *                   results.
 *
 *                   SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                   on delayed sack. The time specified in spp_sackdelay
 *                   is used to specify the sack delay for this address. Note
 *                   that if spp_address is empty then all addresses will
 *                   enable delayed sack and take on the sack delay
 *                   value specified in spp_sackdelay.
 *
 *                   SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                   off delayed sack. If the spp_address field is blank then
 *                   delayed sack is disabled for the entire association. Note
 *                   also that this field is mutually exclusive to
 *                   SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                   results.
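 *
 * An illustrative sketch (not part of the API text), assuming "sd" is an
 * SCTP socket and "id" is a valid association id (0 on a one-to-one
 * style socket):
 *
 *   struct sctp_paddrparams pp;
 *
 *   memset(&pp, 0, sizeof(pp));
 *   pp.spp_assoc_id   = id;
 *   pp.spp_flags      = SPP_HB_ENABLE;
 *   pp.spp_hbinterval = 5000;
 *
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *                  &pp, sizeof(pp)) < 0)
 *           perror("SCTP_PEER_ADDR_PARAMS");
 *
 * Leaving spp_address zeroed applies the change to every address of the
 * association; the heartbeat interval is given in milliseconds.
 *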
2304 */ 2305 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2306 struct sctp_transport *trans, 2307 struct sctp_association *asoc, 2308 struct sctp_sock *sp, 2309 int hb_change, 2310 int pmtud_change, 2311 int sackdelay_change) 2312 { 2313 int error; 2314 2315 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2316 struct net *net = sock_net(trans->asoc->base.sk); 2317 2318 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2319 if (error) 2320 return error; 2321 } 2322 2323 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2324 * this field is ignored. Note also that a value of zero indicates 2325 * the current setting should be left unchanged. 2326 */ 2327 if (params->spp_flags & SPP_HB_ENABLE) { 2328 2329 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2330 * set. This lets us use 0 value when this flag 2331 * is set. 2332 */ 2333 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2334 params->spp_hbinterval = 0; 2335 2336 if (params->spp_hbinterval || 2337 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2338 if (trans) { 2339 trans->hbinterval = 2340 msecs_to_jiffies(params->spp_hbinterval); 2341 } else if (asoc) { 2342 asoc->hbinterval = 2343 msecs_to_jiffies(params->spp_hbinterval); 2344 } else { 2345 sp->hbinterval = params->spp_hbinterval; 2346 } 2347 } 2348 } 2349 2350 if (hb_change) { 2351 if (trans) { 2352 trans->param_flags = 2353 (trans->param_flags & ~SPP_HB) | hb_change; 2354 } else if (asoc) { 2355 asoc->param_flags = 2356 (asoc->param_flags & ~SPP_HB) | hb_change; 2357 } else { 2358 sp->param_flags = 2359 (sp->param_flags & ~SPP_HB) | hb_change; 2360 } 2361 } 2362 2363 /* When Path MTU discovery is disabled the value specified here will 2364 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2365 * include the flag SPP_PMTUD_DISABLE for this field to have any 2366 * effect). 2367 */ 2368 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2369 if (trans) { 2370 trans->pathmtu = params->spp_pathmtu; 2371 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2372 } else if (asoc) { 2373 asoc->pathmtu = params->spp_pathmtu; 2374 sctp_frag_point(asoc, params->spp_pathmtu); 2375 } else { 2376 sp->pathmtu = params->spp_pathmtu; 2377 } 2378 } 2379 2380 if (pmtud_change) { 2381 if (trans) { 2382 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2383 (params->spp_flags & SPP_PMTUD_ENABLE); 2384 trans->param_flags = 2385 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2386 if (update) { 2387 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2388 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2389 } 2390 } else if (asoc) { 2391 asoc->param_flags = 2392 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2393 } else { 2394 sp->param_flags = 2395 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2396 } 2397 } 2398 2399 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2400 * value of this field is ignored. Note also that a value of zero 2401 * indicates the current setting should be left unchanged. 
2402 */ 2403 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2404 if (trans) { 2405 trans->sackdelay = 2406 msecs_to_jiffies(params->spp_sackdelay); 2407 } else if (asoc) { 2408 asoc->sackdelay = 2409 msecs_to_jiffies(params->spp_sackdelay); 2410 } else { 2411 sp->sackdelay = params->spp_sackdelay; 2412 } 2413 } 2414 2415 if (sackdelay_change) { 2416 if (trans) { 2417 trans->param_flags = 2418 (trans->param_flags & ~SPP_SACKDELAY) | 2419 sackdelay_change; 2420 } else if (asoc) { 2421 asoc->param_flags = 2422 (asoc->param_flags & ~SPP_SACKDELAY) | 2423 sackdelay_change; 2424 } else { 2425 sp->param_flags = 2426 (sp->param_flags & ~SPP_SACKDELAY) | 2427 sackdelay_change; 2428 } 2429 } 2430 2431 /* Note that a value of zero indicates the current setting should be 2432 left unchanged. 2433 */ 2434 if (params->spp_pathmaxrxt) { 2435 if (trans) { 2436 trans->pathmaxrxt = params->spp_pathmaxrxt; 2437 } else if (asoc) { 2438 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2439 } else { 2440 sp->pathmaxrxt = params->spp_pathmaxrxt; 2441 } 2442 } 2443 2444 return 0; 2445 } 2446 2447 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2448 char __user *optval, 2449 unsigned int optlen) 2450 { 2451 struct sctp_paddrparams params; 2452 struct sctp_transport *trans = NULL; 2453 struct sctp_association *asoc = NULL; 2454 struct sctp_sock *sp = sctp_sk(sk); 2455 int error; 2456 int hb_change, pmtud_change, sackdelay_change; 2457 2458 if (optlen != sizeof(struct sctp_paddrparams)) 2459 return - EINVAL; 2460 2461 if (copy_from_user(¶ms, optval, optlen)) 2462 return -EFAULT; 2463 2464 /* Validate flags and value parameters. */ 2465 hb_change = params.spp_flags & SPP_HB; 2466 pmtud_change = params.spp_flags & SPP_PMTUD; 2467 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2468 2469 if (hb_change == SPP_HB || 2470 pmtud_change == SPP_PMTUD || 2471 sackdelay_change == SPP_SACKDELAY || 2472 params.spp_sackdelay > 500 || 2473 (params.spp_pathmtu && 2474 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2475 return -EINVAL; 2476 2477 /* If an address other than INADDR_ANY is specified, and 2478 * no transport is found, then the request is invalid. 2479 */ 2480 if (!sctp_is_any(sk, ( union sctp_addr *)¶ms.spp_address)) { 2481 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 2482 params.spp_assoc_id); 2483 if (!trans) 2484 return -EINVAL; 2485 } 2486 2487 /* Get association, if assoc_id != 0 and the socket is a one 2488 * to many style socket, and an association was not found, then 2489 * the id was invalid. 2490 */ 2491 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2492 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2493 return -EINVAL; 2494 2495 /* Heartbeat demand can only be sent on a transport or 2496 * association, but not a socket. 2497 */ 2498 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2499 return -EINVAL; 2500 2501 /* Process parameters. */ 2502 error = sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2503 hb_change, pmtud_change, 2504 sackdelay_change); 2505 2506 if (error) 2507 return error; 2508 2509 /* If changes are for association, also apply parameters to each 2510 * transport. 2511 */ 2512 if (!trans && asoc) { 2513 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2514 transports) { 2515 sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2516 hb_change, pmtud_change, 2517 sackdelay_change); 2518 } 2519 } 2520 2521 return 0; 2522 } 2523 2524 /* 2525 * 7.1.23. 
Get or set delayed ack timer (SCTP_DELAYED_SACK) 2526 * 2527 * This option will effect the way delayed acks are performed. This 2528 * option allows you to get or set the delayed ack time, in 2529 * milliseconds. It also allows changing the delayed ack frequency. 2530 * Changing the frequency to 1 disables the delayed sack algorithm. If 2531 * the assoc_id is 0, then this sets or gets the endpoints default 2532 * values. If the assoc_id field is non-zero, then the set or get 2533 * effects the specified association for the one to many model (the 2534 * assoc_id field is ignored by the one to one model). Note that if 2535 * sack_delay or sack_freq are 0 when setting this option, then the 2536 * current values will remain unchanged. 2537 * 2538 * struct sctp_sack_info { 2539 * sctp_assoc_t sack_assoc_id; 2540 * uint32_t sack_delay; 2541 * uint32_t sack_freq; 2542 * }; 2543 * 2544 * sack_assoc_id - This parameter, indicates which association the user 2545 * is performing an action upon. Note that if this field's value is 2546 * zero then the endpoints default value is changed (effecting future 2547 * associations only). 2548 * 2549 * sack_delay - This parameter contains the number of milliseconds that 2550 * the user is requesting the delayed ACK timer be set to. Note that 2551 * this value is defined in the standard to be between 200 and 500 2552 * milliseconds. 2553 * 2554 * sack_freq - This parameter contains the number of packets that must 2555 * be received before a sack is sent without waiting for the delay 2556 * timer to expire. The default value for this is 2, setting this 2557 * value to 1 will disable the delayed sack algorithm. 2558 */ 2559 2560 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2561 char __user *optval, unsigned int optlen) 2562 { 2563 struct sctp_sack_info params; 2564 struct sctp_transport *trans = NULL; 2565 struct sctp_association *asoc = NULL; 2566 struct sctp_sock *sp = sctp_sk(sk); 2567 2568 if (optlen == sizeof(struct sctp_sack_info)) { 2569 if (copy_from_user(¶ms, optval, optlen)) 2570 return -EFAULT; 2571 2572 if (params.sack_delay == 0 && params.sack_freq == 0) 2573 return 0; 2574 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2575 pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n"); 2576 pr_warn("Use struct sctp_sack_info instead\n"); 2577 if (copy_from_user(¶ms, optval, optlen)) 2578 return -EFAULT; 2579 2580 if (params.sack_delay == 0) 2581 params.sack_freq = 1; 2582 else 2583 params.sack_freq = 0; 2584 } else 2585 return - EINVAL; 2586 2587 /* Validate value parameter. */ 2588 if (params.sack_delay > 500) 2589 return -EINVAL; 2590 2591 /* Get association, if sack_assoc_id != 0 and the socket is a one 2592 * to many style socket, and an association was not found, then 2593 * the id was invalid. 
2594 */ 2595 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2596 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2597 return -EINVAL; 2598 2599 if (params.sack_delay) { 2600 if (asoc) { 2601 asoc->sackdelay = 2602 msecs_to_jiffies(params.sack_delay); 2603 asoc->param_flags = 2604 (asoc->param_flags & ~SPP_SACKDELAY) | 2605 SPP_SACKDELAY_ENABLE; 2606 } else { 2607 sp->sackdelay = params.sack_delay; 2608 sp->param_flags = 2609 (sp->param_flags & ~SPP_SACKDELAY) | 2610 SPP_SACKDELAY_ENABLE; 2611 } 2612 } 2613 2614 if (params.sack_freq == 1) { 2615 if (asoc) { 2616 asoc->param_flags = 2617 (asoc->param_flags & ~SPP_SACKDELAY) | 2618 SPP_SACKDELAY_DISABLE; 2619 } else { 2620 sp->param_flags = 2621 (sp->param_flags & ~SPP_SACKDELAY) | 2622 SPP_SACKDELAY_DISABLE; 2623 } 2624 } else if (params.sack_freq > 1) { 2625 if (asoc) { 2626 asoc->sackfreq = params.sack_freq; 2627 asoc->param_flags = 2628 (asoc->param_flags & ~SPP_SACKDELAY) | 2629 SPP_SACKDELAY_ENABLE; 2630 } else { 2631 sp->sackfreq = params.sack_freq; 2632 sp->param_flags = 2633 (sp->param_flags & ~SPP_SACKDELAY) | 2634 SPP_SACKDELAY_ENABLE; 2635 } 2636 } 2637 2638 /* If change is for association, also apply to each transport. */ 2639 if (asoc) { 2640 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2641 transports) { 2642 if (params.sack_delay) { 2643 trans->sackdelay = 2644 msecs_to_jiffies(params.sack_delay); 2645 trans->param_flags = 2646 (trans->param_flags & ~SPP_SACKDELAY) | 2647 SPP_SACKDELAY_ENABLE; 2648 } 2649 if (params.sack_freq == 1) { 2650 trans->param_flags = 2651 (trans->param_flags & ~SPP_SACKDELAY) | 2652 SPP_SACKDELAY_DISABLE; 2653 } else if (params.sack_freq > 1) { 2654 trans->sackfreq = params.sack_freq; 2655 trans->param_flags = 2656 (trans->param_flags & ~SPP_SACKDELAY) | 2657 SPP_SACKDELAY_ENABLE; 2658 } 2659 } 2660 } 2661 2662 return 0; 2663 } 2664 2665 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2666 * 2667 * Applications can specify protocol parameters for the default association 2668 * initialization. The option name argument to setsockopt() and getsockopt() 2669 * is SCTP_INITMSG. 2670 * 2671 * Setting initialization parameters is effective only on an unconnected 2672 * socket (for UDP-style sockets only future associations are effected 2673 * by the change). With TCP-style sockets, this option is inherited by 2674 * sockets derived from a listener socket. 2675 */ 2676 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2677 { 2678 struct sctp_initmsg sinit; 2679 struct sctp_sock *sp = sctp_sk(sk); 2680 2681 if (optlen != sizeof(struct sctp_initmsg)) 2682 return -EINVAL; 2683 if (copy_from_user(&sinit, optval, optlen)) 2684 return -EFAULT; 2685 2686 if (sinit.sinit_num_ostreams) 2687 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2688 if (sinit.sinit_max_instreams) 2689 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2690 if (sinit.sinit_max_attempts) 2691 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2692 if (sinit.sinit_max_init_timeo) 2693 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2694 2695 return 0; 2696 } 2697 2698 /* 2699 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2700 * 2701 * Applications that wish to use the sendto() system call may wish to 2702 * specify a default set of parameters that would normally be supplied 2703 * through the inclusion of ancillary data. 
This socket option allows 2704 * such an application to set the default sctp_sndrcvinfo structure. 2705 * The application that wishes to use this socket option simply passes 2706 * in to this call the sctp_sndrcvinfo structure defined in Section 2707 * 5.2.2) The input parameters accepted by this call include 2708 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2709 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2710 * to this call if the caller is using the UDP model. 2711 */ 2712 static int sctp_setsockopt_default_send_param(struct sock *sk, 2713 char __user *optval, 2714 unsigned int optlen) 2715 { 2716 struct sctp_sndrcvinfo info; 2717 struct sctp_association *asoc; 2718 struct sctp_sock *sp = sctp_sk(sk); 2719 2720 if (optlen != sizeof(struct sctp_sndrcvinfo)) 2721 return -EINVAL; 2722 if (copy_from_user(&info, optval, optlen)) 2723 return -EFAULT; 2724 2725 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2726 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2727 return -EINVAL; 2728 2729 if (asoc) { 2730 asoc->default_stream = info.sinfo_stream; 2731 asoc->default_flags = info.sinfo_flags; 2732 asoc->default_ppid = info.sinfo_ppid; 2733 asoc->default_context = info.sinfo_context; 2734 asoc->default_timetolive = info.sinfo_timetolive; 2735 } else { 2736 sp->default_stream = info.sinfo_stream; 2737 sp->default_flags = info.sinfo_flags; 2738 sp->default_ppid = info.sinfo_ppid; 2739 sp->default_context = info.sinfo_context; 2740 sp->default_timetolive = info.sinfo_timetolive; 2741 } 2742 2743 return 0; 2744 } 2745 2746 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2747 * 2748 * Requests that the local SCTP stack use the enclosed peer address as 2749 * the association primary. The enclosed address must be one of the 2750 * association peer's addresses. 2751 */ 2752 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2753 unsigned int optlen) 2754 { 2755 struct sctp_prim prim; 2756 struct sctp_transport *trans; 2757 2758 if (optlen != sizeof(struct sctp_prim)) 2759 return -EINVAL; 2760 2761 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2762 return -EFAULT; 2763 2764 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2765 if (!trans) 2766 return -EINVAL; 2767 2768 sctp_assoc_set_primary(trans->asoc, trans); 2769 2770 return 0; 2771 } 2772 2773 /* 2774 * 7.1.5 SCTP_NODELAY 2775 * 2776 * Turn on/off any Nagle-like algorithm. This means that packets are 2777 * generally sent as soon as possible and no unnecessary delays are 2778 * introduced, at the cost of more packets in the network. Expects an 2779 * integer boolean flag. 2780 */ 2781 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2782 unsigned int optlen) 2783 { 2784 int val; 2785 2786 if (optlen < sizeof(int)) 2787 return -EINVAL; 2788 if (get_user(val, (int __user *)optval)) 2789 return -EFAULT; 2790 2791 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2792 return 0; 2793 } 2794 2795 /* 2796 * 2797 * 7.1.1 SCTP_RTOINFO 2798 * 2799 * The protocol parameters used to initialize and bound retransmission 2800 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2801 * and modify these parameters. 2802 * All parameters are time values, in milliseconds. A value of 0, when 2803 * modifying the parameters, indicates that the current value should not 2804 * be changed. 
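 *
 * An illustrative sketch (not part of the API text), assuming "sd" is an
 * SCTP socket and the endpoint defaults are being changed:
 *
 *   struct sctp_rtoinfo rto;
 *
 *   memset(&rto, 0, sizeof(rto));
 *   rto.srto_assoc_id = 0;
 *   rto.srto_initial  = 300;
 *   rto.srto_min      = 100;
 *   rto.srto_max      = 2000;
 *
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto)) < 0)
 *           perror("SCTP_RTOINFO");
 *
 * All values are in milliseconds; leaving any field at 0 keeps the
 * current setting for that parameter.
 *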
2805 * 2806 */ 2807 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2808 { 2809 struct sctp_rtoinfo rtoinfo; 2810 struct sctp_association *asoc; 2811 2812 if (optlen != sizeof (struct sctp_rtoinfo)) 2813 return -EINVAL; 2814 2815 if (copy_from_user(&rtoinfo, optval, optlen)) 2816 return -EFAULT; 2817 2818 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2819 2820 /* Set the values to the specific association */ 2821 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2822 return -EINVAL; 2823 2824 if (asoc) { 2825 if (rtoinfo.srto_initial != 0) 2826 asoc->rto_initial = 2827 msecs_to_jiffies(rtoinfo.srto_initial); 2828 if (rtoinfo.srto_max != 0) 2829 asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max); 2830 if (rtoinfo.srto_min != 0) 2831 asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min); 2832 } else { 2833 /* If there is no association or the association-id = 0 2834 * set the values to the endpoint. 2835 */ 2836 struct sctp_sock *sp = sctp_sk(sk); 2837 2838 if (rtoinfo.srto_initial != 0) 2839 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2840 if (rtoinfo.srto_max != 0) 2841 sp->rtoinfo.srto_max = rtoinfo.srto_max; 2842 if (rtoinfo.srto_min != 0) 2843 sp->rtoinfo.srto_min = rtoinfo.srto_min; 2844 } 2845 2846 return 0; 2847 } 2848 2849 /* 2850 * 2851 * 7.1.2 SCTP_ASSOCINFO 2852 * 2853 * This option is used to tune the maximum retransmission attempts 2854 * of the association. 2855 * Returns an error if the new association retransmission value is 2856 * greater than the sum of the retransmission value of the peer. 2857 * See [SCTP] for more information. 2858 * 2859 */ 2860 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2861 { 2862 2863 struct sctp_assocparams assocparams; 2864 struct sctp_association *asoc; 2865 2866 if (optlen != sizeof(struct sctp_assocparams)) 2867 return -EINVAL; 2868 if (copy_from_user(&assocparams, optval, optlen)) 2869 return -EFAULT; 2870 2871 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2872 2873 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2874 return -EINVAL; 2875 2876 /* Set the values to the specific association */ 2877 if (asoc) { 2878 if (assocparams.sasoc_asocmaxrxt != 0) { 2879 __u32 path_sum = 0; 2880 int paths = 0; 2881 struct sctp_transport *peer_addr; 2882 2883 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 2884 transports) { 2885 path_sum += peer_addr->pathmaxrxt; 2886 paths++; 2887 } 2888 2889 /* Only validate asocmaxrxt if we have more than 2890 * one path/transport. We do this because path 2891 * retransmissions are only counted when we have more 2892 * then one path. 2893 */ 2894 if (paths > 1 && 2895 assocparams.sasoc_asocmaxrxt > path_sum) 2896 return -EINVAL; 2897 2898 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 2899 } 2900 2901 if (assocparams.sasoc_cookie_life != 0) 2902 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 2903 } else { 2904 /* Set the values to the endpoint */ 2905 struct sctp_sock *sp = sctp_sk(sk); 2906 2907 if (assocparams.sasoc_asocmaxrxt != 0) 2908 sp->assocparams.sasoc_asocmaxrxt = 2909 assocparams.sasoc_asocmaxrxt; 2910 if (assocparams.sasoc_cookie_life != 0) 2911 sp->assocparams.sasoc_cookie_life = 2912 assocparams.sasoc_cookie_life; 2913 } 2914 return 0; 2915 } 2916 2917 /* 2918 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 2919 * 2920 * This socket option is a boolean flag which turns on or off mapped V4 2921 * addresses. 
If this option is turned on and the socket is type 2922 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 2923 * If this option is turned off, then no mapping will be done of V4 2924 * addresses and a user will receive both PF_INET6 and PF_INET type 2925 * addresses on the socket. 2926 */ 2927 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 2928 { 2929 int val; 2930 struct sctp_sock *sp = sctp_sk(sk); 2931 2932 if (optlen < sizeof(int)) 2933 return -EINVAL; 2934 if (get_user(val, (int __user *)optval)) 2935 return -EFAULT; 2936 if (val) 2937 sp->v4mapped = 1; 2938 else 2939 sp->v4mapped = 0; 2940 2941 return 0; 2942 } 2943 2944 /* 2945 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 2946 * This option will get or set the maximum size to put in any outgoing 2947 * SCTP DATA chunk. If a message is larger than this size it will be 2948 * fragmented by SCTP into the specified size. Note that the underlying 2949 * SCTP implementation may fragment into smaller sized chunks when the 2950 * PMTU of the underlying association is smaller than the value set by 2951 * the user. The default value for this option is '0' which indicates 2952 * the user is NOT limiting fragmentation and only the PMTU will effect 2953 * SCTP's choice of DATA chunk size. Note also that values set larger 2954 * than the maximum size of an IP datagram will effectively let SCTP 2955 * control fragmentation (i.e. the same as setting this option to 0). 2956 * 2957 * The following structure is used to access and modify this parameter: 2958 * 2959 * struct sctp_assoc_value { 2960 * sctp_assoc_t assoc_id; 2961 * uint32_t assoc_value; 2962 * }; 2963 * 2964 * assoc_id: This parameter is ignored for one-to-one style sockets. 2965 * For one-to-many style sockets this parameter indicates which 2966 * association the user is performing an action upon. Note that if 2967 * this field's value is zero then the endpoints default value is 2968 * changed (effecting future associations only). 2969 * assoc_value: This parameter specifies the maximum size in bytes. 
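 *
 * For illustration only, assuming "sd" is an SCTP socket and the
 * endpoint default is being changed:
 *
 *   struct sctp_assoc_value av;
 *
 *   memset(&av, 0, sizeof(av));
 *   av.assoc_id    = 0;
 *   av.assoc_value = 1400;
 *
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av)) < 0)
 *           perror("SCTP_MAXSEG");
 *
 * Passing a plain int instead of struct sctp_assoc_value is still
 * accepted by this implementation but is deprecated.
 *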
2970 */ 2971 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 2972 { 2973 struct sctp_assoc_value params; 2974 struct sctp_association *asoc; 2975 struct sctp_sock *sp = sctp_sk(sk); 2976 int val; 2977 2978 if (optlen == sizeof(int)) { 2979 pr_warn("Use of int in maxseg socket option deprecated\n"); 2980 pr_warn("Use struct sctp_assoc_value instead\n"); 2981 if (copy_from_user(&val, optval, optlen)) 2982 return -EFAULT; 2983 params.assoc_id = 0; 2984 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2985 if (copy_from_user(¶ms, optval, optlen)) 2986 return -EFAULT; 2987 val = params.assoc_value; 2988 } else 2989 return -EINVAL; 2990 2991 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 2992 return -EINVAL; 2993 2994 asoc = sctp_id2assoc(sk, params.assoc_id); 2995 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 2996 return -EINVAL; 2997 2998 if (asoc) { 2999 if (val == 0) { 3000 val = asoc->pathmtu; 3001 val -= sp->pf->af->net_header_len; 3002 val -= sizeof(struct sctphdr) + 3003 sizeof(struct sctp_data_chunk); 3004 } 3005 asoc->user_frag = val; 3006 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3007 } else { 3008 sp->user_frag = val; 3009 } 3010 3011 return 0; 3012 } 3013 3014 3015 /* 3016 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3017 * 3018 * Requests that the peer mark the enclosed address as the association 3019 * primary. The enclosed address must be one of the association's 3020 * locally bound addresses. The following structure is used to make a 3021 * set primary request: 3022 */ 3023 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3024 unsigned int optlen) 3025 { 3026 struct net *net = sock_net(sk); 3027 struct sctp_sock *sp; 3028 struct sctp_association *asoc = NULL; 3029 struct sctp_setpeerprim prim; 3030 struct sctp_chunk *chunk; 3031 struct sctp_af *af; 3032 int err; 3033 3034 sp = sctp_sk(sk); 3035 3036 if (!net->sctp.addip_enable) 3037 return -EPERM; 3038 3039 if (optlen != sizeof(struct sctp_setpeerprim)) 3040 return -EINVAL; 3041 3042 if (copy_from_user(&prim, optval, optlen)) 3043 return -EFAULT; 3044 3045 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3046 if (!asoc) 3047 return -EINVAL; 3048 3049 if (!asoc->peer.asconf_capable) 3050 return -EPERM; 3051 3052 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3053 return -EPERM; 3054 3055 if (!sctp_state(asoc, ESTABLISHED)) 3056 return -ENOTCONN; 3057 3058 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3059 if (!af) 3060 return -EINVAL; 3061 3062 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3063 return -EADDRNOTAVAIL; 3064 3065 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3066 return -EADDRNOTAVAIL; 3067 3068 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3069 chunk = sctp_make_asconf_set_prim(asoc, 3070 (union sctp_addr *)&prim.sspp_addr); 3071 if (!chunk) 3072 return -ENOMEM; 3073 3074 err = sctp_send_asconf(asoc, chunk); 3075 3076 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3077 3078 return err; 3079 } 3080 3081 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3082 unsigned int optlen) 3083 { 3084 struct sctp_setadaptation adaptation; 3085 3086 if (optlen != sizeof(struct sctp_setadaptation)) 3087 return -EINVAL; 3088 if (copy_from_user(&adaptation, optval, optlen)) 3089 return -EFAULT; 3090 3091 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3092 
3093 return 0; 3094 } 3095 3096 /* 3097 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3098 * 3099 * The context field in the sctp_sndrcvinfo structure is normally only 3100 * used when a failed message is retrieved holding the value that was 3101 * sent down on the actual send call. This option allows the setting of 3102 * a default context on an association basis that will be received on 3103 * reading messages from the peer. This is especially helpful in the 3104 * one-2-many model for an application to keep some reference to an 3105 * internal state machine that is processing messages on the 3106 * association. Note that the setting of this value only effects 3107 * received messages from the peer and does not effect the value that is 3108 * saved with outbound messages. 3109 */ 3110 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3111 unsigned int optlen) 3112 { 3113 struct sctp_assoc_value params; 3114 struct sctp_sock *sp; 3115 struct sctp_association *asoc; 3116 3117 if (optlen != sizeof(struct sctp_assoc_value)) 3118 return -EINVAL; 3119 if (copy_from_user(¶ms, optval, optlen)) 3120 return -EFAULT; 3121 3122 sp = sctp_sk(sk); 3123 3124 if (params.assoc_id != 0) { 3125 asoc = sctp_id2assoc(sk, params.assoc_id); 3126 if (!asoc) 3127 return -EINVAL; 3128 asoc->default_rcv_context = params.assoc_value; 3129 } else { 3130 sp->default_rcv_context = params.assoc_value; 3131 } 3132 3133 return 0; 3134 } 3135 3136 /* 3137 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3138 * 3139 * This options will at a minimum specify if the implementation is doing 3140 * fragmented interleave. Fragmented interleave, for a one to many 3141 * socket, is when subsequent calls to receive a message may return 3142 * parts of messages from different associations. Some implementations 3143 * may allow you to turn this value on or off. If so, when turned off, 3144 * no fragment interleave will occur (which will cause a head of line 3145 * blocking amongst multiple associations sharing the same one to many 3146 * socket). When this option is turned on, then each receive call may 3147 * come from a different association (thus the user must receive data 3148 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3149 * association each receive belongs to. 3150 * 3151 * This option takes a boolean value. A non-zero value indicates that 3152 * fragmented interleave is on. A value of zero indicates that 3153 * fragmented interleave is off. 3154 * 3155 * Note that it is important that an implementation that allows this 3156 * option to be turned on, have it off by default. Otherwise an unaware 3157 * application using the one to many model may become confused and act 3158 * incorrectly. 3159 */ 3160 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3161 char __user *optval, 3162 unsigned int optlen) 3163 { 3164 int val; 3165 3166 if (optlen != sizeof(int)) 3167 return -EINVAL; 3168 if (get_user(val, (int __user *)optval)) 3169 return -EFAULT; 3170 3171 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3172 3173 return 0; 3174 } 3175 3176 /* 3177 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3178 * (SCTP_PARTIAL_DELIVERY_POINT) 3179 * 3180 * This option will set or get the SCTP partial delivery point. This 3181 * point is the size of a message where the partial delivery API will be 3182 * invoked to help free up rwnd space for the peer. Setting this to a 3183 * lower value will cause partial deliveries to happen more often. 
The 3184 * calls argument is an integer that sets or gets the partial delivery 3185 * point. Note also that the call will fail if the user attempts to set 3186 * this value larger than the socket receive buffer size. 3187 * 3188 * Note that any single message having a length smaller than or equal to 3189 * the SCTP partial delivery point will be delivered in one single read 3190 * call as long as the user provided buffer is large enough to hold the 3191 * message. 3192 */ 3193 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3194 char __user *optval, 3195 unsigned int optlen) 3196 { 3197 u32 val; 3198 3199 if (optlen != sizeof(u32)) 3200 return -EINVAL; 3201 if (get_user(val, (int __user *)optval)) 3202 return -EFAULT; 3203 3204 /* Note: We double the receive buffer from what the user sets 3205 * it to be, also initial rwnd is based on rcvbuf/2. 3206 */ 3207 if (val > (sk->sk_rcvbuf >> 1)) 3208 return -EINVAL; 3209 3210 sctp_sk(sk)->pd_point = val; 3211 3212 return 0; /* is this the right error code? */ 3213 } 3214 3215 /* 3216 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3217 * 3218 * This option will allow a user to change the maximum burst of packets 3219 * that can be emitted by this association. Note that the default value 3220 * is 4, and some implementations may restrict this setting so that it 3221 * can only be lowered. 3222 * 3223 * NOTE: This text doesn't seem right. Do this on a socket basis with 3224 * future associations inheriting the socket value. 3225 */ 3226 static int sctp_setsockopt_maxburst(struct sock *sk, 3227 char __user *optval, 3228 unsigned int optlen) 3229 { 3230 struct sctp_assoc_value params; 3231 struct sctp_sock *sp; 3232 struct sctp_association *asoc; 3233 int val; 3234 int assoc_id = 0; 3235 3236 if (optlen == sizeof(int)) { 3237 pr_warn("Use of int in max_burst socket option deprecated\n"); 3238 pr_warn("Use struct sctp_assoc_value instead\n"); 3239 if (copy_from_user(&val, optval, optlen)) 3240 return -EFAULT; 3241 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3242 if (copy_from_user(¶ms, optval, optlen)) 3243 return -EFAULT; 3244 val = params.assoc_value; 3245 assoc_id = params.assoc_id; 3246 } else 3247 return -EINVAL; 3248 3249 sp = sctp_sk(sk); 3250 3251 if (assoc_id != 0) { 3252 asoc = sctp_id2assoc(sk, assoc_id); 3253 if (!asoc) 3254 return -EINVAL; 3255 asoc->max_burst = val; 3256 } else 3257 sp->max_burst = val; 3258 3259 return 0; 3260 } 3261 3262 /* 3263 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3264 * 3265 * This set option adds a chunk type that the user is requesting to be 3266 * received only in an authenticated way. Changes to the list of chunks 3267 * will only effect future associations on the socket. 3268 */ 3269 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3270 char __user *optval, 3271 unsigned int optlen) 3272 { 3273 struct net *net = sock_net(sk); 3274 struct sctp_authchunk val; 3275 3276 if (!net->sctp.auth_enable) 3277 return -EACCES; 3278 3279 if (optlen != sizeof(struct sctp_authchunk)) 3280 return -EINVAL; 3281 if (copy_from_user(&val, optval, optlen)) 3282 return -EFAULT; 3283 3284 switch (val.sauth_chunk) { 3285 case SCTP_CID_INIT: 3286 case SCTP_CID_INIT_ACK: 3287 case SCTP_CID_SHUTDOWN_COMPLETE: 3288 case SCTP_CID_AUTH: 3289 return -EINVAL; 3290 } 3291 3292 /* add this chunk id to the endpoint */ 3293 return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk); 3294 } 3295 3296 /* 3297 * 7.1.19. 
Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3298 * 3299 * This option gets or sets the list of HMAC algorithms that the local 3300 * endpoint requires the peer to use. 3301 */ 3302 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3303 char __user *optval, 3304 unsigned int optlen) 3305 { 3306 struct net *net = sock_net(sk); 3307 struct sctp_hmacalgo *hmacs; 3308 u32 idents; 3309 int err; 3310 3311 if (!net->sctp.auth_enable) 3312 return -EACCES; 3313 3314 if (optlen < sizeof(struct sctp_hmacalgo)) 3315 return -EINVAL; 3316 3317 hmacs= memdup_user(optval, optlen); 3318 if (IS_ERR(hmacs)) 3319 return PTR_ERR(hmacs); 3320 3321 idents = hmacs->shmac_num_idents; 3322 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3323 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3324 err = -EINVAL; 3325 goto out; 3326 } 3327 3328 err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs); 3329 out: 3330 kfree(hmacs); 3331 return err; 3332 } 3333 3334 /* 3335 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3336 * 3337 * This option will set a shared secret key which is used to build an 3338 * association shared key. 3339 */ 3340 static int sctp_setsockopt_auth_key(struct sock *sk, 3341 char __user *optval, 3342 unsigned int optlen) 3343 { 3344 struct net *net = sock_net(sk); 3345 struct sctp_authkey *authkey; 3346 struct sctp_association *asoc; 3347 int ret; 3348 3349 if (!net->sctp.auth_enable) 3350 return -EACCES; 3351 3352 if (optlen <= sizeof(struct sctp_authkey)) 3353 return -EINVAL; 3354 3355 authkey= memdup_user(optval, optlen); 3356 if (IS_ERR(authkey)) 3357 return PTR_ERR(authkey); 3358 3359 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3360 ret = -EINVAL; 3361 goto out; 3362 } 3363 3364 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3365 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3366 ret = -EINVAL; 3367 goto out; 3368 } 3369 3370 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3371 out: 3372 kzfree(authkey); 3373 return ret; 3374 } 3375 3376 /* 3377 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3378 * 3379 * This option will get or set the active shared key to be used to build 3380 * the association shared key. 3381 */ 3382 static int sctp_setsockopt_active_key(struct sock *sk, 3383 char __user *optval, 3384 unsigned int optlen) 3385 { 3386 struct net *net = sock_net(sk); 3387 struct sctp_authkeyid val; 3388 struct sctp_association *asoc; 3389 3390 if (!net->sctp.auth_enable) 3391 return -EACCES; 3392 3393 if (optlen != sizeof(struct sctp_authkeyid)) 3394 return -EINVAL; 3395 if (copy_from_user(&val, optval, optlen)) 3396 return -EFAULT; 3397 3398 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3399 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3400 return -EINVAL; 3401 3402 return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc, 3403 val.scact_keynumber); 3404 } 3405 3406 /* 3407 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3408 * 3409 * This set option will delete a shared secret key from use. 
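 *
 * A rough, illustrative key-management sequence (not part of the API
 * text). It assumes "sd" is an SCTP socket, SCTP AUTH is enabled, and
 * "secret" points to 16 raw key bytes; field names follow the
 * sctp_authkey/sctp_authkeyid layout of the sockets API and error
 * checking is omitted:
 *
 *   size_t klen = 16;
 *   struct sctp_authkey *ak;
 *   struct sctp_authkeyid kid;
 *
 *   ak = calloc(1, sizeof(*ak) + klen);
 *   ak->sca_keynumber = 1;
 *   ak->sca_keylength = klen;
 *   memcpy(ak->sca_key, secret, klen);
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, sizeof(*ak) + klen);
 *   free(ak);
 *
 *   memset(&kid, 0, sizeof(kid));
 *   kid.scact_keynumber = 1;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY, &kid, sizeof(kid));
 *
 * Once a different key number has been made active, the old key can be
 * removed with SCTP_AUTH_DELETE_KEY using the same sctp_authkeyid layout.
 *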
3410 */ 3411 static int sctp_setsockopt_del_key(struct sock *sk, 3412 char __user *optval, 3413 unsigned int optlen) 3414 { 3415 struct net *net = sock_net(sk); 3416 struct sctp_authkeyid val; 3417 struct sctp_association *asoc; 3418 3419 if (!net->sctp.auth_enable) 3420 return -EACCES; 3421 3422 if (optlen != sizeof(struct sctp_authkeyid)) 3423 return -EINVAL; 3424 if (copy_from_user(&val, optval, optlen)) 3425 return -EFAULT; 3426 3427 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3428 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3429 return -EINVAL; 3430 3431 return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc, 3432 val.scact_keynumber); 3433 3434 } 3435 3436 /* 3437 * 8.1.23 SCTP_AUTO_ASCONF 3438 * 3439 * This option will enable or disable the use of the automatic generation of 3440 * ASCONF chunks to add and delete addresses to an existing association. Note 3441 * that this option has two caveats namely: a) it only affects sockets that 3442 * are bound to all addresses available to the SCTP stack, and b) the system 3443 * administrator may have an overriding control that turns the ASCONF feature 3444 * off no matter what setting the socket option may have. 3445 * This option expects an integer boolean flag, where a non-zero value turns on 3446 * the option, and a zero value turns off the option. 3447 * Note. In this implementation, socket operation overrides default parameter 3448 * being set by sysctl as well as FreeBSD implementation 3449 */ 3450 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3451 unsigned int optlen) 3452 { 3453 int val; 3454 struct sctp_sock *sp = sctp_sk(sk); 3455 3456 if (optlen < sizeof(int)) 3457 return -EINVAL; 3458 if (get_user(val, (int __user *)optval)) 3459 return -EFAULT; 3460 if (!sctp_is_ep_boundall(sk) && val) 3461 return -EINVAL; 3462 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3463 return 0; 3464 3465 if (val == 0 && sp->do_auto_asconf) { 3466 list_del(&sp->auto_asconf_list); 3467 sp->do_auto_asconf = 0; 3468 } else if (val && !sp->do_auto_asconf) { 3469 list_add_tail(&sp->auto_asconf_list, 3470 &sock_net(sk)->sctp.auto_asconf_splist); 3471 sp->do_auto_asconf = 1; 3472 } 3473 return 0; 3474 } 3475 3476 3477 /* 3478 * SCTP_PEER_ADDR_THLDS 3479 * 3480 * This option allows us to alter the partially failed threshold for one or all 3481 * transports in an association. 
See Section 6.1 of: 3482 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3483 */ 3484 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3485 char __user *optval, 3486 unsigned int optlen) 3487 { 3488 struct sctp_paddrthlds val; 3489 struct sctp_transport *trans; 3490 struct sctp_association *asoc; 3491 3492 if (optlen < sizeof(struct sctp_paddrthlds)) 3493 return -EINVAL; 3494 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3495 sizeof(struct sctp_paddrthlds))) 3496 return -EFAULT; 3497 3498 3499 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3500 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3501 if (!asoc) 3502 return -ENOENT; 3503 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3504 transports) { 3505 if (val.spt_pathmaxrxt) 3506 trans->pathmaxrxt = val.spt_pathmaxrxt; 3507 trans->pf_retrans = val.spt_pathpfthld; 3508 } 3509 3510 if (val.spt_pathmaxrxt) 3511 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3512 asoc->pf_retrans = val.spt_pathpfthld; 3513 } else { 3514 trans = sctp_addr_id2transport(sk, &val.spt_address, 3515 val.spt_assoc_id); 3516 if (!trans) 3517 return -ENOENT; 3518 3519 if (val.spt_pathmaxrxt) 3520 trans->pathmaxrxt = val.spt_pathmaxrxt; 3521 trans->pf_retrans = val.spt_pathpfthld; 3522 } 3523 3524 return 0; 3525 } 3526 3527 /* API 6.2 setsockopt(), getsockopt() 3528 * 3529 * Applications use setsockopt() and getsockopt() to set or retrieve 3530 * socket options. Socket options are used to change the default 3531 * behavior of sockets calls. They are described in Section 7. 3532 * 3533 * The syntax is: 3534 * 3535 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3536 * int __user *optlen); 3537 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3538 * int optlen); 3539 * 3540 * sd - the socket descript. 3541 * level - set to IPPROTO_SCTP for all SCTP options. 3542 * optname - the option name. 3543 * optval - the buffer to store the value of the option. 3544 * optlen - the size of the buffer. 3545 */ 3546 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3547 char __user *optval, unsigned int optlen) 3548 { 3549 int retval = 0; 3550 3551 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3552 3553 /* I can hardly begin to describe how wrong this is. This is 3554 * so broken as to be worse than useless. The API draft 3555 * REALLY is NOT helpful here... I am not convinced that the 3556 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3557 * are at all well-founded. 3558 */ 3559 if (level != SOL_SCTP) { 3560 struct sctp_af *af = sctp_sk(sk)->pf->af; 3561 retval = af->setsockopt(sk, level, optname, optval, optlen); 3562 goto out_nounlock; 3563 } 3564 3565 sctp_lock_sock(sk); 3566 3567 switch (optname) { 3568 case SCTP_SOCKOPT_BINDX_ADD: 3569 /* 'optlen' is the size of the addresses buffer. */ 3570 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3571 optlen, SCTP_BINDX_ADD_ADDR); 3572 break; 3573 3574 case SCTP_SOCKOPT_BINDX_REM: 3575 /* 'optlen' is the size of the addresses buffer. */ 3576 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3577 optlen, SCTP_BINDX_REM_ADDR); 3578 break; 3579 3580 case SCTP_SOCKOPT_CONNECTX_OLD: 3581 /* 'optlen' is the size of the addresses buffer. 
*/ 3582 retval = sctp_setsockopt_connectx_old(sk, 3583 (struct sockaddr __user *)optval, 3584 optlen); 3585 break; 3586 3587 case SCTP_SOCKOPT_CONNECTX: 3588 /* 'optlen' is the size of the addresses buffer. */ 3589 retval = sctp_setsockopt_connectx(sk, 3590 (struct sockaddr __user *)optval, 3591 optlen); 3592 break; 3593 3594 case SCTP_DISABLE_FRAGMENTS: 3595 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3596 break; 3597 3598 case SCTP_EVENTS: 3599 retval = sctp_setsockopt_events(sk, optval, optlen); 3600 break; 3601 3602 case SCTP_AUTOCLOSE: 3603 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3604 break; 3605 3606 case SCTP_PEER_ADDR_PARAMS: 3607 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3608 break; 3609 3610 case SCTP_DELAYED_SACK: 3611 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3612 break; 3613 case SCTP_PARTIAL_DELIVERY_POINT: 3614 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3615 break; 3616 3617 case SCTP_INITMSG: 3618 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3619 break; 3620 case SCTP_DEFAULT_SEND_PARAM: 3621 retval = sctp_setsockopt_default_send_param(sk, optval, 3622 optlen); 3623 break; 3624 case SCTP_PRIMARY_ADDR: 3625 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3626 break; 3627 case SCTP_SET_PEER_PRIMARY_ADDR: 3628 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3629 break; 3630 case SCTP_NODELAY: 3631 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3632 break; 3633 case SCTP_RTOINFO: 3634 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3635 break; 3636 case SCTP_ASSOCINFO: 3637 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3638 break; 3639 case SCTP_I_WANT_MAPPED_V4_ADDR: 3640 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3641 break; 3642 case SCTP_MAXSEG: 3643 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3644 break; 3645 case SCTP_ADAPTATION_LAYER: 3646 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3647 break; 3648 case SCTP_CONTEXT: 3649 retval = sctp_setsockopt_context(sk, optval, optlen); 3650 break; 3651 case SCTP_FRAGMENT_INTERLEAVE: 3652 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3653 break; 3654 case SCTP_MAX_BURST: 3655 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3656 break; 3657 case SCTP_AUTH_CHUNK: 3658 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3659 break; 3660 case SCTP_HMAC_IDENT: 3661 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3662 break; 3663 case SCTP_AUTH_KEY: 3664 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3665 break; 3666 case SCTP_AUTH_ACTIVE_KEY: 3667 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3668 break; 3669 case SCTP_AUTH_DELETE_KEY: 3670 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3671 break; 3672 case SCTP_AUTO_ASCONF: 3673 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3674 break; 3675 case SCTP_PEER_ADDR_THLDS: 3676 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3677 break; 3678 default: 3679 retval = -ENOPROTOOPT; 3680 break; 3681 } 3682 3683 sctp_release_sock(sk); 3684 3685 out_nounlock: 3686 return retval; 3687 } 3688 3689 /* API 3.1.6 connect() - UDP Style Syntax 3690 * 3691 * An application may use the connect() call in the UDP model to initiate an 3692 * association without sending data. 
3693 * 3694 * The syntax is: 3695 * 3696 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3697 * 3698 * sd: the socket descriptor to have a new association added to. 3699 * 3700 * nam: the address structure (either struct sockaddr_in or struct 3701 * sockaddr_in6 defined in RFC2553 [7]). 3702 * 3703 * len: the size of the address. 3704 */ 3705 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3706 int addr_len) 3707 { 3708 int err = 0; 3709 struct sctp_af *af; 3710 3711 sctp_lock_sock(sk); 3712 3713 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3714 addr, addr_len); 3715 3716 /* Validate addr_len before calling common connect/connectx routine. */ 3717 af = sctp_get_af_specific(addr->sa_family); 3718 if (!af || addr_len < af->sockaddr_len) { 3719 err = -EINVAL; 3720 } else { 3721 /* Pass correct addr len to common routine (so it knows there 3722 * is only one address being passed. 3723 */ 3724 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3725 } 3726 3727 sctp_release_sock(sk); 3728 return err; 3729 } 3730 3731 /* FIXME: Write comments. */ 3732 static int sctp_disconnect(struct sock *sk, int flags) 3733 { 3734 return -EOPNOTSUPP; /* STUB */ 3735 } 3736 3737 /* 4.1.4 accept() - TCP Style Syntax 3738 * 3739 * Applications use accept() call to remove an established SCTP 3740 * association from the accept queue of the endpoint. A new socket 3741 * descriptor will be returned from accept() to represent the newly 3742 * formed association. 3743 */ 3744 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3745 { 3746 struct sctp_sock *sp; 3747 struct sctp_endpoint *ep; 3748 struct sock *newsk = NULL; 3749 struct sctp_association *asoc; 3750 long timeo; 3751 int error = 0; 3752 3753 sctp_lock_sock(sk); 3754 3755 sp = sctp_sk(sk); 3756 ep = sp->ep; 3757 3758 if (!sctp_style(sk, TCP)) { 3759 error = -EOPNOTSUPP; 3760 goto out; 3761 } 3762 3763 if (!sctp_sstate(sk, LISTENING)) { 3764 error = -EINVAL; 3765 goto out; 3766 } 3767 3768 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3769 3770 error = sctp_wait_for_accept(sk, timeo); 3771 if (error) 3772 goto out; 3773 3774 /* We treat the list of associations on the endpoint as the accept 3775 * queue and pick the first association on the list. 3776 */ 3777 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3778 3779 newsk = sp->pf->create_accept_sk(sk, asoc); 3780 if (!newsk) { 3781 error = -ENOMEM; 3782 goto out; 3783 } 3784 3785 /* Populate the fields of the newsk from the oldsk and migrate the 3786 * asoc to the newsk. 3787 */ 3788 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3789 3790 out: 3791 sctp_release_sock(sk); 3792 *err = error; 3793 return newsk; 3794 } 3795 3796 /* The SCTP ioctl handler. */ 3797 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3798 { 3799 int rc = -ENOTCONN; 3800 3801 sctp_lock_sock(sk); 3802 3803 /* 3804 * SEQPACKET-style sockets in LISTENING state are valid, for 3805 * SCTP, so only discard TCP-style sockets in LISTENING state. 3806 */ 3807 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3808 goto out; 3809 3810 switch (cmd) { 3811 case SIOCINQ: { 3812 struct sk_buff *skb; 3813 unsigned int amount = 0; 3814 3815 skb = skb_peek(&sk->sk_receive_queue); 3816 if (skb != NULL) { 3817 /* 3818 * We will only return the amount of this packet since 3819 * that is all that will be read. 
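	 *
	 * A hypothetical user-space caller (sketch only, not part of
	 * this file) would obtain that amount with:
	 *
	 *   int avail = 0;
	 *   ioctl(sd, SIOCINQ, &avail);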
3820 */ 3821 amount = skb->len; 3822 } 3823 rc = put_user(amount, (int __user *)arg); 3824 break; 3825 } 3826 default: 3827 rc = -ENOIOCTLCMD; 3828 break; 3829 } 3830 out: 3831 sctp_release_sock(sk); 3832 return rc; 3833 } 3834 3835 /* This is the function which gets called during socket creation to 3836 * initialized the SCTP-specific portion of the sock. 3837 * The sock structure should already be zero-filled memory. 3838 */ 3839 static int sctp_init_sock(struct sock *sk) 3840 { 3841 struct net *net = sock_net(sk); 3842 struct sctp_sock *sp; 3843 3844 pr_debug("%s: sk:%p\n", __func__, sk); 3845 3846 sp = sctp_sk(sk); 3847 3848 /* Initialize the SCTP per socket area. */ 3849 switch (sk->sk_type) { 3850 case SOCK_SEQPACKET: 3851 sp->type = SCTP_SOCKET_UDP; 3852 break; 3853 case SOCK_STREAM: 3854 sp->type = SCTP_SOCKET_TCP; 3855 break; 3856 default: 3857 return -ESOCKTNOSUPPORT; 3858 } 3859 3860 /* Initialize default send parameters. These parameters can be 3861 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 3862 */ 3863 sp->default_stream = 0; 3864 sp->default_ppid = 0; 3865 sp->default_flags = 0; 3866 sp->default_context = 0; 3867 sp->default_timetolive = 0; 3868 3869 sp->default_rcv_context = 0; 3870 sp->max_burst = net->sctp.max_burst; 3871 3872 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 3873 3874 /* Initialize default setup parameters. These parameters 3875 * can be modified with the SCTP_INITMSG socket option or 3876 * overridden by the SCTP_INIT CMSG. 3877 */ 3878 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 3879 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 3880 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 3881 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 3882 3883 /* Initialize default RTO related parameters. These parameters can 3884 * be modified for with the SCTP_RTOINFO socket option. 3885 */ 3886 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 3887 sp->rtoinfo.srto_max = net->sctp.rto_max; 3888 sp->rtoinfo.srto_min = net->sctp.rto_min; 3889 3890 /* Initialize default association related parameters. These parameters 3891 * can be modified with the SCTP_ASSOCINFO socket option. 3892 */ 3893 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 3894 sp->assocparams.sasoc_number_peer_destinations = 0; 3895 sp->assocparams.sasoc_peer_rwnd = 0; 3896 sp->assocparams.sasoc_local_rwnd = 0; 3897 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 3898 3899 /* Initialize default event subscriptions. By default, all the 3900 * options are off. 3901 */ 3902 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 3903 3904 /* Default Peer Address Parameters. These defaults can 3905 * be modified via SCTP_PEER_ADDR_PARAMS 3906 */ 3907 sp->hbinterval = net->sctp.hb_interval; 3908 sp->pathmaxrxt = net->sctp.max_retrans_path; 3909 sp->pathmtu = 0; // allow default discovery 3910 sp->sackdelay = net->sctp.sack_timeout; 3911 sp->sackfreq = 2; 3912 sp->param_flags = SPP_HB_ENABLE | 3913 SPP_PMTUD_ENABLE | 3914 SPP_SACKDELAY_ENABLE; 3915 3916 /* If enabled no SCTP message fragmentation will be performed. 3917 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 3918 */ 3919 sp->disable_fragments = 0; 3920 3921 /* Enable Nagle algorithm by default. */ 3922 sp->nodelay = 0; 3923 3924 /* Enable by default. */ 3925 sp->v4mapped = 1; 3926 3927 /* Auto-close idle associations after the configured 3928 * number of seconds. A value of 0 disables this 3929 * feature. 
Configure through the SCTP_AUTOCLOSE socket option, 3930 * for UDP-style sockets only. 3931 */ 3932 sp->autoclose = 0; 3933 3934 /* User specified fragmentation limit. */ 3935 sp->user_frag = 0; 3936 3937 sp->adaptation_ind = 0; 3938 3939 sp->pf = sctp_get_pf_specific(sk->sk_family); 3940 3941 /* Control variables for partial data delivery. */ 3942 atomic_set(&sp->pd_mode, 0); 3943 skb_queue_head_init(&sp->pd_lobby); 3944 sp->frag_interleave = 0; 3945 3946 /* Create a per socket endpoint structure. Even if we 3947 * change the data structure relationships, this may still 3948 * be useful for storing pre-connect address information. 3949 */ 3950 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 3951 if (!sp->ep) 3952 return -ENOMEM; 3953 3954 sp->hmac = NULL; 3955 3956 sk->sk_destruct = sctp_destruct_sock; 3957 3958 SCTP_DBG_OBJCNT_INC(sock); 3959 3960 local_bh_disable(); 3961 percpu_counter_inc(&sctp_sockets_allocated); 3962 sock_prot_inuse_add(net, sk->sk_prot, 1); 3963 if (net->sctp.default_auto_asconf) { 3964 list_add_tail(&sp->auto_asconf_list, 3965 &net->sctp.auto_asconf_splist); 3966 sp->do_auto_asconf = 1; 3967 } else 3968 sp->do_auto_asconf = 0; 3969 local_bh_enable(); 3970 3971 return 0; 3972 } 3973 3974 /* Cleanup any SCTP per socket resources. */ 3975 static void sctp_destroy_sock(struct sock *sk) 3976 { 3977 struct sctp_sock *sp; 3978 3979 pr_debug("%s: sk:%p\n", __func__, sk); 3980 3981 /* Release our hold on the endpoint. */ 3982 sp = sctp_sk(sk); 3983 /* This could happen during socket init, thus we bail out 3984 * early, since the rest of the below is not setup either. 3985 */ 3986 if (sp->ep == NULL) 3987 return; 3988 3989 if (sp->do_auto_asconf) { 3990 sp->do_auto_asconf = 0; 3991 list_del(&sp->auto_asconf_list); 3992 } 3993 sctp_endpoint_free(sp->ep); 3994 local_bh_disable(); 3995 percpu_counter_dec(&sctp_sockets_allocated); 3996 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3997 local_bh_enable(); 3998 } 3999 4000 /* Triggered when there are no references on the socket anymore */ 4001 static void sctp_destruct_sock(struct sock *sk) 4002 { 4003 struct sctp_sock *sp = sctp_sk(sk); 4004 4005 /* Free up the HMAC transform. */ 4006 crypto_free_hash(sp->hmac); 4007 4008 inet_sock_destruct(sk); 4009 } 4010 4011 /* API 4.1.7 shutdown() - TCP Style Syntax 4012 * int shutdown(int socket, int how); 4013 * 4014 * sd - the socket descriptor of the association to be closed. 4015 * how - Specifies the type of shutdown. The values are 4016 * as follows: 4017 * SHUT_RD 4018 * Disables further receive operations. No SCTP 4019 * protocol action is taken. 4020 * SHUT_WR 4021 * Disables further send operations, and initiates 4022 * the SCTP shutdown sequence. 4023 * SHUT_RDWR 4024 * Disables further send and receive operations 4025 * and initiates the SCTP shutdown sequence. 
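 *
 * As a user-space illustration only (a sketch, not part of this file),
 * a one-to-one style client typically triggers the SHUTDOWN sequence
 * while still being able to read pending data:
 *
 *   shutdown(sd, SHUT_WR);
 *
 * As the handler below shows, SHUT_RD takes no protocol action.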
4026 */ 4027 static void sctp_shutdown(struct sock *sk, int how) 4028 { 4029 struct net *net = sock_net(sk); 4030 struct sctp_endpoint *ep; 4031 struct sctp_association *asoc; 4032 4033 if (!sctp_style(sk, TCP)) 4034 return; 4035 4036 if (how & SEND_SHUTDOWN) { 4037 ep = sctp_sk(sk)->ep; 4038 if (!list_empty(&ep->asocs)) { 4039 asoc = list_entry(ep->asocs.next, 4040 struct sctp_association, asocs); 4041 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4042 } 4043 } 4044 } 4045 4046 /* 7.2.1 Association Status (SCTP_STATUS) 4047 4048 * Applications can retrieve current status information about an 4049 * association, including association state, peer receiver window size, 4050 * number of unacked data chunks, and number of data chunks pending 4051 * receipt. This information is read-only. 4052 */ 4053 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4054 char __user *optval, 4055 int __user *optlen) 4056 { 4057 struct sctp_status status; 4058 struct sctp_association *asoc = NULL; 4059 struct sctp_transport *transport; 4060 sctp_assoc_t associd; 4061 int retval = 0; 4062 4063 if (len < sizeof(status)) { 4064 retval = -EINVAL; 4065 goto out; 4066 } 4067 4068 len = sizeof(status); 4069 if (copy_from_user(&status, optval, len)) { 4070 retval = -EFAULT; 4071 goto out; 4072 } 4073 4074 associd = status.sstat_assoc_id; 4075 asoc = sctp_id2assoc(sk, associd); 4076 if (!asoc) { 4077 retval = -EINVAL; 4078 goto out; 4079 } 4080 4081 transport = asoc->peer.primary_path; 4082 4083 status.sstat_assoc_id = sctp_assoc2id(asoc); 4084 status.sstat_state = asoc->state; 4085 status.sstat_rwnd = asoc->peer.rwnd; 4086 status.sstat_unackdata = asoc->unack_data; 4087 4088 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4089 status.sstat_instrms = asoc->c.sinit_max_instreams; 4090 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4091 status.sstat_fragmentation_point = asoc->frag_point; 4092 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4093 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4094 transport->af_specific->sockaddr_len); 4095 /* Map ipv4 address into v4-mapped-on-v6 address. */ 4096 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4097 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4098 status.sstat_primary.spinfo_state = transport->state; 4099 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4100 status.sstat_primary.spinfo_srtt = transport->srtt; 4101 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4102 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4103 4104 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4105 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4106 4107 if (put_user(len, optlen)) { 4108 retval = -EFAULT; 4109 goto out; 4110 } 4111 4112 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4113 __func__, len, status.sstat_state, status.sstat_rwnd, 4114 status.sstat_assoc_id); 4115 4116 if (copy_to_user(optval, &status, len)) { 4117 retval = -EFAULT; 4118 goto out; 4119 } 4120 4121 out: 4122 return retval; 4123 } 4124 4125 4126 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4127 * 4128 * Applications can retrieve information about a specific peer address 4129 * of an association, including its reachability state, congestion 4130 * window, and retransmission timer values. This information is 4131 * read-only. 
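 *
 * A minimal user-space sketch (illustrative only; the peer address and
 * association id are assumptions, not taken from this file):
 *
 *   struct sctp_paddrinfo pinfo;
 *   socklen_t len = sizeof(pinfo);
 *
 *   memset(&pinfo, 0, sizeof(pinfo));
 *   pinfo.spinfo_assoc_id = assoc_id;
 *   memcpy(&pinfo.spinfo_address, &peer_addr, peer_len);
 *   if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
 *                  &pinfo, &len) == 0)
 *       printf("srtt=%u rto=%u\n", pinfo.spinfo_srtt, pinfo.spinfo_rto);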
4132 */ 4133 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4134 char __user *optval, 4135 int __user *optlen) 4136 { 4137 struct sctp_paddrinfo pinfo; 4138 struct sctp_transport *transport; 4139 int retval = 0; 4140 4141 if (len < sizeof(pinfo)) { 4142 retval = -EINVAL; 4143 goto out; 4144 } 4145 4146 len = sizeof(pinfo); 4147 if (copy_from_user(&pinfo, optval, len)) { 4148 retval = -EFAULT; 4149 goto out; 4150 } 4151 4152 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4153 pinfo.spinfo_assoc_id); 4154 if (!transport) 4155 return -EINVAL; 4156 4157 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4158 pinfo.spinfo_state = transport->state; 4159 pinfo.spinfo_cwnd = transport->cwnd; 4160 pinfo.spinfo_srtt = transport->srtt; 4161 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4162 pinfo.spinfo_mtu = transport->pathmtu; 4163 4164 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4165 pinfo.spinfo_state = SCTP_ACTIVE; 4166 4167 if (put_user(len, optlen)) { 4168 retval = -EFAULT; 4169 goto out; 4170 } 4171 4172 if (copy_to_user(optval, &pinfo, len)) { 4173 retval = -EFAULT; 4174 goto out; 4175 } 4176 4177 out: 4178 return retval; 4179 } 4180 4181 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4182 * 4183 * This option is an on/off flag. If enabled no SCTP message 4184 * fragmentation will be performed. Instead, if a message being sent 4185 * exceeds the current PMTU size, the message will NOT be sent and 4186 * instead an error will be indicated to the user. 4187 */ 4188 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4189 char __user *optval, int __user *optlen) 4190 { 4191 int val; 4192 4193 if (len < sizeof(int)) 4194 return -EINVAL; 4195 4196 len = sizeof(int); 4197 val = (sctp_sk(sk)->disable_fragments == 1); 4198 if (put_user(len, optlen)) 4199 return -EFAULT; 4200 if (copy_to_user(optval, &val, len)) 4201 return -EFAULT; 4202 return 0; 4203 } 4204 4205 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4206 * 4207 * This socket option is used to specify various notifications and 4208 * ancillary data the user wishes to receive. 4209 */ 4210 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4211 int __user *optlen) 4212 { 4213 if (len <= 0) 4214 return -EINVAL; 4215 if (len > sizeof(struct sctp_event_subscribe)) 4216 len = sizeof(struct sctp_event_subscribe); 4217 if (put_user(len, optlen)) 4218 return -EFAULT; 4219 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4220 return -EFAULT; 4221 return 0; 4222 } 4223 4224 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4225 * 4226 * This socket option is applicable to the UDP-style socket only. When 4227 * set it will cause associations that are idle for more than the 4228 * specified number of seconds to automatically close. An association 4229 * being idle is defined as an association that has NOT sent or received 4230 * user data. The special value of '0' indicates that no automatic 4231 * close of any associations should be performed. The option expects an 4232 * integer defining the number of seconds of idle time before an 4233 * association is closed.
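 *
 * For illustration only (user-space sketch; the 30-second value is an
 * arbitrary assumption), on a one-to-many style socket:
 *
 *   int secs = 30;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 *
 * The same integer is read back with getsockopt(), as the handler
 * below does.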
4234 */ 4235 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4236 { 4237 /* Applicable to UDP-style socket only */ 4238 if (sctp_style(sk, TCP)) 4239 return -EOPNOTSUPP; 4240 if (len < sizeof(int)) 4241 return -EINVAL; 4242 len = sizeof(int); 4243 if (put_user(len, optlen)) 4244 return -EFAULT; 4245 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4246 return -EFAULT; 4247 return 0; 4248 } 4249 4250 /* Helper routine to branch off an association to a new socket. */ 4251 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4252 { 4253 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4254 struct socket *sock; 4255 struct sctp_af *af; 4256 int err = 0; 4257 4258 if (!asoc) 4259 return -EINVAL; 4260 4261 /* An association cannot be branched off from an already peeled-off 4262 * socket, nor is this supported for tcp style sockets. 4263 */ 4264 if (!sctp_style(sk, UDP)) 4265 return -EINVAL; 4266 4267 /* Create a new socket. */ 4268 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4269 if (err < 0) 4270 return err; 4271 4272 sctp_copy_sock(sock->sk, sk, asoc); 4273 4274 /* Make peeled-off sockets more like 1-1 accepted sockets. 4275 * Set the daddr and initialize id to something more random 4276 */ 4277 af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family); 4278 af->to_sk_daddr(&asoc->peer.primary_addr, sk); 4279 4280 /* Populate the fields of the newsk from the oldsk and migrate the 4281 * asoc to the newsk. 4282 */ 4283 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4284 4285 *sockp = sock; 4286 4287 return err; 4288 } 4289 EXPORT_SYMBOL(sctp_do_peeloff); 4290 4291 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4292 { 4293 sctp_peeloff_arg_t peeloff; 4294 struct socket *newsock; 4295 struct file *newfile; 4296 int retval = 0; 4297 4298 if (len < sizeof(sctp_peeloff_arg_t)) 4299 return -EINVAL; 4300 len = sizeof(sctp_peeloff_arg_t); 4301 if (copy_from_user(&peeloff, optval, len)) 4302 return -EFAULT; 4303 4304 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4305 if (retval < 0) 4306 goto out; 4307 4308 /* Map the socket to an unused fd that can be returned to the user. */ 4309 retval = get_unused_fd_flags(0); 4310 if (retval < 0) { 4311 sock_release(newsock); 4312 goto out; 4313 } 4314 4315 newfile = sock_alloc_file(newsock, 0, NULL); 4316 if (unlikely(IS_ERR(newfile))) { 4317 put_unused_fd(retval); 4318 sock_release(newsock); 4319 return PTR_ERR(newfile); 4320 } 4321 4322 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4323 retval); 4324 4325 /* Return the fd mapped to the new socket. */ 4326 if (put_user(len, optlen)) { 4327 fput(newfile); 4328 put_unused_fd(retval); 4329 return -EFAULT; 4330 } 4331 peeloff.sd = retval; 4332 if (copy_to_user(optval, &peeloff, len)) { 4333 fput(newfile); 4334 put_unused_fd(retval); 4335 return -EFAULT; 4336 } 4337 fd_install(retval, newfile); 4338 out: 4339 return retval; 4340 } 4341 4342 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4343 * 4344 * Applications can enable or disable heartbeats for any peer address of 4345 * an association, modify an address's heartbeat interval, force a 4346 * heartbeat to be sent immediately, and adjust the address's maximum 4347 * number of retransmissions sent before an address is considered 4348 * unreachable. 
The following structure is used to access and modify an 4349 * address's parameters: 4350 * 4351 * struct sctp_paddrparams { 4352 * sctp_assoc_t spp_assoc_id; 4353 * struct sockaddr_storage spp_address; 4354 * uint32_t spp_hbinterval; 4355 * uint16_t spp_pathmaxrxt; 4356 * uint32_t spp_pathmtu; 4357 * uint32_t spp_sackdelay; 4358 * uint32_t spp_flags; 4359 * }; 4360 * 4361 * spp_assoc_id - (one-to-many style socket) This is filled in by the 4362 * application, and identifies the association for 4363 * this query. 4364 * spp_address - This specifies which address is of interest. 4365 * spp_hbinterval - This contains the value of the heartbeat interval, 4366 * in milliseconds. If a value of zero 4367 * is present in this field then no changes are to 4368 * be made to this parameter. 4369 * spp_pathmaxrxt - This contains the maximum number of 4370 * retransmissions before this address shall be 4371 * considered unreachable. If a value of zero 4372 * is present in this field then no changes are to 4373 * be made to this parameter. 4374 * spp_pathmtu - When Path MTU discovery is disabled the value 4375 * specified here will be the "fixed" path mtu. 4376 * Note that if the spp_address field is empty 4377 * then all associations on this address will 4378 * have this fixed path mtu set upon them. 4379 * 4380 * spp_sackdelay - When delayed sack is enabled, this value specifies 4381 * the number of milliseconds that sacks will be delayed 4382 * for. This value will apply to all addresses of an 4383 * association if the spp_address field is empty. Note 4384 * also, that if delayed sack is enabled and this 4385 * value is set to 0, no change is made to the last 4386 * recorded delayed sack timer value. 4387 * 4388 * spp_flags - These flags are used to control various features 4389 * on an association. The flag field may contain 4390 * zero or more of the following options. 4391 * 4392 * SPP_HB_ENABLE - Enable heartbeats on the 4393 * specified address. Note that if the address 4394 * field is empty all addresses for the association 4395 * have heartbeats enabled upon them. 4396 * 4397 * SPP_HB_DISABLE - Disable heartbeats on the 4398 * specified address. Note that if the address 4399 * field is empty all addresses for the association 4400 * will have their heartbeats disabled. Note also 4401 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4402 * mutually exclusive, only one of these two should 4403 * be specified. Enabling both fields will have 4404 * undetermined results. 4405 * 4406 * SPP_HB_DEMAND - Request a user initiated heartbeat 4407 * to be made immediately. 4408 * 4409 * SPP_PMTUD_ENABLE - This field will enable PMTU 4410 * discovery upon the specified address. Note that 4411 * if the address field is empty then all addresses 4412 * on the association are affected. 4413 * 4414 * SPP_PMTUD_DISABLE - This field will disable PMTU 4415 * discovery upon the specified address. Note that 4416 * if the address field is empty then all addresses 4417 * on the association are affected. Note also that 4418 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4419 * exclusive. Enabling both will have undetermined 4420 * results. 4421 * 4422 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4423 * on delayed sack. The time specified in spp_sackdelay 4424 * is used to specify the sack delay for this address. Note 4425 * that if spp_address is empty then all addresses will 4426 * enable delayed sack and take on the sack delay 4427 * value specified in spp_sackdelay.
4428 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4429 * off delayed sack. If the spp_address field is blank then 4430 * delayed sack is disabled for the entire association. Note 4431 * also that this field is mutually exclusive to 4432 * SPP_SACKDELAY_ENABLE, setting both will have undefined 4433 * results. 4434 */ 4435 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4436 char __user *optval, int __user *optlen) 4437 { 4438 struct sctp_paddrparams params; 4439 struct sctp_transport *trans = NULL; 4440 struct sctp_association *asoc = NULL; 4441 struct sctp_sock *sp = sctp_sk(sk); 4442 4443 if (len < sizeof(struct sctp_paddrparams)) 4444 return -EINVAL; 4445 len = sizeof(struct sctp_paddrparams); 4446 if (copy_from_user(¶ms, optval, len)) 4447 return -EFAULT; 4448 4449 /* If an address other than INADDR_ANY is specified, and 4450 * no transport is found, then the request is invalid. 4451 */ 4452 if (!sctp_is_any(sk, ( union sctp_addr *)¶ms.spp_address)) { 4453 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 4454 params.spp_assoc_id); 4455 if (!trans) { 4456 pr_debug("%s: failed no transport\n", __func__); 4457 return -EINVAL; 4458 } 4459 } 4460 4461 /* Get association, if assoc_id != 0 and the socket is a one 4462 * to many style socket, and an association was not found, then 4463 * the id was invalid. 4464 */ 4465 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4466 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4467 pr_debug("%s: failed no association\n", __func__); 4468 return -EINVAL; 4469 } 4470 4471 if (trans) { 4472 /* Fetch transport values. */ 4473 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4474 params.spp_pathmtu = trans->pathmtu; 4475 params.spp_pathmaxrxt = trans->pathmaxrxt; 4476 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4477 4478 /*draft-11 doesn't say what to return in spp_flags*/ 4479 params.spp_flags = trans->param_flags; 4480 } else if (asoc) { 4481 /* Fetch association values. */ 4482 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4483 params.spp_pathmtu = asoc->pathmtu; 4484 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4485 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4486 4487 /*draft-11 doesn't say what to return in spp_flags*/ 4488 params.spp_flags = asoc->param_flags; 4489 } else { 4490 /* Fetch socket values. */ 4491 params.spp_hbinterval = sp->hbinterval; 4492 params.spp_pathmtu = sp->pathmtu; 4493 params.spp_sackdelay = sp->sackdelay; 4494 params.spp_pathmaxrxt = sp->pathmaxrxt; 4495 4496 /*draft-11 doesn't say what to return in spp_flags*/ 4497 params.spp_flags = sp->param_flags; 4498 } 4499 4500 if (copy_to_user(optval, ¶ms, len)) 4501 return -EFAULT; 4502 4503 if (put_user(len, optlen)) 4504 return -EFAULT; 4505 4506 return 0; 4507 } 4508 4509 /* 4510 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4511 * 4512 * This option will effect the way delayed acks are performed. This 4513 * option allows you to get or set the delayed ack time, in 4514 * milliseconds. It also allows changing the delayed ack frequency. 4515 * Changing the frequency to 1 disables the delayed sack algorithm. If 4516 * the assoc_id is 0, then this sets or gets the endpoints default 4517 * values. If the assoc_id field is non-zero, then the set or get 4518 * effects the specified association for the one to many model (the 4519 * assoc_id field is ignored by the one to one model). 
Note that if 4520 * sack_delay or sack_freq are 0 when setting this option, then the 4521 * current values will remain unchanged. 4522 * 4523 * struct sctp_sack_info { 4524 * sctp_assoc_t sack_assoc_id; 4525 * uint32_t sack_delay; 4526 * uint32_t sack_freq; 4527 * }; 4528 * 4529 * sack_assoc_id - This parameter, indicates which association the user 4530 * is performing an action upon. Note that if this field's value is 4531 * zero then the endpoints default value is changed (effecting future 4532 * associations only). 4533 * 4534 * sack_delay - This parameter contains the number of milliseconds that 4535 * the user is requesting the delayed ACK timer be set to. Note that 4536 * this value is defined in the standard to be between 200 and 500 4537 * milliseconds. 4538 * 4539 * sack_freq - This parameter contains the number of packets that must 4540 * be received before a sack is sent without waiting for the delay 4541 * timer to expire. The default value for this is 2, setting this 4542 * value to 1 will disable the delayed sack algorithm. 4543 */ 4544 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 4545 char __user *optval, 4546 int __user *optlen) 4547 { 4548 struct sctp_sack_info params; 4549 struct sctp_association *asoc = NULL; 4550 struct sctp_sock *sp = sctp_sk(sk); 4551 4552 if (len >= sizeof(struct sctp_sack_info)) { 4553 len = sizeof(struct sctp_sack_info); 4554 4555 if (copy_from_user(¶ms, optval, len)) 4556 return -EFAULT; 4557 } else if (len == sizeof(struct sctp_assoc_value)) { 4558 pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n"); 4559 pr_warn("Use struct sctp_sack_info instead\n"); 4560 if (copy_from_user(¶ms, optval, len)) 4561 return -EFAULT; 4562 } else 4563 return - EINVAL; 4564 4565 /* Get association, if sack_assoc_id != 0 and the socket is a one 4566 * to many style socket, and an association was not found, then 4567 * the id was invalid. 4568 */ 4569 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 4570 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 4571 return -EINVAL; 4572 4573 if (asoc) { 4574 /* Fetch association values. */ 4575 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 4576 params.sack_delay = jiffies_to_msecs( 4577 asoc->sackdelay); 4578 params.sack_freq = asoc->sackfreq; 4579 4580 } else { 4581 params.sack_delay = 0; 4582 params.sack_freq = 1; 4583 } 4584 } else { 4585 /* Fetch socket values. */ 4586 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 4587 params.sack_delay = sp->sackdelay; 4588 params.sack_freq = sp->sackfreq; 4589 } else { 4590 params.sack_delay = 0; 4591 params.sack_freq = 1; 4592 } 4593 } 4594 4595 if (copy_to_user(optval, ¶ms, len)) 4596 return -EFAULT; 4597 4598 if (put_user(len, optlen)) 4599 return -EFAULT; 4600 4601 return 0; 4602 } 4603 4604 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 4605 * 4606 * Applications can specify protocol parameters for the default association 4607 * initialization. The option name argument to setsockopt() and getsockopt() 4608 * is SCTP_INITMSG. 4609 * 4610 * Setting initialization parameters is effective only on an unconnected 4611 * socket (for UDP-style sockets only future associations are effected 4612 * by the change). With TCP-style sockets, this option is inherited by 4613 * sockets derived from a listener socket. 
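 *
 * A minimal user-space sketch (illustrative only; the stream counts
 * are arbitrary assumptions):
 *
 *   struct sctp_initmsg im;
 *
 *   memset(&im, 0, sizeof(im));
 *   im.sinit_num_ostreams = 10;
 *   im.sinit_max_instreams = 10;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));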
4614 */ 4615 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4616 { 4617 if (len < sizeof(struct sctp_initmsg)) 4618 return -EINVAL; 4619 len = sizeof(struct sctp_initmsg); 4620 if (put_user(len, optlen)) 4621 return -EFAULT; 4622 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4623 return -EFAULT; 4624 return 0; 4625 } 4626 4627 4628 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4629 char __user *optval, int __user *optlen) 4630 { 4631 struct sctp_association *asoc; 4632 int cnt = 0; 4633 struct sctp_getaddrs getaddrs; 4634 struct sctp_transport *from; 4635 void __user *to; 4636 union sctp_addr temp; 4637 struct sctp_sock *sp = sctp_sk(sk); 4638 int addrlen; 4639 size_t space_left; 4640 int bytes_copied; 4641 4642 if (len < sizeof(struct sctp_getaddrs)) 4643 return -EINVAL; 4644 4645 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4646 return -EFAULT; 4647 4648 /* For UDP-style sockets, id specifies the association to query. */ 4649 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4650 if (!asoc) 4651 return -EINVAL; 4652 4653 to = optval + offsetof(struct sctp_getaddrs,addrs); 4654 space_left = len - offsetof(struct sctp_getaddrs,addrs); 4655 4656 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4657 transports) { 4658 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4659 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4660 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4661 if (space_left < addrlen) 4662 return -ENOMEM; 4663 if (copy_to_user(to, &temp, addrlen)) 4664 return -EFAULT; 4665 to += addrlen; 4666 cnt++; 4667 space_left -= addrlen; 4668 } 4669 4670 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4671 return -EFAULT; 4672 bytes_copied = ((char __user *)to) - optval; 4673 if (put_user(bytes_copied, optlen)) 4674 return -EFAULT; 4675 4676 return 0; 4677 } 4678 4679 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4680 size_t space_left, int *bytes_copied) 4681 { 4682 struct sctp_sockaddr_entry *addr; 4683 union sctp_addr temp; 4684 int cnt = 0; 4685 int addrlen; 4686 struct net *net = sock_net(sk); 4687 4688 rcu_read_lock(); 4689 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4690 if (!addr->valid) 4691 continue; 4692 4693 if ((PF_INET == sk->sk_family) && 4694 (AF_INET6 == addr->a.sa.sa_family)) 4695 continue; 4696 if ((PF_INET6 == sk->sk_family) && 4697 inet_v6_ipv6only(sk) && 4698 (AF_INET == addr->a.sa.sa_family)) 4699 continue; 4700 memcpy(&temp, &addr->a, sizeof(temp)); 4701 if (!temp.v4.sin_port) 4702 temp.v4.sin_port = htons(port); 4703 4704 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4705 &temp); 4706 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4707 if (space_left < addrlen) { 4708 cnt = -ENOMEM; 4709 break; 4710 } 4711 memcpy(to, &temp, addrlen); 4712 4713 to += addrlen; 4714 cnt ++; 4715 space_left -= addrlen; 4716 *bytes_copied += addrlen; 4717 } 4718 rcu_read_unlock(); 4719 4720 return cnt; 4721 } 4722 4723 4724 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4725 char __user *optval, int __user *optlen) 4726 { 4727 struct sctp_bind_addr *bp; 4728 struct sctp_association *asoc; 4729 int cnt = 0; 4730 struct sctp_getaddrs getaddrs; 4731 struct sctp_sockaddr_entry *addr; 4732 void __user *to; 4733 union sctp_addr temp; 4734 struct sctp_sock *sp = sctp_sk(sk); 4735 int addrlen; 4736 int err = 0; 4737 size_t space_left; 4738 int 
bytes_copied = 0; 4739 void *addrs; 4740 void *buf; 4741 4742 if (len < sizeof(struct sctp_getaddrs)) 4743 return -EINVAL; 4744 4745 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4746 return -EFAULT; 4747 4748 /* 4749 * For UDP-style sockets, id specifies the association to query. 4750 * If the id field is set to the value '0' then the locally bound 4751 * addresses are returned without regard to any particular 4752 * association. 4753 */ 4754 if (0 == getaddrs.assoc_id) { 4755 bp = &sctp_sk(sk)->ep->base.bind_addr; 4756 } else { 4757 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4758 if (!asoc) 4759 return -EINVAL; 4760 bp = &asoc->base.bind_addr; 4761 } 4762 4763 to = optval + offsetof(struct sctp_getaddrs,addrs); 4764 space_left = len - offsetof(struct sctp_getaddrs,addrs); 4765 4766 addrs = kmalloc(space_left, GFP_KERNEL); 4767 if (!addrs) 4768 return -ENOMEM; 4769 4770 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4771 * addresses from the global local address list. 4772 */ 4773 if (sctp_list_single_entry(&bp->address_list)) { 4774 addr = list_entry(bp->address_list.next, 4775 struct sctp_sockaddr_entry, list); 4776 if (sctp_is_any(sk, &addr->a)) { 4777 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4778 space_left, &bytes_copied); 4779 if (cnt < 0) { 4780 err = cnt; 4781 goto out; 4782 } 4783 goto copy_getaddrs; 4784 } 4785 } 4786 4787 buf = addrs; 4788 /* Protection on the bound address list is not needed since 4789 * in the socket option context we hold a socket lock and 4790 * thus the bound address list can't change. 4791 */ 4792 list_for_each_entry(addr, &bp->address_list, list) { 4793 memcpy(&temp, &addr->a, sizeof(temp)); 4794 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4795 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4796 if (space_left < addrlen) { 4797 err = -ENOMEM; /*fixme: right error?*/ 4798 goto out; 4799 } 4800 memcpy(buf, &temp, addrlen); 4801 buf += addrlen; 4802 bytes_copied += addrlen; 4803 cnt ++; 4804 space_left -= addrlen; 4805 } 4806 4807 copy_getaddrs: 4808 if (copy_to_user(to, addrs, bytes_copied)) { 4809 err = -EFAULT; 4810 goto out; 4811 } 4812 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4813 err = -EFAULT; 4814 goto out; 4815 } 4816 if (put_user(bytes_copied, optlen)) 4817 err = -EFAULT; 4818 out: 4819 kfree(addrs); 4820 return err; 4821 } 4822 4823 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 4824 * 4825 * Requests that the local SCTP stack use the enclosed peer address as 4826 * the association primary. The enclosed address must be one of the 4827 * association peer's addresses. 
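 *
 * For getsockopt the current primary is returned instead. A user-space
 * sketch (illustrative only; assoc_id is an assumption):
 *
 *   struct sctp_prim prim;
 *   socklen_t len = sizeof(prim);
 *
 *   memset(&prim, 0, sizeof(prim));
 *   prim.ssp_assoc_id = assoc_id;
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, &len);
 *
 * On success prim.ssp_addr holds the current primary peer address.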
4828 */ 4829 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 4830 char __user *optval, int __user *optlen) 4831 { 4832 struct sctp_prim prim; 4833 struct sctp_association *asoc; 4834 struct sctp_sock *sp = sctp_sk(sk); 4835 4836 if (len < sizeof(struct sctp_prim)) 4837 return -EINVAL; 4838 4839 len = sizeof(struct sctp_prim); 4840 4841 if (copy_from_user(&prim, optval, len)) 4842 return -EFAULT; 4843 4844 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 4845 if (!asoc) 4846 return -EINVAL; 4847 4848 if (!asoc->peer.primary_path) 4849 return -ENOTCONN; 4850 4851 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 4852 asoc->peer.primary_path->af_specific->sockaddr_len); 4853 4854 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, 4855 (union sctp_addr *)&prim.ssp_addr); 4856 4857 if (put_user(len, optlen)) 4858 return -EFAULT; 4859 if (copy_to_user(optval, &prim, len)) 4860 return -EFAULT; 4861 4862 return 0; 4863 } 4864 4865 /* 4866 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 4867 * 4868 * Requests that the local endpoint set the specified Adaptation Layer 4869 * Indication parameter for all future INIT and INIT-ACK exchanges. 4870 */ 4871 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 4872 char __user *optval, int __user *optlen) 4873 { 4874 struct sctp_setadaptation adaptation; 4875 4876 if (len < sizeof(struct sctp_setadaptation)) 4877 return -EINVAL; 4878 4879 len = sizeof(struct sctp_setadaptation); 4880 4881 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 4882 4883 if (put_user(len, optlen)) 4884 return -EFAULT; 4885 if (copy_to_user(optval, &adaptation, len)) 4886 return -EFAULT; 4887 4888 return 0; 4889 } 4890 4891 /* 4892 * 4893 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 4894 * 4895 * Applications that wish to use the sendto() system call may wish to 4896 * specify a default set of parameters that would normally be supplied 4897 * through the inclusion of ancillary data. This socket option allows 4898 * such an application to set the default sctp_sndrcvinfo structure. 4899 4900 4901 * The application that wishes to use this socket option simply passes 4902 * in to this call the sctp_sndrcvinfo structure defined in Section 4903 * 5.2.2). The input parameters accepted by this call include 4904 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 4905 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 4906 * to this call if the caller is using the UDP model. 4907 * 4908 * For getsockopt, it gets the default sctp_sndrcvinfo structure.
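 *
 * A user-space sketch of the set side (illustrative only; the stream
 * number and ppid are arbitrary assumptions):
 *
 *   struct sctp_sndrcvinfo info;
 *
 *   memset(&info, 0, sizeof(info));
 *   info.sinfo_stream = 1;
 *   info.sinfo_ppid = htonl(42);
 *   info.sinfo_assoc_id = assoc_id;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *              &info, sizeof(info));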
4909 */ 4910 static int sctp_getsockopt_default_send_param(struct sock *sk, 4911 int len, char __user *optval, 4912 int __user *optlen) 4913 { 4914 struct sctp_sndrcvinfo info; 4915 struct sctp_association *asoc; 4916 struct sctp_sock *sp = sctp_sk(sk); 4917 4918 if (len < sizeof(struct sctp_sndrcvinfo)) 4919 return -EINVAL; 4920 4921 len = sizeof(struct sctp_sndrcvinfo); 4922 4923 if (copy_from_user(&info, optval, len)) 4924 return -EFAULT; 4925 4926 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 4927 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 4928 return -EINVAL; 4929 4930 if (asoc) { 4931 info.sinfo_stream = asoc->default_stream; 4932 info.sinfo_flags = asoc->default_flags; 4933 info.sinfo_ppid = asoc->default_ppid; 4934 info.sinfo_context = asoc->default_context; 4935 info.sinfo_timetolive = asoc->default_timetolive; 4936 } else { 4937 info.sinfo_stream = sp->default_stream; 4938 info.sinfo_flags = sp->default_flags; 4939 info.sinfo_ppid = sp->default_ppid; 4940 info.sinfo_context = sp->default_context; 4941 info.sinfo_timetolive = sp->default_timetolive; 4942 } 4943 4944 if (put_user(len, optlen)) 4945 return -EFAULT; 4946 if (copy_to_user(optval, &info, len)) 4947 return -EFAULT; 4948 4949 return 0; 4950 } 4951 4952 /* 4953 * 4954 * 7.1.5 SCTP_NODELAY 4955 * 4956 * Turn on/off any Nagle-like algorithm. This means that packets are 4957 * generally sent as soon as possible and no unnecessary delays are 4958 * introduced, at the cost of more packets in the network. Expects an 4959 * integer boolean flag. 4960 */ 4961 4962 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 4963 char __user *optval, int __user *optlen) 4964 { 4965 int val; 4966 4967 if (len < sizeof(int)) 4968 return -EINVAL; 4969 4970 len = sizeof(int); 4971 val = (sctp_sk(sk)->nodelay == 1); 4972 if (put_user(len, optlen)) 4973 return -EFAULT; 4974 if (copy_to_user(optval, &val, len)) 4975 return -EFAULT; 4976 return 0; 4977 } 4978 4979 /* 4980 * 4981 * 7.1.1 SCTP_RTOINFO 4982 * 4983 * The protocol parameters used to initialize and bound retransmission 4984 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 4985 * and modify these parameters. 4986 * All parameters are time values, in milliseconds. A value of 0, when 4987 * modifying the parameters, indicates that the current value should not 4988 * be changed. 4989 * 4990 */ 4991 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 4992 char __user *optval, 4993 int __user *optlen) { 4994 struct sctp_rtoinfo rtoinfo; 4995 struct sctp_association *asoc; 4996 4997 if (len < sizeof (struct sctp_rtoinfo)) 4998 return -EINVAL; 4999 5000 len = sizeof(struct sctp_rtoinfo); 5001 5002 if (copy_from_user(&rtoinfo, optval, len)) 5003 return -EFAULT; 5004 5005 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5006 5007 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5008 return -EINVAL; 5009 5010 /* Values corresponding to the specific association. */ 5011 if (asoc) { 5012 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5013 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5014 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5015 } else { 5016 /* Values corresponding to the endpoint. 
*/ 5017 struct sctp_sock *sp = sctp_sk(sk); 5018 5019 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5020 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5021 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5022 } 5023 5024 if (put_user(len, optlen)) 5025 return -EFAULT; 5026 5027 if (copy_to_user(optval, &rtoinfo, len)) 5028 return -EFAULT; 5029 5030 return 0; 5031 } 5032 5033 /* 5034 * 5035 * 7.1.2 SCTP_ASSOCINFO 5036 * 5037 * This option is used to tune the maximum retransmission attempts 5038 * of the association. 5039 * Returns an error if the new association retransmission value is 5040 * greater than the sum of the retransmission value of the peer. 5041 * See [SCTP] for more information. 5042 * 5043 */ 5044 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5045 char __user *optval, 5046 int __user *optlen) 5047 { 5048 5049 struct sctp_assocparams assocparams; 5050 struct sctp_association *asoc; 5051 struct list_head *pos; 5052 int cnt = 0; 5053 5054 if (len < sizeof (struct sctp_assocparams)) 5055 return -EINVAL; 5056 5057 len = sizeof(struct sctp_assocparams); 5058 5059 if (copy_from_user(&assocparams, optval, len)) 5060 return -EFAULT; 5061 5062 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5063 5064 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5065 return -EINVAL; 5066 5067 /* Values correspoinding to the specific association */ 5068 if (asoc) { 5069 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5070 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5071 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5072 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5073 5074 list_for_each(pos, &asoc->peer.transport_addr_list) { 5075 cnt ++; 5076 } 5077 5078 assocparams.sasoc_number_peer_destinations = cnt; 5079 } else { 5080 /* Values corresponding to the endpoint */ 5081 struct sctp_sock *sp = sctp_sk(sk); 5082 5083 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5084 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5085 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5086 assocparams.sasoc_cookie_life = 5087 sp->assocparams.sasoc_cookie_life; 5088 assocparams.sasoc_number_peer_destinations = 5089 sp->assocparams. 5090 sasoc_number_peer_destinations; 5091 } 5092 5093 if (put_user(len, optlen)) 5094 return -EFAULT; 5095 5096 if (copy_to_user(optval, &assocparams, len)) 5097 return -EFAULT; 5098 5099 return 0; 5100 } 5101 5102 /* 5103 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5104 * 5105 * This socket option is a boolean flag which turns on or off mapped V4 5106 * addresses. If this option is turned on and the socket is type 5107 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5108 * If this option is turned off, then no mapping will be done of V4 5109 * addresses and a user will receive both PF_INET6 and PF_INET type 5110 * addresses on the socket. 5111 */ 5112 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5113 char __user *optval, int __user *optlen) 5114 { 5115 int val; 5116 struct sctp_sock *sp = sctp_sk(sk); 5117 5118 if (len < sizeof(int)) 5119 return -EINVAL; 5120 5121 len = sizeof(int); 5122 val = sp->v4mapped; 5123 if (put_user(len, optlen)) 5124 return -EFAULT; 5125 if (copy_to_user(optval, &val, len)) 5126 return -EFAULT; 5127 5128 return 0; 5129 } 5130 5131 /* 5132 * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) 5133 * (chapter and verse is quoted at sctp_setsockopt_context()) 5134 */ 5135 static int sctp_getsockopt_context(struct sock *sk, int len, 5136 char __user *optval, int __user *optlen) 5137 { 5138 struct sctp_assoc_value params; 5139 struct sctp_sock *sp; 5140 struct sctp_association *asoc; 5141 5142 if (len < sizeof(struct sctp_assoc_value)) 5143 return -EINVAL; 5144 5145 len = sizeof(struct sctp_assoc_value); 5146 5147 if (copy_from_user(¶ms, optval, len)) 5148 return -EFAULT; 5149 5150 sp = sctp_sk(sk); 5151 5152 if (params.assoc_id != 0) { 5153 asoc = sctp_id2assoc(sk, params.assoc_id); 5154 if (!asoc) 5155 return -EINVAL; 5156 params.assoc_value = asoc->default_rcv_context; 5157 } else { 5158 params.assoc_value = sp->default_rcv_context; 5159 } 5160 5161 if (put_user(len, optlen)) 5162 return -EFAULT; 5163 if (copy_to_user(optval, ¶ms, len)) 5164 return -EFAULT; 5165 5166 return 0; 5167 } 5168 5169 /* 5170 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5171 * This option will get or set the maximum size to put in any outgoing 5172 * SCTP DATA chunk. If a message is larger than this size it will be 5173 * fragmented by SCTP into the specified size. Note that the underlying 5174 * SCTP implementation may fragment into smaller sized chunks when the 5175 * PMTU of the underlying association is smaller than the value set by 5176 * the user. The default value for this option is '0' which indicates 5177 * the user is NOT limiting fragmentation and only the PMTU will effect 5178 * SCTP's choice of DATA chunk size. Note also that values set larger 5179 * than the maximum size of an IP datagram will effectively let SCTP 5180 * control fragmentation (i.e. the same as setting this option to 0). 5181 * 5182 * The following structure is used to access and modify this parameter: 5183 * 5184 * struct sctp_assoc_value { 5185 * sctp_assoc_t assoc_id; 5186 * uint32_t assoc_value; 5187 * }; 5188 * 5189 * assoc_id: This parameter is ignored for one-to-one style sockets. 5190 * For one-to-many style sockets this parameter indicates which 5191 * association the user is performing an action upon. Note that if 5192 * this field's value is zero then the endpoints default value is 5193 * changed (effecting future associations only). 5194 * assoc_value: This parameter specifies the maximum size in bytes. 5195 */ 5196 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5197 char __user *optval, int __user *optlen) 5198 { 5199 struct sctp_assoc_value params; 5200 struct sctp_association *asoc; 5201 5202 if (len == sizeof(int)) { 5203 pr_warn("Use of int in maxseg socket option deprecated\n"); 5204 pr_warn("Use struct sctp_assoc_value instead\n"); 5205 params.assoc_id = 0; 5206 } else if (len >= sizeof(struct sctp_assoc_value)) { 5207 len = sizeof(struct sctp_assoc_value); 5208 if (copy_from_user(¶ms, optval, sizeof(params))) 5209 return -EFAULT; 5210 } else 5211 return -EINVAL; 5212 5213 asoc = sctp_id2assoc(sk, params.assoc_id); 5214 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5215 return -EINVAL; 5216 5217 if (asoc) 5218 params.assoc_value = asoc->frag_point; 5219 else 5220 params.assoc_value = sctp_sk(sk)->user_frag; 5221 5222 if (put_user(len, optlen)) 5223 return -EFAULT; 5224 if (len == sizeof(int)) { 5225 if (copy_to_user(optval, ¶ms.assoc_value, len)) 5226 return -EFAULT; 5227 } else { 5228 if (copy_to_user(optval, ¶ms, len)) 5229 return -EFAULT; 5230 } 5231 5232 return 0; 5233 } 5234 5235 /* 5236 * 7.1.24. 
Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5237 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5238 */ 5239 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5240 char __user *optval, int __user *optlen) 5241 { 5242 int val; 5243 5244 if (len < sizeof(int)) 5245 return -EINVAL; 5246 5247 len = sizeof(int); 5248 5249 val = sctp_sk(sk)->frag_interleave; 5250 if (put_user(len, optlen)) 5251 return -EFAULT; 5252 if (copy_to_user(optval, &val, len)) 5253 return -EFAULT; 5254 5255 return 0; 5256 } 5257 5258 /* 5259 * 7.1.25. Set or Get the sctp partial delivery point 5260 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5261 */ 5262 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5263 char __user *optval, 5264 int __user *optlen) 5265 { 5266 u32 val; 5267 5268 if (len < sizeof(u32)) 5269 return -EINVAL; 5270 5271 len = sizeof(u32); 5272 5273 val = sctp_sk(sk)->pd_point; 5274 if (put_user(len, optlen)) 5275 return -EFAULT; 5276 if (copy_to_user(optval, &val, len)) 5277 return -EFAULT; 5278 5279 return 0; 5280 } 5281 5282 /* 5283 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 5284 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5285 */ 5286 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5287 char __user *optval, 5288 int __user *optlen) 5289 { 5290 struct sctp_assoc_value params; 5291 struct sctp_sock *sp; 5292 struct sctp_association *asoc; 5293 5294 if (len == sizeof(int)) { 5295 pr_warn("Use of int in max_burst socket option deprecated\n"); 5296 pr_warn("Use struct sctp_assoc_value instead\n"); 5297 params.assoc_id = 0; 5298 } else if (len >= sizeof(struct sctp_assoc_value)) { 5299 len = sizeof(struct sctp_assoc_value); 5300 if (copy_from_user(¶ms, optval, len)) 5301 return -EFAULT; 5302 } else 5303 return -EINVAL; 5304 5305 sp = sctp_sk(sk); 5306 5307 if (params.assoc_id != 0) { 5308 asoc = sctp_id2assoc(sk, params.assoc_id); 5309 if (!asoc) 5310 return -EINVAL; 5311 params.assoc_value = asoc->max_burst; 5312 } else 5313 params.assoc_value = sp->max_burst; 5314 5315 if (len == sizeof(int)) { 5316 if (copy_to_user(optval, ¶ms.assoc_value, len)) 5317 return -EFAULT; 5318 } else { 5319 if (copy_to_user(optval, ¶ms, len)) 5320 return -EFAULT; 5321 } 5322 5323 return 0; 5324 5325 } 5326 5327 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5328 char __user *optval, int __user *optlen) 5329 { 5330 struct net *net = sock_net(sk); 5331 struct sctp_hmacalgo __user *p = (void __user *)optval; 5332 struct sctp_hmac_algo_param *hmacs; 5333 __u16 data_len = 0; 5334 u32 num_idents; 5335 5336 if (!net->sctp.auth_enable) 5337 return -EACCES; 5338 5339 hmacs = sctp_sk(sk)->ep->auth_hmacs_list; 5340 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5341 5342 if (len < sizeof(struct sctp_hmacalgo) + data_len) 5343 return -EINVAL; 5344 5345 len = sizeof(struct sctp_hmacalgo) + data_len; 5346 num_idents = data_len / sizeof(u16); 5347 5348 if (put_user(len, optlen)) 5349 return -EFAULT; 5350 if (put_user(num_idents, &p->shmac_num_idents)) 5351 return -EFAULT; 5352 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5353 return -EFAULT; 5354 return 0; 5355 } 5356 5357 static int sctp_getsockopt_active_key(struct sock *sk, int len, 5358 char __user *optval, int __user *optlen) 5359 { 5360 struct net *net = sock_net(sk); 5361 struct sctp_authkeyid val; 5362 struct sctp_association *asoc; 5363 5364 if 
(!net->sctp.auth_enable) 5365 return -EACCES; 5366 5367 if (len < sizeof(struct sctp_authkeyid)) 5368 return -EINVAL; 5369 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5370 return -EFAULT; 5371 5372 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5373 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5374 return -EINVAL; 5375 5376 if (asoc) 5377 val.scact_keynumber = asoc->active_key_id; 5378 else 5379 val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; 5380 5381 len = sizeof(struct sctp_authkeyid); 5382 if (put_user(len, optlen)) 5383 return -EFAULT; 5384 if (copy_to_user(optval, &val, len)) 5385 return -EFAULT; 5386 5387 return 0; 5388 } 5389 5390 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5391 char __user *optval, int __user *optlen) 5392 { 5393 struct net *net = sock_net(sk); 5394 struct sctp_authchunks __user *p = (void __user *)optval; 5395 struct sctp_authchunks val; 5396 struct sctp_association *asoc; 5397 struct sctp_chunks_param *ch; 5398 u32 num_chunks = 0; 5399 char __user *to; 5400 5401 if (!net->sctp.auth_enable) 5402 return -EACCES; 5403 5404 if (len < sizeof(struct sctp_authchunks)) 5405 return -EINVAL; 5406 5407 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5408 return -EFAULT; 5409 5410 to = p->gauth_chunks; 5411 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5412 if (!asoc) 5413 return -EINVAL; 5414 5415 ch = asoc->peer.peer_chunks; 5416 if (!ch) 5417 goto num; 5418 5419 /* See if the user provided enough room for all the data */ 5420 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5421 if (len < num_chunks) 5422 return -EINVAL; 5423 5424 if (copy_to_user(to, ch->chunks, num_chunks)) 5425 return -EFAULT; 5426 num: 5427 len = sizeof(struct sctp_authchunks) + num_chunks; 5428 if (put_user(len, optlen)) return -EFAULT; 5429 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5430 return -EFAULT; 5431 return 0; 5432 } 5433 5434 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5435 char __user *optval, int __user *optlen) 5436 { 5437 struct net *net = sock_net(sk); 5438 struct sctp_authchunks __user *p = (void __user *)optval; 5439 struct sctp_authchunks val; 5440 struct sctp_association *asoc; 5441 struct sctp_chunks_param *ch; 5442 u32 num_chunks = 0; 5443 char __user *to; 5444 5445 if (!net->sctp.auth_enable) 5446 return -EACCES; 5447 5448 if (len < sizeof(struct sctp_authchunks)) 5449 return -EINVAL; 5450 5451 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5452 return -EFAULT; 5453 5454 to = p->gauth_chunks; 5455 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5456 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5457 return -EINVAL; 5458 5459 if (asoc) 5460 ch = (struct sctp_chunks_param*)asoc->c.auth_chunks; 5461 else 5462 ch = sctp_sk(sk)->ep->auth_chunk_list; 5463 5464 if (!ch) 5465 goto num; 5466 5467 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5468 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5469 return -EINVAL; 5470 5471 if (copy_to_user(to, ch->chunks, num_chunks)) 5472 return -EFAULT; 5473 num: 5474 len = sizeof(struct sctp_authchunks) + num_chunks; 5475 if (put_user(len, optlen)) 5476 return -EFAULT; 5477 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5478 return -EFAULT; 5479 5480 return 0; 5481 } 5482 5483 /* 5484 * 8.2.5. 
Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5485 * This option gets the current number of associations that are attached 5486 * to a one-to-many style socket. The option value is an uint32_t. 5487 */ 5488 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5489 char __user *optval, int __user *optlen) 5490 { 5491 struct sctp_sock *sp = sctp_sk(sk); 5492 struct sctp_association *asoc; 5493 u32 val = 0; 5494 5495 if (sctp_style(sk, TCP)) 5496 return -EOPNOTSUPP; 5497 5498 if (len < sizeof(u32)) 5499 return -EINVAL; 5500 5501 len = sizeof(u32); 5502 5503 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5504 val++; 5505 } 5506 5507 if (put_user(len, optlen)) 5508 return -EFAULT; 5509 if (copy_to_user(optval, &val, len)) 5510 return -EFAULT; 5511 5512 return 0; 5513 } 5514 5515 /* 5516 * 8.1.23 SCTP_AUTO_ASCONF 5517 * See the corresponding setsockopt entry as description 5518 */ 5519 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5520 char __user *optval, int __user *optlen) 5521 { 5522 int val = 0; 5523 5524 if (len < sizeof(int)) 5525 return -EINVAL; 5526 5527 len = sizeof(int); 5528 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5529 val = 1; 5530 if (put_user(len, optlen)) 5531 return -EFAULT; 5532 if (copy_to_user(optval, &val, len)) 5533 return -EFAULT; 5534 return 0; 5535 } 5536 5537 /* 5538 * 8.2.6. Get the Current Identifiers of Associations 5539 * (SCTP_GET_ASSOC_ID_LIST) 5540 * 5541 * This option gets the current list of SCTP association identifiers of 5542 * the SCTP associations handled by a one-to-many style socket. 5543 */ 5544 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5545 char __user *optval, int __user *optlen) 5546 { 5547 struct sctp_sock *sp = sctp_sk(sk); 5548 struct sctp_association *asoc; 5549 struct sctp_assoc_ids *ids; 5550 u32 num = 0; 5551 5552 if (sctp_style(sk, TCP)) 5553 return -EOPNOTSUPP; 5554 5555 if (len < sizeof(struct sctp_assoc_ids)) 5556 return -EINVAL; 5557 5558 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5559 num++; 5560 } 5561 5562 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5563 return -EINVAL; 5564 5565 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5566 5567 ids = kmalloc(len, GFP_KERNEL); 5568 if (unlikely(!ids)) 5569 return -ENOMEM; 5570 5571 ids->gaids_number_of_ids = num; 5572 num = 0; 5573 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5574 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5575 } 5576 5577 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5578 kfree(ids); 5579 return -EFAULT; 5580 } 5581 5582 kfree(ids); 5583 return 0; 5584 } 5585 5586 /* 5587 * SCTP_PEER_ADDR_THLDS 5588 * 5589 * This option allows us to fetch the partially failed threshold for one or all 5590 * transports in an association. 
See Section 6.1 of: 5591 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5592 */ 5593 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5594 char __user *optval, 5595 int len, 5596 int __user *optlen) 5597 { 5598 struct sctp_paddrthlds val; 5599 struct sctp_transport *trans; 5600 struct sctp_association *asoc; 5601 5602 if (len < sizeof(struct sctp_paddrthlds)) 5603 return -EINVAL; 5604 len = sizeof(struct sctp_paddrthlds); 5605 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5606 return -EFAULT; 5607 5608 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5609 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5610 if (!asoc) 5611 return -ENOENT; 5612 5613 val.spt_pathpfthld = asoc->pf_retrans; 5614 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5615 } else { 5616 trans = sctp_addr_id2transport(sk, &val.spt_address, 5617 val.spt_assoc_id); 5618 if (!trans) 5619 return -ENOENT; 5620 5621 val.spt_pathmaxrxt = trans->pathmaxrxt; 5622 val.spt_pathpfthld = trans->pf_retrans; 5623 } 5624 5625 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5626 return -EFAULT; 5627 5628 return 0; 5629 } 5630 5631 /* 5632 * SCTP_GET_ASSOC_STATS 5633 * 5634 * This option retrieves local per endpoint statistics. It is modeled 5635 * after OpenSolaris' implementation 5636 */ 5637 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5638 char __user *optval, 5639 int __user *optlen) 5640 { 5641 struct sctp_assoc_stats sas; 5642 struct sctp_association *asoc = NULL; 5643 5644 /* User must provide at least the assoc id */ 5645 if (len < sizeof(sctp_assoc_t)) 5646 return -EINVAL; 5647 5648 /* Allow the struct to grow and fill in as much as possible */ 5649 len = min_t(size_t, len, sizeof(sas)); 5650 5651 if (copy_from_user(&sas, optval, len)) 5652 return -EFAULT; 5653 5654 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5655 if (!asoc) 5656 return -EINVAL; 5657 5658 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5659 sas.sas_gapcnt = asoc->stats.gapcnt; 5660 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5661 sas.sas_osacks = asoc->stats.osacks; 5662 sas.sas_isacks = asoc->stats.isacks; 5663 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5664 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5665 sas.sas_oodchunks = asoc->stats.oodchunks; 5666 sas.sas_iodchunks = asoc->stats.iodchunks; 5667 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5668 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5669 sas.sas_idupchunks = asoc->stats.idupchunks; 5670 sas.sas_opackets = asoc->stats.opackets; 5671 sas.sas_ipackets = asoc->stats.ipackets; 5672 5673 /* New high max rto observed, will return 0 if not a single 5674 * RTO update took place. 
obs_rto_ipaddr will be bogus 5675 * in such a case 5676 */ 5677 sas.sas_maxrto = asoc->stats.max_obs_rto; 5678 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5679 sizeof(struct sockaddr_storage)); 5680 5681 /* Mark beginning of a new observation period */ 5682 asoc->stats.max_obs_rto = asoc->rto_min; 5683 5684 if (put_user(len, optlen)) 5685 return -EFAULT; 5686 5687 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5688 5689 if (copy_to_user(optval, &sas, len)) 5690 return -EFAULT; 5691 5692 return 0; 5693 } 5694 5695 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5696 char __user *optval, int __user *optlen) 5697 { 5698 int retval = 0; 5699 int len; 5700 5701 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5702 5703 /* I can hardly begin to describe how wrong this is. This is 5704 * so broken as to be worse than useless. The API draft 5705 * REALLY is NOT helpful here... I am not convinced that the 5706 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5707 * are at all well-founded. 5708 */ 5709 if (level != SOL_SCTP) { 5710 struct sctp_af *af = sctp_sk(sk)->pf->af; 5711 5712 retval = af->getsockopt(sk, level, optname, optval, optlen); 5713 return retval; 5714 } 5715 5716 if (get_user(len, optlen)) 5717 return -EFAULT; 5718 5719 sctp_lock_sock(sk); 5720 5721 switch (optname) { 5722 case SCTP_STATUS: 5723 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5724 break; 5725 case SCTP_DISABLE_FRAGMENTS: 5726 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5727 optlen); 5728 break; 5729 case SCTP_EVENTS: 5730 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5731 break; 5732 case SCTP_AUTOCLOSE: 5733 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5734 break; 5735 case SCTP_SOCKOPT_PEELOFF: 5736 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 5737 break; 5738 case SCTP_PEER_ADDR_PARAMS: 5739 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5740 optlen); 5741 break; 5742 case SCTP_DELAYED_SACK: 5743 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 5744 optlen); 5745 break; 5746 case SCTP_INITMSG: 5747 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5748 break; 5749 case SCTP_GET_PEER_ADDRS: 5750 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 5751 optlen); 5752 break; 5753 case SCTP_GET_LOCAL_ADDRS: 5754 retval = sctp_getsockopt_local_addrs(sk, len, optval, 5755 optlen); 5756 break; 5757 case SCTP_SOCKOPT_CONNECTX3: 5758 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 5759 break; 5760 case SCTP_DEFAULT_SEND_PARAM: 5761 retval = sctp_getsockopt_default_send_param(sk, len, 5762 optval, optlen); 5763 break; 5764 case SCTP_PRIMARY_ADDR: 5765 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 5766 break; 5767 case SCTP_NODELAY: 5768 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 5769 break; 5770 case SCTP_RTOINFO: 5771 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 5772 break; 5773 case SCTP_ASSOCINFO: 5774 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 5775 break; 5776 case SCTP_I_WANT_MAPPED_V4_ADDR: 5777 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 5778 break; 5779 case SCTP_MAXSEG: 5780 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 5781 break; 5782 case SCTP_GET_PEER_ADDR_INFO: 5783 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 5784 optlen); 5785 break; 5786 case SCTP_ADAPTATION_LAYER: 5787 retval = 
sctp_getsockopt_adaptation_layer(sk, len, optval, 5788 optlen); 5789 break; 5790 case SCTP_CONTEXT: 5791 retval = sctp_getsockopt_context(sk, len, optval, optlen); 5792 break; 5793 case SCTP_FRAGMENT_INTERLEAVE: 5794 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 5795 optlen); 5796 break; 5797 case SCTP_PARTIAL_DELIVERY_POINT: 5798 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 5799 optlen); 5800 break; 5801 case SCTP_MAX_BURST: 5802 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 5803 break; 5804 case SCTP_AUTH_KEY: 5805 case SCTP_AUTH_CHUNK: 5806 case SCTP_AUTH_DELETE_KEY: 5807 retval = -EOPNOTSUPP; 5808 break; 5809 case SCTP_HMAC_IDENT: 5810 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 5811 break; 5812 case SCTP_AUTH_ACTIVE_KEY: 5813 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 5814 break; 5815 case SCTP_PEER_AUTH_CHUNKS: 5816 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 5817 optlen); 5818 break; 5819 case SCTP_LOCAL_AUTH_CHUNKS: 5820 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 5821 optlen); 5822 break; 5823 case SCTP_GET_ASSOC_NUMBER: 5824 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 5825 break; 5826 case SCTP_GET_ASSOC_ID_LIST: 5827 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 5828 break; 5829 case SCTP_AUTO_ASCONF: 5830 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 5831 break; 5832 case SCTP_PEER_ADDR_THLDS: 5833 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 5834 break; 5835 case SCTP_GET_ASSOC_STATS: 5836 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 5837 break; 5838 default: 5839 retval = -ENOPROTOOPT; 5840 break; 5841 } 5842 5843 sctp_release_sock(sk); 5844 return retval; 5845 } 5846 5847 static void sctp_hash(struct sock *sk) 5848 { 5849 /* STUB */ 5850 } 5851 5852 static void sctp_unhash(struct sock *sk) 5853 { 5854 /* STUB */ 5855 } 5856 5857 /* Check if port is acceptable. Possibly find first available port. 5858 * 5859 * The port hash table (contained in the 'global' SCTP protocol storage 5860 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 5861 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 5862 * list (the list number is the port number hashed out, so as you 5863 * would expect from a hash function, all the ports in a given list have 5864 * such a number that hashes out to the same list number; you were 5865 * expecting that, right?); so each list has a set of ports, with a 5866 * link to the socket (struct sock) that uses it, the port number and 5867 * a fastreuse flag (FIXME: NPI ipg). 5868 */ 5869 static struct sctp_bind_bucket *sctp_bucket_create( 5870 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 5871 5872 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 5873 { 5874 struct sctp_bind_hashbucket *head; /* hash list */ 5875 struct sctp_bind_bucket *pp; 5876 unsigned short snum; 5877 int ret; 5878 5879 snum = ntohs(addr->v4.sin_port); 5880 5881 pr_debug("%s: begins, snum:%d\n", __func__, snum); 5882 5883 sctp_local_bh_disable(); 5884 5885 if (snum == 0) { 5886 /* Search for an available port. 
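		 * Start from a random point in the local ephemeral port range
		 * and probe forward one port at a time, skipping reserved
		 * ports, until a port is found whose bind hash chain has no
		 * bucket for this network namespace.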
*/ 5887 int low, high, remaining, index; 5888 unsigned int rover; 5889 5890 inet_get_local_port_range(&low, &high); 5891 remaining = (high - low) + 1; 5892 rover = net_random() % remaining + low; 5893 5894 do { 5895 rover++; 5896 if ((rover < low) || (rover > high)) 5897 rover = low; 5898 if (inet_is_reserved_local_port(rover)) 5899 continue; 5900 index = sctp_phashfn(sock_net(sk), rover); 5901 head = &sctp_port_hashtable[index]; 5902 sctp_spin_lock(&head->lock); 5903 sctp_for_each_hentry(pp, &head->chain) 5904 if ((pp->port == rover) && 5905 net_eq(sock_net(sk), pp->net)) 5906 goto next; 5907 break; 5908 next: 5909 sctp_spin_unlock(&head->lock); 5910 } while (--remaining > 0); 5911 5912 /* Exhausted local port range during search? */ 5913 ret = 1; 5914 if (remaining <= 0) 5915 goto fail; 5916 5917 /* OK, here is the one we will use. HEAD (the port 5918 * hash table list entry) is non-NULL and we hold it's 5919 * mutex. 5920 */ 5921 snum = rover; 5922 } else { 5923 /* We are given an specific port number; we verify 5924 * that it is not being used. If it is used, we will 5925 * exahust the search in the hash list corresponding 5926 * to the port number (snum) - we detect that with the 5927 * port iterator, pp being NULL. 5928 */ 5929 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 5930 sctp_spin_lock(&head->lock); 5931 sctp_for_each_hentry(pp, &head->chain) { 5932 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 5933 goto pp_found; 5934 } 5935 } 5936 pp = NULL; 5937 goto pp_not_found; 5938 pp_found: 5939 if (!hlist_empty(&pp->owner)) { 5940 /* We had a port hash table hit - there is an 5941 * available port (pp != NULL) and it is being 5942 * used by other socket (pp->owner not empty); that other 5943 * socket is going to be sk2. 5944 */ 5945 int reuse = sk->sk_reuse; 5946 struct sock *sk2; 5947 5948 pr_debug("%s: found a possible match\n", __func__); 5949 5950 if (pp->fastreuse && sk->sk_reuse && 5951 sk->sk_state != SCTP_SS_LISTENING) 5952 goto success; 5953 5954 /* Run through the list of sockets bound to the port 5955 * (pp->port) [via the pointers bind_next and 5956 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 5957 * we get the endpoint they describe and run through 5958 * the endpoint's list of IP (v4 or v6) addresses, 5959 * comparing each of the addresses with the address of 5960 * the socket sk. If we find a match, then that means 5961 * that this port/socket (sk) combination are already 5962 * in an endpoint. 5963 */ 5964 sk_for_each_bound(sk2, &pp->owner) { 5965 struct sctp_endpoint *ep2; 5966 ep2 = sctp_sk(sk2)->ep; 5967 5968 if (sk == sk2 || 5969 (reuse && sk2->sk_reuse && 5970 sk2->sk_state != SCTP_SS_LISTENING)) 5971 continue; 5972 5973 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 5974 sctp_sk(sk2), sctp_sk(sk))) { 5975 ret = (long)sk2; 5976 goto fail_unlock; 5977 } 5978 } 5979 5980 pr_debug("%s: found a match\n", __func__); 5981 } 5982 pp_not_found: 5983 /* If there was a hash table miss, create a new port. */ 5984 ret = 1; 5985 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 5986 goto fail_unlock; 5987 5988 /* In either case (hit or miss), make sure fastreuse is 1 only 5989 * if sk->sk_reuse is too (that is, if the caller requested 5990 * SO_REUSEADDR on this socket -sk-). 
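	 * A bucket keeps fastreuse set only as long as every socket added to
	 * it requested SO_REUSEADDR and was not in the listening state.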
5991 */ 5992 if (hlist_empty(&pp->owner)) { 5993 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 5994 pp->fastreuse = 1; 5995 else 5996 pp->fastreuse = 0; 5997 } else if (pp->fastreuse && 5998 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 5999 pp->fastreuse = 0; 6000 6001 /* We are set, so fill up all the data in the hash table 6002 * entry, tie the socket list information with the rest of the 6003 * sockets FIXME: Blurry, NPI (ipg). 6004 */ 6005 success: 6006 if (!sctp_sk(sk)->bind_hash) { 6007 inet_sk(sk)->inet_num = snum; 6008 sk_add_bind_node(sk, &pp->owner); 6009 sctp_sk(sk)->bind_hash = pp; 6010 } 6011 ret = 0; 6012 6013 fail_unlock: 6014 sctp_spin_unlock(&head->lock); 6015 6016 fail: 6017 sctp_local_bh_enable(); 6018 return ret; 6019 } 6020 6021 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6022 * port is requested. 6023 */ 6024 static int sctp_get_port(struct sock *sk, unsigned short snum) 6025 { 6026 union sctp_addr addr; 6027 struct sctp_af *af = sctp_sk(sk)->pf->af; 6028 6029 /* Set up a dummy address struct from the sk. */ 6030 af->from_sk(&addr, sk); 6031 addr.v4.sin_port = htons(snum); 6032 6033 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6034 return !!sctp_get_port_local(sk, &addr); 6035 } 6036 6037 /* 6038 * Move a socket to LISTENING state. 6039 */ 6040 static int sctp_listen_start(struct sock *sk, int backlog) 6041 { 6042 struct sctp_sock *sp = sctp_sk(sk); 6043 struct sctp_endpoint *ep = sp->ep; 6044 struct crypto_hash *tfm = NULL; 6045 char alg[32]; 6046 6047 /* Allocate HMAC for generating cookie. */ 6048 if (!sp->hmac && sp->sctp_hmac_alg) { 6049 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6050 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6051 if (IS_ERR(tfm)) { 6052 net_info_ratelimited("failed to load transform for %s: %ld\n", 6053 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6054 return -ENOSYS; 6055 } 6056 sctp_sk(sk)->hmac = tfm; 6057 } 6058 6059 /* 6060 * If a bind() or sctp_bindx() is not called prior to a listen() 6061 * call that allows new associations to be accepted, the system 6062 * picks an ephemeral port and will choose an address set equivalent 6063 * to binding with a wildcard address. 6064 * 6065 * This is not currently spelled out in the SCTP sockets 6066 * extensions draft, but follows the practice as seen in TCP 6067 * sockets. 6068 * 6069 */ 6070 sk->sk_state = SCTP_SS_LISTENING; 6071 if (!ep->base.bind_addr.port) { 6072 if (sctp_autobind(sk)) 6073 return -EAGAIN; 6074 } else { 6075 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6076 sk->sk_state = SCTP_SS_CLOSED; 6077 return -EADDRINUSE; 6078 } 6079 } 6080 6081 sk->sk_max_ack_backlog = backlog; 6082 sctp_hash_endpoint(ep); 6083 return 0; 6084 } 6085 6086 /* 6087 * 4.1.3 / 5.1.3 listen() 6088 * 6089 * By default, new associations are not accepted for UDP style sockets. 6090 * An application uses listen() to mark a socket as being able to 6091 * accept new associations. 6092 * 6093 * On TCP style sockets, applications use listen() to ready the SCTP 6094 * endpoint for accepting inbound associations. 6095 * 6096 * On both types of endpoints a backlog of '0' disables listening. 6097 * 6098 * Move a socket to LISTENING state. 6099 */ 6100 int sctp_inet_listen(struct socket *sock, int backlog) 6101 { 6102 struct sock *sk = sock->sk; 6103 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6104 int err = -EINVAL; 6105 6106 if (unlikely(backlog < 0)) 6107 return err; 6108 6109 sctp_lock_sock(sk); 6110 6111 /* Peeled-off sockets are not allowed to listen(). 
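	 * A peeled-off socket carries the UDP_HIGH_BANDWIDTH socket style,
	 * which is what the style check below rejects.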
*/ 6112 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6113 goto out; 6114 6115 if (sock->state != SS_UNCONNECTED) 6116 goto out; 6117 6118 /* If backlog is zero, disable listening. */ 6119 if (!backlog) { 6120 if (sctp_sstate(sk, CLOSED)) 6121 goto out; 6122 6123 err = 0; 6124 sctp_unhash_endpoint(ep); 6125 sk->sk_state = SCTP_SS_CLOSED; 6126 if (sk->sk_reuse) 6127 sctp_sk(sk)->bind_hash->fastreuse = 1; 6128 goto out; 6129 } 6130 6131 /* If we are already listening, just update the backlog */ 6132 if (sctp_sstate(sk, LISTENING)) 6133 sk->sk_max_ack_backlog = backlog; 6134 else { 6135 err = sctp_listen_start(sk, backlog); 6136 if (err) 6137 goto out; 6138 } 6139 6140 err = 0; 6141 out: 6142 sctp_release_sock(sk); 6143 return err; 6144 } 6145 6146 /* 6147 * This function is done by modeling the current datagram_poll() and the 6148 * tcp_poll(). Note that, based on these implementations, we don't 6149 * lock the socket in this function, even though it seems that, 6150 * ideally, locking or some other mechanisms can be used to ensure 6151 * the integrity of the counters (sndbuf and wmem_alloc) used 6152 * in this place. We assume that we don't need locks either until proven 6153 * otherwise. 6154 * 6155 * Another thing to note is that we include the Async I/O support 6156 * here, again, by modeling the current TCP/UDP code. We don't have 6157 * a good way to test with it yet. 6158 */ 6159 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6160 { 6161 struct sock *sk = sock->sk; 6162 struct sctp_sock *sp = sctp_sk(sk); 6163 unsigned int mask; 6164 6165 poll_wait(file, sk_sleep(sk), wait); 6166 6167 /* A TCP-style listening socket becomes readable when the accept queue 6168 * is not empty. 6169 */ 6170 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6171 return (!list_empty(&sp->ep->asocs)) ? 6172 (POLLIN | POLLRDNORM) : 0; 6173 6174 mask = 0; 6175 6176 /* Is there any exceptional events? */ 6177 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6178 mask |= POLLERR | 6179 sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0; 6180 if (sk->sk_shutdown & RCV_SHUTDOWN) 6181 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6182 if (sk->sk_shutdown == SHUTDOWN_MASK) 6183 mask |= POLLHUP; 6184 6185 /* Is it readable? Reconsider this code with TCP-style support. */ 6186 if (!skb_queue_empty(&sk->sk_receive_queue)) 6187 mask |= POLLIN | POLLRDNORM; 6188 6189 /* The association is either gone or not ready. */ 6190 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6191 return mask; 6192 6193 /* Is it writable? */ 6194 if (sctp_writeable(sk)) { 6195 mask |= POLLOUT | POLLWRNORM; 6196 } else { 6197 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6198 /* 6199 * Since the socket is not locked, the buffer 6200 * might be made available after the writeable check and 6201 * before the bit is set. This could cause a lost I/O 6202 * signal. tcp_poll() has a race breaker for this race 6203 * condition. Based on their implementation, we put 6204 * in the following code to cover it as well. 
6205 */ 6206 if (sctp_writeable(sk)) 6207 mask |= POLLOUT | POLLWRNORM; 6208 } 6209 return mask; 6210 } 6211 6212 /******************************************************************** 6213 * 2nd Level Abstractions 6214 ********************************************************************/ 6215 6216 static struct sctp_bind_bucket *sctp_bucket_create( 6217 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6218 { 6219 struct sctp_bind_bucket *pp; 6220 6221 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6222 if (pp) { 6223 SCTP_DBG_OBJCNT_INC(bind_bucket); 6224 pp->port = snum; 6225 pp->fastreuse = 0; 6226 INIT_HLIST_HEAD(&pp->owner); 6227 pp->net = net; 6228 hlist_add_head(&pp->node, &head->chain); 6229 } 6230 return pp; 6231 } 6232 6233 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6234 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6235 { 6236 if (pp && hlist_empty(&pp->owner)) { 6237 __hlist_del(&pp->node); 6238 kmem_cache_free(sctp_bucket_cachep, pp); 6239 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6240 } 6241 } 6242 6243 /* Release this socket's reference to a local port. */ 6244 static inline void __sctp_put_port(struct sock *sk) 6245 { 6246 struct sctp_bind_hashbucket *head = 6247 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6248 inet_sk(sk)->inet_num)]; 6249 struct sctp_bind_bucket *pp; 6250 6251 sctp_spin_lock(&head->lock); 6252 pp = sctp_sk(sk)->bind_hash; 6253 __sk_del_bind_node(sk); 6254 sctp_sk(sk)->bind_hash = NULL; 6255 inet_sk(sk)->inet_num = 0; 6256 sctp_bucket_destroy(pp); 6257 sctp_spin_unlock(&head->lock); 6258 } 6259 6260 void sctp_put_port(struct sock *sk) 6261 { 6262 sctp_local_bh_disable(); 6263 __sctp_put_port(sk); 6264 sctp_local_bh_enable(); 6265 } 6266 6267 /* 6268 * The system picks an ephemeral port and choose an address set equivalent 6269 * to binding with a wildcard address. 6270 * One of those addresses will be the primary address for the association. 6271 * This automatically enables the multihoming capability of SCTP. 6272 */ 6273 static int sctp_autobind(struct sock *sk) 6274 { 6275 union sctp_addr autoaddr; 6276 struct sctp_af *af; 6277 __be16 port; 6278 6279 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6280 af = sctp_sk(sk)->pf->af; 6281 6282 port = htons(inet_sk(sk)->inet_num); 6283 af->inaddr_any(&autoaddr, port); 6284 6285 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6286 } 6287 6288 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6289 * 6290 * From RFC 2292 6291 * 4.2 The cmsghdr Structure * 6292 * 6293 * When ancillary data is sent or received, any number of ancillary data 6294 * objects can be specified by the msg_control and msg_controllen members of 6295 * the msghdr structure, because each object is preceded by 6296 * a cmsghdr structure defining the object's length (the cmsg_len member). 6297 * Historically Berkeley-derived implementations have passed only one object 6298 * at a time, but this API allows multiple objects to be 6299 * passed in a single call to sendmsg() or recvmsg(). The following example 6300 * shows two ancillary data objects in a control buffer. 
6301 * 6302 * |<--------------------------- msg_controllen -------------------------->| 6303 * | | 6304 * 6305 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6306 * 6307 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6308 * | | | 6309 * 6310 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6311 * 6312 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6313 * | | | | | 6314 * 6315 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6316 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6317 * 6318 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6319 * 6320 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6321 * ^ 6322 * | 6323 * 6324 * msg_control 6325 * points here 6326 */ 6327 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6328 { 6329 struct cmsghdr *cmsg; 6330 struct msghdr *my_msg = (struct msghdr *)msg; 6331 6332 for (cmsg = CMSG_FIRSTHDR(msg); 6333 cmsg != NULL; 6334 cmsg = CMSG_NXTHDR(my_msg, cmsg)) { 6335 if (!CMSG_OK(my_msg, cmsg)) 6336 return -EINVAL; 6337 6338 /* Should we parse this header or ignore? */ 6339 if (cmsg->cmsg_level != IPPROTO_SCTP) 6340 continue; 6341 6342 /* Strictly check lengths following example in SCM code. */ 6343 switch (cmsg->cmsg_type) { 6344 case SCTP_INIT: 6345 /* SCTP Socket API Extension 6346 * 5.2.1 SCTP Initiation Structure (SCTP_INIT) 6347 * 6348 * This cmsghdr structure provides information for 6349 * initializing new SCTP associations with sendmsg(). 6350 * The SCTP_INITMSG socket option uses this same data 6351 * structure. This structure is not used for 6352 * recvmsg(). 6353 * 6354 * cmsg_level cmsg_type cmsg_data[] 6355 * ------------ ------------ ---------------------- 6356 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6357 */ 6358 if (cmsg->cmsg_len != 6359 CMSG_LEN(sizeof(struct sctp_initmsg))) 6360 return -EINVAL; 6361 cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg); 6362 break; 6363 6364 case SCTP_SNDRCV: 6365 /* SCTP Socket API Extension 6366 * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV) 6367 * 6368 * This cmsghdr structure specifies SCTP options for 6369 * sendmsg() and describes SCTP header information 6370 * about a received message through recvmsg(). 6371 * 6372 * cmsg_level cmsg_type cmsg_data[] 6373 * ------------ ------------ ---------------------- 6374 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6375 */ 6376 if (cmsg->cmsg_len != 6377 CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6378 return -EINVAL; 6379 6380 cmsgs->info = 6381 (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 6382 6383 /* Minimally, validate the sinfo_flags. */ 6384 if (cmsgs->info->sinfo_flags & 6385 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6386 SCTP_ABORT | SCTP_EOF)) 6387 return -EINVAL; 6388 break; 6389 6390 default: 6391 return -EINVAL; 6392 } 6393 } 6394 return 0; 6395 } 6396 6397 /* 6398 * Wait for a packet.. 6399 * Note: This function is the same function as in core/datagram.c 6400 * with a few modifications to make lksctp work. 6401 */ 6402 static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) 6403 { 6404 int error; 6405 DEFINE_WAIT(wait); 6406 6407 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6408 6409 /* Socket errors? */ 6410 error = sock_error(sk); 6411 if (error) 6412 goto out; 6413 6414 if (!skb_queue_empty(&sk->sk_receive_queue)) 6415 goto ready; 6416 6417 /* Socket shut down? 
*/ 6418 if (sk->sk_shutdown & RCV_SHUTDOWN) 6419 goto out; 6420 6421 /* Sequenced packets can come disconnected. If so we report the 6422 * problem. 6423 */ 6424 error = -ENOTCONN; 6425 6426 /* Is there a good reason to think that we may receive some data? */ 6427 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6428 goto out; 6429 6430 /* Handle signals. */ 6431 if (signal_pending(current)) 6432 goto interrupted; 6433 6434 /* Let another process have a go. Since we are going to sleep 6435 * anyway. Note: This may cause odd behaviors if the message 6436 * does not fit in the user's buffer, but this seems to be the 6437 * only way to honor MSG_DONTWAIT realistically. 6438 */ 6439 sctp_release_sock(sk); 6440 *timeo_p = schedule_timeout(*timeo_p); 6441 sctp_lock_sock(sk); 6442 6443 ready: 6444 finish_wait(sk_sleep(sk), &wait); 6445 return 0; 6446 6447 interrupted: 6448 error = sock_intr_errno(*timeo_p); 6449 6450 out: 6451 finish_wait(sk_sleep(sk), &wait); 6452 *err = error; 6453 return error; 6454 } 6455 6456 /* Receive a datagram. 6457 * Note: This is pretty much the same routine as in core/datagram.c 6458 * with a few changes to make lksctp work. 6459 */ 6460 static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6461 int noblock, int *err) 6462 { 6463 int error; 6464 struct sk_buff *skb; 6465 long timeo; 6466 6467 timeo = sock_rcvtimeo(sk, noblock); 6468 6469 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6470 MAX_SCHEDULE_TIMEOUT); 6471 6472 do { 6473 /* Again only user level code calls this function, 6474 * so nothing interrupt level 6475 * will suddenly eat the receive_queue. 6476 * 6477 * Look at current nfs client by the way... 6478 * However, this function was correct in any case. 8) 6479 */ 6480 if (flags & MSG_PEEK) { 6481 spin_lock_bh(&sk->sk_receive_queue.lock); 6482 skb = skb_peek(&sk->sk_receive_queue); 6483 if (skb) 6484 atomic_inc(&skb->users); 6485 spin_unlock_bh(&sk->sk_receive_queue.lock); 6486 } else { 6487 skb = skb_dequeue(&sk->sk_receive_queue); 6488 } 6489 6490 if (skb) 6491 return skb; 6492 6493 /* Caller is allowed not to check sk->sk_err before calling. */ 6494 error = sock_error(sk); 6495 if (error) 6496 goto no_packet; 6497 6498 if (sk->sk_shutdown & RCV_SHUTDOWN) 6499 break; 6500 6501 /* User doesn't want to wait. */ 6502 error = -EAGAIN; 6503 if (!timeo) 6504 goto no_packet; 6505 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6506 6507 return NULL; 6508 6509 no_packet: 6510 *err = error; 6511 return NULL; 6512 } 6513 6514 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6515 static void __sctp_write_space(struct sctp_association *asoc) 6516 { 6517 struct sock *sk = asoc->base.sk; 6518 struct socket *sock = sk->sk_socket; 6519 6520 if ((sctp_wspace(asoc) > 0) && sock) { 6521 if (waitqueue_active(&asoc->wait)) 6522 wake_up_interruptible(&asoc->wait); 6523 6524 if (sctp_writeable(sk)) { 6525 wait_queue_head_t *wq = sk_sleep(sk); 6526 6527 if (wq && waitqueue_active(wq)) 6528 wake_up_interruptible(wq); 6529 6530 /* Note that we try to include the Async I/O support 6531 * here by modeling from the current TCP/UDP code. 6532 * We have not tested with it yet. 6533 */ 6534 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6535 sock_wake_async(sock, 6536 SOCK_WAKE_SPACE, POLL_OUT); 6537 } 6538 } 6539 } 6540 6541 /* Do accounting for the sndbuf space. 6542 * Decrement the used sndbuf space of the corresponding association by the 6543 * data size which was just transmitted(freed). 
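 * This is the destructor installed on data chunk skbs by
 * sctp_set_owner_w(); it also drops the association reference taken there
 * and wakes up any senders waiting for sndbuf space.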
6544 */ 6545 static void sctp_wfree(struct sk_buff *skb) 6546 { 6547 struct sctp_association *asoc; 6548 struct sctp_chunk *chunk; 6549 struct sock *sk; 6550 6551 /* Get the saved chunk pointer. */ 6552 chunk = *((struct sctp_chunk **)(skb->cb)); 6553 asoc = chunk->asoc; 6554 sk = asoc->base.sk; 6555 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6556 sizeof(struct sk_buff) + 6557 sizeof(struct sctp_chunk); 6558 6559 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6560 6561 /* 6562 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6563 */ 6564 sk->sk_wmem_queued -= skb->truesize; 6565 sk_mem_uncharge(sk, skb->truesize); 6566 6567 sock_wfree(skb); 6568 __sctp_write_space(asoc); 6569 6570 sctp_association_put(asoc); 6571 } 6572 6573 /* Do accounting for the receive space on the socket. 6574 * Accounting for the association is done in ulpevent.c 6575 * We set this as a destructor for the cloned data skbs so that 6576 * accounting is done at the correct time. 6577 */ 6578 void sctp_sock_rfree(struct sk_buff *skb) 6579 { 6580 struct sock *sk = skb->sk; 6581 struct sctp_ulpevent *event = sctp_skb2event(skb); 6582 6583 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6584 6585 /* 6586 * Mimic the behavior of sock_rfree 6587 */ 6588 sk_mem_uncharge(sk, event->rmem_len); 6589 } 6590 6591 6592 /* Helper function to wait for space in the sndbuf. */ 6593 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6594 size_t msg_len) 6595 { 6596 struct sock *sk = asoc->base.sk; 6597 int err = 0; 6598 long current_timeo = *timeo_p; 6599 DEFINE_WAIT(wait); 6600 6601 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6602 *timeo_p, msg_len); 6603 6604 /* Increment the association's refcnt. */ 6605 sctp_association_hold(asoc); 6606 6607 /* Wait on the association specific sndbuf space. */ 6608 for (;;) { 6609 prepare_to_wait_exclusive(&asoc->wait, &wait, 6610 TASK_INTERRUPTIBLE); 6611 if (!*timeo_p) 6612 goto do_nonblock; 6613 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6614 asoc->base.dead) 6615 goto do_error; 6616 if (signal_pending(current)) 6617 goto do_interrupted; 6618 if (msg_len <= sctp_wspace(asoc)) 6619 break; 6620 6621 /* Let another process have a go. Since we are going 6622 * to sleep anyway. 6623 */ 6624 sctp_release_sock(sk); 6625 current_timeo = schedule_timeout(current_timeo); 6626 BUG_ON(sk != asoc->base.sk); 6627 sctp_lock_sock(sk); 6628 6629 *timeo_p = current_timeo; 6630 } 6631 6632 out: 6633 finish_wait(&asoc->wait, &wait); 6634 6635 /* Release the association's refcnt. */ 6636 sctp_association_put(asoc); 6637 6638 return err; 6639 6640 do_error: 6641 err = -EPIPE; 6642 goto out; 6643 6644 do_interrupted: 6645 err = sock_intr_errno(*timeo_p); 6646 goto out; 6647 6648 do_nonblock: 6649 err = -EAGAIN; 6650 goto out; 6651 } 6652 6653 void sctp_data_ready(struct sock *sk, int len) 6654 { 6655 struct socket_wq *wq; 6656 6657 rcu_read_lock(); 6658 wq = rcu_dereference(sk->sk_wq); 6659 if (wq_has_sleeper(wq)) 6660 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6661 POLLRDNORM | POLLRDBAND); 6662 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6663 rcu_read_unlock(); 6664 } 6665 6666 /* If socket sndbuf has changed, wake up all per association waiters. */ 6667 void sctp_write_space(struct sock *sk) 6668 { 6669 struct sctp_association *asoc; 6670 6671 /* Wake up the tasks in each wait queue. 
*/ 6672 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 6673 __sctp_write_space(asoc); 6674 } 6675 } 6676 6677 /* Is there any sndbuf space available on the socket? 6678 * 6679 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 6680 * associations on the same socket. For a UDP-style socket with 6681 * multiple associations, it is possible for it to be "unwriteable" 6682 * prematurely. I assume that this is acceptable because 6683 * a premature "unwriteable" is better than an accidental "writeable" which 6684 * would cause an unwanted block under certain circumstances. For the 1-1 6685 * UDP-style sockets or TCP-style sockets, this code should work. 6686 * - Daisy 6687 */ 6688 static int sctp_writeable(struct sock *sk) 6689 { 6690 int amt = 0; 6691 6692 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 6693 if (amt < 0) 6694 amt = 0; 6695 return amt; 6696 } 6697 6698 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 6699 * returns immediately with EINPROGRESS. 6700 */ 6701 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 6702 { 6703 struct sock *sk = asoc->base.sk; 6704 int err = 0; 6705 long current_timeo = *timeo_p; 6706 DEFINE_WAIT(wait); 6707 6708 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 6709 6710 /* Increment the association's refcnt. */ 6711 sctp_association_hold(asoc); 6712 6713 for (;;) { 6714 prepare_to_wait_exclusive(&asoc->wait, &wait, 6715 TASK_INTERRUPTIBLE); 6716 if (!*timeo_p) 6717 goto do_nonblock; 6718 if (sk->sk_shutdown & RCV_SHUTDOWN) 6719 break; 6720 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6721 asoc->base.dead) 6722 goto do_error; 6723 if (signal_pending(current)) 6724 goto do_interrupted; 6725 6726 if (sctp_state(asoc, ESTABLISHED)) 6727 break; 6728 6729 /* Let another process have a go. Since we are going 6730 * to sleep anyway. 6731 */ 6732 sctp_release_sock(sk); 6733 current_timeo = schedule_timeout(current_timeo); 6734 sctp_lock_sock(sk); 6735 6736 *timeo_p = current_timeo; 6737 } 6738 6739 out: 6740 finish_wait(&asoc->wait, &wait); 6741 6742 /* Release the association's refcnt. 
*/ 6743 sctp_association_put(asoc); 6744 6745 return err; 6746 6747 do_error: 6748 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 6749 err = -ETIMEDOUT; 6750 else 6751 err = -ECONNREFUSED; 6752 goto out; 6753 6754 do_interrupted: 6755 err = sock_intr_errno(*timeo_p); 6756 goto out; 6757 6758 do_nonblock: 6759 err = -EINPROGRESS; 6760 goto out; 6761 } 6762 6763 static int sctp_wait_for_accept(struct sock *sk, long timeo) 6764 { 6765 struct sctp_endpoint *ep; 6766 int err = 0; 6767 DEFINE_WAIT(wait); 6768 6769 ep = sctp_sk(sk)->ep; 6770 6771 6772 for (;;) { 6773 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 6774 TASK_INTERRUPTIBLE); 6775 6776 if (list_empty(&ep->asocs)) { 6777 sctp_release_sock(sk); 6778 timeo = schedule_timeout(timeo); 6779 sctp_lock_sock(sk); 6780 } 6781 6782 err = -EINVAL; 6783 if (!sctp_sstate(sk, LISTENING)) 6784 break; 6785 6786 err = 0; 6787 if (!list_empty(&ep->asocs)) 6788 break; 6789 6790 err = sock_intr_errno(timeo); 6791 if (signal_pending(current)) 6792 break; 6793 6794 err = -EAGAIN; 6795 if (!timeo) 6796 break; 6797 } 6798 6799 finish_wait(sk_sleep(sk), &wait); 6800 6801 return err; 6802 } 6803 6804 static void sctp_wait_for_close(struct sock *sk, long timeout) 6805 { 6806 DEFINE_WAIT(wait); 6807 6808 do { 6809 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6810 if (list_empty(&sctp_sk(sk)->ep->asocs)) 6811 break; 6812 sctp_release_sock(sk); 6813 timeout = schedule_timeout(timeout); 6814 sctp_lock_sock(sk); 6815 } while (!signal_pending(current) && timeout); 6816 6817 finish_wait(sk_sleep(sk), &wait); 6818 } 6819 6820 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 6821 { 6822 struct sk_buff *frag; 6823 6824 if (!skb->data_len) 6825 goto done; 6826 6827 /* Don't forget the fragments. */ 6828 skb_walk_frags(skb, frag) 6829 sctp_skb_set_owner_r_frag(frag, sk); 6830 6831 done: 6832 sctp_skb_set_owner_r(skb, sk); 6833 } 6834 6835 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 6836 struct sctp_association *asoc) 6837 { 6838 struct inet_sock *inet = inet_sk(sk); 6839 struct inet_sock *newinet; 6840 6841 newsk->sk_type = sk->sk_type; 6842 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6843 newsk->sk_flags = sk->sk_flags; 6844 newsk->sk_no_check = sk->sk_no_check; 6845 newsk->sk_reuse = sk->sk_reuse; 6846 6847 newsk->sk_shutdown = sk->sk_shutdown; 6848 newsk->sk_destruct = sctp_destruct_sock; 6849 newsk->sk_family = sk->sk_family; 6850 newsk->sk_protocol = IPPROTO_SCTP; 6851 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 6852 newsk->sk_sndbuf = sk->sk_sndbuf; 6853 newsk->sk_rcvbuf = sk->sk_rcvbuf; 6854 newsk->sk_lingertime = sk->sk_lingertime; 6855 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 6856 newsk->sk_sndtimeo = sk->sk_sndtimeo; 6857 6858 newinet = inet_sk(newsk); 6859 6860 /* Initialize sk's sport, dport, rcv_saddr and daddr for 6861 * getsockname() and getpeername() 6862 */ 6863 newinet->inet_sport = inet->inet_sport; 6864 newinet->inet_saddr = inet->inet_saddr; 6865 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 6866 newinet->inet_dport = htons(asoc->peer.port); 6867 newinet->pmtudisc = inet->pmtudisc; 6868 newinet->inet_id = asoc->next_tsn ^ jiffies; 6869 6870 newinet->uc_ttl = inet->uc_ttl; 6871 newinet->mc_loop = 1; 6872 newinet->mc_ttl = 1; 6873 newinet->mc_index = 0; 6874 newinet->mc_list = NULL; 6875 } 6876 6877 /* Populate the fields of the newsk from the oldsk and migrate the assoc 6878 * and its messages to the newsk. 
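 * This is used both when an association is peeled off from a one-to-many
 * (UDP-style) socket and when accept() returns a new socket on a
 * TCP-style listener.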
6879 */ 6880 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 6881 struct sctp_association *assoc, 6882 sctp_socket_type_t type) 6883 { 6884 struct sctp_sock *oldsp = sctp_sk(oldsk); 6885 struct sctp_sock *newsp = sctp_sk(newsk); 6886 struct sctp_bind_bucket *pp; /* hash list port iterator */ 6887 struct sctp_endpoint *newep = newsp->ep; 6888 struct sk_buff *skb, *tmp; 6889 struct sctp_ulpevent *event; 6890 struct sctp_bind_hashbucket *head; 6891 struct list_head tmplist; 6892 6893 /* Migrate socket buffer sizes and all the socket level options to the 6894 * new socket. 6895 */ 6896 newsk->sk_sndbuf = oldsk->sk_sndbuf; 6897 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 6898 /* Brute force copy old sctp opt. */ 6899 if (oldsp->do_auto_asconf) { 6900 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); 6901 inet_sk_copy_descendant(newsk, oldsk); 6902 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); 6903 } else 6904 inet_sk_copy_descendant(newsk, oldsk); 6905 6906 /* Restore the ep value that was overwritten with the above structure 6907 * copy. 6908 */ 6909 newsp->ep = newep; 6910 newsp->hmac = NULL; 6911 6912 /* Hook this new socket in to the bind_hash list. */ 6913 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 6914 inet_sk(oldsk)->inet_num)]; 6915 sctp_local_bh_disable(); 6916 sctp_spin_lock(&head->lock); 6917 pp = sctp_sk(oldsk)->bind_hash; 6918 sk_add_bind_node(newsk, &pp->owner); 6919 sctp_sk(newsk)->bind_hash = pp; 6920 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 6921 sctp_spin_unlock(&head->lock); 6922 sctp_local_bh_enable(); 6923 6924 /* Copy the bind_addr list from the original endpoint to the new 6925 * endpoint so that we can handle restarts properly 6926 */ 6927 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 6928 &oldsp->ep->base.bind_addr, GFP_KERNEL); 6929 6930 /* Move any messages in the old socket's receive queue that are for the 6931 * peeled off association to the new socket's receive queue. 6932 */ 6933 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 6934 event = sctp_skb2event(skb); 6935 if (event->asoc == assoc) { 6936 __skb_unlink(skb, &oldsk->sk_receive_queue); 6937 __skb_queue_tail(&newsk->sk_receive_queue, skb); 6938 sctp_skb_set_owner_r_frag(skb, newsk); 6939 } 6940 } 6941 6942 /* Clean up any messages pending delivery due to partial 6943 * delivery. Three cases: 6944 * 1) No partial deliver; no work. 6945 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 6946 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 6947 */ 6948 skb_queue_head_init(&newsp->pd_lobby); 6949 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 6950 6951 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 6952 struct sk_buff_head *queue; 6953 6954 /* Decide which queue to move pd_lobby skbs to. */ 6955 if (assoc->ulpq.pd_mode) { 6956 queue = &newsp->pd_lobby; 6957 } else 6958 queue = &newsk->sk_receive_queue; 6959 6960 /* Walk through the pd_lobby, looking for skbs that 6961 * need moved to the new socket. 6962 */ 6963 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 6964 event = sctp_skb2event(skb); 6965 if (event->asoc == assoc) { 6966 __skb_unlink(skb, &oldsp->pd_lobby); 6967 __skb_queue_tail(queue, skb); 6968 sctp_skb_set_owner_r_frag(skb, newsk); 6969 } 6970 } 6971 6972 /* Clear up any skbs waiting for the partial 6973 * delivery to finish. 
6974 */ 6975 if (assoc->ulpq.pd_mode) 6976 sctp_clear_pd(oldsk, NULL); 6977 6978 } 6979 6980 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 6981 sctp_skb_set_owner_r_frag(skb, newsk); 6982 6983 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 6984 sctp_skb_set_owner_r_frag(skb, newsk); 6985 6986 /* Set the type of socket to indicate that it is peeled off from the 6987 * original UDP-style socket or created with the accept() call on a 6988 * TCP-style socket.. 6989 */ 6990 newsp->type = type; 6991 6992 /* Mark the new socket "in-use" by the user so that any packets 6993 * that may arrive on the association after we've moved it are 6994 * queued to the backlog. This prevents a potential race between 6995 * backlog processing on the old socket and new-packet processing 6996 * on the new socket. 6997 * 6998 * The caller has just allocated newsk so we can guarantee that other 6999 * paths won't try to lock it and then oldsk. 7000 */ 7001 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7002 sctp_assoc_migrate(assoc, newsk); 7003 7004 /* If the association on the newsk is already closed before accept() 7005 * is called, set RCV_SHUTDOWN flag. 7006 */ 7007 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7008 newsk->sk_shutdown |= RCV_SHUTDOWN; 7009 7010 newsk->sk_state = SCTP_SS_ESTABLISHED; 7011 sctp_release_sock(newsk); 7012 } 7013 7014 7015 /* This proto struct describes the ULP interface for SCTP. */ 7016 struct proto sctp_prot = { 7017 .name = "SCTP", 7018 .owner = THIS_MODULE, 7019 .close = sctp_close, 7020 .connect = sctp_connect, 7021 .disconnect = sctp_disconnect, 7022 .accept = sctp_accept, 7023 .ioctl = sctp_ioctl, 7024 .init = sctp_init_sock, 7025 .destroy = sctp_destroy_sock, 7026 .shutdown = sctp_shutdown, 7027 .setsockopt = sctp_setsockopt, 7028 .getsockopt = sctp_getsockopt, 7029 .sendmsg = sctp_sendmsg, 7030 .recvmsg = sctp_recvmsg, 7031 .bind = sctp_bind, 7032 .backlog_rcv = sctp_backlog_rcv, 7033 .hash = sctp_hash, 7034 .unhash = sctp_unhash, 7035 .get_port = sctp_get_port, 7036 .obj_size = sizeof(struct sctp_sock), 7037 .sysctl_mem = sysctl_sctp_mem, 7038 .sysctl_rmem = sysctl_sctp_rmem, 7039 .sysctl_wmem = sysctl_sctp_wmem, 7040 .memory_pressure = &sctp_memory_pressure, 7041 .enter_memory_pressure = sctp_enter_memory_pressure, 7042 .memory_allocated = &sctp_memory_allocated, 7043 .sockets_allocated = &sctp_sockets_allocated, 7044 }; 7045 7046 #if IS_ENABLED(CONFIG_IPV6) 7047 7048 struct proto sctpv6_prot = { 7049 .name = "SCTPv6", 7050 .owner = THIS_MODULE, 7051 .close = sctp_close, 7052 .connect = sctp_connect, 7053 .disconnect = sctp_disconnect, 7054 .accept = sctp_accept, 7055 .ioctl = sctp_ioctl, 7056 .init = sctp_init_sock, 7057 .destroy = sctp_destroy_sock, 7058 .shutdown = sctp_shutdown, 7059 .setsockopt = sctp_setsockopt, 7060 .getsockopt = sctp_getsockopt, 7061 .sendmsg = sctp_sendmsg, 7062 .recvmsg = sctp_recvmsg, 7063 .bind = sctp_bind, 7064 .backlog_rcv = sctp_backlog_rcv, 7065 .hash = sctp_hash, 7066 .unhash = sctp_unhash, 7067 .get_port = sctp_get_port, 7068 .obj_size = sizeof(struct sctp6_sock), 7069 .sysctl_mem = sysctl_sctp_mem, 7070 .sysctl_rmem = sysctl_sctp_rmem, 7071 .sysctl_wmem = sysctl_sctp_wmem, 7072 .memory_pressure = &sctp_memory_pressure, 7073 .enter_memory_pressure = sctp_enter_memory_pressure, 7074 .memory_allocated = &sctp_memory_allocated, 7075 .sockets_allocated = &sctp_sockets_allocated, 7076 }; 7077 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7078