/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/file.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
			      struct sctp_association *, sctp_socket_type_t);

extern struct kmem_cache *sctp_bucket_cachep;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];

static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}

/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_transport *transport;
	union sctp_addr *laddr = (union sctp_addr *)addr;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd       - the socket descriptor returned by socket().
 *   addr     - the address structure (struct sockaddr_in or struct
 *              sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	sctp_lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	sctp_release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);
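/* Illustrative userspace sketch (not part of this file or its build): one way
 * an application might use the UDP-style bind() described in the API 3.1.2
 * comment above.  It assumes the lksctp-tools <netinet/sctp.h> header; the
 * port number is an arbitrary example and error handling is minimal.
 */
#if 0
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>
#include <sys/socket.h>

static int example_bind(void)
{
	/* One-to-many (UDP-style) SCTP socket. */
	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	struct sockaddr_in addr;

	if (sd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(5000);			/* example port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);

	/* A second bind() on the same socket is rejected (see sctp_bind()). */
	return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
}
#endif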
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < PROT_SOCK &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		af->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF.  Note this restriction binds each side, so at any
 * time two ASCONFs may be in transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				dst_release(trans->dst);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}

/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back. */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			dst_release(transport->dst);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6).  The family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause the endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel.  Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}
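/* Illustrative userspace sketch (not part of this file or its build): the
 * "packed array" of addresses that sctp_setsockopt_bindx() walks above, added
 * via the sctp_bindx() wrapper.  It assumes the lksctp-tools library and
 * headers; addresses and port are arbitrary documentation examples.  Packing
 * the entries in a plain array only works here because both are IPv4 and so
 * have the same size.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>

static int example_bindx_add(int sd)
{
	/* Addresses laid out back to back; the same port in every entry. */
	struct sockaddr_in addrs[2];
	int i;

	memset(addrs, 0, sizeof(addrs));
	for (i = 0; i < 2; i++) {
		addrs[i].sin_family = AF_INET;
		addrs[i].sin_port = htons(5000);
	}
	inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
	inet_pton(AF_INET, "198.51.100.1", &addrs[1].sin_addr);

	/* Tunnelled through setsockopt() into sctp_setsockopt_bindx(). */
	return sctp_bindx(sd, (struct sockaddr *)addrs, 2,
			  SCTP_BINDX_ADD_ADDR);
}
#endif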
/* __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	struct sctp_af *af;
	sctp_scope_t scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port < PROT_SOCK &&
				    !ns_capable(net->user_ns,
						CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	af = sctp_get_af_specific(sa_addr->sa.sa_family);
	af->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	err = sctp_wait_for_connect(asoc, &timeo);
	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
		*assoc_id = asoc->assoc_id;

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table; try to unhash it, just in case.  This
		 * is a no-op if it wasn't hashed, so we're safe.
		 */
		sctp_unhash_established(asoc);
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6).  The family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association. On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code. The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke __sctp_connect().  This is used for tunneling the
 * sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel.  Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations.  The only different part
 * is that we store the actual length of the address buffer in the
 * addrs_num structure member.  That way we can re-use the existing
 * code.
 */
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	if (len < sizeof(param))
		return -EINVAL;

	if (copy_from_user(&param, optval, sizeof(param)))
		return -EFAULT;

	err = __sctp_setsockopt_connectx(sk,
			(struct sockaddr __user *)param.addrs,
			param.addr_num, &assoc_id);

	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}
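/* Illustrative userspace sketch (not part of this file or its build): the
 * sctp_connectx() call whose kernel side is tunnelled through the
 * setsockopt()/getsockopt() handlers above.  It assumes the lksctp-tools
 * wrapper that takes a sctp_assoc_t pointer; the peer addresses and port are
 * arbitrary examples.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>

static int example_connectx(int sd)
{
	struct sockaddr_in peers[2];
	sctp_assoc_t asoc_id = 0;
	int i;

	memset(peers, 0, sizeof(peers));
	for (i = 0; i < 2; i++) {
		peers[i].sin_family = AF_INET;
		peers[i].sin_port = htons(5000);  /* same port in every entry */
	}
	inet_pton(AF_INET, "192.0.2.10", &peers[0].sin_addr);
	inet_pton(AF_INET, "198.51.100.10", &peers[1].sin_addr);

	/* On success the new association id is returned through asoc_id. */
	return sctp_connectx(sd, (struct sockaddr *)peers, 2, &asoc_id);
}
#endif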
/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	sctp_lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_unhash_established(asoc);
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			if (chunk)
				sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	sctp_release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 */
	sctp_local_bh_disable();
	sctp_bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will call sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	sctp_bh_unlock_sock(sk);
	sctp_local_bh_enable();

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}
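/* Illustrative userspace sketch (not part of this file or its build): the
 * SO_LINGER behaviour described in the API 7.1.4 comment above.  With
 * l_onoff=1 and l_linger=0, close() takes the abort path handled in
 * sctp_close() instead of a graceful SHUTDOWN.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void example_abortive_close(int sd)
{
	struct linger lin = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
	close(sd);	/* sends ABORT instead of SHUTDOWN */
}
#endif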
/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);

static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	int err;
	sctp_scope_t scope;
	long timeo;
	__u16 sinfo_flags = 0;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs. */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinfo = cmsgs.info;
	sinit = cmsgs.init;

	/* Did the user specify SNDRCVINFO?  */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	sctp_lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (!asoc) {
			/* If we could not find a matching association on the
			 * endpoint, make sure that it is not a TCP-style
			 * socket that already has an association or there is
			 * no peeled-off association on another socket.
			 */
			if ((sctp_style(sk, TCP) &&
			     sctp_sstate(sk, ESTABLISHED)) ||
			    sctp_endpoint_is_peeled_off(ep, &to)) {
				err = -EADDRNOTAVAIL;
				goto out_unlock;
			}
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || (sinit && !sinit->sinit_num_ostreams)) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
		if (!ep->base.bind_addr.port) {
			if (sctp_autobind(sk)) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			/*
			 * If an unprivileged user inherits a one-to-many
			 * style socket with open associations on a privileged
			 * port, it MAY be permitted to accept new associations,
			 * but it SHOULD NOT be permitted to open new
			 * associations.
			 */
			if (ep->base.bind_addr.port < PROT_SOCK &&
			    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
				err = -EACCES;
				goto out_unlock;
			}
		}

		scope = sctp_scope(&to);
		new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
		if (!new_asoc) {
			err = -ENOMEM;
			goto out_unlock;
		}
		asoc = new_asoc;
		err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
		if (err < 0) {
			err = -ENOMEM;
			goto out_free;
		}

		/* If the SCTP_INIT ancillary data is specified, set all
		 * the association init values accordingly.
		 */
		if (sinit) {
			if (sinit->sinit_num_ostreams) {
				asoc->c.sinit_num_ostreams =
					sinit->sinit_num_ostreams;
			}
			if (sinit->sinit_max_instreams) {
				asoc->c.sinit_max_instreams =
					sinit->sinit_max_instreams;
			}
			if (sinit->sinit_max_attempts) {
				asoc->max_init_attempts
					= sinit->sinit_max_attempts;
			}
			if (sinit->sinit_max_init_timeo) {
				asoc->max_init_timeo =
				 msecs_to_jiffies(sinit->sinit_max_init_timeo);
			}
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	/* ASSERT: we have a valid association at this point.  */
	pr_debug("%s: we have a valid association\n", __func__);

	if (!sinfo) {
		/* If the user didn't specify SNDRCVINFO, make up one with
		 * some defaults.
		 */
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = asoc->default_stream;
		default_sinfo.sinfo_flags = asoc->default_flags;
		default_sinfo.sinfo_ppid = asoc->default_ppid;
		default_sinfo.sinfo_context = asoc->default_context;
		default_sinfo.sinfo_timetolive = asoc->default_timetolive;
		default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
		sinfo = &default_sinfo;
	}

	/* API 7.1.7, the sndbuf size per association bounds the
	 * maximum size of data that can be sent in a single send call.
	 */
	if (msg_len > sk->sk_sndbuf) {
		err = -EMSGSIZE;
		goto out_free;
	}

	if (asoc->pmtu_pending)
		sctp_assoc_pending_pmtu(sk, asoc);

	/* If fragmentation is disabled and the message length exceeds the
	 * association fragmentation point, return EMSGSIZE.  The I-D
	 * does not specify what this error is, but this looks like
	 * a great fit.
	 */
	if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	/* Check for invalid stream. */
	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
		err = -EINVAL;
		goto out_free;
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	if (!sctp_wspace(asoc)) {
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err)
			goto out_free;
	}

	/* If an address is passed with the sendto/sendmsg call, it is used
	 * to override the primary destination address in the TCP model, or
	 * when SCTP_ADDR_OVER flag is set in the UDP model.
	 */
	if ((sctp_style(sk, TCP) && msg_name) ||
	    (sinfo_flags & SCTP_ADDR_OVER)) {
		chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
		if (!chunk_tp) {
			err = -EINVAL;
			goto out_free;
		}
	} else
		chunk_tp = NULL;

	/* Auto-connect, if we aren't connected already. */
*/ 1900 if (sctp_state(asoc, CLOSED)) { 1901 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1902 if (err < 0) 1903 goto out_free; 1904 1905 pr_debug("%s: we associated primitively\n", __func__); 1906 } 1907 1908 /* Break the message into multiple chunks of maximum size. */ 1909 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); 1910 if (IS_ERR(datamsg)) { 1911 err = PTR_ERR(datamsg); 1912 goto out_free; 1913 } 1914 1915 /* Now send the (possibly) fragmented message. */ 1916 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1917 sctp_chunk_hold(chunk); 1918 1919 /* Do accounting for the write space. */ 1920 sctp_set_owner_w(chunk); 1921 1922 chunk->transport = chunk_tp; 1923 } 1924 1925 /* Send it to the lower layers. Note: all chunks 1926 * must either fail or succeed. The lower layer 1927 * works that way today. Keep it that way or this 1928 * breaks. 1929 */ 1930 err = sctp_primitive_SEND(net, asoc, datamsg); 1931 /* Did the lower layer accept the chunk? */ 1932 if (err) { 1933 sctp_datamsg_free(datamsg); 1934 goto out_free; 1935 } 1936 1937 pr_debug("%s: we sent primitively\n", __func__); 1938 1939 sctp_datamsg_put(datamsg); 1940 err = msg_len; 1941 1942 /* If we are already past ASSOCIATE, the lower 1943 * layers are responsible for association cleanup. 1944 */ 1945 goto out_unlock; 1946 1947 out_free: 1948 if (new_asoc) { 1949 sctp_unhash_established(asoc); 1950 sctp_association_free(asoc); 1951 } 1952 out_unlock: 1953 sctp_release_sock(sk); 1954 1955 out_nounlock: 1956 return sctp_error(sk, msg_flags, err); 1957 1958 #if 0 1959 do_sock_err: 1960 if (msg_len) 1961 err = msg_len; 1962 else 1963 err = sock_error(sk); 1964 goto out; 1965 1966 do_interrupted: 1967 if (msg_len) 1968 err = msg_len; 1969 goto out; 1970 #endif /* 0 */ 1971 } 1972 1973 /* This is an extended version of skb_pull() that removes the data from the 1974 * start of a skb even when data is spread across the list of skb's in the 1975 * frag_list. len specifies the total amount of data that needs to be removed. 1976 * when 'len' bytes could be removed from the skb, it returns 0. 1977 * If 'len' exceeds the total skb length, it returns the no. of bytes that 1978 * could not be removed. 1979 */ 1980 static int sctp_skb_pull(struct sk_buff *skb, int len) 1981 { 1982 struct sk_buff *list; 1983 int skb_len = skb_headlen(skb); 1984 int rlen; 1985 1986 if (len <= skb_len) { 1987 __skb_pull(skb, len); 1988 return 0; 1989 } 1990 len -= skb_len; 1991 __skb_pull(skb, skb_len); 1992 1993 skb_walk_frags(skb, list) { 1994 rlen = sctp_skb_pull(list, len); 1995 skb->len -= (len-rlen); 1996 skb->data_len -= (len-rlen); 1997 1998 if (!rlen) 1999 return 0; 2000 2001 len = rlen; 2002 } 2003 2004 return len; 2005 } 2006 2007 /* API 3.1.3 recvmsg() - UDP Style Syntax 2008 * 2009 * ssize_t recvmsg(int socket, struct msghdr *message, 2010 * int flags); 2011 * 2012 * socket - the socket descriptor of the endpoint. 2013 * message - pointer to the msghdr structure which contains a single 2014 * user message and possibly some ancillary data. 2015 * 2016 * See Section 5 for complete description of the data 2017 * structures. 2018 * 2019 * flags - flags sent or received with the user message, see Section 2020 * 5 for complete description of the flags. 
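 *
 * An illustrative userspace sketch (assumptions of this note, not part of the
 * quoted API text: it uses the sctp_recvmsg() helper from lksctp-tools and a
 * hypothetical open SCTP descriptor 'sd'):
 *
 *	char buf[8192];
 *	struct sockaddr_storage from;
 *	socklen_t fromlen = sizeof(from);
 *	struct sctp_sndrcvinfo sinfo;
 *	int flags = 0;
 *	int n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf), (struct sockaddr *)&from,
 *			 &fromlen, &sinfo, &flags);
 *
 * On return, MSG_NOTIFICATION in 'flags' marks an event rather than user
 * data, and a missing MSG_EOR means only part of the message was copied,
 * matching the behavior implemented in sctp_recvmsg() below.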
2021 */ 2022 static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); 2023 2024 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, 2025 struct msghdr *msg, size_t len, int noblock, 2026 int flags, int *addr_len) 2027 { 2028 struct sctp_ulpevent *event = NULL; 2029 struct sctp_sock *sp = sctp_sk(sk); 2030 struct sk_buff *skb; 2031 int copied; 2032 int err = 0; 2033 int skb_len; 2034 2035 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2036 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2037 addr_len); 2038 2039 sctp_lock_sock(sk); 2040 2041 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2042 err = -ENOTCONN; 2043 goto out; 2044 } 2045 2046 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2047 if (!skb) 2048 goto out; 2049 2050 /* Get the total length of the skb including any skb's in the 2051 * frag_list. 2052 */ 2053 skb_len = skb->len; 2054 2055 copied = skb_len; 2056 if (copied > len) 2057 copied = len; 2058 2059 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 2060 2061 event = sctp_skb2event(skb); 2062 2063 if (err) 2064 goto out_free; 2065 2066 sock_recv_ts_and_drops(msg, sk, skb); 2067 if (sctp_ulpevent_is_notification(event)) { 2068 msg->msg_flags |= MSG_NOTIFICATION; 2069 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2070 } else { 2071 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2072 } 2073 2074 /* Check if we allow SCTP_SNDRCVINFO. */ 2075 if (sp->subscribe.sctp_data_io_event) 2076 sctp_ulpevent_read_sndrcvinfo(event, msg); 2077 #if 0 2078 /* FIXME: we should be calling IP/IPv6 layers. */ 2079 if (sk->sk_protinfo.af_inet.cmsg_flags) 2080 ip_cmsg_recv(msg, skb); 2081 #endif 2082 2083 err = copied; 2084 2085 /* If skb's length exceeds the user's buffer, update the skb and 2086 * push it back to the receive_queue so that the next call to 2087 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2088 */ 2089 if (skb_len > copied) { 2090 msg->msg_flags &= ~MSG_EOR; 2091 if (flags & MSG_PEEK) 2092 goto out_free; 2093 sctp_skb_pull(skb, copied); 2094 skb_queue_head(&sk->sk_receive_queue, skb); 2095 2096 /* When only partial message is copied to the user, increase 2097 * rwnd by that amount. If all the data in the skb is read, 2098 * rwnd is updated when the event is freed. 2099 */ 2100 if (!sctp_ulpevent_is_notification(event)) 2101 sctp_assoc_rwnd_increase(event->asoc, copied); 2102 goto out; 2103 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2104 (event->msg_flags & MSG_EOR)) 2105 msg->msg_flags |= MSG_EOR; 2106 else 2107 msg->msg_flags &= ~MSG_EOR; 2108 2109 out_free: 2110 if (flags & MSG_PEEK) { 2111 /* Release the skb reference acquired after peeking the skb in 2112 * sctp_skb_recv_datagram(). 2113 */ 2114 kfree_skb(skb); 2115 } else { 2116 /* Free the event which includes releasing the reference to 2117 * the owner of the skb, freeing the skb and updating the 2118 * rwnd. 2119 */ 2120 sctp_ulpevent_free(event); 2121 } 2122 out: 2123 sctp_release_sock(sk); 2124 return err; 2125 } 2126 2127 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2128 * 2129 * This option is a on/off flag. If enabled no SCTP message 2130 * fragmentation will be performed. Instead if a message being sent 2131 * exceeds the current PMTU size, the message will NOT be sent and 2132 * instead a error will be indicated to the user. 
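 *
 * An illustrative userspace sketch (assumes the usual <netinet/sctp.h>
 * declarations and a hypothetical SCTP descriptor 'sd'):
 *
 *	int on = 1;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *		       &on, sizeof(on)) < 0)
 *		perror("SCTP_DISABLE_FRAGMENTS");
 *
 * With the flag set, a send that exceeds the association's fragmentation
 * point fails with EMSGSIZE instead of being split into multiple DATA
 * chunks (see the check in sctp_sendmsg() above).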
2133 */ 2134 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2135 char __user *optval, 2136 unsigned int optlen) 2137 { 2138 int val; 2139 2140 if (optlen < sizeof(int)) 2141 return -EINVAL; 2142 2143 if (get_user(val, (int __user *)optval)) 2144 return -EFAULT; 2145 2146 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2147 2148 return 0; 2149 } 2150 2151 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2152 unsigned int optlen) 2153 { 2154 struct sctp_association *asoc; 2155 struct sctp_ulpevent *event; 2156 2157 if (optlen > sizeof(struct sctp_event_subscribe)) 2158 return -EINVAL; 2159 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2160 return -EFAULT; 2161 2162 /* 2163 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2164 * if there is no data to be sent or retransmit, the stack will 2165 * immediately send up this notification. 2166 */ 2167 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2168 &sctp_sk(sk)->subscribe)) { 2169 asoc = sctp_id2assoc(sk, 0); 2170 2171 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2172 event = sctp_ulpevent_make_sender_dry_event(asoc, 2173 GFP_ATOMIC); 2174 if (!event) 2175 return -ENOMEM; 2176 2177 sctp_ulpq_tail_event(&asoc->ulpq, event); 2178 } 2179 } 2180 2181 return 0; 2182 } 2183 2184 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2185 * 2186 * This socket option is applicable to the UDP-style socket only. When 2187 * set it will cause associations that are idle for more than the 2188 * specified number of seconds to automatically close. An association 2189 * being idle is defined an association that has NOT sent or received 2190 * user data. The special value of '0' indicates that no automatic 2191 * close of any associations should be performed. The option expects an 2192 * integer defining the number of seconds of idle time before an 2193 * association is closed. 2194 */ 2195 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2196 unsigned int optlen) 2197 { 2198 struct sctp_sock *sp = sctp_sk(sk); 2199 struct net *net = sock_net(sk); 2200 2201 /* Applicable to UDP-style socket only */ 2202 if (sctp_style(sk, TCP)) 2203 return -EOPNOTSUPP; 2204 if (optlen != sizeof(int)) 2205 return -EINVAL; 2206 if (copy_from_user(&sp->autoclose, optval, optlen)) 2207 return -EFAULT; 2208 2209 if (sp->autoclose > net->sctp.max_autoclose) 2210 sp->autoclose = net->sctp.max_autoclose; 2211 2212 return 0; 2213 } 2214 2215 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2216 * 2217 * Applications can enable or disable heartbeats for any peer address of 2218 * an association, modify an address's heartbeat interval, force a 2219 * heartbeat to be sent immediately, and adjust the address's maximum 2220 * number of retransmissions sent before an address is considered 2221 * unreachable. The following structure is used to access and modify an 2222 * address's parameters: 2223 * 2224 * struct sctp_paddrparams { 2225 * sctp_assoc_t spp_assoc_id; 2226 * struct sockaddr_storage spp_address; 2227 * uint32_t spp_hbinterval; 2228 * uint16_t spp_pathmaxrxt; 2229 * uint32_t spp_pathmtu; 2230 * uint32_t spp_sackdelay; 2231 * uint32_t spp_flags; 2232 * }; 2233 * 2234 * spp_assoc_id - (one-to-many style socket) This is filled in the 2235 * application, and identifies the association for 2236 * this query. 2237 * spp_address - This specifies which address is of interest. 
* spp_hbinterval - This contains the value of the heartbeat interval, 2239 * in milliseconds. If a value of zero 2240 * is present in this field then no changes are to 2241 * be made to this parameter. 2242 * spp_pathmaxrxt - This contains the maximum number of 2243 * retransmissions before this address shall be 2244 * considered unreachable. If a value of zero 2245 * is present in this field then no changes are to 2246 * be made to this parameter. 2247 * spp_pathmtu - When Path MTU discovery is disabled the value 2248 * specified here will be the "fixed" path mtu. 2249 * Note that if the spp_address field is empty 2250 * then all associations on this address will 2251 * have this fixed path mtu set upon them. 2252 * 2253 * spp_sackdelay - When delayed sack is enabled, this value specifies 2254 * the number of milliseconds that sacks will be delayed 2255 * for. This value will apply to all addresses of an 2256 * association if the spp_address field is empty. Note 2257 * also that if delayed sack is enabled and this 2258 * value is set to 0, no change is made to the last 2259 * recorded delayed sack timer value. 2260 * 2261 * spp_flags - These flags are used to control various features 2262 * on an association. The flag field may contain 2263 * zero or more of the following options. 2264 * 2265 * SPP_HB_ENABLE - Enable heartbeats on the 2266 * specified address. Note that if the address 2267 * field is empty all addresses for the association 2268 * have heartbeats enabled upon them. 2269 * 2270 * SPP_HB_DISABLE - Disable heartbeats on the 2271 * specified address. Note that if the address 2272 * field is empty all addresses for the association 2273 * will have their heartbeats disabled. Note also 2274 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2275 * mutually exclusive; only one of these two should 2276 * be specified. Enabling both flags will have 2277 * undetermined results. 2278 * 2279 * SPP_HB_DEMAND - Request a user initiated heartbeat 2280 * to be made immediately. 2281 * 2282 * SPP_HB_TIME_IS_ZERO - Specifies that the time for 2283 * heartbeat delay is to be set to the value of 0 2284 * milliseconds. 2285 * 2286 * SPP_PMTUD_ENABLE - This field will enable PMTU 2287 * discovery upon the specified address. Note that 2288 * if the address field is empty then all addresses 2289 * on the association are affected. 2290 * 2291 * SPP_PMTUD_DISABLE - This field will disable PMTU 2292 * discovery upon the specified address. Note that 2293 * if the address field is empty then all addresses 2294 * on the association are affected. Note also that 2295 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2296 * exclusive. Enabling both will have undetermined 2297 * results. 2298 * 2299 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2300 * on delayed sack. The time specified in spp_sackdelay 2301 * is used to specify the sack delay for this address. Note 2302 * that if spp_address is empty then all addresses will 2303 * enable delayed sack and take on the sack delay 2304 * value specified in spp_sackdelay. 2305 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2306 * off delayed sack. If the spp_address field is blank then 2307 * delayed sack is disabled for the entire association. Note 2308 * also that this field is mutually exclusive with 2309 * SPP_SACKDELAY_ENABLE; setting both will have undefined 2310 * results.
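 *
 * An illustrative userspace sketch (assumes <netinet/sctp.h>; 'sd' and
 * 'assoc_id' are a hypothetical one-to-many socket and a valid association
 * id): enable heartbeats every 5000 milliseconds on all of the association's
 * peer addresses by leaving spp_address zeroed (the wildcard):
 *
 *	struct sctp_paddrparams pp;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	pp.spp_assoc_id = assoc_id;
 *	pp.spp_flags = SPP_HB_ENABLE;
 *	pp.spp_hbinterval = 5000;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));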
2311 */ 2312 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2313 struct sctp_transport *trans, 2314 struct sctp_association *asoc, 2315 struct sctp_sock *sp, 2316 int hb_change, 2317 int pmtud_change, 2318 int sackdelay_change) 2319 { 2320 int error; 2321 2322 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2323 struct net *net = sock_net(trans->asoc->base.sk); 2324 2325 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2326 if (error) 2327 return error; 2328 } 2329 2330 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2331 * this field is ignored. Note also that a value of zero indicates 2332 * the current setting should be left unchanged. 2333 */ 2334 if (params->spp_flags & SPP_HB_ENABLE) { 2335 2336 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2337 * set. This lets us use 0 value when this flag 2338 * is set. 2339 */ 2340 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2341 params->spp_hbinterval = 0; 2342 2343 if (params->spp_hbinterval || 2344 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2345 if (trans) { 2346 trans->hbinterval = 2347 msecs_to_jiffies(params->spp_hbinterval); 2348 } else if (asoc) { 2349 asoc->hbinterval = 2350 msecs_to_jiffies(params->spp_hbinterval); 2351 } else { 2352 sp->hbinterval = params->spp_hbinterval; 2353 } 2354 } 2355 } 2356 2357 if (hb_change) { 2358 if (trans) { 2359 trans->param_flags = 2360 (trans->param_flags & ~SPP_HB) | hb_change; 2361 } else if (asoc) { 2362 asoc->param_flags = 2363 (asoc->param_flags & ~SPP_HB) | hb_change; 2364 } else { 2365 sp->param_flags = 2366 (sp->param_flags & ~SPP_HB) | hb_change; 2367 } 2368 } 2369 2370 /* When Path MTU discovery is disabled the value specified here will 2371 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2372 * include the flag SPP_PMTUD_DISABLE for this field to have any 2373 * effect). 2374 */ 2375 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2376 if (trans) { 2377 trans->pathmtu = params->spp_pathmtu; 2378 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2379 } else if (asoc) { 2380 asoc->pathmtu = params->spp_pathmtu; 2381 sctp_frag_point(asoc, params->spp_pathmtu); 2382 } else { 2383 sp->pathmtu = params->spp_pathmtu; 2384 } 2385 } 2386 2387 if (pmtud_change) { 2388 if (trans) { 2389 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2390 (params->spp_flags & SPP_PMTUD_ENABLE); 2391 trans->param_flags = 2392 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2393 if (update) { 2394 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2395 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2396 } 2397 } else if (asoc) { 2398 asoc->param_flags = 2399 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2400 } else { 2401 sp->param_flags = 2402 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2403 } 2404 } 2405 2406 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2407 * value of this field is ignored. Note also that a value of zero 2408 * indicates the current setting should be left unchanged. 
*/ 2410 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2411 if (trans) { 2412 trans->sackdelay = 2413 msecs_to_jiffies(params->spp_sackdelay); 2414 } else if (asoc) { 2415 asoc->sackdelay = 2416 msecs_to_jiffies(params->spp_sackdelay); 2417 } else { 2418 sp->sackdelay = params->spp_sackdelay; 2419 } 2420 } 2421 2422 if (sackdelay_change) { 2423 if (trans) { 2424 trans->param_flags = 2425 (trans->param_flags & ~SPP_SACKDELAY) | 2426 sackdelay_change; 2427 } else if (asoc) { 2428 asoc->param_flags = 2429 (asoc->param_flags & ~SPP_SACKDELAY) | 2430 sackdelay_change; 2431 } else { 2432 sp->param_flags = 2433 (sp->param_flags & ~SPP_SACKDELAY) | 2434 sackdelay_change; 2435 } 2436 } 2437 2438 /* Note that a value of zero indicates the current setting should be 2439 left unchanged. 2440 */ 2441 if (params->spp_pathmaxrxt) { 2442 if (trans) { 2443 trans->pathmaxrxt = params->spp_pathmaxrxt; 2444 } else if (asoc) { 2445 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2446 } else { 2447 sp->pathmaxrxt = params->spp_pathmaxrxt; 2448 } 2449 } 2450 2451 return 0; 2452 } 2453 2454 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2455 char __user *optval, 2456 unsigned int optlen) 2457 { 2458 struct sctp_paddrparams params; 2459 struct sctp_transport *trans = NULL; 2460 struct sctp_association *asoc = NULL; 2461 struct sctp_sock *sp = sctp_sk(sk); 2462 int error; 2463 int hb_change, pmtud_change, sackdelay_change; 2464 2465 if (optlen != sizeof(struct sctp_paddrparams)) 2466 return -EINVAL; 2467 2468 if (copy_from_user(&params, optval, optlen)) 2469 return -EFAULT; 2470 2471 /* Validate flags and value parameters. */ 2472 hb_change = params.spp_flags & SPP_HB; 2473 pmtud_change = params.spp_flags & SPP_PMTUD; 2474 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2475 2476 if (hb_change == SPP_HB || 2477 pmtud_change == SPP_PMTUD || 2478 sackdelay_change == SPP_SACKDELAY || 2479 params.spp_sackdelay > 500 || 2480 (params.spp_pathmtu && 2481 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2482 return -EINVAL; 2483 2484 /* If an address other than INADDR_ANY is specified, and 2485 * no transport is found, then the request is invalid. 2486 */ 2487 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 2488 trans = sctp_addr_id2transport(sk, &params.spp_address, 2489 params.spp_assoc_id); 2490 if (!trans) 2491 return -EINVAL; 2492 } 2493 2494 /* Get association, if assoc_id != 0 and the socket is a one 2495 * to many style socket, and an association was not found, then 2496 * the id was invalid. 2497 */ 2498 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2499 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2500 return -EINVAL; 2501 2502 /* Heartbeat demand can only be sent on a transport or 2503 * association, but not a socket. 2504 */ 2505 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2506 return -EINVAL; 2507 2508 /* Process parameters. */ 2509 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2510 hb_change, pmtud_change, 2511 sackdelay_change); 2512 2513 if (error) 2514 return error; 2515 2516 /* If changes are for association, also apply parameters to each 2517 * transport. 2518 */ 2519 if (!trans && asoc) { 2520 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2521 transports) { 2522 sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2523 hb_change, pmtud_change, 2524 sackdelay_change); 2525 } 2526 } 2527 2528 return 0; 2529 } 2530 2531 /* 2532 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2533 * 2534 * This option will affect the way delayed acks are performed. This 2535 * option allows you to get or set the delayed ack time, in 2536 * milliseconds. It also allows changing the delayed ack frequency. 2537 * Changing the frequency to 1 disables the delayed sack algorithm. If 2538 * the assoc_id is 0, then this sets or gets the endpoint's default 2539 * values. If the assoc_id field is non-zero, then the set or get 2540 * affects the specified association for the one to many model (the 2541 * assoc_id field is ignored by the one to one model). Note that if 2542 * sack_delay or sack_freq are 0 when setting this option, then the 2543 * current values will remain unchanged. 2544 * 2545 * struct sctp_sack_info { 2546 * sctp_assoc_t sack_assoc_id; 2547 * uint32_t sack_delay; 2548 * uint32_t sack_freq; 2549 * }; 2550 * 2551 * sack_assoc_id - This parameter indicates which association the user 2552 * is performing an action upon. Note that if this field's value is 2553 * zero then the endpoint's default value is changed (affecting future 2554 * associations only). 2555 * 2556 * sack_delay - This parameter contains the number of milliseconds that 2557 * the user is requesting the delayed ACK timer be set to. Note that 2558 * this value is defined in the standard to be between 200 and 500 2559 * milliseconds. 2560 * 2561 * sack_freq - This parameter contains the number of packets that must 2562 * be received before a sack is sent without waiting for the delay 2563 * timer to expire. The default value for this is 2; setting this 2564 * value to 1 will disable the delayed sack algorithm. 2565 */ 2566 2567 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2568 char __user *optval, unsigned int optlen) 2569 { 2570 struct sctp_sack_info params; 2571 struct sctp_transport *trans = NULL; 2572 struct sctp_association *asoc = NULL; 2573 struct sctp_sock *sp = sctp_sk(sk); 2574 2575 if (optlen == sizeof(struct sctp_sack_info)) { 2576 if (copy_from_user(&params, optval, optlen)) 2577 return -EFAULT; 2578 2579 if (params.sack_delay == 0 && params.sack_freq == 0) 2580 return 0; 2581 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2582 pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n"); 2583 pr_warn("Use struct sctp_sack_info instead\n"); 2584 if (copy_from_user(&params, optval, optlen)) 2585 return -EFAULT; 2586 2587 if (params.sack_delay == 0) 2588 params.sack_freq = 1; 2589 else 2590 params.sack_freq = 0; 2591 } else 2592 return -EINVAL; 2593 2594 /* Validate value parameter. */ 2595 if (params.sack_delay > 500) 2596 return -EINVAL; 2597 2598 /* Get association, if sack_assoc_id != 0 and the socket is a one 2599 * to many style socket, and an association was not found, then 2600 * the id was invalid.
2601 */ 2602 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2603 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2604 return -EINVAL; 2605 2606 if (params.sack_delay) { 2607 if (asoc) { 2608 asoc->sackdelay = 2609 msecs_to_jiffies(params.sack_delay); 2610 asoc->param_flags = 2611 (asoc->param_flags & ~SPP_SACKDELAY) | 2612 SPP_SACKDELAY_ENABLE; 2613 } else { 2614 sp->sackdelay = params.sack_delay; 2615 sp->param_flags = 2616 (sp->param_flags & ~SPP_SACKDELAY) | 2617 SPP_SACKDELAY_ENABLE; 2618 } 2619 } 2620 2621 if (params.sack_freq == 1) { 2622 if (asoc) { 2623 asoc->param_flags = 2624 (asoc->param_flags & ~SPP_SACKDELAY) | 2625 SPP_SACKDELAY_DISABLE; 2626 } else { 2627 sp->param_flags = 2628 (sp->param_flags & ~SPP_SACKDELAY) | 2629 SPP_SACKDELAY_DISABLE; 2630 } 2631 } else if (params.sack_freq > 1) { 2632 if (asoc) { 2633 asoc->sackfreq = params.sack_freq; 2634 asoc->param_flags = 2635 (asoc->param_flags & ~SPP_SACKDELAY) | 2636 SPP_SACKDELAY_ENABLE; 2637 } else { 2638 sp->sackfreq = params.sack_freq; 2639 sp->param_flags = 2640 (sp->param_flags & ~SPP_SACKDELAY) | 2641 SPP_SACKDELAY_ENABLE; 2642 } 2643 } 2644 2645 /* If change is for association, also apply to each transport. */ 2646 if (asoc) { 2647 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2648 transports) { 2649 if (params.sack_delay) { 2650 trans->sackdelay = 2651 msecs_to_jiffies(params.sack_delay); 2652 trans->param_flags = 2653 (trans->param_flags & ~SPP_SACKDELAY) | 2654 SPP_SACKDELAY_ENABLE; 2655 } 2656 if (params.sack_freq == 1) { 2657 trans->param_flags = 2658 (trans->param_flags & ~SPP_SACKDELAY) | 2659 SPP_SACKDELAY_DISABLE; 2660 } else if (params.sack_freq > 1) { 2661 trans->sackfreq = params.sack_freq; 2662 trans->param_flags = 2663 (trans->param_flags & ~SPP_SACKDELAY) | 2664 SPP_SACKDELAY_ENABLE; 2665 } 2666 } 2667 } 2668 2669 return 0; 2670 } 2671 2672 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2673 * 2674 * Applications can specify protocol parameters for the default association 2675 * initialization. The option name argument to setsockopt() and getsockopt() 2676 * is SCTP_INITMSG. 2677 * 2678 * Setting initialization parameters is effective only on an unconnected 2679 * socket (for UDP-style sockets only future associations are effected 2680 * by the change). With TCP-style sockets, this option is inherited by 2681 * sockets derived from a listener socket. 2682 */ 2683 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2684 { 2685 struct sctp_initmsg sinit; 2686 struct sctp_sock *sp = sctp_sk(sk); 2687 2688 if (optlen != sizeof(struct sctp_initmsg)) 2689 return -EINVAL; 2690 if (copy_from_user(&sinit, optval, optlen)) 2691 return -EFAULT; 2692 2693 if (sinit.sinit_num_ostreams) 2694 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2695 if (sinit.sinit_max_instreams) 2696 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2697 if (sinit.sinit_max_attempts) 2698 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2699 if (sinit.sinit_max_init_timeo) 2700 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2701 2702 return 0; 2703 } 2704 2705 /* 2706 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2707 * 2708 * Applications that wish to use the sendto() system call may wish to 2709 * specify a default set of parameters that would normally be supplied 2710 * through the inclusion of ancillary data. 
This socket option allows 2711 * such an application to set the default sctp_sndrcvinfo structure. 2712 * The application that wishes to use this socket option simply passes 2713 * in to this call the sctp_sndrcvinfo structure defined in Section 2714 * 5.2.2) The input parameters accepted by this call include 2715 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2716 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2717 * to this call if the caller is using the UDP model. 2718 */ 2719 static int sctp_setsockopt_default_send_param(struct sock *sk, 2720 char __user *optval, 2721 unsigned int optlen) 2722 { 2723 struct sctp_sndrcvinfo info; 2724 struct sctp_association *asoc; 2725 struct sctp_sock *sp = sctp_sk(sk); 2726 2727 if (optlen != sizeof(struct sctp_sndrcvinfo)) 2728 return -EINVAL; 2729 if (copy_from_user(&info, optval, optlen)) 2730 return -EFAULT; 2731 2732 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2733 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2734 return -EINVAL; 2735 2736 if (asoc) { 2737 asoc->default_stream = info.sinfo_stream; 2738 asoc->default_flags = info.sinfo_flags; 2739 asoc->default_ppid = info.sinfo_ppid; 2740 asoc->default_context = info.sinfo_context; 2741 asoc->default_timetolive = info.sinfo_timetolive; 2742 } else { 2743 sp->default_stream = info.sinfo_stream; 2744 sp->default_flags = info.sinfo_flags; 2745 sp->default_ppid = info.sinfo_ppid; 2746 sp->default_context = info.sinfo_context; 2747 sp->default_timetolive = info.sinfo_timetolive; 2748 } 2749 2750 return 0; 2751 } 2752 2753 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2754 * 2755 * Requests that the local SCTP stack use the enclosed peer address as 2756 * the association primary. The enclosed address must be one of the 2757 * association peer's addresses. 2758 */ 2759 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2760 unsigned int optlen) 2761 { 2762 struct sctp_prim prim; 2763 struct sctp_transport *trans; 2764 2765 if (optlen != sizeof(struct sctp_prim)) 2766 return -EINVAL; 2767 2768 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2769 return -EFAULT; 2770 2771 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2772 if (!trans) 2773 return -EINVAL; 2774 2775 sctp_assoc_set_primary(trans->asoc, trans); 2776 2777 return 0; 2778 } 2779 2780 /* 2781 * 7.1.5 SCTP_NODELAY 2782 * 2783 * Turn on/off any Nagle-like algorithm. This means that packets are 2784 * generally sent as soon as possible and no unnecessary delays are 2785 * introduced, at the cost of more packets in the network. Expects an 2786 * integer boolean flag. 2787 */ 2788 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2789 unsigned int optlen) 2790 { 2791 int val; 2792 2793 if (optlen < sizeof(int)) 2794 return -EINVAL; 2795 if (get_user(val, (int __user *)optval)) 2796 return -EFAULT; 2797 2798 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2799 return 0; 2800 } 2801 2802 /* 2803 * 2804 * 7.1.1 SCTP_RTOINFO 2805 * 2806 * The protocol parameters used to initialize and bound retransmission 2807 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2808 * and modify these parameters. 2809 * All parameters are time values, in milliseconds. A value of 0, when 2810 * modifying the parameters, indicates that the current value should not 2811 * be changed. 
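 *
 * An illustrative userspace sketch (assumes <netinet/sctp.h>; 'sd' and
 * 'assoc_id' are hypothetical): tighten the RTO bounds of one association,
 * leaving srto_initial at 0 so it stays unchanged:
 *
 *	struct sctp_rtoinfo rto;
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = assoc_id;
 *	rto.srto_min = 100;
 *	rto.srto_max = 2000;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));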
2812 * 2813 */ 2814 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2815 { 2816 struct sctp_rtoinfo rtoinfo; 2817 struct sctp_association *asoc; 2818 unsigned long rto_min, rto_max; 2819 struct sctp_sock *sp = sctp_sk(sk); 2820 2821 if (optlen != sizeof (struct sctp_rtoinfo)) 2822 return -EINVAL; 2823 2824 if (copy_from_user(&rtoinfo, optval, optlen)) 2825 return -EFAULT; 2826 2827 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2828 2829 /* Set the values to the specific association */ 2830 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2831 return -EINVAL; 2832 2833 rto_max = rtoinfo.srto_max; 2834 rto_min = rtoinfo.srto_min; 2835 2836 if (rto_max) 2837 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2838 else 2839 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2840 2841 if (rto_min) 2842 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2843 else 2844 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2845 2846 if (rto_min > rto_max) 2847 return -EINVAL; 2848 2849 if (asoc) { 2850 if (rtoinfo.srto_initial != 0) 2851 asoc->rto_initial = 2852 msecs_to_jiffies(rtoinfo.srto_initial); 2853 asoc->rto_max = rto_max; 2854 asoc->rto_min = rto_min; 2855 } else { 2856 /* If there is no association or the association-id = 0 2857 * set the values to the endpoint. 2858 */ 2859 if (rtoinfo.srto_initial != 0) 2860 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2861 sp->rtoinfo.srto_max = rto_max; 2862 sp->rtoinfo.srto_min = rto_min; 2863 } 2864 2865 return 0; 2866 } 2867 2868 /* 2869 * 2870 * 7.1.2 SCTP_ASSOCINFO 2871 * 2872 * This option is used to tune the maximum retransmission attempts 2873 * of the association. 2874 * Returns an error if the new association retransmission value is 2875 * greater than the sum of the retransmission value of the peer. 2876 * See [SCTP] for more information. 2877 * 2878 */ 2879 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2880 { 2881 2882 struct sctp_assocparams assocparams; 2883 struct sctp_association *asoc; 2884 2885 if (optlen != sizeof(struct sctp_assocparams)) 2886 return -EINVAL; 2887 if (copy_from_user(&assocparams, optval, optlen)) 2888 return -EFAULT; 2889 2890 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2891 2892 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2893 return -EINVAL; 2894 2895 /* Set the values to the specific association */ 2896 if (asoc) { 2897 if (assocparams.sasoc_asocmaxrxt != 0) { 2898 __u32 path_sum = 0; 2899 int paths = 0; 2900 struct sctp_transport *peer_addr; 2901 2902 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 2903 transports) { 2904 path_sum += peer_addr->pathmaxrxt; 2905 paths++; 2906 } 2907 2908 /* Only validate asocmaxrxt if we have more than 2909 * one path/transport. We do this because path 2910 * retransmissions are only counted when we have more 2911 * then one path. 
2912 */ 2913 if (paths > 1 && 2914 assocparams.sasoc_asocmaxrxt > path_sum) 2915 return -EINVAL; 2916 2917 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 2918 } 2919 2920 if (assocparams.sasoc_cookie_life != 0) 2921 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 2922 } else { 2923 /* Set the values to the endpoint */ 2924 struct sctp_sock *sp = sctp_sk(sk); 2925 2926 if (assocparams.sasoc_asocmaxrxt != 0) 2927 sp->assocparams.sasoc_asocmaxrxt = 2928 assocparams.sasoc_asocmaxrxt; 2929 if (assocparams.sasoc_cookie_life != 0) 2930 sp->assocparams.sasoc_cookie_life = 2931 assocparams.sasoc_cookie_life; 2932 } 2933 return 0; 2934 } 2935 2936 /* 2937 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 2938 * 2939 * This socket option is a boolean flag which turns on or off mapped V4 2940 * addresses. If this option is turned on and the socket is type 2941 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 2942 * If this option is turned off, then no mapping will be done of V4 2943 * addresses and a user will receive both PF_INET6 and PF_INET type 2944 * addresses on the socket. 2945 */ 2946 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 2947 { 2948 int val; 2949 struct sctp_sock *sp = sctp_sk(sk); 2950 2951 if (optlen < sizeof(int)) 2952 return -EINVAL; 2953 if (get_user(val, (int __user *)optval)) 2954 return -EFAULT; 2955 if (val) 2956 sp->v4mapped = 1; 2957 else 2958 sp->v4mapped = 0; 2959 2960 return 0; 2961 } 2962 2963 /* 2964 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 2965 * This option will get or set the maximum size to put in any outgoing 2966 * SCTP DATA chunk. If a message is larger than this size it will be 2967 * fragmented by SCTP into the specified size. Note that the underlying 2968 * SCTP implementation may fragment into smaller sized chunks when the 2969 * PMTU of the underlying association is smaller than the value set by 2970 * the user. The default value for this option is '0' which indicates 2971 * the user is NOT limiting fragmentation and only the PMTU will effect 2972 * SCTP's choice of DATA chunk size. Note also that values set larger 2973 * than the maximum size of an IP datagram will effectively let SCTP 2974 * control fragmentation (i.e. the same as setting this option to 0). 2975 * 2976 * The following structure is used to access and modify this parameter: 2977 * 2978 * struct sctp_assoc_value { 2979 * sctp_assoc_t assoc_id; 2980 * uint32_t assoc_value; 2981 * }; 2982 * 2983 * assoc_id: This parameter is ignored for one-to-one style sockets. 2984 * For one-to-many style sockets this parameter indicates which 2985 * association the user is performing an action upon. Note that if 2986 * this field's value is zero then the endpoints default value is 2987 * changed (effecting future associations only). 2988 * assoc_value: This parameter specifies the maximum size in bytes. 
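 *
 * An illustrative userspace sketch (assumes <netinet/sctp.h>; 'sd' is a
 * hypothetical SCTP socket): cap outgoing DATA chunks at 1200 bytes for the
 * endpoint default (assoc_id of zero), so future associations inherit it:
 *
 *	struct sctp_assoc_value av;
 *
 *	av.assoc_id = 0;
 *	av.assoc_value = 1200;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));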
*/ 2990 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 2991 { 2992 struct sctp_assoc_value params; 2993 struct sctp_association *asoc; 2994 struct sctp_sock *sp = sctp_sk(sk); 2995 int val; 2996 2997 if (optlen == sizeof(int)) { 2998 pr_warn("Use of int in maxseg socket option deprecated\n"); 2999 pr_warn("Use struct sctp_assoc_value instead\n"); 3000 if (copy_from_user(&val, optval, optlen)) 3001 return -EFAULT; 3002 params.assoc_id = 0; 3003 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3004 if (copy_from_user(&params, optval, optlen)) 3005 return -EFAULT; 3006 val = params.assoc_value; 3007 } else 3008 return -EINVAL; 3009 3010 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3011 return -EINVAL; 3012 3013 asoc = sctp_id2assoc(sk, params.assoc_id); 3014 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3015 return -EINVAL; 3016 3017 if (asoc) { 3018 if (val == 0) { 3019 val = asoc->pathmtu; 3020 val -= sp->pf->af->net_header_len; 3021 val -= sizeof(struct sctphdr) + 3022 sizeof(struct sctp_data_chunk); 3023 } 3024 asoc->user_frag = val; 3025 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3026 } else { 3027 sp->user_frag = val; 3028 } 3029 3030 return 0; 3031 } 3032 3033 3034 /* 3035 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3036 * 3037 * Requests that the peer mark the enclosed address as the association 3038 * primary. The enclosed address must be one of the association's 3039 * locally bound addresses. The following structure is used to make a 3040 * set primary request: 3041 */ 3042 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3043 unsigned int optlen) 3044 { 3045 struct net *net = sock_net(sk); 3046 struct sctp_sock *sp; 3047 struct sctp_association *asoc = NULL; 3048 struct sctp_setpeerprim prim; 3049 struct sctp_chunk *chunk; 3050 struct sctp_af *af; 3051 int err; 3052 3053 sp = sctp_sk(sk); 3054 3055 if (!net->sctp.addip_enable) 3056 return -EPERM; 3057 3058 if (optlen != sizeof(struct sctp_setpeerprim)) 3059 return -EINVAL; 3060 3061 if (copy_from_user(&prim, optval, optlen)) 3062 return -EFAULT; 3063 3064 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3065 if (!asoc) 3066 return -EINVAL; 3067 3068 if (!asoc->peer.asconf_capable) 3069 return -EPERM; 3070 3071 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3072 return -EPERM; 3073 3074 if (!sctp_state(asoc, ESTABLISHED)) 3075 return -ENOTCONN; 3076 3077 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3078 if (!af) 3079 return -EINVAL; 3080 3081 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3082 return -EADDRNOTAVAIL; 3083 3084 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3085 return -EADDRNOTAVAIL; 3086 3087 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3088 chunk = sctp_make_asconf_set_prim(asoc, 3089 (union sctp_addr *)&prim.sspp_addr); 3090 if (!chunk) 3091 return -ENOMEM; 3092 3093 err = sctp_send_asconf(asoc, chunk); 3094 3095 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3096 3097 return err; 3098 } 3099 3100 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3101 unsigned int optlen) 3102 { 3103 struct sctp_setadaptation adaptation; 3104 3105 if (optlen != sizeof(struct sctp_setadaptation)) 3106 return -EINVAL; 3107 if (copy_from_user(&adaptation, optval, optlen)) 3108 return -EFAULT; 3109 3110 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3111 3112 return 0; 3113 } 3114 3115 /* 3116 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3117 * 3118 * The context field in the sctp_sndrcvinfo structure is normally only 3119 * used when a failed message is retrieved holding the value that was 3120 * sent down on the actual send call. This option allows the setting of 3121 * a default context on an association basis that will be received on 3122 * reading messages from the peer. This is especially helpful in the 3123 * one-2-many model for an application to keep some reference to an 3124 * internal state machine that is processing messages on the 3125 * association. Note that the setting of this value only affects 3126 * received messages from the peer and does not affect the value that is 3127 * saved with outbound messages. 3128 */ 3129 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3130 unsigned int optlen) 3131 { 3132 struct sctp_assoc_value params; 3133 struct sctp_sock *sp; 3134 struct sctp_association *asoc; 3135 3136 if (optlen != sizeof(struct sctp_assoc_value)) 3137 return -EINVAL; 3138 if (copy_from_user(&params, optval, optlen)) 3139 return -EFAULT; 3140 3141 sp = sctp_sk(sk); 3142 3143 if (params.assoc_id != 0) { 3144 asoc = sctp_id2assoc(sk, params.assoc_id); 3145 if (!asoc) 3146 return -EINVAL; 3147 asoc->default_rcv_context = params.assoc_value; 3148 } else { 3149 sp->default_rcv_context = params.assoc_value; 3150 } 3151 3152 return 0; 3153 } 3154 3155 /* 3156 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3157 * 3158 * This option will at a minimum specify if the implementation is doing 3159 * fragmented interleave. Fragmented interleave, for a one to many 3160 * socket, is when subsequent calls to receive a message may return 3161 * parts of messages from different associations. Some implementations 3162 * may allow you to turn this value on or off. If so, when turned off, 3163 * no fragment interleave will occur (which will cause a head of line 3164 * blocking amongst multiple associations sharing the same one to many 3165 * socket). When this option is turned on, then each receive call may 3166 * come from a different association (thus the user must receive data 3167 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3168 * association each receive belongs to). 3169 * 3170 * This option takes a boolean value. A non-zero value indicates that 3171 * fragmented interleave is on. A value of zero indicates that 3172 * fragmented interleave is off. 3173 * 3174 * Note that it is important that an implementation that allows this 3175 * option to be turned on, have it off by default. Otherwise an unaware 3176 * application using the one to many model may become confused and act 3177 * incorrectly. 3178 */ 3179 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3180 char __user *optval, 3181 unsigned int optlen) 3182 { 3183 int val; 3184 3185 if (optlen != sizeof(int)) 3186 return -EINVAL; 3187 if (get_user(val, (int __user *)optval)) 3188 return -EFAULT; 3189 3190 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3191 3192 return 0; 3193 } 3194 3195 /* 3196 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3197 * (SCTP_PARTIAL_DELIVERY_POINT) 3198 * 3199 * This option will set or get the SCTP partial delivery point. This 3200 * point is the size of a message where the partial delivery API will be 3201 * invoked to help free up rwnd space for the peer. Setting this to a 3202 * lower value will cause partial deliveries to happen more often. The 3203 * call's argument is an integer that sets or gets the partial delivery 3204 * point. Note also that the call will fail if the user attempts to set 3205 * this value larger than the socket receive buffer size. 3206 * 3207 * Note that any single message having a length smaller than or equal to 3208 * the SCTP partial delivery point will be delivered in one single read 3209 * call as long as the user provided buffer is large enough to hold the 3210 * message. 3211 */ 3212 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3213 char __user *optval, 3214 unsigned int optlen) 3215 { 3216 u32 val; 3217 3218 if (optlen != sizeof(u32)) 3219 return -EINVAL; 3220 if (get_user(val, (int __user *)optval)) 3221 return -EFAULT; 3222 3223 /* Note: We double the receive buffer from what the user sets 3224 * it to be, also initial rwnd is based on rcvbuf/2. 3225 */ 3226 if (val > (sk->sk_rcvbuf >> 1)) 3227 return -EINVAL; 3228 3229 sctp_sk(sk)->pd_point = val; 3230 3231 return 0; /* is this the right error code? */ 3232 } 3233 3234 /* 3235 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3236 * 3237 * This option will allow a user to change the maximum burst of packets 3238 * that can be emitted by this association. Note that the default value 3239 * is 4, and some implementations may restrict this setting so that it 3240 * can only be lowered. 3241 * 3242 * NOTE: This text doesn't seem right. Do this on a socket basis with 3243 * future associations inheriting the socket value. 3244 */ 3245 static int sctp_setsockopt_maxburst(struct sock *sk, 3246 char __user *optval, 3247 unsigned int optlen) 3248 { 3249 struct sctp_assoc_value params; 3250 struct sctp_sock *sp; 3251 struct sctp_association *asoc; 3252 int val; 3253 int assoc_id = 0; 3254 3255 if (optlen == sizeof(int)) { 3256 pr_warn("Use of int in max_burst socket option deprecated\n"); 3257 pr_warn("Use struct sctp_assoc_value instead\n"); 3258 if (copy_from_user(&val, optval, optlen)) 3259 return -EFAULT; 3260 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3261 if (copy_from_user(&params, optval, optlen)) 3262 return -EFAULT; 3263 val = params.assoc_value; 3264 assoc_id = params.assoc_id; 3265 } else 3266 return -EINVAL; 3267 3268 sp = sctp_sk(sk); 3269 3270 if (assoc_id != 0) { 3271 asoc = sctp_id2assoc(sk, assoc_id); 3272 if (!asoc) 3273 return -EINVAL; 3274 asoc->max_burst = val; 3275 } else 3276 sp->max_burst = val; 3277 3278 return 0; 3279 } 3280 3281 /* 3282 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3283 * 3284 * This set option adds a chunk type that the user is requesting to be 3285 * received only in an authenticated way. Changes to the list of chunks 3286 * will only affect future associations on the socket. 3287 */ 3288 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3289 char __user *optval, 3290 unsigned int optlen) 3291 { 3292 struct net *net = sock_net(sk); 3293 struct sctp_authchunk val; 3294 3295 if (!net->sctp.auth_enable) 3296 return -EACCES; 3297 3298 if (optlen != sizeof(struct sctp_authchunk)) 3299 return -EINVAL; 3300 if (copy_from_user(&val, optval, optlen)) 3301 return -EFAULT; 3302 3303 switch (val.sauth_chunk) { 3304 case SCTP_CID_INIT: 3305 case SCTP_CID_INIT_ACK: 3306 case SCTP_CID_SHUTDOWN_COMPLETE: 3307 case SCTP_CID_AUTH: 3308 return -EINVAL; 3309 } 3310 3311 /* add this chunk id to the endpoint */ 3312 return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk); 3313 } 3314 3315 /* 3316 * 7.1.19.
Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3317 * 3318 * This option gets or sets the list of HMAC algorithms that the local 3319 * endpoint requires the peer to use. 3320 */ 3321 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3322 char __user *optval, 3323 unsigned int optlen) 3324 { 3325 struct net *net = sock_net(sk); 3326 struct sctp_hmacalgo *hmacs; 3327 u32 idents; 3328 int err; 3329 3330 if (!net->sctp.auth_enable) 3331 return -EACCES; 3332 3333 if (optlen < sizeof(struct sctp_hmacalgo)) 3334 return -EINVAL; 3335 3336 hmacs= memdup_user(optval, optlen); 3337 if (IS_ERR(hmacs)) 3338 return PTR_ERR(hmacs); 3339 3340 idents = hmacs->shmac_num_idents; 3341 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3342 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3343 err = -EINVAL; 3344 goto out; 3345 } 3346 3347 err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs); 3348 out: 3349 kfree(hmacs); 3350 return err; 3351 } 3352 3353 /* 3354 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3355 * 3356 * This option will set a shared secret key which is used to build an 3357 * association shared key. 3358 */ 3359 static int sctp_setsockopt_auth_key(struct sock *sk, 3360 char __user *optval, 3361 unsigned int optlen) 3362 { 3363 struct net *net = sock_net(sk); 3364 struct sctp_authkey *authkey; 3365 struct sctp_association *asoc; 3366 int ret; 3367 3368 if (!net->sctp.auth_enable) 3369 return -EACCES; 3370 3371 if (optlen <= sizeof(struct sctp_authkey)) 3372 return -EINVAL; 3373 3374 authkey= memdup_user(optval, optlen); 3375 if (IS_ERR(authkey)) 3376 return PTR_ERR(authkey); 3377 3378 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3379 ret = -EINVAL; 3380 goto out; 3381 } 3382 3383 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3384 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3385 ret = -EINVAL; 3386 goto out; 3387 } 3388 3389 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3390 out: 3391 kzfree(authkey); 3392 return ret; 3393 } 3394 3395 /* 3396 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3397 * 3398 * This option will get or set the active shared key to be used to build 3399 * the association shared key. 3400 */ 3401 static int sctp_setsockopt_active_key(struct sock *sk, 3402 char __user *optval, 3403 unsigned int optlen) 3404 { 3405 struct net *net = sock_net(sk); 3406 struct sctp_authkeyid val; 3407 struct sctp_association *asoc; 3408 3409 if (!net->sctp.auth_enable) 3410 return -EACCES; 3411 3412 if (optlen != sizeof(struct sctp_authkeyid)) 3413 return -EINVAL; 3414 if (copy_from_user(&val, optval, optlen)) 3415 return -EFAULT; 3416 3417 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3418 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3419 return -EINVAL; 3420 3421 return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc, 3422 val.scact_keynumber); 3423 } 3424 3425 /* 3426 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3427 * 3428 * This set option will delete a shared secret key from use. 
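 *
 * An illustrative userspace sketch (assumes <netinet/sctp.h>, SCTP AUTH
 * enabled via net.sctp.auth_enable, and hypothetical 'sd'/'assoc_id'
 * variables): retire key number 1 once it is no longer the active key:
 *
 *	struct sctp_authkeyid akey;
 *
 *	memset(&akey, 0, sizeof(akey));
 *	akey.scact_assoc_id = assoc_id;
 *	akey.scact_keynumber = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_DELETE_KEY, &akey, sizeof(akey));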
3429 */ 3430 static int sctp_setsockopt_del_key(struct sock *sk, 3431 char __user *optval, 3432 unsigned int optlen) 3433 { 3434 struct net *net = sock_net(sk); 3435 struct sctp_authkeyid val; 3436 struct sctp_association *asoc; 3437 3438 if (!net->sctp.auth_enable) 3439 return -EACCES; 3440 3441 if (optlen != sizeof(struct sctp_authkeyid)) 3442 return -EINVAL; 3443 if (copy_from_user(&val, optval, optlen)) 3444 return -EFAULT; 3445 3446 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3447 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3448 return -EINVAL; 3449 3450 return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc, 3451 val.scact_keynumber); 3452 3453 } 3454 3455 /* 3456 * 8.1.23 SCTP_AUTO_ASCONF 3457 * 3458 * This option will enable or disable the use of the automatic generation of 3459 * ASCONF chunks to add and delete addresses to an existing association. Note 3460 * that this option has two caveats namely: a) it only affects sockets that 3461 * are bound to all addresses available to the SCTP stack, and b) the system 3462 * administrator may have an overriding control that turns the ASCONF feature 3463 * off no matter what setting the socket option may have. 3464 * This option expects an integer boolean flag, where a non-zero value turns on 3465 * the option, and a zero value turns off the option. 3466 * Note. In this implementation, socket operation overrides default parameter 3467 * being set by sysctl as well as FreeBSD implementation 3468 */ 3469 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3470 unsigned int optlen) 3471 { 3472 int val; 3473 struct sctp_sock *sp = sctp_sk(sk); 3474 3475 if (optlen < sizeof(int)) 3476 return -EINVAL; 3477 if (get_user(val, (int __user *)optval)) 3478 return -EFAULT; 3479 if (!sctp_is_ep_boundall(sk) && val) 3480 return -EINVAL; 3481 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3482 return 0; 3483 3484 if (val == 0 && sp->do_auto_asconf) { 3485 list_del(&sp->auto_asconf_list); 3486 sp->do_auto_asconf = 0; 3487 } else if (val && !sp->do_auto_asconf) { 3488 list_add_tail(&sp->auto_asconf_list, 3489 &sock_net(sk)->sctp.auto_asconf_splist); 3490 sp->do_auto_asconf = 1; 3491 } 3492 return 0; 3493 } 3494 3495 3496 /* 3497 * SCTP_PEER_ADDR_THLDS 3498 * 3499 * This option allows us to alter the partially failed threshold for one or all 3500 * transports in an association. 
See Section 6.1 of: 3501 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3502 */ 3503 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3504 char __user *optval, 3505 unsigned int optlen) 3506 { 3507 struct sctp_paddrthlds val; 3508 struct sctp_transport *trans; 3509 struct sctp_association *asoc; 3510 3511 if (optlen < sizeof(struct sctp_paddrthlds)) 3512 return -EINVAL; 3513 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3514 sizeof(struct sctp_paddrthlds))) 3515 return -EFAULT; 3516 3517 3518 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3519 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3520 if (!asoc) 3521 return -ENOENT; 3522 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3523 transports) { 3524 if (val.spt_pathmaxrxt) 3525 trans->pathmaxrxt = val.spt_pathmaxrxt; 3526 trans->pf_retrans = val.spt_pathpfthld; 3527 } 3528 3529 if (val.spt_pathmaxrxt) 3530 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3531 asoc->pf_retrans = val.spt_pathpfthld; 3532 } else { 3533 trans = sctp_addr_id2transport(sk, &val.spt_address, 3534 val.spt_assoc_id); 3535 if (!trans) 3536 return -ENOENT; 3537 3538 if (val.spt_pathmaxrxt) 3539 trans->pathmaxrxt = val.spt_pathmaxrxt; 3540 trans->pf_retrans = val.spt_pathpfthld; 3541 } 3542 3543 return 0; 3544 } 3545 3546 /* API 6.2 setsockopt(), getsockopt() 3547 * 3548 * Applications use setsockopt() and getsockopt() to set or retrieve 3549 * socket options. Socket options are used to change the default 3550 * behavior of sockets calls. They are described in Section 7. 3551 * 3552 * The syntax is: 3553 * 3554 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3555 * int __user *optlen); 3556 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3557 * int optlen); 3558 * 3559 * sd - the socket descript. 3560 * level - set to IPPROTO_SCTP for all SCTP options. 3561 * optname - the option name. 3562 * optval - the buffer to store the value of the option. 3563 * optlen - the size of the buffer. 3564 */ 3565 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3566 char __user *optval, unsigned int optlen) 3567 { 3568 int retval = 0; 3569 3570 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3571 3572 /* I can hardly begin to describe how wrong this is. This is 3573 * so broken as to be worse than useless. The API draft 3574 * REALLY is NOT helpful here... I am not convinced that the 3575 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3576 * are at all well-founded. 3577 */ 3578 if (level != SOL_SCTP) { 3579 struct sctp_af *af = sctp_sk(sk)->pf->af; 3580 retval = af->setsockopt(sk, level, optname, optval, optlen); 3581 goto out_nounlock; 3582 } 3583 3584 sctp_lock_sock(sk); 3585 3586 switch (optname) { 3587 case SCTP_SOCKOPT_BINDX_ADD: 3588 /* 'optlen' is the size of the addresses buffer. */ 3589 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3590 optlen, SCTP_BINDX_ADD_ADDR); 3591 break; 3592 3593 case SCTP_SOCKOPT_BINDX_REM: 3594 /* 'optlen' is the size of the addresses buffer. */ 3595 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3596 optlen, SCTP_BINDX_REM_ADDR); 3597 break; 3598 3599 case SCTP_SOCKOPT_CONNECTX_OLD: 3600 /* 'optlen' is the size of the addresses buffer. 
*/ 3601 retval = sctp_setsockopt_connectx_old(sk, 3602 (struct sockaddr __user *)optval, 3603 optlen); 3604 break; 3605 3606 case SCTP_SOCKOPT_CONNECTX: 3607 /* 'optlen' is the size of the addresses buffer. */ 3608 retval = sctp_setsockopt_connectx(sk, 3609 (struct sockaddr __user *)optval, 3610 optlen); 3611 break; 3612 3613 case SCTP_DISABLE_FRAGMENTS: 3614 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3615 break; 3616 3617 case SCTP_EVENTS: 3618 retval = sctp_setsockopt_events(sk, optval, optlen); 3619 break; 3620 3621 case SCTP_AUTOCLOSE: 3622 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3623 break; 3624 3625 case SCTP_PEER_ADDR_PARAMS: 3626 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3627 break; 3628 3629 case SCTP_DELAYED_SACK: 3630 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3631 break; 3632 case SCTP_PARTIAL_DELIVERY_POINT: 3633 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3634 break; 3635 3636 case SCTP_INITMSG: 3637 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3638 break; 3639 case SCTP_DEFAULT_SEND_PARAM: 3640 retval = sctp_setsockopt_default_send_param(sk, optval, 3641 optlen); 3642 break; 3643 case SCTP_PRIMARY_ADDR: 3644 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3645 break; 3646 case SCTP_SET_PEER_PRIMARY_ADDR: 3647 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3648 break; 3649 case SCTP_NODELAY: 3650 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3651 break; 3652 case SCTP_RTOINFO: 3653 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3654 break; 3655 case SCTP_ASSOCINFO: 3656 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3657 break; 3658 case SCTP_I_WANT_MAPPED_V4_ADDR: 3659 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3660 break; 3661 case SCTP_MAXSEG: 3662 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3663 break; 3664 case SCTP_ADAPTATION_LAYER: 3665 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3666 break; 3667 case SCTP_CONTEXT: 3668 retval = sctp_setsockopt_context(sk, optval, optlen); 3669 break; 3670 case SCTP_FRAGMENT_INTERLEAVE: 3671 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3672 break; 3673 case SCTP_MAX_BURST: 3674 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3675 break; 3676 case SCTP_AUTH_CHUNK: 3677 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3678 break; 3679 case SCTP_HMAC_IDENT: 3680 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3681 break; 3682 case SCTP_AUTH_KEY: 3683 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3684 break; 3685 case SCTP_AUTH_ACTIVE_KEY: 3686 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3687 break; 3688 case SCTP_AUTH_DELETE_KEY: 3689 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3690 break; 3691 case SCTP_AUTO_ASCONF: 3692 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3693 break; 3694 case SCTP_PEER_ADDR_THLDS: 3695 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3696 break; 3697 default: 3698 retval = -ENOPROTOOPT; 3699 break; 3700 } 3701 3702 sctp_release_sock(sk); 3703 3704 out_nounlock: 3705 return retval; 3706 } 3707 3708 /* API 3.1.6 connect() - UDP Style Syntax 3709 * 3710 * An application may use the connect() call in the UDP model to initiate an 3711 * association without sending data. 
3712 * 3713 * The syntax is: 3714 * 3715 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3716 * 3717 * sd: the socket descriptor to have a new association added to. 3718 * 3719 * nam: the address structure (either struct sockaddr_in or struct 3720 * sockaddr_in6 defined in RFC2553 [7]). 3721 * 3722 * len: the size of the address. 3723 */ 3724 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3725 int addr_len) 3726 { 3727 int err = 0; 3728 struct sctp_af *af; 3729 3730 sctp_lock_sock(sk); 3731 3732 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3733 addr, addr_len); 3734 3735 /* Validate addr_len before calling common connect/connectx routine. */ 3736 af = sctp_get_af_specific(addr->sa_family); 3737 if (!af || addr_len < af->sockaddr_len) { 3738 err = -EINVAL; 3739 } else { 3740 /* Pass correct addr len to common routine (so it knows there 3741 * is only one address being passed. 3742 */ 3743 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3744 } 3745 3746 sctp_release_sock(sk); 3747 return err; 3748 } 3749 3750 /* FIXME: Write comments. */ 3751 static int sctp_disconnect(struct sock *sk, int flags) 3752 { 3753 return -EOPNOTSUPP; /* STUB */ 3754 } 3755 3756 /* 4.1.4 accept() - TCP Style Syntax 3757 * 3758 * Applications use accept() call to remove an established SCTP 3759 * association from the accept queue of the endpoint. A new socket 3760 * descriptor will be returned from accept() to represent the newly 3761 * formed association. 3762 */ 3763 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3764 { 3765 struct sctp_sock *sp; 3766 struct sctp_endpoint *ep; 3767 struct sock *newsk = NULL; 3768 struct sctp_association *asoc; 3769 long timeo; 3770 int error = 0; 3771 3772 sctp_lock_sock(sk); 3773 3774 sp = sctp_sk(sk); 3775 ep = sp->ep; 3776 3777 if (!sctp_style(sk, TCP)) { 3778 error = -EOPNOTSUPP; 3779 goto out; 3780 } 3781 3782 if (!sctp_sstate(sk, LISTENING)) { 3783 error = -EINVAL; 3784 goto out; 3785 } 3786 3787 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3788 3789 error = sctp_wait_for_accept(sk, timeo); 3790 if (error) 3791 goto out; 3792 3793 /* We treat the list of associations on the endpoint as the accept 3794 * queue and pick the first association on the list. 3795 */ 3796 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3797 3798 newsk = sp->pf->create_accept_sk(sk, asoc); 3799 if (!newsk) { 3800 error = -ENOMEM; 3801 goto out; 3802 } 3803 3804 /* Populate the fields of the newsk from the oldsk and migrate the 3805 * asoc to the newsk. 3806 */ 3807 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3808 3809 out: 3810 sctp_release_sock(sk); 3811 *err = error; 3812 return newsk; 3813 } 3814 3815 /* The SCTP ioctl handler. */ 3816 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3817 { 3818 int rc = -ENOTCONN; 3819 3820 sctp_lock_sock(sk); 3821 3822 /* 3823 * SEQPACKET-style sockets in LISTENING state are valid, for 3824 * SCTP, so only discard TCP-style sockets in LISTENING state. 3825 */ 3826 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3827 goto out; 3828 3829 switch (cmd) { 3830 case SIOCINQ: { 3831 struct sk_buff *skb; 3832 unsigned int amount = 0; 3833 3834 skb = skb_peek(&sk->sk_receive_queue); 3835 if (skb != NULL) { 3836 /* 3837 * We will only return the amount of this packet since 3838 * that is all that will be read. 
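 *
 * User space can therefore poll the size of the next message before
 * reading it, e.g. (illustrative sketch; on Linux SIOCINQ is defined
 * as FIONREAD, and 'sd' is the SCTP socket descriptor):
 *
 *   int avail = 0;
 *
 *   if (ioctl(sd, SIOCINQ, &avail) == 0)
 *       the next message queued on 'sd' is 'avail' bytes long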
3839 */ 3840 amount = skb->len; 3841 } 3842 rc = put_user(amount, (int __user *)arg); 3843 break; 3844 } 3845 default: 3846 rc = -ENOIOCTLCMD; 3847 break; 3848 } 3849 out: 3850 sctp_release_sock(sk); 3851 return rc; 3852 } 3853 3854 /* This is the function which gets called during socket creation to 3855 * initialized the SCTP-specific portion of the sock. 3856 * The sock structure should already be zero-filled memory. 3857 */ 3858 static int sctp_init_sock(struct sock *sk) 3859 { 3860 struct net *net = sock_net(sk); 3861 struct sctp_sock *sp; 3862 3863 pr_debug("%s: sk:%p\n", __func__, sk); 3864 3865 sp = sctp_sk(sk); 3866 3867 /* Initialize the SCTP per socket area. */ 3868 switch (sk->sk_type) { 3869 case SOCK_SEQPACKET: 3870 sp->type = SCTP_SOCKET_UDP; 3871 break; 3872 case SOCK_STREAM: 3873 sp->type = SCTP_SOCKET_TCP; 3874 break; 3875 default: 3876 return -ESOCKTNOSUPPORT; 3877 } 3878 3879 /* Initialize default send parameters. These parameters can be 3880 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 3881 */ 3882 sp->default_stream = 0; 3883 sp->default_ppid = 0; 3884 sp->default_flags = 0; 3885 sp->default_context = 0; 3886 sp->default_timetolive = 0; 3887 3888 sp->default_rcv_context = 0; 3889 sp->max_burst = net->sctp.max_burst; 3890 3891 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 3892 3893 /* Initialize default setup parameters. These parameters 3894 * can be modified with the SCTP_INITMSG socket option or 3895 * overridden by the SCTP_INIT CMSG. 3896 */ 3897 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 3898 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 3899 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 3900 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 3901 3902 /* Initialize default RTO related parameters. These parameters can 3903 * be modified for with the SCTP_RTOINFO socket option. 3904 */ 3905 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 3906 sp->rtoinfo.srto_max = net->sctp.rto_max; 3907 sp->rtoinfo.srto_min = net->sctp.rto_min; 3908 3909 /* Initialize default association related parameters. These parameters 3910 * can be modified with the SCTP_ASSOCINFO socket option. 3911 */ 3912 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 3913 sp->assocparams.sasoc_number_peer_destinations = 0; 3914 sp->assocparams.sasoc_peer_rwnd = 0; 3915 sp->assocparams.sasoc_local_rwnd = 0; 3916 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 3917 3918 /* Initialize default event subscriptions. By default, all the 3919 * options are off. 3920 */ 3921 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 3922 3923 /* Default Peer Address Parameters. These defaults can 3924 * be modified via SCTP_PEER_ADDR_PARAMS 3925 */ 3926 sp->hbinterval = net->sctp.hb_interval; 3927 sp->pathmaxrxt = net->sctp.max_retrans_path; 3928 sp->pathmtu = 0; // allow default discovery 3929 sp->sackdelay = net->sctp.sack_timeout; 3930 sp->sackfreq = 2; 3931 sp->param_flags = SPP_HB_ENABLE | 3932 SPP_PMTUD_ENABLE | 3933 SPP_SACKDELAY_ENABLE; 3934 3935 /* If enabled no SCTP message fragmentation will be performed. 3936 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 3937 */ 3938 sp->disable_fragments = 0; 3939 3940 /* Enable Nagle algorithm by default. */ 3941 sp->nodelay = 0; 3942 3943 /* Enable by default. */ 3944 sp->v4mapped = 1; 3945 3946 /* Auto-close idle associations after the configured 3947 * number of seconds. A value of 0 disables this 3948 * feature. 
Configure through the SCTP_AUTOCLOSE socket option, 3949 * for UDP-style sockets only. 3950 */ 3951 sp->autoclose = 0; 3952 3953 /* User specified fragmentation limit. */ 3954 sp->user_frag = 0; 3955 3956 sp->adaptation_ind = 0; 3957 3958 sp->pf = sctp_get_pf_specific(sk->sk_family); 3959 3960 /* Control variables for partial data delivery. */ 3961 atomic_set(&sp->pd_mode, 0); 3962 skb_queue_head_init(&sp->pd_lobby); 3963 sp->frag_interleave = 0; 3964 3965 /* Create a per socket endpoint structure. Even if we 3966 * change the data structure relationships, this may still 3967 * be useful for storing pre-connect address information. 3968 */ 3969 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 3970 if (!sp->ep) 3971 return -ENOMEM; 3972 3973 sp->hmac = NULL; 3974 3975 sk->sk_destruct = sctp_destruct_sock; 3976 3977 SCTP_DBG_OBJCNT_INC(sock); 3978 3979 local_bh_disable(); 3980 percpu_counter_inc(&sctp_sockets_allocated); 3981 sock_prot_inuse_add(net, sk->sk_prot, 1); 3982 if (net->sctp.default_auto_asconf) { 3983 list_add_tail(&sp->auto_asconf_list, 3984 &net->sctp.auto_asconf_splist); 3985 sp->do_auto_asconf = 1; 3986 } else 3987 sp->do_auto_asconf = 0; 3988 local_bh_enable(); 3989 3990 return 0; 3991 } 3992 3993 /* Cleanup any SCTP per socket resources. */ 3994 static void sctp_destroy_sock(struct sock *sk) 3995 { 3996 struct sctp_sock *sp; 3997 3998 pr_debug("%s: sk:%p\n", __func__, sk); 3999 4000 /* Release our hold on the endpoint. */ 4001 sp = sctp_sk(sk); 4002 /* This could happen during socket init, thus we bail out 4003 * early, since the rest of the below is not setup either. 4004 */ 4005 if (sp->ep == NULL) 4006 return; 4007 4008 if (sp->do_auto_asconf) { 4009 sp->do_auto_asconf = 0; 4010 list_del(&sp->auto_asconf_list); 4011 } 4012 sctp_endpoint_free(sp->ep); 4013 local_bh_disable(); 4014 percpu_counter_dec(&sctp_sockets_allocated); 4015 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4016 local_bh_enable(); 4017 } 4018 4019 /* Triggered when there are no references on the socket anymore */ 4020 static void sctp_destruct_sock(struct sock *sk) 4021 { 4022 struct sctp_sock *sp = sctp_sk(sk); 4023 4024 /* Free up the HMAC transform. */ 4025 crypto_free_hash(sp->hmac); 4026 4027 inet_sock_destruct(sk); 4028 } 4029 4030 /* API 4.1.7 shutdown() - TCP Style Syntax 4031 * int shutdown(int socket, int how); 4032 * 4033 * sd - the socket descriptor of the association to be closed. 4034 * how - Specifies the type of shutdown. The values are 4035 * as follows: 4036 * SHUT_RD 4037 * Disables further receive operations. No SCTP 4038 * protocol action is taken. 4039 * SHUT_WR 4040 * Disables further send operations, and initiates 4041 * the SCTP shutdown sequence. 4042 * SHUT_RDWR 4043 * Disables further send and receive operations 4044 * and initiates the SCTP shutdown sequence. 
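 *
 * A graceful teardown of a TCP-style socket can therefore use the
 * usual idiom (illustrative user-space sketch; 'sd' is the socket
 * and 'buf' just a scratch receive buffer):
 *
 *   shutdown(sd, SHUT_WR);
 *   while (recv(sd, buf, sizeof(buf), 0) > 0)
 *       ;
 *   close(sd);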
4045 */ 4046 static void sctp_shutdown(struct sock *sk, int how) 4047 { 4048 struct net *net = sock_net(sk); 4049 struct sctp_endpoint *ep; 4050 struct sctp_association *asoc; 4051 4052 if (!sctp_style(sk, TCP)) 4053 return; 4054 4055 if (how & SEND_SHUTDOWN) { 4056 ep = sctp_sk(sk)->ep; 4057 if (!list_empty(&ep->asocs)) { 4058 asoc = list_entry(ep->asocs.next, 4059 struct sctp_association, asocs); 4060 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4061 } 4062 } 4063 } 4064 4065 /* 7.2.1 Association Status (SCTP_STATUS) 4066 4067 * Applications can retrieve current status information about an 4068 * association, including association state, peer receiver window size, 4069 * number of unacked data chunks, and number of data chunks pending 4070 * receipt. This information is read-only. 4071 */ 4072 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4073 char __user *optval, 4074 int __user *optlen) 4075 { 4076 struct sctp_status status; 4077 struct sctp_association *asoc = NULL; 4078 struct sctp_transport *transport; 4079 sctp_assoc_t associd; 4080 int retval = 0; 4081 4082 if (len < sizeof(status)) { 4083 retval = -EINVAL; 4084 goto out; 4085 } 4086 4087 len = sizeof(status); 4088 if (copy_from_user(&status, optval, len)) { 4089 retval = -EFAULT; 4090 goto out; 4091 } 4092 4093 associd = status.sstat_assoc_id; 4094 asoc = sctp_id2assoc(sk, associd); 4095 if (!asoc) { 4096 retval = -EINVAL; 4097 goto out; 4098 } 4099 4100 transport = asoc->peer.primary_path; 4101 4102 status.sstat_assoc_id = sctp_assoc2id(asoc); 4103 status.sstat_state = asoc->state; 4104 status.sstat_rwnd = asoc->peer.rwnd; 4105 status.sstat_unackdata = asoc->unack_data; 4106 4107 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4108 status.sstat_instrms = asoc->c.sinit_max_instreams; 4109 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4110 status.sstat_fragmentation_point = asoc->frag_point; 4111 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4112 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4113 transport->af_specific->sockaddr_len); 4114 /* Map ipv4 address into v4-mapped-on-v6 address. */ 4115 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4116 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4117 status.sstat_primary.spinfo_state = transport->state; 4118 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4119 status.sstat_primary.spinfo_srtt = transport->srtt; 4120 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4121 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4122 4123 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4124 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4125 4126 if (put_user(len, optlen)) { 4127 retval = -EFAULT; 4128 goto out; 4129 } 4130 4131 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4132 __func__, len, status.sstat_state, status.sstat_rwnd, 4133 status.sstat_assoc_id); 4134 4135 if (copy_to_user(optval, &status, len)) { 4136 retval = -EFAULT; 4137 goto out; 4138 } 4139 4140 out: 4141 return retval; 4142 } 4143 4144 4145 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4146 * 4147 * Applications can retrieve information about a specific peer address 4148 * of an association, including its reachability state, congestion 4149 * window, and retransmission timer values. This information is 4150 * read-only. 
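 *
 * A minimal query looks like this (illustrative sketch; 'peer' is
 * assumed to be a sockaddr_in/sockaddr_in6 holding the peer address
 * of interest):
 *
 *   struct sctp_paddrinfo pinfo;
 *   socklen_t len = sizeof(pinfo);
 *
 *   memset(&pinfo, 0, sizeof(pinfo));
 *   memcpy(&pinfo.spinfo_address, &peer, sizeof(peer));
 *   if (!getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
 *                   &pinfo, &len))
 *       pinfo.spinfo_state, spinfo_cwnd, spinfo_srtt, spinfo_rto
 *       and spinfo_mtu now describe that path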
4151 */ 4152 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4153 char __user *optval, 4154 int __user *optlen) 4155 { 4156 struct sctp_paddrinfo pinfo; 4157 struct sctp_transport *transport; 4158 int retval = 0; 4159 4160 if (len < sizeof(pinfo)) { 4161 retval = -EINVAL; 4162 goto out; 4163 } 4164 4165 len = sizeof(pinfo); 4166 if (copy_from_user(&pinfo, optval, len)) { 4167 retval = -EFAULT; 4168 goto out; 4169 } 4170 4171 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4172 pinfo.spinfo_assoc_id); 4173 if (!transport) 4174 return -EINVAL; 4175 4176 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4177 pinfo.spinfo_state = transport->state; 4178 pinfo.spinfo_cwnd = transport->cwnd; 4179 pinfo.spinfo_srtt = transport->srtt; 4180 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4181 pinfo.spinfo_mtu = transport->pathmtu; 4182 4183 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4184 pinfo.spinfo_state = SCTP_ACTIVE; 4185 4186 if (put_user(len, optlen)) { 4187 retval = -EFAULT; 4188 goto out; 4189 } 4190 4191 if (copy_to_user(optval, &pinfo, len)) { 4192 retval = -EFAULT; 4193 goto out; 4194 } 4195 4196 out: 4197 return retval; 4198 } 4199 4200 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4201 * 4202 * This option is a on/off flag. If enabled no SCTP message 4203 * fragmentation will be performed. Instead if a message being sent 4204 * exceeds the current PMTU size, the message will NOT be sent and 4205 * instead a error will be indicated to the user. 4206 */ 4207 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4208 char __user *optval, int __user *optlen) 4209 { 4210 int val; 4211 4212 if (len < sizeof(int)) 4213 return -EINVAL; 4214 4215 len = sizeof(int); 4216 val = (sctp_sk(sk)->disable_fragments == 1); 4217 if (put_user(len, optlen)) 4218 return -EFAULT; 4219 if (copy_to_user(optval, &val, len)) 4220 return -EFAULT; 4221 return 0; 4222 } 4223 4224 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4225 * 4226 * This socket option is used to specify various notifications and 4227 * ancillary data the user wishes to receive. 4228 */ 4229 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4230 int __user *optlen) 4231 { 4232 if (len <= 0) 4233 return -EINVAL; 4234 if (len > sizeof(struct sctp_event_subscribe)) 4235 len = sizeof(struct sctp_event_subscribe); 4236 if (put_user(len, optlen)) 4237 return -EFAULT; 4238 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4239 return -EFAULT; 4240 return 0; 4241 } 4242 4243 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4244 * 4245 * This socket option is applicable to the UDP-style socket only. When 4246 * set it will cause associations that are idle for more than the 4247 * specified number of seconds to automatically close. An association 4248 * being idle is defined an association that has NOT sent or received 4249 * user data. The special value of '0' indicates that no automatic 4250 * close of any associations should be performed. The option expects an 4251 * integer defining the number of seconds of idle time before an 4252 * association is closed. 
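 *
 * For example, to have idle associations on a one-to-many socket
 * closed after five minutes (illustrative sketch):
 *
 *   int secs = 300;
 *
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 *
 * The current value can be read back with the matching getsockopt()
 * implemented below.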
4253 */ 4254 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4255 { 4256 /* Applicable to UDP-style socket only */ 4257 if (sctp_style(sk, TCP)) 4258 return -EOPNOTSUPP; 4259 if (len < sizeof(int)) 4260 return -EINVAL; 4261 len = sizeof(int); 4262 if (put_user(len, optlen)) 4263 return -EFAULT; 4264 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4265 return -EFAULT; 4266 return 0; 4267 } 4268 4269 /* Helper routine to branch off an association to a new socket. */ 4270 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4271 { 4272 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4273 struct socket *sock; 4274 struct sctp_af *af; 4275 int err = 0; 4276 4277 if (!asoc) 4278 return -EINVAL; 4279 4280 /* An association cannot be branched off from an already peeled-off 4281 * socket, nor is this supported for tcp style sockets. 4282 */ 4283 if (!sctp_style(sk, UDP)) 4284 return -EINVAL; 4285 4286 /* Create a new socket. */ 4287 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4288 if (err < 0) 4289 return err; 4290 4291 sctp_copy_sock(sock->sk, sk, asoc); 4292 4293 /* Make peeled-off sockets more like 1-1 accepted sockets. 4294 * Set the daddr and initialize id to something more random 4295 */ 4296 af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family); 4297 af->to_sk_daddr(&asoc->peer.primary_addr, sk); 4298 4299 /* Populate the fields of the newsk from the oldsk and migrate the 4300 * asoc to the newsk. 4301 */ 4302 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4303 4304 *sockp = sock; 4305 4306 return err; 4307 } 4308 EXPORT_SYMBOL(sctp_do_peeloff); 4309 4310 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4311 { 4312 sctp_peeloff_arg_t peeloff; 4313 struct socket *newsock; 4314 struct file *newfile; 4315 int retval = 0; 4316 4317 if (len < sizeof(sctp_peeloff_arg_t)) 4318 return -EINVAL; 4319 len = sizeof(sctp_peeloff_arg_t); 4320 if (copy_from_user(&peeloff, optval, len)) 4321 return -EFAULT; 4322 4323 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4324 if (retval < 0) 4325 goto out; 4326 4327 /* Map the socket to an unused fd that can be returned to the user. */ 4328 retval = get_unused_fd_flags(0); 4329 if (retval < 0) { 4330 sock_release(newsock); 4331 goto out; 4332 } 4333 4334 newfile = sock_alloc_file(newsock, 0, NULL); 4335 if (unlikely(IS_ERR(newfile))) { 4336 put_unused_fd(retval); 4337 sock_release(newsock); 4338 return PTR_ERR(newfile); 4339 } 4340 4341 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4342 retval); 4343 4344 /* Return the fd mapped to the new socket. */ 4345 if (put_user(len, optlen)) { 4346 fput(newfile); 4347 put_unused_fd(retval); 4348 return -EFAULT; 4349 } 4350 peeloff.sd = retval; 4351 if (copy_to_user(optval, &peeloff, len)) { 4352 fput(newfile); 4353 put_unused_fd(retval); 4354 return -EFAULT; 4355 } 4356 fd_install(retval, newfile); 4357 out: 4358 return retval; 4359 } 4360 4361 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4362 * 4363 * Applications can enable or disable heartbeats for any peer address of 4364 * an association, modify an address's heartbeat interval, force a 4365 * heartbeat to be sent immediately, and adjust the address's maximum 4366 * number of retransmissions sent before an address is considered 4367 * unreachable. 
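 *
 * A typical use, shown here as an illustrative sketch built on the
 * sctp_paddrparams structure described below, is to enable heartbeats
 * with a 5 second interval on one peer address ('peer' is assumed to
 * be a filled-in sockaddr of that address):
 *
 *   struct sctp_paddrparams pp;
 *
 *   memset(&pp, 0, sizeof(pp));
 *   memcpy(&pp.spp_address, &peer, sizeof(peer));
 *   pp.spp_flags = SPP_HB_ENABLE;
 *   pp.spp_hbinterval = 5000;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *              &pp, sizeof(pp));
 *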
The following structure is used to access and modify an
4368 * address's parameters:
4369 *
4370 * struct sctp_paddrparams {
4371 * sctp_assoc_t spp_assoc_id;
4372 * struct sockaddr_storage spp_address;
4373 * uint32_t spp_hbinterval;
4374 * uint16_t spp_pathmaxrxt;
4375 * uint32_t spp_pathmtu;
4376 * uint32_t spp_sackdelay;
4377 * uint32_t spp_flags;
4378 * };
4379 *
4380 * spp_assoc_id - (one-to-many style socket) This is filled in by the
4381 * application, and identifies the association for
4382 * this query.
4383 * spp_address - This specifies which address is of interest.
4384 * spp_hbinterval - This contains the value of the heartbeat interval,
4385 * in milliseconds. If a value of zero
4386 * is present in this field then no changes are to
4387 * be made to this parameter.
4388 * spp_pathmaxrxt - This contains the maximum number of
4389 * retransmissions before this address shall be
4390 * considered unreachable. If a value of zero
4391 * is present in this field then no changes are to
4392 * be made to this parameter.
4393 * spp_pathmtu - When Path MTU discovery is disabled the value
4394 * specified here will be the "fixed" path mtu.
4395 * Note that if the spp_address field is empty
4396 * then all associations on this address will
4397 * have this fixed path mtu set upon them.
4398 *
4399 * spp_sackdelay - When delayed sack is enabled, this value specifies
4400 * the number of milliseconds that sacks will be delayed
4401 * for. This value will apply to all addresses of an
4402 * association if the spp_address field is empty. Note
4403 * also, that if delayed sack is enabled and this
4404 * value is set to 0, no change is made to the last
4405 * recorded delayed sack timer value.
4406 *
4407 * spp_flags - These flags are used to control various features
4408 * on an association. The flag field may contain
4409 * zero or more of the following options.
4410 *
4411 * SPP_HB_ENABLE - Enable heartbeats on the
4412 * specified address. Note that if the address
4413 * field is empty all addresses for the association
4414 * have heartbeats enabled upon them.
4415 *
4416 * SPP_HB_DISABLE - Disable heartbeats on the
4417 * specified address. Note that if the address
4418 * field is empty all addresses for the association
4419 * will have their heartbeats disabled. Note also
4420 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
4421 * mutually exclusive, only one of these two should
4422 * be specified. Enabling both flags will have
4423 * undetermined results.
4424 *
4425 * SPP_HB_DEMAND - Request a user initiated heartbeat
4426 * to be made immediately.
4427 *
4428 * SPP_PMTUD_ENABLE - This field will enable PMTU
4429 * discovery upon the specified address. Note that
4430 * if the address field is empty then all addresses
4431 * on the association are affected.
4432 *
4433 * SPP_PMTUD_DISABLE - This field will disable PMTU
4434 * discovery upon the specified address. Note that
4435 * if the address field is empty then all addresses
4436 * on the association are affected. Note also that
4437 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
4438 * exclusive. Enabling both will have undetermined
4439 * results.
4440 *
4441 * SPP_SACKDELAY_ENABLE - Setting this flag turns
4442 * on delayed sack. The time specified in spp_sackdelay
4443 * is used to specify the sack delay for this address. Note
4444 * that if spp_address is empty then all addresses will
4445 * enable delayed sack and take on the sack delay
4446 * value specified in spp_sackdelay.
4447 * SPP_SACKDELAY_DISABLE - Setting this flag turns
4448 * off delayed sack. If the spp_address field is blank then
4449 * delayed sack is disabled for the entire association. Note
4450 * also that this field is mutually exclusive with
4451 * SPP_SACKDELAY_ENABLE, setting both will have undefined
4452 * results.
4453 */
4454 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
4455 char __user *optval, int __user *optlen)
4456 {
4457 struct sctp_paddrparams params;
4458 struct sctp_transport *trans = NULL;
4459 struct sctp_association *asoc = NULL;
4460 struct sctp_sock *sp = sctp_sk(sk);
4461
4462 if (len < sizeof(struct sctp_paddrparams))
4463 return -EINVAL;
4464 len = sizeof(struct sctp_paddrparams);
4465 if (copy_from_user(&params, optval, len))
4466 return -EFAULT;
4467
4468 /* If an address other than INADDR_ANY is specified, and
4469 * no transport is found, then the request is invalid.
4470 */
4471 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
4472 trans = sctp_addr_id2transport(sk, &params.spp_address,
4473 params.spp_assoc_id);
4474 if (!trans) {
4475 pr_debug("%s: failed no transport\n", __func__);
4476 return -EINVAL;
4477 }
4478 }
4479
4480 /* Get association, if assoc_id != 0 and the socket is a one
4481 * to many style socket, and an association was not found, then
4482 * the id was invalid.
4483 */
4484 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
4485 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
4486 pr_debug("%s: failed no association\n", __func__);
4487 return -EINVAL;
4488 }
4489
4490 if (trans) {
4491 /* Fetch transport values. */
4492 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
4493 params.spp_pathmtu = trans->pathmtu;
4494 params.spp_pathmaxrxt = trans->pathmaxrxt;
4495 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
4496
4497 /* draft-11 doesn't say what to return in spp_flags */
4498 params.spp_flags = trans->param_flags;
4499 } else if (asoc) {
4500 /* Fetch association values. */
4501 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
4502 params.spp_pathmtu = asoc->pathmtu;
4503 params.spp_pathmaxrxt = asoc->pathmaxrxt;
4504 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
4505
4506 /* draft-11 doesn't say what to return in spp_flags */
4507 params.spp_flags = asoc->param_flags;
4508 } else {
4509 /* Fetch socket values. */
4510 params.spp_hbinterval = sp->hbinterval;
4511 params.spp_pathmtu = sp->pathmtu;
4512 params.spp_sackdelay = sp->sackdelay;
4513 params.spp_pathmaxrxt = sp->pathmaxrxt;
4514
4515 /* draft-11 doesn't say what to return in spp_flags */
4516 params.spp_flags = sp->param_flags;
4517 }
4518
4519 if (copy_to_user(optval, &params, len))
4520 return -EFAULT;
4521
4522 if (put_user(len, optlen))
4523 return -EFAULT;
4524
4525 return 0;
4526 }
4527
4528 /*
4529 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
4530 *
4531 * This option will affect the way delayed acks are performed. This
4532 * option allows you to get or set the delayed ack time, in
4533 * milliseconds. It also allows changing the delayed ack frequency.
4534 * Changing the frequency to 1 disables the delayed sack algorithm. If
4535 * the assoc_id is 0, then this sets or gets the endpoint's default
4536 * values. If the assoc_id field is non-zero, then the set or get
4537 * affects the specified association for the one to many model (the
4538 * assoc_id field is ignored by the one to one model).
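 *
 * For example, requesting a 100 ms SACK delay for one association
 * while leaving the SACK frequency untouched could look like this
 * (illustrative sketch using the sctp_sack_info structure shown
 * below; 'assoc_id' is assumed to be a known association id):
 *
 *   struct sctp_sack_info si;
 *
 *   memset(&si, 0, sizeof(si));
 *   si.sack_assoc_id = assoc_id;
 *   si.sack_delay = 100;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 *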
Note that if
4539 * sack_delay or sack_freq are 0 when setting this option, then the
4540 * current values will remain unchanged.
4541 *
4542 * struct sctp_sack_info {
4543 * sctp_assoc_t sack_assoc_id;
4544 * uint32_t sack_delay;
4545 * uint32_t sack_freq;
4546 * };
4547 *
4548 * sack_assoc_id - This parameter indicates which association the user
4549 * is performing an action upon. Note that if this field's value is
4550 * zero then the endpoint's default value is changed (affecting future
4551 * associations only).
4552 *
4553 * sack_delay - This parameter contains the number of milliseconds that
4554 * the user is requesting the delayed ACK timer be set to. Note that
4555 * this value is defined in the standard to be between 200 and 500
4556 * milliseconds.
4557 *
4558 * sack_freq - This parameter contains the number of packets that must
4559 * be received before a sack is sent without waiting for the delay
4560 * timer to expire. The default value for this is 2, setting this
4561 * value to 1 will disable the delayed sack algorithm.
4562 */
4563 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
4564 char __user *optval,
4565 int __user *optlen)
4566 {
4567 struct sctp_sack_info params;
4568 struct sctp_association *asoc = NULL;
4569 struct sctp_sock *sp = sctp_sk(sk);
4570
4571 if (len >= sizeof(struct sctp_sack_info)) {
4572 len = sizeof(struct sctp_sack_info);
4573
4574 if (copy_from_user(&params, optval, len))
4575 return -EFAULT;
4576 } else if (len == sizeof(struct sctp_assoc_value)) {
4577 pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
4578 pr_warn("Use struct sctp_sack_info instead\n");
4579 if (copy_from_user(&params, optval, len))
4580 return -EFAULT;
4581 } else
4582 return -EINVAL;
4583
4584 /* Get association, if sack_assoc_id != 0 and the socket is a one
4585 * to many style socket, and an association was not found, then
4586 * the id was invalid.
4587 */
4588 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
4589 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
4590 return -EINVAL;
4591
4592 if (asoc) {
4593 /* Fetch association values. */
4594 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
4595 params.sack_delay = jiffies_to_msecs(
4596 asoc->sackdelay);
4597 params.sack_freq = asoc->sackfreq;
4598
4599 } else {
4600 params.sack_delay = 0;
4601 params.sack_freq = 1;
4602 }
4603 } else {
4604 /* Fetch socket values. */
4605 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
4606 params.sack_delay = sp->sackdelay;
4607 params.sack_freq = sp->sackfreq;
4608 } else {
4609 params.sack_delay = 0;
4610 params.sack_freq = 1;
4611 }
4612 }
4613
4614 if (copy_to_user(optval, &params, len))
4615 return -EFAULT;
4616
4617 if (put_user(len, optlen))
4618 return -EFAULT;
4619
4620 return 0;
4621 }
4622
4623 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
4624 *
4625 * Applications can specify protocol parameters for the default association
4626 * initialization. The option name argument to setsockopt() and getsockopt()
4627 * is SCTP_INITMSG.
4628 *
4629 * Setting initialization parameters is effective only on an unconnected
4630 * socket (for UDP-style sockets only future associations are affected
4631 * by the change). With TCP-style sockets, this option is inherited by
4632 * sockets derived from a listener socket.
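 *
 * For example, to request more outbound streams for future
 * associations (illustrative sketch; struct sctp_initmsg is the one
 * declared in the SCTP socket API headers):
 *
 *   struct sctp_initmsg im;
 *   socklen_t len = sizeof(im);
 *
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, &len);
 *   im.sinit_num_ostreams = 10;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));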
4633 */ 4634 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4635 { 4636 if (len < sizeof(struct sctp_initmsg)) 4637 return -EINVAL; 4638 len = sizeof(struct sctp_initmsg); 4639 if (put_user(len, optlen)) 4640 return -EFAULT; 4641 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4642 return -EFAULT; 4643 return 0; 4644 } 4645 4646 4647 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4648 char __user *optval, int __user *optlen) 4649 { 4650 struct sctp_association *asoc; 4651 int cnt = 0; 4652 struct sctp_getaddrs getaddrs; 4653 struct sctp_transport *from; 4654 void __user *to; 4655 union sctp_addr temp; 4656 struct sctp_sock *sp = sctp_sk(sk); 4657 int addrlen; 4658 size_t space_left; 4659 int bytes_copied; 4660 4661 if (len < sizeof(struct sctp_getaddrs)) 4662 return -EINVAL; 4663 4664 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4665 return -EFAULT; 4666 4667 /* For UDP-style sockets, id specifies the association to query. */ 4668 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4669 if (!asoc) 4670 return -EINVAL; 4671 4672 to = optval + offsetof(struct sctp_getaddrs,addrs); 4673 space_left = len - offsetof(struct sctp_getaddrs,addrs); 4674 4675 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4676 transports) { 4677 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4678 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4679 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4680 if (space_left < addrlen) 4681 return -ENOMEM; 4682 if (copy_to_user(to, &temp, addrlen)) 4683 return -EFAULT; 4684 to += addrlen; 4685 cnt++; 4686 space_left -= addrlen; 4687 } 4688 4689 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4690 return -EFAULT; 4691 bytes_copied = ((char __user *)to) - optval; 4692 if (put_user(bytes_copied, optlen)) 4693 return -EFAULT; 4694 4695 return 0; 4696 } 4697 4698 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4699 size_t space_left, int *bytes_copied) 4700 { 4701 struct sctp_sockaddr_entry *addr; 4702 union sctp_addr temp; 4703 int cnt = 0; 4704 int addrlen; 4705 struct net *net = sock_net(sk); 4706 4707 rcu_read_lock(); 4708 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4709 if (!addr->valid) 4710 continue; 4711 4712 if ((PF_INET == sk->sk_family) && 4713 (AF_INET6 == addr->a.sa.sa_family)) 4714 continue; 4715 if ((PF_INET6 == sk->sk_family) && 4716 inet_v6_ipv6only(sk) && 4717 (AF_INET == addr->a.sa.sa_family)) 4718 continue; 4719 memcpy(&temp, &addr->a, sizeof(temp)); 4720 if (!temp.v4.sin_port) 4721 temp.v4.sin_port = htons(port); 4722 4723 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4724 &temp); 4725 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4726 if (space_left < addrlen) { 4727 cnt = -ENOMEM; 4728 break; 4729 } 4730 memcpy(to, &temp, addrlen); 4731 4732 to += addrlen; 4733 cnt ++; 4734 space_left -= addrlen; 4735 *bytes_copied += addrlen; 4736 } 4737 rcu_read_unlock(); 4738 4739 return cnt; 4740 } 4741 4742 4743 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4744 char __user *optval, int __user *optlen) 4745 { 4746 struct sctp_bind_addr *bp; 4747 struct sctp_association *asoc; 4748 int cnt = 0; 4749 struct sctp_getaddrs getaddrs; 4750 struct sctp_sockaddr_entry *addr; 4751 void __user *to; 4752 union sctp_addr temp; 4753 struct sctp_sock *sp = sctp_sk(sk); 4754 int addrlen; 4755 int err = 0; 4756 size_t space_left; 4757 int 
bytes_copied = 0; 4758 void *addrs; 4759 void *buf; 4760 4761 if (len < sizeof(struct sctp_getaddrs)) 4762 return -EINVAL; 4763 4764 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4765 return -EFAULT; 4766 4767 /* 4768 * For UDP-style sockets, id specifies the association to query. 4769 * If the id field is set to the value '0' then the locally bound 4770 * addresses are returned without regard to any particular 4771 * association. 4772 */ 4773 if (0 == getaddrs.assoc_id) { 4774 bp = &sctp_sk(sk)->ep->base.bind_addr; 4775 } else { 4776 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4777 if (!asoc) 4778 return -EINVAL; 4779 bp = &asoc->base.bind_addr; 4780 } 4781 4782 to = optval + offsetof(struct sctp_getaddrs,addrs); 4783 space_left = len - offsetof(struct sctp_getaddrs,addrs); 4784 4785 addrs = kmalloc(space_left, GFP_KERNEL); 4786 if (!addrs) 4787 return -ENOMEM; 4788 4789 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4790 * addresses from the global local address list. 4791 */ 4792 if (sctp_list_single_entry(&bp->address_list)) { 4793 addr = list_entry(bp->address_list.next, 4794 struct sctp_sockaddr_entry, list); 4795 if (sctp_is_any(sk, &addr->a)) { 4796 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4797 space_left, &bytes_copied); 4798 if (cnt < 0) { 4799 err = cnt; 4800 goto out; 4801 } 4802 goto copy_getaddrs; 4803 } 4804 } 4805 4806 buf = addrs; 4807 /* Protection on the bound address list is not needed since 4808 * in the socket option context we hold a socket lock and 4809 * thus the bound address list can't change. 4810 */ 4811 list_for_each_entry(addr, &bp->address_list, list) { 4812 memcpy(&temp, &addr->a, sizeof(temp)); 4813 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4814 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4815 if (space_left < addrlen) { 4816 err = -ENOMEM; /*fixme: right error?*/ 4817 goto out; 4818 } 4819 memcpy(buf, &temp, addrlen); 4820 buf += addrlen; 4821 bytes_copied += addrlen; 4822 cnt ++; 4823 space_left -= addrlen; 4824 } 4825 4826 copy_getaddrs: 4827 if (copy_to_user(to, addrs, bytes_copied)) { 4828 err = -EFAULT; 4829 goto out; 4830 } 4831 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4832 err = -EFAULT; 4833 goto out; 4834 } 4835 if (put_user(bytes_copied, optlen)) 4836 err = -EFAULT; 4837 out: 4838 kfree(addrs); 4839 return err; 4840 } 4841 4842 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 4843 * 4844 * Requests that the local SCTP stack use the enclosed peer address as 4845 * the association primary. The enclosed address must be one of the 4846 * association peer's addresses. 
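 *
 * The same sctp_prim structure is used in both directions; reading
 * the current primary could look like this (illustrative sketch,
 * one-to-many socket with a known 'assoc_id'):
 *
 *   struct sctp_prim prim;
 *   socklen_t len = sizeof(prim);
 *
 *   memset(&prim, 0, sizeof(prim));
 *   prim.ssp_assoc_id = assoc_id;
 *   if (!getsockopt(sd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, &len))
 *       prim.ssp_addr now holds the current primary peer address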
4847 */ 4848 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 4849 char __user *optval, int __user *optlen) 4850 { 4851 struct sctp_prim prim; 4852 struct sctp_association *asoc; 4853 struct sctp_sock *sp = sctp_sk(sk); 4854 4855 if (len < sizeof(struct sctp_prim)) 4856 return -EINVAL; 4857 4858 len = sizeof(struct sctp_prim); 4859 4860 if (copy_from_user(&prim, optval, len)) 4861 return -EFAULT; 4862 4863 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 4864 if (!asoc) 4865 return -EINVAL; 4866 4867 if (!asoc->peer.primary_path) 4868 return -ENOTCONN; 4869 4870 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 4871 asoc->peer.primary_path->af_specific->sockaddr_len); 4872 4873 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, 4874 (union sctp_addr *)&prim.ssp_addr); 4875 4876 if (put_user(len, optlen)) 4877 return -EFAULT; 4878 if (copy_to_user(optval, &prim, len)) 4879 return -EFAULT; 4880 4881 return 0; 4882 } 4883 4884 /* 4885 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 4886 * 4887 * Requests that the local endpoint set the specified Adaptation Layer 4888 * Indication parameter for all future INIT and INIT-ACK exchanges. 4889 */ 4890 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 4891 char __user *optval, int __user *optlen) 4892 { 4893 struct sctp_setadaptation adaptation; 4894 4895 if (len < sizeof(struct sctp_setadaptation)) 4896 return -EINVAL; 4897 4898 len = sizeof(struct sctp_setadaptation); 4899 4900 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 4901 4902 if (put_user(len, optlen)) 4903 return -EFAULT; 4904 if (copy_to_user(optval, &adaptation, len)) 4905 return -EFAULT; 4906 4907 return 0; 4908 } 4909 4910 /* 4911 * 4912 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 4913 * 4914 * Applications that wish to use the sendto() system call may wish to 4915 * specify a default set of parameters that would normally be supplied 4916 * through the inclusion of ancillary data. This socket option allows 4917 * such an application to set the default sctp_sndrcvinfo structure. 4918 4919 4920 * The application that wishes to use this socket option simply passes 4921 * in to this call the sctp_sndrcvinfo structure defined in Section 4922 * 5.2.2) The input parameters accepted by this call include 4923 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 4924 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 4925 * to this call if the caller is using the UDP model. 4926 * 4927 * For getsockopt, it get the default sctp_sndrcvinfo structure. 
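 *
 * For example, to direct all further sends on one association to
 * stream 3 (illustrative sketch; 'assoc_id' and 'my_ppid' are assumed
 * to be known to the application):
 *
 *   struct sctp_sndrcvinfo info;
 *
 *   memset(&info, 0, sizeof(info));
 *   info.sinfo_assoc_id = assoc_id;
 *   info.sinfo_stream = 3;
 *   info.sinfo_ppid = my_ppid;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *              &info, sizeof(info));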
4928 */ 4929 static int sctp_getsockopt_default_send_param(struct sock *sk, 4930 int len, char __user *optval, 4931 int __user *optlen) 4932 { 4933 struct sctp_sndrcvinfo info; 4934 struct sctp_association *asoc; 4935 struct sctp_sock *sp = sctp_sk(sk); 4936 4937 if (len < sizeof(struct sctp_sndrcvinfo)) 4938 return -EINVAL; 4939 4940 len = sizeof(struct sctp_sndrcvinfo); 4941 4942 if (copy_from_user(&info, optval, len)) 4943 return -EFAULT; 4944 4945 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 4946 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 4947 return -EINVAL; 4948 4949 if (asoc) { 4950 info.sinfo_stream = asoc->default_stream; 4951 info.sinfo_flags = asoc->default_flags; 4952 info.sinfo_ppid = asoc->default_ppid; 4953 info.sinfo_context = asoc->default_context; 4954 info.sinfo_timetolive = asoc->default_timetolive; 4955 } else { 4956 info.sinfo_stream = sp->default_stream; 4957 info.sinfo_flags = sp->default_flags; 4958 info.sinfo_ppid = sp->default_ppid; 4959 info.sinfo_context = sp->default_context; 4960 info.sinfo_timetolive = sp->default_timetolive; 4961 } 4962 4963 if (put_user(len, optlen)) 4964 return -EFAULT; 4965 if (copy_to_user(optval, &info, len)) 4966 return -EFAULT; 4967 4968 return 0; 4969 } 4970 4971 /* 4972 * 4973 * 7.1.5 SCTP_NODELAY 4974 * 4975 * Turn on/off any Nagle-like algorithm. This means that packets are 4976 * generally sent as soon as possible and no unnecessary delays are 4977 * introduced, at the cost of more packets in the network. Expects an 4978 * integer boolean flag. 4979 */ 4980 4981 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 4982 char __user *optval, int __user *optlen) 4983 { 4984 int val; 4985 4986 if (len < sizeof(int)) 4987 return -EINVAL; 4988 4989 len = sizeof(int); 4990 val = (sctp_sk(sk)->nodelay == 1); 4991 if (put_user(len, optlen)) 4992 return -EFAULT; 4993 if (copy_to_user(optval, &val, len)) 4994 return -EFAULT; 4995 return 0; 4996 } 4997 4998 /* 4999 * 5000 * 7.1.1 SCTP_RTOINFO 5001 * 5002 * The protocol parameters used to initialize and bound retransmission 5003 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5004 * and modify these parameters. 5005 * All parameters are time values, in milliseconds. A value of 0, when 5006 * modifying the parameters, indicates that the current value should not 5007 * be changed. 5008 * 5009 */ 5010 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5011 char __user *optval, 5012 int __user *optlen) { 5013 struct sctp_rtoinfo rtoinfo; 5014 struct sctp_association *asoc; 5015 5016 if (len < sizeof (struct sctp_rtoinfo)) 5017 return -EINVAL; 5018 5019 len = sizeof(struct sctp_rtoinfo); 5020 5021 if (copy_from_user(&rtoinfo, optval, len)) 5022 return -EFAULT; 5023 5024 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5025 5026 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5027 return -EINVAL; 5028 5029 /* Values corresponding to the specific association. */ 5030 if (asoc) { 5031 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5032 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5033 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5034 } else { 5035 /* Values corresponding to the endpoint. 
*/ 5036 struct sctp_sock *sp = sctp_sk(sk); 5037 5038 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5039 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5040 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5041 } 5042 5043 if (put_user(len, optlen)) 5044 return -EFAULT; 5045 5046 if (copy_to_user(optval, &rtoinfo, len)) 5047 return -EFAULT; 5048 5049 return 0; 5050 } 5051 5052 /* 5053 * 5054 * 7.1.2 SCTP_ASSOCINFO 5055 * 5056 * This option is used to tune the maximum retransmission attempts 5057 * of the association. 5058 * Returns an error if the new association retransmission value is 5059 * greater than the sum of the retransmission value of the peer. 5060 * See [SCTP] for more information. 5061 * 5062 */ 5063 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5064 char __user *optval, 5065 int __user *optlen) 5066 { 5067 5068 struct sctp_assocparams assocparams; 5069 struct sctp_association *asoc; 5070 struct list_head *pos; 5071 int cnt = 0; 5072 5073 if (len < sizeof (struct sctp_assocparams)) 5074 return -EINVAL; 5075 5076 len = sizeof(struct sctp_assocparams); 5077 5078 if (copy_from_user(&assocparams, optval, len)) 5079 return -EFAULT; 5080 5081 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5082 5083 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5084 return -EINVAL; 5085 5086 /* Values correspoinding to the specific association */ 5087 if (asoc) { 5088 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5089 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5090 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5091 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5092 5093 list_for_each(pos, &asoc->peer.transport_addr_list) { 5094 cnt ++; 5095 } 5096 5097 assocparams.sasoc_number_peer_destinations = cnt; 5098 } else { 5099 /* Values corresponding to the endpoint */ 5100 struct sctp_sock *sp = sctp_sk(sk); 5101 5102 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5103 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5104 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5105 assocparams.sasoc_cookie_life = 5106 sp->assocparams.sasoc_cookie_life; 5107 assocparams.sasoc_number_peer_destinations = 5108 sp->assocparams. 5109 sasoc_number_peer_destinations; 5110 } 5111 5112 if (put_user(len, optlen)) 5113 return -EFAULT; 5114 5115 if (copy_to_user(optval, &assocparams, len)) 5116 return -EFAULT; 5117 5118 return 0; 5119 } 5120 5121 /* 5122 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5123 * 5124 * This socket option is a boolean flag which turns on or off mapped V4 5125 * addresses. If this option is turned on and the socket is type 5126 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5127 * If this option is turned off, then no mapping will be done of V4 5128 * addresses and a user will receive both PF_INET6 and PF_INET type 5129 * addresses on the socket. 5130 */ 5131 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5132 char __user *optval, int __user *optlen) 5133 { 5134 int val; 5135 struct sctp_sock *sp = sctp_sk(sk); 5136 5137 if (len < sizeof(int)) 5138 return -EINVAL; 5139 5140 len = sizeof(int); 5141 val = sp->v4mapped; 5142 if (put_user(len, optlen)) 5143 return -EFAULT; 5144 if (copy_to_user(optval, &val, len)) 5145 return -EFAULT; 5146 5147 return 0; 5148 } 5149 5150 /* 5151 * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT)
5152 * (chapter and verse is quoted at sctp_setsockopt_context())
5153 */
5154 static int sctp_getsockopt_context(struct sock *sk, int len,
5155 char __user *optval, int __user *optlen)
5156 {
5157 struct sctp_assoc_value params;
5158 struct sctp_sock *sp;
5159 struct sctp_association *asoc;
5160
5161 if (len < sizeof(struct sctp_assoc_value))
5162 return -EINVAL;
5163
5164 len = sizeof(struct sctp_assoc_value);
5165
5166 if (copy_from_user(&params, optval, len))
5167 return -EFAULT;
5168
5169 sp = sctp_sk(sk);
5170
5171 if (params.assoc_id != 0) {
5172 asoc = sctp_id2assoc(sk, params.assoc_id);
5173 if (!asoc)
5174 return -EINVAL;
5175 params.assoc_value = asoc->default_rcv_context;
5176 } else {
5177 params.assoc_value = sp->default_rcv_context;
5178 }
5179
5180 if (put_user(len, optlen))
5181 return -EFAULT;
5182 if (copy_to_user(optval, &params, len))
5183 return -EFAULT;
5184
5185 return 0;
5186 }
5187
5188 /*
5189 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
5190 * This option will get or set the maximum size to put in any outgoing
5191 * SCTP DATA chunk. If a message is larger than this size it will be
5192 * fragmented by SCTP into the specified size. Note that the underlying
5193 * SCTP implementation may fragment into smaller sized chunks when the
5194 * PMTU of the underlying association is smaller than the value set by
5195 * the user. The default value for this option is '0' which indicates
5196 * the user is NOT limiting fragmentation and only the PMTU will affect
5197 * SCTP's choice of DATA chunk size. Note also that values set larger
5198 * than the maximum size of an IP datagram will effectively let SCTP
5199 * control fragmentation (i.e. the same as setting this option to 0).
5200 *
5201 * The following structure is used to access and modify this parameter:
5202 *
5203 * struct sctp_assoc_value {
5204 * sctp_assoc_t assoc_id;
5205 * uint32_t assoc_value;
5206 * };
5207 *
5208 * assoc_id: This parameter is ignored for one-to-one style sockets.
5209 * For one-to-many style sockets this parameter indicates which
5210 * association the user is performing an action upon. Note that if
5211 * this field's value is zero then the endpoint's default value is
5212 * changed (affecting future associations only).
5213 * assoc_value: This parameter specifies the maximum size in bytes.
5214 */
5215 static int sctp_getsockopt_maxseg(struct sock *sk, int len,
5216 char __user *optval, int __user *optlen)
5217 {
5218 struct sctp_assoc_value params;
5219 struct sctp_association *asoc;
5220
5221 if (len == sizeof(int)) {
5222 pr_warn("Use of int in maxseg socket option deprecated\n");
5223 pr_warn("Use struct sctp_assoc_value instead\n");
5224 params.assoc_id = 0;
5225 } else if (len >= sizeof(struct sctp_assoc_value)) {
5226 len = sizeof(struct sctp_assoc_value);
5227 if (copy_from_user(&params, optval, sizeof(params)))
5228 return -EFAULT;
5229 } else
5230 return -EINVAL;
5231
5232 asoc = sctp_id2assoc(sk, params.assoc_id);
5233 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
5234 return -EINVAL;
5235
5236 if (asoc)
5237 params.assoc_value = asoc->frag_point;
5238 else
5239 params.assoc_value = sctp_sk(sk)->user_frag;
5240
5241 if (put_user(len, optlen))
5242 return -EFAULT;
5243 if (len == sizeof(int)) {
5244 if (copy_to_user(optval, &params.assoc_value, len))
5245 return -EFAULT;
5246 } else {
5247 if (copy_to_user(optval, &params, len))
5248 return -EFAULT;
5249 }
5250
5251 return 0;
5252 }
5253
5254 /*
5255 * 7.1.24.
Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
5256 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
5257 */
5258 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
5259 char __user *optval, int __user *optlen)
5260 {
5261 int val;
5262
5263 if (len < sizeof(int))
5264 return -EINVAL;
5265
5266 len = sizeof(int);
5267
5268 val = sctp_sk(sk)->frag_interleave;
5269 if (put_user(len, optlen))
5270 return -EFAULT;
5271 if (copy_to_user(optval, &val, len))
5272 return -EFAULT;
5273
5274 return 0;
5275 }
5276
5277 /*
5278 * 7.1.25. Set or Get the sctp partial delivery point
5279 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
5280 */
5281 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
5282 char __user *optval,
5283 int __user *optlen)
5284 {
5285 u32 val;
5286
5287 if (len < sizeof(u32))
5288 return -EINVAL;
5289
5290 len = sizeof(u32);
5291
5292 val = sctp_sk(sk)->pd_point;
5293 if (put_user(len, optlen))
5294 return -EFAULT;
5295 if (copy_to_user(optval, &val, len))
5296 return -EFAULT;
5297
5298 return 0;
5299 }
5300
5301 /*
5302 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
5303 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
5304 */
5305 static int sctp_getsockopt_maxburst(struct sock *sk, int len,
5306 char __user *optval,
5307 int __user *optlen)
5308 {
5309 struct sctp_assoc_value params;
5310 struct sctp_sock *sp;
5311 struct sctp_association *asoc;
5312
5313 if (len == sizeof(int)) {
5314 pr_warn("Use of int in max_burst socket option deprecated\n");
5315 pr_warn("Use struct sctp_assoc_value instead\n");
5316 params.assoc_id = 0;
5317 } else if (len >= sizeof(struct sctp_assoc_value)) {
5318 len = sizeof(struct sctp_assoc_value);
5319 if (copy_from_user(&params, optval, len))
5320 return -EFAULT;
5321 } else
5322 return -EINVAL;
5323
5324 sp = sctp_sk(sk);
5325
5326 if (params.assoc_id != 0) {
5327 asoc = sctp_id2assoc(sk, params.assoc_id);
5328 if (!asoc)
5329 return -EINVAL;
5330 params.assoc_value = asoc->max_burst;
5331 } else
5332 params.assoc_value = sp->max_burst;
5333
5334 if (len == sizeof(int)) {
5335 if (copy_to_user(optval, &params.assoc_value, len))
5336 return -EFAULT;
5337 } else {
5338 if (copy_to_user(optval, &params, len))
5339 return -EFAULT;
5340 }
5341
5342 return 0;
5343
5344 }
5345
5346 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5347 char __user *optval, int __user *optlen)
5348 {
5349 struct net *net = sock_net(sk);
5350 struct sctp_hmacalgo __user *p = (void __user *)optval;
5351 struct sctp_hmac_algo_param *hmacs;
5352 __u16 data_len = 0;
5353 u32 num_idents;
5354
5355 if (!net->sctp.auth_enable)
5356 return -EACCES;
5357
5358 hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
5359 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
5360
5361 if (len < sizeof(struct sctp_hmacalgo) + data_len)
5362 return -EINVAL;
5363
5364 len = sizeof(struct sctp_hmacalgo) + data_len;
5365 num_idents = data_len / sizeof(u16);
5366
5367 if (put_user(len, optlen))
5368 return -EFAULT;
5369 if (put_user(num_idents, &p->shmac_num_idents))
5370 return -EFAULT;
5371 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
5372 return -EFAULT;
5373 return 0;
5374 }
5375
5376 static int sctp_getsockopt_active_key(struct sock *sk, int len,
5377 char __user *optval, int __user *optlen)
5378 {
5379 struct net *net = sock_net(sk);
5380 struct sctp_authkeyid val;
5381 struct sctp_association *asoc;
5382
5383 if
(!net->sctp.auth_enable) 5384 return -EACCES; 5385 5386 if (len < sizeof(struct sctp_authkeyid)) 5387 return -EINVAL; 5388 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5389 return -EFAULT; 5390 5391 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5392 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5393 return -EINVAL; 5394 5395 if (asoc) 5396 val.scact_keynumber = asoc->active_key_id; 5397 else 5398 val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; 5399 5400 len = sizeof(struct sctp_authkeyid); 5401 if (put_user(len, optlen)) 5402 return -EFAULT; 5403 if (copy_to_user(optval, &val, len)) 5404 return -EFAULT; 5405 5406 return 0; 5407 } 5408 5409 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5410 char __user *optval, int __user *optlen) 5411 { 5412 struct net *net = sock_net(sk); 5413 struct sctp_authchunks __user *p = (void __user *)optval; 5414 struct sctp_authchunks val; 5415 struct sctp_association *asoc; 5416 struct sctp_chunks_param *ch; 5417 u32 num_chunks = 0; 5418 char __user *to; 5419 5420 if (!net->sctp.auth_enable) 5421 return -EACCES; 5422 5423 if (len < sizeof(struct sctp_authchunks)) 5424 return -EINVAL; 5425 5426 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5427 return -EFAULT; 5428 5429 to = p->gauth_chunks; 5430 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5431 if (!asoc) 5432 return -EINVAL; 5433 5434 ch = asoc->peer.peer_chunks; 5435 if (!ch) 5436 goto num; 5437 5438 /* See if the user provided enough room for all the data */ 5439 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5440 if (len < num_chunks) 5441 return -EINVAL; 5442 5443 if (copy_to_user(to, ch->chunks, num_chunks)) 5444 return -EFAULT; 5445 num: 5446 len = sizeof(struct sctp_authchunks) + num_chunks; 5447 if (put_user(len, optlen)) return -EFAULT; 5448 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5449 return -EFAULT; 5450 return 0; 5451 } 5452 5453 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5454 char __user *optval, int __user *optlen) 5455 { 5456 struct net *net = sock_net(sk); 5457 struct sctp_authchunks __user *p = (void __user *)optval; 5458 struct sctp_authchunks val; 5459 struct sctp_association *asoc; 5460 struct sctp_chunks_param *ch; 5461 u32 num_chunks = 0; 5462 char __user *to; 5463 5464 if (!net->sctp.auth_enable) 5465 return -EACCES; 5466 5467 if (len < sizeof(struct sctp_authchunks)) 5468 return -EINVAL; 5469 5470 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5471 return -EFAULT; 5472 5473 to = p->gauth_chunks; 5474 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5475 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5476 return -EINVAL; 5477 5478 if (asoc) 5479 ch = (struct sctp_chunks_param*)asoc->c.auth_chunks; 5480 else 5481 ch = sctp_sk(sk)->ep->auth_chunk_list; 5482 5483 if (!ch) 5484 goto num; 5485 5486 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5487 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5488 return -EINVAL; 5489 5490 if (copy_to_user(to, ch->chunks, num_chunks)) 5491 return -EFAULT; 5492 num: 5493 len = sizeof(struct sctp_authchunks) + num_chunks; 5494 if (put_user(len, optlen)) 5495 return -EFAULT; 5496 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5497 return -EFAULT; 5498 5499 return 0; 5500 } 5501 5502 /* 5503 * 8.2.5. 
Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5504 * This option gets the current number of associations that are attached 5505 * to a one-to-many style socket. The option value is an uint32_t. 5506 */ 5507 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5508 char __user *optval, int __user *optlen) 5509 { 5510 struct sctp_sock *sp = sctp_sk(sk); 5511 struct sctp_association *asoc; 5512 u32 val = 0; 5513 5514 if (sctp_style(sk, TCP)) 5515 return -EOPNOTSUPP; 5516 5517 if (len < sizeof(u32)) 5518 return -EINVAL; 5519 5520 len = sizeof(u32); 5521 5522 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5523 val++; 5524 } 5525 5526 if (put_user(len, optlen)) 5527 return -EFAULT; 5528 if (copy_to_user(optval, &val, len)) 5529 return -EFAULT; 5530 5531 return 0; 5532 } 5533 5534 /* 5535 * 8.1.23 SCTP_AUTO_ASCONF 5536 * See the corresponding setsockopt entry as description 5537 */ 5538 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5539 char __user *optval, int __user *optlen) 5540 { 5541 int val = 0; 5542 5543 if (len < sizeof(int)) 5544 return -EINVAL; 5545 5546 len = sizeof(int); 5547 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5548 val = 1; 5549 if (put_user(len, optlen)) 5550 return -EFAULT; 5551 if (copy_to_user(optval, &val, len)) 5552 return -EFAULT; 5553 return 0; 5554 } 5555 5556 /* 5557 * 8.2.6. Get the Current Identifiers of Associations 5558 * (SCTP_GET_ASSOC_ID_LIST) 5559 * 5560 * This option gets the current list of SCTP association identifiers of 5561 * the SCTP associations handled by a one-to-many style socket. 5562 */ 5563 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5564 char __user *optval, int __user *optlen) 5565 { 5566 struct sctp_sock *sp = sctp_sk(sk); 5567 struct sctp_association *asoc; 5568 struct sctp_assoc_ids *ids; 5569 u32 num = 0; 5570 5571 if (sctp_style(sk, TCP)) 5572 return -EOPNOTSUPP; 5573 5574 if (len < sizeof(struct sctp_assoc_ids)) 5575 return -EINVAL; 5576 5577 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5578 num++; 5579 } 5580 5581 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5582 return -EINVAL; 5583 5584 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5585 5586 ids = kmalloc(len, GFP_KERNEL); 5587 if (unlikely(!ids)) 5588 return -ENOMEM; 5589 5590 ids->gaids_number_of_ids = num; 5591 num = 0; 5592 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5593 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5594 } 5595 5596 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5597 kfree(ids); 5598 return -EFAULT; 5599 } 5600 5601 kfree(ids); 5602 return 0; 5603 } 5604 5605 /* 5606 * SCTP_PEER_ADDR_THLDS 5607 * 5608 * This option allows us to fetch the partially failed threshold for one or all 5609 * transports in an association. 
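 *
 * An illustrative sketch of reading the thresholds for a single peer
 * address ('peer' assumed to hold that address):
 *
 *   struct sctp_paddrthlds th;
 *   socklen_t len = sizeof(th);
 *
 *   memset(&th, 0, sizeof(th));
 *   memcpy(&th.spt_address, &peer, sizeof(peer));
 *   if (!getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &th, &len))
 *       th.spt_pathpfthld and th.spt_pathmaxrxt hold the thresholds
 *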
See Section 6.1 of: 5610 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5611 */ 5612 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5613 char __user *optval, 5614 int len, 5615 int __user *optlen) 5616 { 5617 struct sctp_paddrthlds val; 5618 struct sctp_transport *trans; 5619 struct sctp_association *asoc; 5620 5621 if (len < sizeof(struct sctp_paddrthlds)) 5622 return -EINVAL; 5623 len = sizeof(struct sctp_paddrthlds); 5624 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5625 return -EFAULT; 5626 5627 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5628 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5629 if (!asoc) 5630 return -ENOENT; 5631 5632 val.spt_pathpfthld = asoc->pf_retrans; 5633 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5634 } else { 5635 trans = sctp_addr_id2transport(sk, &val.spt_address, 5636 val.spt_assoc_id); 5637 if (!trans) 5638 return -ENOENT; 5639 5640 val.spt_pathmaxrxt = trans->pathmaxrxt; 5641 val.spt_pathpfthld = trans->pf_retrans; 5642 } 5643 5644 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5645 return -EFAULT; 5646 5647 return 0; 5648 } 5649 5650 /* 5651 * SCTP_GET_ASSOC_STATS 5652 * 5653 * This option retrieves local per endpoint statistics. It is modeled 5654 * after OpenSolaris' implementation 5655 */ 5656 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5657 char __user *optval, 5658 int __user *optlen) 5659 { 5660 struct sctp_assoc_stats sas; 5661 struct sctp_association *asoc = NULL; 5662 5663 /* User must provide at least the assoc id */ 5664 if (len < sizeof(sctp_assoc_t)) 5665 return -EINVAL; 5666 5667 /* Allow the struct to grow and fill in as much as possible */ 5668 len = min_t(size_t, len, sizeof(sas)); 5669 5670 if (copy_from_user(&sas, optval, len)) 5671 return -EFAULT; 5672 5673 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5674 if (!asoc) 5675 return -EINVAL; 5676 5677 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5678 sas.sas_gapcnt = asoc->stats.gapcnt; 5679 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5680 sas.sas_osacks = asoc->stats.osacks; 5681 sas.sas_isacks = asoc->stats.isacks; 5682 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5683 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5684 sas.sas_oodchunks = asoc->stats.oodchunks; 5685 sas.sas_iodchunks = asoc->stats.iodchunks; 5686 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5687 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5688 sas.sas_idupchunks = asoc->stats.idupchunks; 5689 sas.sas_opackets = asoc->stats.opackets; 5690 sas.sas_ipackets = asoc->stats.ipackets; 5691 5692 /* New high max rto observed, will return 0 if not a single 5693 * RTO update took place. 
obs_rto_ipaddr will be bogus 5694 * in such a case 5695 */ 5696 sas.sas_maxrto = asoc->stats.max_obs_rto; 5697 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5698 sizeof(struct sockaddr_storage)); 5699 5700 /* Mark beginning of a new observation period */ 5701 asoc->stats.max_obs_rto = asoc->rto_min; 5702 5703 if (put_user(len, optlen)) 5704 return -EFAULT; 5705 5706 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5707 5708 if (copy_to_user(optval, &sas, len)) 5709 return -EFAULT; 5710 5711 return 0; 5712 } 5713 5714 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5715 char __user *optval, int __user *optlen) 5716 { 5717 int retval = 0; 5718 int len; 5719 5720 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5721 5722 /* I can hardly begin to describe how wrong this is. This is 5723 * so broken as to be worse than useless. The API draft 5724 * REALLY is NOT helpful here... I am not convinced that the 5725 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5726 * are at all well-founded. 5727 */ 5728 if (level != SOL_SCTP) { 5729 struct sctp_af *af = sctp_sk(sk)->pf->af; 5730 5731 retval = af->getsockopt(sk, level, optname, optval, optlen); 5732 return retval; 5733 } 5734 5735 if (get_user(len, optlen)) 5736 return -EFAULT; 5737 5738 sctp_lock_sock(sk); 5739 5740 switch (optname) { 5741 case SCTP_STATUS: 5742 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5743 break; 5744 case SCTP_DISABLE_FRAGMENTS: 5745 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5746 optlen); 5747 break; 5748 case SCTP_EVENTS: 5749 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5750 break; 5751 case SCTP_AUTOCLOSE: 5752 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5753 break; 5754 case SCTP_SOCKOPT_PEELOFF: 5755 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 5756 break; 5757 case SCTP_PEER_ADDR_PARAMS: 5758 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5759 optlen); 5760 break; 5761 case SCTP_DELAYED_SACK: 5762 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 5763 optlen); 5764 break; 5765 case SCTP_INITMSG: 5766 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5767 break; 5768 case SCTP_GET_PEER_ADDRS: 5769 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 5770 optlen); 5771 break; 5772 case SCTP_GET_LOCAL_ADDRS: 5773 retval = sctp_getsockopt_local_addrs(sk, len, optval, 5774 optlen); 5775 break; 5776 case SCTP_SOCKOPT_CONNECTX3: 5777 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 5778 break; 5779 case SCTP_DEFAULT_SEND_PARAM: 5780 retval = sctp_getsockopt_default_send_param(sk, len, 5781 optval, optlen); 5782 break; 5783 case SCTP_PRIMARY_ADDR: 5784 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 5785 break; 5786 case SCTP_NODELAY: 5787 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 5788 break; 5789 case SCTP_RTOINFO: 5790 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 5791 break; 5792 case SCTP_ASSOCINFO: 5793 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 5794 break; 5795 case SCTP_I_WANT_MAPPED_V4_ADDR: 5796 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 5797 break; 5798 case SCTP_MAXSEG: 5799 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 5800 break; 5801 case SCTP_GET_PEER_ADDR_INFO: 5802 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 5803 optlen); 5804 break; 5805 case SCTP_ADAPTATION_LAYER: 5806 retval = 
sctp_getsockopt_adaptation_layer(sk, len, optval, 5807 optlen); 5808 break; 5809 case SCTP_CONTEXT: 5810 retval = sctp_getsockopt_context(sk, len, optval, optlen); 5811 break; 5812 case SCTP_FRAGMENT_INTERLEAVE: 5813 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 5814 optlen); 5815 break; 5816 case SCTP_PARTIAL_DELIVERY_POINT: 5817 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 5818 optlen); 5819 break; 5820 case SCTP_MAX_BURST: 5821 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 5822 break; 5823 case SCTP_AUTH_KEY: 5824 case SCTP_AUTH_CHUNK: 5825 case SCTP_AUTH_DELETE_KEY: 5826 retval = -EOPNOTSUPP; 5827 break; 5828 case SCTP_HMAC_IDENT: 5829 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 5830 break; 5831 case SCTP_AUTH_ACTIVE_KEY: 5832 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 5833 break; 5834 case SCTP_PEER_AUTH_CHUNKS: 5835 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 5836 optlen); 5837 break; 5838 case SCTP_LOCAL_AUTH_CHUNKS: 5839 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 5840 optlen); 5841 break; 5842 case SCTP_GET_ASSOC_NUMBER: 5843 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 5844 break; 5845 case SCTP_GET_ASSOC_ID_LIST: 5846 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 5847 break; 5848 case SCTP_AUTO_ASCONF: 5849 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 5850 break; 5851 case SCTP_PEER_ADDR_THLDS: 5852 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 5853 break; 5854 case SCTP_GET_ASSOC_STATS: 5855 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 5856 break; 5857 default: 5858 retval = -ENOPROTOOPT; 5859 break; 5860 } 5861 5862 sctp_release_sock(sk); 5863 return retval; 5864 } 5865 5866 static void sctp_hash(struct sock *sk) 5867 { 5868 /* STUB */ 5869 } 5870 5871 static void sctp_unhash(struct sock *sk) 5872 { 5873 /* STUB */ 5874 } 5875 5876 /* Check if port is acceptable. Possibly find first available port. 5877 * 5878 * The port hash table (contained in the 'global' SCTP protocol storage 5879 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 5880 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 5881 * list (the list number is the port number hashed out, so as you 5882 * would expect from a hash function, all the ports in a given list have 5883 * such a number that hashes out to the same list number; you were 5884 * expecting that, right?); so each list has a set of ports, with a 5885 * link to the socket (struct sock) that uses it, the port number and 5886 * a fastreuse flag (FIXME: NPI ipg). 5887 */ 5888 static struct sctp_bind_bucket *sctp_bucket_create( 5889 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 5890 5891 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 5892 { 5893 struct sctp_bind_hashbucket *head; /* hash list */ 5894 struct sctp_bind_bucket *pp; 5895 unsigned short snum; 5896 int ret; 5897 5898 snum = ntohs(addr->v4.sin_port); 5899 5900 pr_debug("%s: begins, snum:%d\n", __func__, snum); 5901 5902 sctp_local_bh_disable(); 5903 5904 if (snum == 0) { 5905 /* Search for an available port. 
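 *
 * This branch is reached when no specific port was requested, e.g.
 * (illustrative userspace sketch, not part of this file; fd is an
 * already-created SCTP socket) binding to port 0 and reading back the
 * port that the search below ends up picking:
 *
 *   struct sockaddr_in a = { .sin_family = AF_INET,
 *                            .sin_addr.s_addr = htonl(INADDR_ANY),
 *                            .sin_port = 0 };
 *   socklen_t alen = sizeof(a);
 *
 *   bind(fd, (struct sockaddr *)&a, sizeof(a));
 *   getsockname(fd, (struct sockaddr *)&a, &alen);
 *   // ntohs(a.sin_port) now holds the ephemeral port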
*/ 5906 int low, high, remaining, index; 5907 unsigned int rover; 5908 5909 inet_get_local_port_range(sock_net(sk), &low, &high); 5910 remaining = (high - low) + 1; 5911 rover = net_random() % remaining + low; 5912 5913 do { 5914 rover++; 5915 if ((rover < low) || (rover > high)) 5916 rover = low; 5917 if (inet_is_reserved_local_port(rover)) 5918 continue; 5919 index = sctp_phashfn(sock_net(sk), rover); 5920 head = &sctp_port_hashtable[index]; 5921 sctp_spin_lock(&head->lock); 5922 sctp_for_each_hentry(pp, &head->chain) 5923 if ((pp->port == rover) && 5924 net_eq(sock_net(sk), pp->net)) 5925 goto next; 5926 break; 5927 next: 5928 sctp_spin_unlock(&head->lock); 5929 } while (--remaining > 0); 5930 5931 /* Exhausted local port range during search? */ 5932 ret = 1; 5933 if (remaining <= 0) 5934 goto fail; 5935 5936 /* OK, here is the one we will use. HEAD (the port 5937 * hash table list entry) is non-NULL and we hold its 5938 * lock. 5939 */ 5940 snum = rover; 5941 } else { 5942 /* We are given a specific port number; we verify 5943 * that it is not being used. If it is used, we will 5944 * exhaust the search in the hash list corresponding 5945 * to the port number (snum) - we detect that with the 5946 * port iterator, pp being NULL. 5947 */ 5948 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 5949 sctp_spin_lock(&head->lock); 5950 sctp_for_each_hentry(pp, &head->chain) { 5951 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 5952 goto pp_found; 5953 } 5954 } 5955 pp = NULL; 5956 goto pp_not_found; 5957 pp_found: 5958 if (!hlist_empty(&pp->owner)) { 5959 /* We had a port hash table hit - there is an 5960 * available port (pp != NULL) and it is being 5961 * used by another socket (pp->owner not empty); that other 5962 * socket is going to be sk2. 5963 */ 5964 int reuse = sk->sk_reuse; 5965 struct sock *sk2; 5966 5967 pr_debug("%s: found a possible match\n", __func__); 5968 5969 if (pp->fastreuse && sk->sk_reuse && 5970 sk->sk_state != SCTP_SS_LISTENING) 5971 goto success; 5972 5973 /* Run through the list of sockets bound to the port 5974 * (pp->port) [via the bind node list pp->owner; each 5975 * entry is a struct sock *sk2]. On each one, 5976 * we get the endpoint it describes and run through 5977 * the endpoint's list of IP (v4 or v6) addresses, 5978 * comparing each of the addresses with the address of 5979 * the socket sk. If we find a match, then that means 5980 * that this port/socket (sk) combination is already 5981 * in an endpoint. 5982 */ 5983 sk_for_each_bound(sk2, &pp->owner) { 5984 struct sctp_endpoint *ep2; 5985 ep2 = sctp_sk(sk2)->ep; 5986 5987 if (sk == sk2 || 5988 (reuse && sk2->sk_reuse && 5989 sk2->sk_state != SCTP_SS_LISTENING)) 5990 continue; 5991 5992 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 5993 sctp_sk(sk2), sctp_sk(sk))) { 5994 ret = (long)sk2; 5995 goto fail_unlock; 5996 } 5997 } 5998 5999 pr_debug("%s: found a match\n", __func__); 6000 } 6001 pp_not_found: 6002 /* If there was a hash table miss, create a new port. */ 6003 ret = 1; 6004 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 6005 goto fail_unlock; 6006 6007 /* In either case (hit or miss), make sure fastreuse is 1 only 6008 * if sk->sk_reuse is too (that is, if the caller requested 6009 * SO_REUSEADDR on this socket -sk-).
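 *
 * Illustrative userspace counterpart (not part of this file; fd and
 * addr are hypothetical): it is SO_REUSEADDR, requested before bind(),
 * that makes sk->sk_reuse non-zero here and lets fastreuse stay set:
 *
 *   int one = 1;
 *
 *   setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *   bind(fd, (struct sockaddr *)&addr, sizeof(addr));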
6010 */ 6011 if (hlist_empty(&pp->owner)) { 6012 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6013 pp->fastreuse = 1; 6014 else 6015 pp->fastreuse = 0; 6016 } else if (pp->fastreuse && 6017 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6018 pp->fastreuse = 0; 6019 6020 /* We are set, so fill up all the data in the hash table 6021 * entry, tie the socket list information with the rest of the 6022 * sockets FIXME: Blurry, NPI (ipg). 6023 */ 6024 success: 6025 if (!sctp_sk(sk)->bind_hash) { 6026 inet_sk(sk)->inet_num = snum; 6027 sk_add_bind_node(sk, &pp->owner); 6028 sctp_sk(sk)->bind_hash = pp; 6029 } 6030 ret = 0; 6031 6032 fail_unlock: 6033 sctp_spin_unlock(&head->lock); 6034 6035 fail: 6036 sctp_local_bh_enable(); 6037 return ret; 6038 } 6039 6040 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6041 * port is requested. 6042 */ 6043 static int sctp_get_port(struct sock *sk, unsigned short snum) 6044 { 6045 union sctp_addr addr; 6046 struct sctp_af *af = sctp_sk(sk)->pf->af; 6047 6048 /* Set up a dummy address struct from the sk. */ 6049 af->from_sk(&addr, sk); 6050 addr.v4.sin_port = htons(snum); 6051 6052 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6053 return !!sctp_get_port_local(sk, &addr); 6054 } 6055 6056 /* 6057 * Move a socket to LISTENING state. 6058 */ 6059 static int sctp_listen_start(struct sock *sk, int backlog) 6060 { 6061 struct sctp_sock *sp = sctp_sk(sk); 6062 struct sctp_endpoint *ep = sp->ep; 6063 struct crypto_hash *tfm = NULL; 6064 char alg[32]; 6065 6066 /* Allocate HMAC for generating cookie. */ 6067 if (!sp->hmac && sp->sctp_hmac_alg) { 6068 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6069 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6070 if (IS_ERR(tfm)) { 6071 net_info_ratelimited("failed to load transform for %s: %ld\n", 6072 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6073 return -ENOSYS; 6074 } 6075 sctp_sk(sk)->hmac = tfm; 6076 } 6077 6078 /* 6079 * If a bind() or sctp_bindx() is not called prior to a listen() 6080 * call that allows new associations to be accepted, the system 6081 * picks an ephemeral port and will choose an address set equivalent 6082 * to binding with a wildcard address. 6083 * 6084 * This is not currently spelled out in the SCTP sockets 6085 * extensions draft, but follows the practice as seen in TCP 6086 * sockets. 6087 * 6088 */ 6089 sk->sk_state = SCTP_SS_LISTENING; 6090 if (!ep->base.bind_addr.port) { 6091 if (sctp_autobind(sk)) 6092 return -EAGAIN; 6093 } else { 6094 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6095 sk->sk_state = SCTP_SS_CLOSED; 6096 return -EADDRINUSE; 6097 } 6098 } 6099 6100 sk->sk_max_ack_backlog = backlog; 6101 sctp_hash_endpoint(ep); 6102 return 0; 6103 } 6104 6105 /* 6106 * 4.1.3 / 5.1.3 listen() 6107 * 6108 * By default, new associations are not accepted for UDP style sockets. 6109 * An application uses listen() to mark a socket as being able to 6110 * accept new associations. 6111 * 6112 * On TCP style sockets, applications use listen() to ready the SCTP 6113 * endpoint for accepting inbound associations. 6114 * 6115 * On both types of endpoints a backlog of '0' disables listening. 6116 * 6117 * Move a socket to LISTENING state. 6118 */ 6119 int sctp_inet_listen(struct socket *sock, int backlog) 6120 { 6121 struct sock *sk = sock->sk; 6122 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6123 int err = -EINVAL; 6124 6125 if (unlikely(backlog < 0)) 6126 return err; 6127 6128 sctp_lock_sock(sk); 6129 6130 /* Peeled-off sockets are not allowed to listen(). 
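 *
 * Illustrative userspace view (not part of this file; sctp_peeloff()
 * is the lksctp-tools helper that issues SCTP_SOCKOPT_PEELOFF, and
 * assoc_id is a known association id):
 *
 *   int peeled = sctp_peeloff(fd, assoc_id);
 *
 *   listen(peeled, 5);     // rejected right here, fails with EINVAL
 *   listen(fd, 5);         // the original one-to-many socket may listen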
*/ 6131 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6132 goto out; 6133 6134 if (sock->state != SS_UNCONNECTED) 6135 goto out; 6136 6137 /* If backlog is zero, disable listening. */ 6138 if (!backlog) { 6139 if (sctp_sstate(sk, CLOSED)) 6140 goto out; 6141 6142 err = 0; 6143 sctp_unhash_endpoint(ep); 6144 sk->sk_state = SCTP_SS_CLOSED; 6145 if (sk->sk_reuse) 6146 sctp_sk(sk)->bind_hash->fastreuse = 1; 6147 goto out; 6148 } 6149 6150 /* If we are already listening, just update the backlog */ 6151 if (sctp_sstate(sk, LISTENING)) 6152 sk->sk_max_ack_backlog = backlog; 6153 else { 6154 err = sctp_listen_start(sk, backlog); 6155 if (err) 6156 goto out; 6157 } 6158 6159 err = 0; 6160 out: 6161 sctp_release_sock(sk); 6162 return err; 6163 } 6164 6165 /* 6166 * This function is done by modeling the current datagram_poll() and the 6167 * tcp_poll(). Note that, based on these implementations, we don't 6168 * lock the socket in this function, even though it seems that, 6169 * ideally, locking or some other mechanisms can be used to ensure 6170 * the integrity of the counters (sndbuf and wmem_alloc) used 6171 * in this place. We assume that we don't need locks either until proven 6172 * otherwise. 6173 * 6174 * Another thing to note is that we include the Async I/O support 6175 * here, again, by modeling the current TCP/UDP code. We don't have 6176 * a good way to test with it yet. 6177 */ 6178 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6179 { 6180 struct sock *sk = sock->sk; 6181 struct sctp_sock *sp = sctp_sk(sk); 6182 unsigned int mask; 6183 6184 poll_wait(file, sk_sleep(sk), wait); 6185 6186 /* A TCP-style listening socket becomes readable when the accept queue 6187 * is not empty. 6188 */ 6189 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6190 return (!list_empty(&sp->ep->asocs)) ? 6191 (POLLIN | POLLRDNORM) : 0; 6192 6193 mask = 0; 6194 6195 /* Is there any exceptional events? */ 6196 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6197 mask |= POLLERR | 6198 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 6199 if (sk->sk_shutdown & RCV_SHUTDOWN) 6200 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6201 if (sk->sk_shutdown == SHUTDOWN_MASK) 6202 mask |= POLLHUP; 6203 6204 /* Is it readable? Reconsider this code with TCP-style support. */ 6205 if (!skb_queue_empty(&sk->sk_receive_queue)) 6206 mask |= POLLIN | POLLRDNORM; 6207 6208 /* The association is either gone or not ready. */ 6209 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6210 return mask; 6211 6212 /* Is it writable? */ 6213 if (sctp_writeable(sk)) { 6214 mask |= POLLOUT | POLLWRNORM; 6215 } else { 6216 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6217 /* 6218 * Since the socket is not locked, the buffer 6219 * might be made available after the writeable check and 6220 * before the bit is set. This could cause a lost I/O 6221 * signal. tcp_poll() has a race breaker for this race 6222 * condition. Based on their implementation, we put 6223 * in the following code to cover it as well. 
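 *
 * For context, a hypothetical non-blocking sender relying on the
 * POLLOUT reported from this path might look like (userspace sketch,
 * not part of this file; sctp_fd is assumed):
 *
 *   struct pollfd pfd = { .fd = sctp_fd, .events = POLLOUT };
 *
 *   if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLOUT))
 *       ;   // retry the sendmsg() that previously returned EAGAIN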
6224 */ 6225 if (sctp_writeable(sk)) 6226 mask |= POLLOUT | POLLWRNORM; 6227 } 6228 return mask; 6229 } 6230 6231 /******************************************************************** 6232 * 2nd Level Abstractions 6233 ********************************************************************/ 6234 6235 static struct sctp_bind_bucket *sctp_bucket_create( 6236 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6237 { 6238 struct sctp_bind_bucket *pp; 6239 6240 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6241 if (pp) { 6242 SCTP_DBG_OBJCNT_INC(bind_bucket); 6243 pp->port = snum; 6244 pp->fastreuse = 0; 6245 INIT_HLIST_HEAD(&pp->owner); 6246 pp->net = net; 6247 hlist_add_head(&pp->node, &head->chain); 6248 } 6249 return pp; 6250 } 6251 6252 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6253 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6254 { 6255 if (pp && hlist_empty(&pp->owner)) { 6256 __hlist_del(&pp->node); 6257 kmem_cache_free(sctp_bucket_cachep, pp); 6258 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6259 } 6260 } 6261 6262 /* Release this socket's reference to a local port. */ 6263 static inline void __sctp_put_port(struct sock *sk) 6264 { 6265 struct sctp_bind_hashbucket *head = 6266 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6267 inet_sk(sk)->inet_num)]; 6268 struct sctp_bind_bucket *pp; 6269 6270 sctp_spin_lock(&head->lock); 6271 pp = sctp_sk(sk)->bind_hash; 6272 __sk_del_bind_node(sk); 6273 sctp_sk(sk)->bind_hash = NULL; 6274 inet_sk(sk)->inet_num = 0; 6275 sctp_bucket_destroy(pp); 6276 sctp_spin_unlock(&head->lock); 6277 } 6278 6279 void sctp_put_port(struct sock *sk) 6280 { 6281 sctp_local_bh_disable(); 6282 __sctp_put_port(sk); 6283 sctp_local_bh_enable(); 6284 } 6285 6286 /* 6287 * The system picks an ephemeral port and choose an address set equivalent 6288 * to binding with a wildcard address. 6289 * One of those addresses will be the primary address for the association. 6290 * This automatically enables the multihoming capability of SCTP. 6291 */ 6292 static int sctp_autobind(struct sock *sk) 6293 { 6294 union sctp_addr autoaddr; 6295 struct sctp_af *af; 6296 __be16 port; 6297 6298 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6299 af = sctp_sk(sk)->pf->af; 6300 6301 port = htons(inet_sk(sk)->inet_num); 6302 af->inaddr_any(&autoaddr, port); 6303 6304 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6305 } 6306 6307 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6308 * 6309 * From RFC 2292 6310 * 4.2 The cmsghdr Structure * 6311 * 6312 * When ancillary data is sent or received, any number of ancillary data 6313 * objects can be specified by the msg_control and msg_controllen members of 6314 * the msghdr structure, because each object is preceded by 6315 * a cmsghdr structure defining the object's length (the cmsg_len member). 6316 * Historically Berkeley-derived implementations have passed only one object 6317 * at a time, but this API allows multiple objects to be 6318 * passed in a single call to sendmsg() or recvmsg(). The following example 6319 * shows two ancillary data objects in a control buffer. 
6320 * 6321 * |<--------------------------- msg_controllen -------------------------->| 6322 * | | 6323 * 6324 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6325 * 6326 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6327 * | | | 6328 * 6329 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6330 * 6331 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6332 * | | | | | 6333 * 6334 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6335 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6336 * 6337 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6338 * 6339 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6340 * ^ 6341 * | 6342 * 6343 * msg_control 6344 * points here 6345 */ 6346 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6347 { 6348 struct cmsghdr *cmsg; 6349 struct msghdr *my_msg = (struct msghdr *)msg; 6350 6351 for (cmsg = CMSG_FIRSTHDR(msg); 6352 cmsg != NULL; 6353 cmsg = CMSG_NXTHDR(my_msg, cmsg)) { 6354 if (!CMSG_OK(my_msg, cmsg)) 6355 return -EINVAL; 6356 6357 /* Should we parse this header or ignore? */ 6358 if (cmsg->cmsg_level != IPPROTO_SCTP) 6359 continue; 6360 6361 /* Strictly check lengths following example in SCM code. */ 6362 switch (cmsg->cmsg_type) { 6363 case SCTP_INIT: 6364 /* SCTP Socket API Extension 6365 * 5.2.1 SCTP Initiation Structure (SCTP_INIT) 6366 * 6367 * This cmsghdr structure provides information for 6368 * initializing new SCTP associations with sendmsg(). 6369 * The SCTP_INITMSG socket option uses this same data 6370 * structure. This structure is not used for 6371 * recvmsg(). 6372 * 6373 * cmsg_level cmsg_type cmsg_data[] 6374 * ------------ ------------ ---------------------- 6375 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6376 */ 6377 if (cmsg->cmsg_len != 6378 CMSG_LEN(sizeof(struct sctp_initmsg))) 6379 return -EINVAL; 6380 cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg); 6381 break; 6382 6383 case SCTP_SNDRCV: 6384 /* SCTP Socket API Extension 6385 * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV) 6386 * 6387 * This cmsghdr structure specifies SCTP options for 6388 * sendmsg() and describes SCTP header information 6389 * about a received message through recvmsg(). 6390 * 6391 * cmsg_level cmsg_type cmsg_data[] 6392 * ------------ ------------ ---------------------- 6393 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6394 */ 6395 if (cmsg->cmsg_len != 6396 CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6397 return -EINVAL; 6398 6399 cmsgs->info = 6400 (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 6401 6402 /* Minimally, validate the sinfo_flags. */ 6403 if (cmsgs->info->sinfo_flags & 6404 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6405 SCTP_ABORT | SCTP_EOF)) 6406 return -EINVAL; 6407 break; 6408 6409 default: 6410 return -EINVAL; 6411 } 6412 } 6413 return 0; 6414 } 6415 6416 /* 6417 * Wait for a packet.. 6418 * Note: This function is the same function as in core/datagram.c 6419 * with a few modifications to make lksctp work. 6420 */ 6421 static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) 6422 { 6423 int error; 6424 DEFINE_WAIT(wait); 6425 6426 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6427 6428 /* Socket errors? */ 6429 error = sock_error(sk); 6430 if (error) 6431 goto out; 6432 6433 if (!skb_queue_empty(&sk->sk_receive_queue)) 6434 goto ready; 6435 6436 /* Socket shut down? 
*/ 6437 if (sk->sk_shutdown & RCV_SHUTDOWN) 6438 goto out; 6439 6440 /* Sequenced packets can come disconnected. If so we report the 6441 * problem. 6442 */ 6443 error = -ENOTCONN; 6444 6445 /* Is there a good reason to think that we may receive some data? */ 6446 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6447 goto out; 6448 6449 /* Handle signals. */ 6450 if (signal_pending(current)) 6451 goto interrupted; 6452 6453 /* Let another process have a go. Since we are going to sleep 6454 * anyway. Note: This may cause odd behaviors if the message 6455 * does not fit in the user's buffer, but this seems to be the 6456 * only way to honor MSG_DONTWAIT realistically. 6457 */ 6458 sctp_release_sock(sk); 6459 *timeo_p = schedule_timeout(*timeo_p); 6460 sctp_lock_sock(sk); 6461 6462 ready: 6463 finish_wait(sk_sleep(sk), &wait); 6464 return 0; 6465 6466 interrupted: 6467 error = sock_intr_errno(*timeo_p); 6468 6469 out: 6470 finish_wait(sk_sleep(sk), &wait); 6471 *err = error; 6472 return error; 6473 } 6474 6475 /* Receive a datagram. 6476 * Note: This is pretty much the same routine as in core/datagram.c 6477 * with a few changes to make lksctp work. 6478 */ 6479 static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6480 int noblock, int *err) 6481 { 6482 int error; 6483 struct sk_buff *skb; 6484 long timeo; 6485 6486 timeo = sock_rcvtimeo(sk, noblock); 6487 6488 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6489 MAX_SCHEDULE_TIMEOUT); 6490 6491 do { 6492 /* Again only user level code calls this function, 6493 * so nothing interrupt level 6494 * will suddenly eat the receive_queue. 6495 * 6496 * Look at current nfs client by the way... 6497 * However, this function was correct in any case. 8) 6498 */ 6499 if (flags & MSG_PEEK) { 6500 spin_lock_bh(&sk->sk_receive_queue.lock); 6501 skb = skb_peek(&sk->sk_receive_queue); 6502 if (skb) 6503 atomic_inc(&skb->users); 6504 spin_unlock_bh(&sk->sk_receive_queue.lock); 6505 } else { 6506 skb = skb_dequeue(&sk->sk_receive_queue); 6507 } 6508 6509 if (skb) 6510 return skb; 6511 6512 /* Caller is allowed not to check sk->sk_err before calling. */ 6513 error = sock_error(sk); 6514 if (error) 6515 goto no_packet; 6516 6517 if (sk->sk_shutdown & RCV_SHUTDOWN) 6518 break; 6519 6520 /* User doesn't want to wait. */ 6521 error = -EAGAIN; 6522 if (!timeo) 6523 goto no_packet; 6524 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6525 6526 return NULL; 6527 6528 no_packet: 6529 *err = error; 6530 return NULL; 6531 } 6532 6533 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6534 static void __sctp_write_space(struct sctp_association *asoc) 6535 { 6536 struct sock *sk = asoc->base.sk; 6537 struct socket *sock = sk->sk_socket; 6538 6539 if ((sctp_wspace(asoc) > 0) && sock) { 6540 if (waitqueue_active(&asoc->wait)) 6541 wake_up_interruptible(&asoc->wait); 6542 6543 if (sctp_writeable(sk)) { 6544 wait_queue_head_t *wq = sk_sleep(sk); 6545 6546 if (wq && waitqueue_active(wq)) 6547 wake_up_interruptible(wq); 6548 6549 /* Note that we try to include the Async I/O support 6550 * here by modeling from the current TCP/UDP code. 6551 * We have not tested with it yet. 6552 */ 6553 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6554 sock_wake_async(sock, 6555 SOCK_WAKE_SPACE, POLL_OUT); 6556 } 6557 } 6558 } 6559 6560 /* Do accounting for the sndbuf space. 6561 * Decrement the used sndbuf space of the corresponding association by the 6562 * data size which was just transmitted(freed). 
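 *
 * Worked illustration (numbers hypothetical): freeing a DATA chunk that
 * carried 1000 bytes of user data lowers asoc->sndbuf_used by
 * 1000 + sizeof(struct sk_buff) + sizeof(struct sctp_chunk), i.e. the
 * same amount that was charged when the chunk was queued for transmit.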
6563 */ 6564 static void sctp_wfree(struct sk_buff *skb) 6565 { 6566 struct sctp_association *asoc; 6567 struct sctp_chunk *chunk; 6568 struct sock *sk; 6569 6570 /* Get the saved chunk pointer. */ 6571 chunk = *((struct sctp_chunk **)(skb->cb)); 6572 asoc = chunk->asoc; 6573 sk = asoc->base.sk; 6574 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6575 sizeof(struct sk_buff) + 6576 sizeof(struct sctp_chunk); 6577 6578 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6579 6580 /* 6581 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6582 */ 6583 sk->sk_wmem_queued -= skb->truesize; 6584 sk_mem_uncharge(sk, skb->truesize); 6585 6586 sock_wfree(skb); 6587 __sctp_write_space(asoc); 6588 6589 sctp_association_put(asoc); 6590 } 6591 6592 /* Do accounting for the receive space on the socket. 6593 * Accounting for the association is done in ulpevent.c 6594 * We set this as a destructor for the cloned data skbs so that 6595 * accounting is done at the correct time. 6596 */ 6597 void sctp_sock_rfree(struct sk_buff *skb) 6598 { 6599 struct sock *sk = skb->sk; 6600 struct sctp_ulpevent *event = sctp_skb2event(skb); 6601 6602 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6603 6604 /* 6605 * Mimic the behavior of sock_rfree 6606 */ 6607 sk_mem_uncharge(sk, event->rmem_len); 6608 } 6609 6610 6611 /* Helper function to wait for space in the sndbuf. */ 6612 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6613 size_t msg_len) 6614 { 6615 struct sock *sk = asoc->base.sk; 6616 int err = 0; 6617 long current_timeo = *timeo_p; 6618 DEFINE_WAIT(wait); 6619 6620 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6621 *timeo_p, msg_len); 6622 6623 /* Increment the association's refcnt. */ 6624 sctp_association_hold(asoc); 6625 6626 /* Wait on the association specific sndbuf space. */ 6627 for (;;) { 6628 prepare_to_wait_exclusive(&asoc->wait, &wait, 6629 TASK_INTERRUPTIBLE); 6630 if (!*timeo_p) 6631 goto do_nonblock; 6632 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6633 asoc->base.dead) 6634 goto do_error; 6635 if (signal_pending(current)) 6636 goto do_interrupted; 6637 if (msg_len <= sctp_wspace(asoc)) 6638 break; 6639 6640 /* Let another process have a go. Since we are going 6641 * to sleep anyway. 6642 */ 6643 sctp_release_sock(sk); 6644 current_timeo = schedule_timeout(current_timeo); 6645 BUG_ON(sk != asoc->base.sk); 6646 sctp_lock_sock(sk); 6647 6648 *timeo_p = current_timeo; 6649 } 6650 6651 out: 6652 finish_wait(&asoc->wait, &wait); 6653 6654 /* Release the association's refcnt. */ 6655 sctp_association_put(asoc); 6656 6657 return err; 6658 6659 do_error: 6660 err = -EPIPE; 6661 goto out; 6662 6663 do_interrupted: 6664 err = sock_intr_errno(*timeo_p); 6665 goto out; 6666 6667 do_nonblock: 6668 err = -EAGAIN; 6669 goto out; 6670 } 6671 6672 void sctp_data_ready(struct sock *sk, int len) 6673 { 6674 struct socket_wq *wq; 6675 6676 rcu_read_lock(); 6677 wq = rcu_dereference(sk->sk_wq); 6678 if (wq_has_sleeper(wq)) 6679 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6680 POLLRDNORM | POLLRDBAND); 6681 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6682 rcu_read_unlock(); 6683 } 6684 6685 /* If socket sndbuf has changed, wake up all per association waiters. */ 6686 void sctp_write_space(struct sock *sk) 6687 { 6688 struct sctp_association *asoc; 6689 6690 /* Wake up the tasks in each wait queue. 
*/ 6691 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 6692 __sctp_write_space(asoc); 6693 } 6694 } 6695 6696 /* Is there any sndbuf space available on the socket? 6697 * 6698 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 6699 * associations on the same socket. For a UDP-style socket with 6700 * multiple associations, it is possible for it to be "unwriteable" 6701 * prematurely. I assume that this is acceptable because 6702 * a premature "unwriteable" is better than an accidental "writeable" which 6703 * would cause an unwanted block under certain circumstances. For the 1-1 6704 * UDP-style sockets or TCP-style sockets, this code should work. 6705 * - Daisy 6706 */ 6707 static int sctp_writeable(struct sock *sk) 6708 { 6709 int amt = 0; 6710 6711 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 6712 if (amt < 0) 6713 amt = 0; 6714 return amt; 6715 } 6716 6717 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 6718 * returns immediately with EINPROGRESS. 6719 */ 6720 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 6721 { 6722 struct sock *sk = asoc->base.sk; 6723 int err = 0; 6724 long current_timeo = *timeo_p; 6725 DEFINE_WAIT(wait); 6726 6727 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 6728 6729 /* Increment the association's refcnt. */ 6730 sctp_association_hold(asoc); 6731 6732 for (;;) { 6733 prepare_to_wait_exclusive(&asoc->wait, &wait, 6734 TASK_INTERRUPTIBLE); 6735 if (!*timeo_p) 6736 goto do_nonblock; 6737 if (sk->sk_shutdown & RCV_SHUTDOWN) 6738 break; 6739 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6740 asoc->base.dead) 6741 goto do_error; 6742 if (signal_pending(current)) 6743 goto do_interrupted; 6744 6745 if (sctp_state(asoc, ESTABLISHED)) 6746 break; 6747 6748 /* Let another process have a go. Since we are going 6749 * to sleep anyway. 6750 */ 6751 sctp_release_sock(sk); 6752 current_timeo = schedule_timeout(current_timeo); 6753 sctp_lock_sock(sk); 6754 6755 *timeo_p = current_timeo; 6756 } 6757 6758 out: 6759 finish_wait(&asoc->wait, &wait); 6760 6761 /* Release the association's refcnt. 
*/ 6762 sctp_association_put(asoc); 6763 6764 return err; 6765 6766 do_error: 6767 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 6768 err = -ETIMEDOUT; 6769 else 6770 err = -ECONNREFUSED; 6771 goto out; 6772 6773 do_interrupted: 6774 err = sock_intr_errno(*timeo_p); 6775 goto out; 6776 6777 do_nonblock: 6778 err = -EINPROGRESS; 6779 goto out; 6780 } 6781 6782 static int sctp_wait_for_accept(struct sock *sk, long timeo) 6783 { 6784 struct sctp_endpoint *ep; 6785 int err = 0; 6786 DEFINE_WAIT(wait); 6787 6788 ep = sctp_sk(sk)->ep; 6789 6790 6791 for (;;) { 6792 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 6793 TASK_INTERRUPTIBLE); 6794 6795 if (list_empty(&ep->asocs)) { 6796 sctp_release_sock(sk); 6797 timeo = schedule_timeout(timeo); 6798 sctp_lock_sock(sk); 6799 } 6800 6801 err = -EINVAL; 6802 if (!sctp_sstate(sk, LISTENING)) 6803 break; 6804 6805 err = 0; 6806 if (!list_empty(&ep->asocs)) 6807 break; 6808 6809 err = sock_intr_errno(timeo); 6810 if (signal_pending(current)) 6811 break; 6812 6813 err = -EAGAIN; 6814 if (!timeo) 6815 break; 6816 } 6817 6818 finish_wait(sk_sleep(sk), &wait); 6819 6820 return err; 6821 } 6822 6823 static void sctp_wait_for_close(struct sock *sk, long timeout) 6824 { 6825 DEFINE_WAIT(wait); 6826 6827 do { 6828 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6829 if (list_empty(&sctp_sk(sk)->ep->asocs)) 6830 break; 6831 sctp_release_sock(sk); 6832 timeout = schedule_timeout(timeout); 6833 sctp_lock_sock(sk); 6834 } while (!signal_pending(current) && timeout); 6835 6836 finish_wait(sk_sleep(sk), &wait); 6837 } 6838 6839 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 6840 { 6841 struct sk_buff *frag; 6842 6843 if (!skb->data_len) 6844 goto done; 6845 6846 /* Don't forget the fragments. */ 6847 skb_walk_frags(skb, frag) 6848 sctp_skb_set_owner_r_frag(frag, sk); 6849 6850 done: 6851 sctp_skb_set_owner_r(skb, sk); 6852 } 6853 6854 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 6855 struct sctp_association *asoc) 6856 { 6857 struct inet_sock *inet = inet_sk(sk); 6858 struct inet_sock *newinet; 6859 6860 newsk->sk_type = sk->sk_type; 6861 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6862 newsk->sk_flags = sk->sk_flags; 6863 newsk->sk_no_check = sk->sk_no_check; 6864 newsk->sk_reuse = sk->sk_reuse; 6865 6866 newsk->sk_shutdown = sk->sk_shutdown; 6867 newsk->sk_destruct = sctp_destruct_sock; 6868 newsk->sk_family = sk->sk_family; 6869 newsk->sk_protocol = IPPROTO_SCTP; 6870 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 6871 newsk->sk_sndbuf = sk->sk_sndbuf; 6872 newsk->sk_rcvbuf = sk->sk_rcvbuf; 6873 newsk->sk_lingertime = sk->sk_lingertime; 6874 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 6875 newsk->sk_sndtimeo = sk->sk_sndtimeo; 6876 6877 newinet = inet_sk(newsk); 6878 6879 /* Initialize sk's sport, dport, rcv_saddr and daddr for 6880 * getsockname() and getpeername() 6881 */ 6882 newinet->inet_sport = inet->inet_sport; 6883 newinet->inet_saddr = inet->inet_saddr; 6884 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 6885 newinet->inet_dport = htons(asoc->peer.port); 6886 newinet->pmtudisc = inet->pmtudisc; 6887 newinet->inet_id = asoc->next_tsn ^ jiffies; 6888 6889 newinet->uc_ttl = inet->uc_ttl; 6890 newinet->mc_loop = 1; 6891 newinet->mc_ttl = 1; 6892 newinet->mc_index = 0; 6893 newinet->mc_list = NULL; 6894 } 6895 6896 /* Populate the fields of the newsk from the oldsk and migrate the assoc 6897 * and its messages to the newsk. 
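 *
 * Both userspace operations that hand an association to a new socket
 * funnel into this migration: accept() on a TCP-style (SOCK_STREAM)
 * socket and the SCTP_SOCKOPT_PEELOFF getsockopt on a UDP-style one.
 * A minimal accept() sketch (illustrative, not part of this file;
 * listen_fd is assumed):
 *
 *   struct sockaddr_storage peer;
 *   socklen_t plen = sizeof(peer);
 *   int conn = accept(listen_fd, (struct sockaddr *)&peer, &plen);
 *   // conn now owns the association and any messages queued on it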
6898 */ 6899 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 6900 struct sctp_association *assoc, 6901 sctp_socket_type_t type) 6902 { 6903 struct sctp_sock *oldsp = sctp_sk(oldsk); 6904 struct sctp_sock *newsp = sctp_sk(newsk); 6905 struct sctp_bind_bucket *pp; /* hash list port iterator */ 6906 struct sctp_endpoint *newep = newsp->ep; 6907 struct sk_buff *skb, *tmp; 6908 struct sctp_ulpevent *event; 6909 struct sctp_bind_hashbucket *head; 6910 struct list_head tmplist; 6911 6912 /* Migrate socket buffer sizes and all the socket level options to the 6913 * new socket. 6914 */ 6915 newsk->sk_sndbuf = oldsk->sk_sndbuf; 6916 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 6917 /* Brute force copy old sctp opt. */ 6918 if (oldsp->do_auto_asconf) { 6919 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); 6920 inet_sk_copy_descendant(newsk, oldsk); 6921 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); 6922 } else 6923 inet_sk_copy_descendant(newsk, oldsk); 6924 6925 /* Restore the ep value that was overwritten with the above structure 6926 * copy. 6927 */ 6928 newsp->ep = newep; 6929 newsp->hmac = NULL; 6930 6931 /* Hook this new socket in to the bind_hash list. */ 6932 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 6933 inet_sk(oldsk)->inet_num)]; 6934 sctp_local_bh_disable(); 6935 sctp_spin_lock(&head->lock); 6936 pp = sctp_sk(oldsk)->bind_hash; 6937 sk_add_bind_node(newsk, &pp->owner); 6938 sctp_sk(newsk)->bind_hash = pp; 6939 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 6940 sctp_spin_unlock(&head->lock); 6941 sctp_local_bh_enable(); 6942 6943 /* Copy the bind_addr list from the original endpoint to the new 6944 * endpoint so that we can handle restarts properly 6945 */ 6946 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 6947 &oldsp->ep->base.bind_addr, GFP_KERNEL); 6948 6949 /* Move any messages in the old socket's receive queue that are for the 6950 * peeled off association to the new socket's receive queue. 6951 */ 6952 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 6953 event = sctp_skb2event(skb); 6954 if (event->asoc == assoc) { 6955 __skb_unlink(skb, &oldsk->sk_receive_queue); 6956 __skb_queue_tail(&newsk->sk_receive_queue, skb); 6957 sctp_skb_set_owner_r_frag(skb, newsk); 6958 } 6959 } 6960 6961 /* Clean up any messages pending delivery due to partial 6962 * delivery. Three cases: 6963 * 1) No partial deliver; no work. 6964 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 6965 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 6966 */ 6967 skb_queue_head_init(&newsp->pd_lobby); 6968 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 6969 6970 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 6971 struct sk_buff_head *queue; 6972 6973 /* Decide which queue to move pd_lobby skbs to. */ 6974 if (assoc->ulpq.pd_mode) { 6975 queue = &newsp->pd_lobby; 6976 } else 6977 queue = &newsk->sk_receive_queue; 6978 6979 /* Walk through the pd_lobby, looking for skbs that 6980 * need moved to the new socket. 6981 */ 6982 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 6983 event = sctp_skb2event(skb); 6984 if (event->asoc == assoc) { 6985 __skb_unlink(skb, &oldsp->pd_lobby); 6986 __skb_queue_tail(queue, skb); 6987 sctp_skb_set_owner_r_frag(skb, newsk); 6988 } 6989 } 6990 6991 /* Clear up any skbs waiting for the partial 6992 * delivery to finish. 
6993 */ 6994 if (assoc->ulpq.pd_mode) 6995 sctp_clear_pd(oldsk, NULL); 6996 6997 } 6998 6999 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 7000 sctp_skb_set_owner_r_frag(skb, newsk); 7001 7002 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 7003 sctp_skb_set_owner_r_frag(skb, newsk); 7004 7005 /* Set the type of socket to indicate that it is peeled off from the 7006 * original UDP-style socket or created with the accept() call on a 7007 * TCP-style socket.. 7008 */ 7009 newsp->type = type; 7010 7011 /* Mark the new socket "in-use" by the user so that any packets 7012 * that may arrive on the association after we've moved it are 7013 * queued to the backlog. This prevents a potential race between 7014 * backlog processing on the old socket and new-packet processing 7015 * on the new socket. 7016 * 7017 * The caller has just allocated newsk so we can guarantee that other 7018 * paths won't try to lock it and then oldsk. 7019 */ 7020 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7021 sctp_assoc_migrate(assoc, newsk); 7022 7023 /* If the association on the newsk is already closed before accept() 7024 * is called, set RCV_SHUTDOWN flag. 7025 */ 7026 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7027 newsk->sk_shutdown |= RCV_SHUTDOWN; 7028 7029 newsk->sk_state = SCTP_SS_ESTABLISHED; 7030 sctp_release_sock(newsk); 7031 } 7032 7033 7034 /* This proto struct describes the ULP interface for SCTP. */ 7035 struct proto sctp_prot = { 7036 .name = "SCTP", 7037 .owner = THIS_MODULE, 7038 .close = sctp_close, 7039 .connect = sctp_connect, 7040 .disconnect = sctp_disconnect, 7041 .accept = sctp_accept, 7042 .ioctl = sctp_ioctl, 7043 .init = sctp_init_sock, 7044 .destroy = sctp_destroy_sock, 7045 .shutdown = sctp_shutdown, 7046 .setsockopt = sctp_setsockopt, 7047 .getsockopt = sctp_getsockopt, 7048 .sendmsg = sctp_sendmsg, 7049 .recvmsg = sctp_recvmsg, 7050 .bind = sctp_bind, 7051 .backlog_rcv = sctp_backlog_rcv, 7052 .hash = sctp_hash, 7053 .unhash = sctp_unhash, 7054 .get_port = sctp_get_port, 7055 .obj_size = sizeof(struct sctp_sock), 7056 .sysctl_mem = sysctl_sctp_mem, 7057 .sysctl_rmem = sysctl_sctp_rmem, 7058 .sysctl_wmem = sysctl_sctp_wmem, 7059 .memory_pressure = &sctp_memory_pressure, 7060 .enter_memory_pressure = sctp_enter_memory_pressure, 7061 .memory_allocated = &sctp_memory_allocated, 7062 .sockets_allocated = &sctp_sockets_allocated, 7063 }; 7064 7065 #if IS_ENABLED(CONFIG_IPV6) 7066 7067 struct proto sctpv6_prot = { 7068 .name = "SCTPv6", 7069 .owner = THIS_MODULE, 7070 .close = sctp_close, 7071 .connect = sctp_connect, 7072 .disconnect = sctp_disconnect, 7073 .accept = sctp_accept, 7074 .ioctl = sctp_ioctl, 7075 .init = sctp_init_sock, 7076 .destroy = sctp_destroy_sock, 7077 .shutdown = sctp_shutdown, 7078 .setsockopt = sctp_setsockopt, 7079 .getsockopt = sctp_getsockopt, 7080 .sendmsg = sctp_sendmsg, 7081 .recvmsg = sctp_recvmsg, 7082 .bind = sctp_bind, 7083 .backlog_rcv = sctp_backlog_rcv, 7084 .hash = sctp_hash, 7085 .unhash = sctp_unhash, 7086 .get_port = sctp_get_port, 7087 .obj_size = sizeof(struct sctp6_sock), 7088 .sysctl_mem = sysctl_sctp_mem, 7089 .sysctl_rmem = sysctl_sctp_rmem, 7090 .sysctl_wmem = sysctl_sctp_wmem, 7091 .memory_pressure = &sctp_memory_pressure, 7092 .enter_memory_pressure = sctp_enter_memory_pressure, 7093 .memory_allocated = &sctp_memory_allocated, 7094 .sockets_allocated = &sctp_sockets_allocated, 7095 }; 7096 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7097
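/*
 * Example (illustrative only, not compiled as part of this file): a
 * userspace sendmsg() carrying the SCTP_SNDRCV ancillary data that
 * sctp_msghdr_parse() above accepts. The header name and helper below
 * follow the lksctp-tools userspace API and are assumptions here.
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static ssize_t send_on_stream(int fd, const void *buf, size_t len,
 *                                 uint16_t stream)
 *   {
 *       char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *       struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 *       struct msghdr msg = {
 *           .msg_iov = &iov,
 *           .msg_iovlen = 1,
 *           .msg_control = cbuf,
 *           .msg_controllen = sizeof(cbuf),
 *       };
 *       struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *       struct sctp_sndrcvinfo *sinfo;
 *
 *       memset(cbuf, 0, sizeof(cbuf));
 *       cmsg->cmsg_level = IPPROTO_SCTP;
 *       cmsg->cmsg_type = SCTP_SNDRCV;
 *       cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *
 *       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *       sinfo->sinfo_stream = stream;
 *       sinfo->sinfo_flags = SCTP_UNORDERED;
 *
 *       return sendmsg(fd, &msg, 0);
 *   }
 */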