/*
 * NET		An implementation of the SOCKET network access protocol.
 *
 * Version:	@(#)socket.c	1.1.93	18/02/95
 *
 * Authors:	Orest Zborowski, <obz@Kodak.COM>
 *		Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Anonymous	:	NOTSOCK/BADF cleanup. Error fix in
 *					shutdown()
 *		Alan Cox	:	verify_area() fixes
 *		Alan Cox	:	Removed DDI
 *		Jonathan Kamens	:	SOCK_DGRAM reconnect bug
 *		Alan Cox	:	Moved a load of checks to the very
 *					top level.
 *		Alan Cox	:	Move address structures to/from user
 *					mode above the protocol layers.
 *		Rob Janssen	:	Allow 0 length sends.
 *		Alan Cox	:	Asynchronous I/O support (cribbed from the
 *					tty drivers).
 *		Niibe Yutaka	:	Asynchronous I/O for writes (4.4BSD style)
 *		Jeff Uphoff	:	Made max number of sockets command-line
 *					configurable.
 *		Matti Aarnio	:	Made the number of sockets dynamic,
 *					to be allocated when needed, and mr.
 *					Uphoff's max is used as max to be
 *					allowed to allocate.
 *		Linus		:	Argh. removed all the socket allocation
 *					altogether: it's in the inode now.
 *		Alan Cox	:	Made sock_alloc()/sock_release() public
 *					for NetROM and future kernel nfsd type
 *					stuff.
 *		Alan Cox	:	sendmsg/recvmsg basics.
 *		Tom Dyas	:	Export net symbols.
 *		Marcin Dalecki	:	Fixed problems with CONFIG_NET="n".
 *		Alan Cox	:	Added thread locking to sys_* calls
 *					for sockets. May have errors at the
 *					moment.
 *		Kevin Buhr	:	Fixed the dumb errors in the above.
 *		Andi Kleen	:	Some small cleanups, optimizations,
 *					and fixed a copy_from_user() bug.
 *		Tigran Aivazian	:	sys_send(args) calls sys_sendto(args, NULL, 0)
 *		Tigran Aivazian	:	Made listen(2) backlog sanity checks
 *					protocol-independent
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *
 *	This module is effectively the top level interface to the BSD socket
 *	paradigm.
57 * 58 * Based upon Swansea University Computer Society NET3.039 59 */ 60 61 #include <linux/mm.h> 62 #include <linux/socket.h> 63 #include <linux/file.h> 64 #include <linux/net.h> 65 #include <linux/interrupt.h> 66 #include <linux/thread_info.h> 67 #include <linux/rcupdate.h> 68 #include <linux/netdevice.h> 69 #include <linux/proc_fs.h> 70 #include <linux/seq_file.h> 71 #include <linux/mutex.h> 72 #include <linux/if_bridge.h> 73 #include <linux/if_frad.h> 74 #include <linux/if_vlan.h> 75 #include <linux/ptp_classify.h> 76 #include <linux/init.h> 77 #include <linux/poll.h> 78 #include <linux/cache.h> 79 #include <linux/module.h> 80 #include <linux/highmem.h> 81 #include <linux/mount.h> 82 #include <linux/security.h> 83 #include <linux/syscalls.h> 84 #include <linux/compat.h> 85 #include <linux/kmod.h> 86 #include <linux/audit.h> 87 #include <linux/wireless.h> 88 #include <linux/nsproxy.h> 89 #include <linux/magic.h> 90 #include <linux/slab.h> 91 #include <linux/xattr.h> 92 93 #include <asm/uaccess.h> 94 #include <asm/unistd.h> 95 96 #include <net/compat.h> 97 #include <net/wext.h> 98 #include <net/cls_cgroup.h> 99 100 #include <net/sock.h> 101 #include <linux/netfilter.h> 102 103 #include <linux/if_tun.h> 104 #include <linux/ipv6_route.h> 105 #include <linux/route.h> 106 #include <linux/sockios.h> 107 #include <linux/atalk.h> 108 #include <net/busy_poll.h> 109 #include <linux/errqueue.h> 110 111 #ifdef CONFIG_NET_RX_BUSY_POLL 112 unsigned int sysctl_net_busy_read __read_mostly; 113 unsigned int sysctl_net_busy_poll __read_mostly; 114 #endif 115 116 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, 117 unsigned long nr_segs, loff_t pos); 118 static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, 119 unsigned long nr_segs, loff_t pos); 120 static int sock_mmap(struct file *file, struct vm_area_struct *vma); 121 122 static int sock_close(struct inode *inode, struct file *file); 123 static unsigned int sock_poll(struct file *file, 124 struct poll_table_struct *wait); 125 static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 126 #ifdef CONFIG_COMPAT 127 static long compat_sock_ioctl(struct file *file, 128 unsigned int cmd, unsigned long arg); 129 #endif 130 static int sock_fasync(int fd, struct file *filp, int on); 131 static ssize_t sock_sendpage(struct file *file, struct page *page, 132 int offset, size_t size, loff_t *ppos, int more); 133 static ssize_t sock_splice_read(struct file *file, loff_t *ppos, 134 struct pipe_inode_info *pipe, size_t len, 135 unsigned int flags); 136 137 /* 138 * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear 139 * in the operation structures but are done directly via the socketcall() multiplexor. 140 */ 141 142 static const struct file_operations socket_file_ops = { 143 .owner = THIS_MODULE, 144 .llseek = no_llseek, 145 .aio_read = sock_aio_read, 146 .aio_write = sock_aio_write, 147 .poll = sock_poll, 148 .unlocked_ioctl = sock_ioctl, 149 #ifdef CONFIG_COMPAT 150 .compat_ioctl = compat_sock_ioctl, 151 #endif 152 .mmap = sock_mmap, 153 .release = sock_close, 154 .fasync = sock_fasync, 155 .sendpage = sock_sendpage, 156 .splice_write = generic_splice_sendpage, 157 .splice_read = sock_splice_read, 158 }; 159 160 /* 161 * The protocol list. Each protocol is registered in here. 
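 *
 *	A family adds itself here with sock_register() from its module init
 *	code and removes itself with sock_unregister() on unload. A minimal
 *	sketch, with PF_FOO and foo_create() standing in for a real family:
 *
 *		static const struct net_proto_family foo_family_ops = {
 *			.family	= PF_FOO,
 *			.create	= foo_create,
 *			.owner	= THIS_MODULE,
 *		};
 *
 *		err = sock_register(&foo_family_ops);	(at module init)
 *		sock_unregister(PF_FOO);		(at module exit)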
 */

static DEFINE_SPINLOCK(net_family_lock);
static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;

/*
 *	Statistics counters of the socket lists
 */

static DEFINE_PER_CPU(int, sockets_in_use);

/*
 * Support routines.
 * Move socket addresses back and forth across the kernel/user
 * divide and look after the messy bits.
 */

/**
 * move_addr_to_kernel	-	copy a socket address into kernel space
 * @uaddr: Address in user space
 * @kaddr: Address in kernel space
 * @ulen: Length in user space
 *
 * The address is copied into kernel space. If the provided address is
 * too long an error code of -EINVAL is returned. If the copy gives
 * invalid addresses -EFAULT is returned. On a success 0 is returned.
 */

int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
{
	if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (ulen == 0)
		return 0;
	if (copy_from_user(kaddr, uaddr, ulen))
		return -EFAULT;
	return audit_sockaddr(ulen, kaddr);
}

/**
 * move_addr_to_user	-	copy an address to user space
 * @kaddr: kernel space address
 * @klen: length of address in kernel
 * @uaddr: user space address
 * @ulen: pointer to user length field
 *
 * The value pointed to by ulen on entry is the buffer length available.
 * This is overwritten with the buffer space used. -EINVAL is returned
 * if an overlong buffer is specified or a negative buffer size. -EFAULT
 * is returned if either the buffer or the length field are not
 * accessible.
 * After copying the data up to the limit the user specifies, the true
 * length of the data is written over the length limit the user
 * specified. Zero is returned for a success.
 */

static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
			     void __user *uaddr, int __user *ulen)
{
	int err;
	int len;

	BUG_ON(klen > sizeof(struct sockaddr_storage));
	err = get_user(len, ulen);
	if (err)
		return err;
	if (len > klen)
		len = klen;
	if (len < 0)
		return -EINVAL;
	if (len) {
		if (audit_sockaddr(klen, kaddr))
			return -ENOMEM;
		if (copy_to_user(uaddr, kaddr, len))
			return -EFAULT;
	}
	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	return __put_user(klen, ulen);
}

static struct kmem_cache *sock_inode_cachep __read_mostly;

static struct inode *sock_alloc_inode(struct super_block *sb)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq) {
		kmem_cache_free(sock_inode_cachep, ei);
		return NULL;
	}
	init_waitqueue_head(&wq->wait);
	wq->fasync_list = NULL;
	RCU_INIT_POINTER(ei->socket.wq, wq);

	ei->socket.state = SS_UNCONNECTED;
	ei->socket.flags = 0;
	ei->socket.ops = NULL;
	ei->socket.sk = NULL;
	ei->socket.file = NULL;

	return &ei->vfs_inode;
}

static void sock_destroy_inode(struct inode *inode)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = container_of(inode, struct socket_alloc, vfs_inode);
	wq = rcu_dereference_protected(ei->socket.wq, 1);
	kfree_rcu(wq, rcu);
	kmem_cache_free(sock_inode_cachep, ei);
}

static void init_once(void *foo)
{
	struct socket_alloc *ei = (struct socket_alloc *)foo;

	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	sock_inode_cachep = kmem_cache_create("sock_inode_cache",
					      sizeof(struct socket_alloc),
					      0,
					      (SLAB_HWCACHE_ALIGN |
					       SLAB_RECLAIM_ACCOUNT |
					       SLAB_MEM_SPREAD),
					      init_once);
	if (sock_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static const struct super_operations sockfs_ops = {
	.alloc_inode	= sock_alloc_inode,
	.destroy_inode	= sock_destroy_inode,
	.statfs		= simple_statfs,
};

/*
 * sockfs_dname() is called from d_path().
 */
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations sockfs_dentry_operations = {
	.d_dname = sockfs_dname,
};

static struct dentry *sockfs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
		&sockfs_dentry_operations, SOCKFS_MAGIC);
}

static struct vfsmount *sock_mnt __read_mostly;

static struct file_system_type sock_fs_type = {
	.name =		"sockfs",
	.mount =	sockfs_mount,
	.kill_sb =	kill_anon_super,
};

/*
 *	Obtains the first available file descriptor and sets it up for use.
 *
 *	These functions create file structures and map them into the fd space
 *	of the current process. On success they return the file descriptor,
 *	with the file struct implicitly stored in sock->file.
 *	Note that another thread may close the file descriptor before we
 *	return from this function. We rely on the fact that we do not refer
 *	to the socket after the mapping. If one day we do need it, this
 *	function will have to increment the ref. count on the file by 1.
 *
 *	In any case the returned fd MAY NOT be valid any more!
 *	This race condition is unavoidable
 *	with shared fd spaces; we cannot solve it inside the kernel,
 *	but we do take care of internal coherence.
 */

struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
	struct qstr name = { .name = "" };
	struct path path;
	struct file *file;

	if (dname) {
		name.name = dname;
		name.len = strlen(name.name);
	} else if (sock->sk) {
		name.name = sock->sk->sk_prot_creator->name;
		name.len = strlen(name.name);
	}
	path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
	if (unlikely(!path.dentry))
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(sock_mnt);

	d_instantiate(path.dentry, SOCK_INODE(sock));

	file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
		  &socket_file_ops);
	if (unlikely(IS_ERR(file))) {
		/* drop dentry, keep inode */
		ihold(path.dentry->d_inode);
		path_put(&path);
		return file;
	}

	sock->file = file;
	file->f_flags = O_RDWR | (flags & O_NONBLOCK);
	file->private_data = sock;
	return file;
}
EXPORT_SYMBOL(sock_alloc_file);

static int sock_map_fd(struct socket *sock, int flags)
{
	struct file *newfile;
	int fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
		return fd;

	newfile = sock_alloc_file(sock, flags, NULL);
	if (likely(!IS_ERR(newfile))) {
		fd_install(fd, newfile);
		return fd;
	}

	put_unused_fd(fd);
	return PTR_ERR(newfile);
}

struct socket *sock_from_file(struct file *file, int *err)
{
	if (file->f_op == &socket_file_ops)
		return file->private_data;	/* set in sock_map_fd */

	*err = -ENOTSOCK;
	return NULL;
}
EXPORT_SYMBOL(sock_from_file);

/**
 * sockfd_lookup - Go from a file number to its socket slot
 * @fd: file handle
 * @err: pointer to an error code return
 *
 * The file handle passed in is locked and the socket it is bound
 * to is returned. If an error occurs the err pointer is overwritten
 * with a negative errno code and NULL is returned. The function checks
 * for both invalid handles and passing a handle which is not a socket.
 *
 * On a success the socket object pointer is returned.
430 */ 431 432 struct socket *sockfd_lookup(int fd, int *err) 433 { 434 struct file *file; 435 struct socket *sock; 436 437 file = fget(fd); 438 if (!file) { 439 *err = -EBADF; 440 return NULL; 441 } 442 443 sock = sock_from_file(file, err); 444 if (!sock) 445 fput(file); 446 return sock; 447 } 448 EXPORT_SYMBOL(sockfd_lookup); 449 450 static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) 451 { 452 struct fd f = fdget(fd); 453 struct socket *sock; 454 455 *err = -EBADF; 456 if (f.file) { 457 sock = sock_from_file(f.file, err); 458 if (likely(sock)) { 459 *fput_needed = f.flags; 460 return sock; 461 } 462 fdput(f); 463 } 464 return NULL; 465 } 466 467 #define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname" 468 #define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX) 469 #define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1) 470 static ssize_t sockfs_getxattr(struct dentry *dentry, 471 const char *name, void *value, size_t size) 472 { 473 const char *proto_name; 474 size_t proto_size; 475 int error; 476 477 error = -ENODATA; 478 if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) { 479 proto_name = dentry->d_name.name; 480 proto_size = strlen(proto_name); 481 482 if (value) { 483 error = -ERANGE; 484 if (proto_size + 1 > size) 485 goto out; 486 487 strncpy(value, proto_name, proto_size + 1); 488 } 489 error = proto_size + 1; 490 } 491 492 out: 493 return error; 494 } 495 496 static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer, 497 size_t size) 498 { 499 ssize_t len; 500 ssize_t used = 0; 501 502 len = security_inode_listsecurity(dentry->d_inode, buffer, size); 503 if (len < 0) 504 return len; 505 used += len; 506 if (buffer) { 507 if (size < used) 508 return -ERANGE; 509 buffer += len; 510 } 511 512 len = (XATTR_NAME_SOCKPROTONAME_LEN + 1); 513 used += len; 514 if (buffer) { 515 if (size < used) 516 return -ERANGE; 517 memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len); 518 buffer += len; 519 } 520 521 return used; 522 } 523 524 static const struct inode_operations sockfs_inode_ops = { 525 .getxattr = sockfs_getxattr, 526 .listxattr = sockfs_listxattr, 527 }; 528 529 /** 530 * sock_alloc - allocate a socket 531 * 532 * Allocate a new inode and socket object. The two are bound together 533 * and initialised. The socket is then returned. If we are out of inodes 534 * NULL is returned. 535 */ 536 537 static struct socket *sock_alloc(void) 538 { 539 struct inode *inode; 540 struct socket *sock; 541 542 inode = new_inode_pseudo(sock_mnt->mnt_sb); 543 if (!inode) 544 return NULL; 545 546 sock = SOCKET_I(inode); 547 548 kmemcheck_annotate_bitfield(sock, type); 549 inode->i_ino = get_next_ino(); 550 inode->i_mode = S_IFSOCK | S_IRWXUGO; 551 inode->i_uid = current_fsuid(); 552 inode->i_gid = current_fsgid(); 553 inode->i_op = &sockfs_inode_ops; 554 555 this_cpu_add(sockets_in_use, 1); 556 return sock; 557 } 558 559 /** 560 * sock_release - close a socket 561 * @sock: socket to close 562 * 563 * The socket is released from the protocol stack if it has a release 564 * callback, and the inode is then released if the socket is bound to 565 * an inode not a file. 
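 *
 *	A minimal in-kernel usage sketch (illustrative only), pairing this
 *	with sock_create_kern(), which is defined later in this file:
 *
 *		struct socket *sock;
 *		int err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
 *		if (err)
 *			return err;
 *		...
 *		sock_release(sock);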
566 */ 567 568 void sock_release(struct socket *sock) 569 { 570 if (sock->ops) { 571 struct module *owner = sock->ops->owner; 572 573 sock->ops->release(sock); 574 sock->ops = NULL; 575 module_put(owner); 576 } 577 578 if (rcu_dereference_protected(sock->wq, 1)->fasync_list) 579 pr_err("%s: fasync list not empty!\n", __func__); 580 581 if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags)) 582 return; 583 584 this_cpu_sub(sockets_in_use, 1); 585 if (!sock->file) { 586 iput(SOCK_INODE(sock)); 587 return; 588 } 589 sock->file = NULL; 590 } 591 EXPORT_SYMBOL(sock_release); 592 593 void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) 594 { 595 u8 flags = *tx_flags; 596 597 if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE) 598 flags |= SKBTX_HW_TSTAMP; 599 600 if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SOFTWARE) 601 flags |= SKBTX_SW_TSTAMP; 602 603 if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED) 604 flags |= SKBTX_SCHED_TSTAMP; 605 606 if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK) 607 flags |= SKBTX_ACK_TSTAMP; 608 609 *tx_flags = flags; 610 } 611 EXPORT_SYMBOL(__sock_tx_timestamp); 612 613 static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, 614 struct msghdr *msg, size_t size) 615 { 616 struct sock_iocb *si = kiocb_to_siocb(iocb); 617 618 si->sock = sock; 619 si->scm = NULL; 620 si->msg = msg; 621 si->size = size; 622 623 return sock->ops->sendmsg(iocb, sock, msg, size); 624 } 625 626 static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, 627 struct msghdr *msg, size_t size) 628 { 629 int err = security_socket_sendmsg(sock, msg, size); 630 631 return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); 632 } 633 634 static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg, 635 size_t size, bool nosec) 636 { 637 struct kiocb iocb; 638 struct sock_iocb siocb; 639 int ret; 640 641 init_sync_kiocb(&iocb, NULL); 642 iocb.private = &siocb; 643 ret = nosec ? __sock_sendmsg_nosec(&iocb, sock, msg, size) : 644 __sock_sendmsg(&iocb, sock, msg, size); 645 if (-EIOCBQUEUED == ret) 646 ret = wait_on_sync_kiocb(&iocb); 647 return ret; 648 } 649 650 int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) 651 { 652 return do_sock_sendmsg(sock, msg, size, false); 653 } 654 EXPORT_SYMBOL(sock_sendmsg); 655 656 static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) 657 { 658 return do_sock_sendmsg(sock, msg, size, true); 659 } 660 661 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 662 struct kvec *vec, size_t num, size_t size) 663 { 664 mm_segment_t oldfs = get_fs(); 665 int result; 666 667 set_fs(KERNEL_DS); 668 /* 669 * the following is safe, since for compiler definitions of kvec and 670 * iovec are identical, yielding the same in-core layout and alignment 671 */ 672 iov_iter_init(&msg->msg_iter, WRITE, (struct iovec *)vec, num, size); 673 result = sock_sendmsg(sock, msg, size); 674 set_fs(oldfs); 675 return result; 676 } 677 EXPORT_SYMBOL(kernel_sendmsg); 678 679 /* 680 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) 681 */ 682 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, 683 struct sk_buff *skb) 684 { 685 int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); 686 struct scm_timestamping tss; 687 int empty = 1; 688 struct skb_shared_hwtstamps *shhwtstamps = 689 skb_hwtstamps(skb); 690 691 /* Race occurred between timestamp enabling and packet 692 receiving. Fill in the current time for now. 
*/ 693 if (need_software_tstamp && skb->tstamp.tv64 == 0) 694 __net_timestamp(skb); 695 696 if (need_software_tstamp) { 697 if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { 698 struct timeval tv; 699 skb_get_timestamp(skb, &tv); 700 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, 701 sizeof(tv), &tv); 702 } else { 703 struct timespec ts; 704 skb_get_timestampns(skb, &ts); 705 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, 706 sizeof(ts), &ts); 707 } 708 } 709 710 memset(&tss, 0, sizeof(tss)); 711 if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) && 712 ktime_to_timespec_cond(skb->tstamp, tss.ts + 0)) 713 empty = 0; 714 if (shhwtstamps && 715 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) && 716 ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) 717 empty = 0; 718 if (!empty) 719 put_cmsg(msg, SOL_SOCKET, 720 SCM_TIMESTAMPING, sizeof(tss), &tss); 721 } 722 EXPORT_SYMBOL_GPL(__sock_recv_timestamp); 723 724 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, 725 struct sk_buff *skb) 726 { 727 int ack; 728 729 if (!sock_flag(sk, SOCK_WIFI_STATUS)) 730 return; 731 if (!skb->wifi_acked_valid) 732 return; 733 734 ack = skb->wifi_acked; 735 736 put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack); 737 } 738 EXPORT_SYMBOL_GPL(__sock_recv_wifi_status); 739 740 static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, 741 struct sk_buff *skb) 742 { 743 if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) 744 put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, 745 sizeof(__u32), &skb->dropcount); 746 } 747 748 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 749 struct sk_buff *skb) 750 { 751 sock_recv_timestamp(msg, sk, skb); 752 sock_recv_drops(msg, sk, skb); 753 } 754 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); 755 756 static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, 757 struct msghdr *msg, size_t size, int flags) 758 { 759 struct sock_iocb *si = kiocb_to_siocb(iocb); 760 761 si->sock = sock; 762 si->scm = NULL; 763 si->msg = msg; 764 si->size = size; 765 si->flags = flags; 766 767 return sock->ops->recvmsg(iocb, sock, msg, size, flags); 768 } 769 770 static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, 771 struct msghdr *msg, size_t size, int flags) 772 { 773 int err = security_socket_recvmsg(sock, msg, size, flags); 774 775 return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); 776 } 777 778 int sock_recvmsg(struct socket *sock, struct msghdr *msg, 779 size_t size, int flags) 780 { 781 struct kiocb iocb; 782 struct sock_iocb siocb; 783 int ret; 784 785 init_sync_kiocb(&iocb, NULL); 786 iocb.private = &siocb; 787 ret = __sock_recvmsg(&iocb, sock, msg, size, flags); 788 if (-EIOCBQUEUED == ret) 789 ret = wait_on_sync_kiocb(&iocb); 790 return ret; 791 } 792 EXPORT_SYMBOL(sock_recvmsg); 793 794 static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, 795 size_t size, int flags) 796 { 797 struct kiocb iocb; 798 struct sock_iocb siocb; 799 int ret; 800 801 init_sync_kiocb(&iocb, NULL); 802 iocb.private = &siocb; 803 ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); 804 if (-EIOCBQUEUED == ret) 805 ret = wait_on_sync_kiocb(&iocb); 806 return ret; 807 } 808 809 /** 810 * kernel_recvmsg - Receive a message from a socket (kernel space) 811 * @sock: The socket to receive the message from 812 * @msg: Received message 813 * @vec: Input s/g array for message data 814 * @num: Size of input s/g array 815 * @size: Number of bytes to read 816 * @flags: Message flags (MSG_DONTWAIT, etc...) 
817 * 818 * On return the msg structure contains the scatter/gather array passed in the 819 * vec argument. The array is modified so that it consists of the unfilled 820 * portion of the original array. 821 * 822 * The returned value is the total number of bytes received, or an error. 823 */ 824 int kernel_recvmsg(struct socket *sock, struct msghdr *msg, 825 struct kvec *vec, size_t num, size_t size, int flags) 826 { 827 mm_segment_t oldfs = get_fs(); 828 int result; 829 830 set_fs(KERNEL_DS); 831 /* 832 * the following is safe, since for compiler definitions of kvec and 833 * iovec are identical, yielding the same in-core layout and alignment 834 */ 835 iov_iter_init(&msg->msg_iter, READ, (struct iovec *)vec, num, size); 836 result = sock_recvmsg(sock, msg, size, flags); 837 set_fs(oldfs); 838 return result; 839 } 840 EXPORT_SYMBOL(kernel_recvmsg); 841 842 static ssize_t sock_sendpage(struct file *file, struct page *page, 843 int offset, size_t size, loff_t *ppos, int more) 844 { 845 struct socket *sock; 846 int flags; 847 848 sock = file->private_data; 849 850 flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; 851 /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ 852 flags |= more; 853 854 return kernel_sendpage(sock, page, offset, size, flags); 855 } 856 857 static ssize_t sock_splice_read(struct file *file, loff_t *ppos, 858 struct pipe_inode_info *pipe, size_t len, 859 unsigned int flags) 860 { 861 struct socket *sock = file->private_data; 862 863 if (unlikely(!sock->ops->splice_read)) 864 return -EINVAL; 865 866 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 867 } 868 869 static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, 870 struct sock_iocb *siocb) 871 { 872 if (!is_sync_kiocb(iocb)) 873 BUG(); 874 875 siocb->kiocb = iocb; 876 iocb->private = siocb; 877 return siocb; 878 } 879 880 static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, 881 struct file *file, const struct iovec *iov, 882 unsigned long nr_segs) 883 { 884 struct socket *sock = file->private_data; 885 size_t size = 0; 886 int i; 887 888 for (i = 0; i < nr_segs; i++) 889 size += iov[i].iov_len; 890 891 msg->msg_name = NULL; 892 msg->msg_namelen = 0; 893 msg->msg_control = NULL; 894 msg->msg_controllen = 0; 895 iov_iter_init(&msg->msg_iter, READ, iov, nr_segs, size); 896 msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; 897 898 return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); 899 } 900 901 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, 902 unsigned long nr_segs, loff_t pos) 903 { 904 struct sock_iocb siocb, *x; 905 906 if (pos != 0) 907 return -ESPIPE; 908 909 if (iocb->ki_nbytes == 0) /* Match SYS5 behaviour */ 910 return 0; 911 912 913 x = alloc_sock_iocb(iocb, &siocb); 914 if (!x) 915 return -ENOMEM; 916 return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); 917 } 918 919 static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, 920 struct file *file, const struct iovec *iov, 921 unsigned long nr_segs) 922 { 923 struct socket *sock = file->private_data; 924 size_t size = 0; 925 int i; 926 927 for (i = 0; i < nr_segs; i++) 928 size += iov[i].iov_len; 929 930 msg->msg_name = NULL; 931 msg->msg_namelen = 0; 932 msg->msg_control = NULL; 933 msg->msg_controllen = 0; 934 iov_iter_init(&msg->msg_iter, WRITE, iov, nr_segs, size); 935 msg->msg_flags = (file->f_flags & O_NONBLOCK) ? 
MSG_DONTWAIT : 0; 936 if (sock->type == SOCK_SEQPACKET) 937 msg->msg_flags |= MSG_EOR; 938 939 return __sock_sendmsg(iocb, sock, msg, size); 940 } 941 942 static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, 943 unsigned long nr_segs, loff_t pos) 944 { 945 struct sock_iocb siocb, *x; 946 947 if (pos != 0) 948 return -ESPIPE; 949 950 x = alloc_sock_iocb(iocb, &siocb); 951 if (!x) 952 return -ENOMEM; 953 954 return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); 955 } 956 957 /* 958 * Atomic setting of ioctl hooks to avoid race 959 * with module unload. 960 */ 961 962 static DEFINE_MUTEX(br_ioctl_mutex); 963 static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); 964 965 void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) 966 { 967 mutex_lock(&br_ioctl_mutex); 968 br_ioctl_hook = hook; 969 mutex_unlock(&br_ioctl_mutex); 970 } 971 EXPORT_SYMBOL(brioctl_set); 972 973 static DEFINE_MUTEX(vlan_ioctl_mutex); 974 static int (*vlan_ioctl_hook) (struct net *, void __user *arg); 975 976 void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) 977 { 978 mutex_lock(&vlan_ioctl_mutex); 979 vlan_ioctl_hook = hook; 980 mutex_unlock(&vlan_ioctl_mutex); 981 } 982 EXPORT_SYMBOL(vlan_ioctl_set); 983 984 static DEFINE_MUTEX(dlci_ioctl_mutex); 985 static int (*dlci_ioctl_hook) (unsigned int, void __user *); 986 987 void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) 988 { 989 mutex_lock(&dlci_ioctl_mutex); 990 dlci_ioctl_hook = hook; 991 mutex_unlock(&dlci_ioctl_mutex); 992 } 993 EXPORT_SYMBOL(dlci_ioctl_set); 994 995 static long sock_do_ioctl(struct net *net, struct socket *sock, 996 unsigned int cmd, unsigned long arg) 997 { 998 int err; 999 void __user *argp = (void __user *)arg; 1000 1001 err = sock->ops->ioctl(sock, cmd, arg); 1002 1003 /* 1004 * If this ioctl is unknown try to hand it down 1005 * to the NIC driver. 1006 */ 1007 if (err == -ENOIOCTLCMD) 1008 err = dev_ioctl(net, cmd, argp); 1009 1010 return err; 1011 } 1012 1013 /* 1014 * With an ioctl, arg may well be a user mode pointer, but we don't know 1015 * what to do with it - that's up to the protocol still. 
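 *
 *	For example (illustrative): a request such as SIOCGIFINDEX is normally
 *	not understood by the protocol's ->ioctl handler, which returns
 *	-ENOIOCTLCMD, and sock_do_ioctl() then hands the request on to
 *	dev_ioctl().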
1016 */ 1017 1018 static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) 1019 { 1020 struct socket *sock; 1021 struct sock *sk; 1022 void __user *argp = (void __user *)arg; 1023 int pid, err; 1024 struct net *net; 1025 1026 sock = file->private_data; 1027 sk = sock->sk; 1028 net = sock_net(sk); 1029 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { 1030 err = dev_ioctl(net, cmd, argp); 1031 } else 1032 #ifdef CONFIG_WEXT_CORE 1033 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { 1034 err = dev_ioctl(net, cmd, argp); 1035 } else 1036 #endif 1037 switch (cmd) { 1038 case FIOSETOWN: 1039 case SIOCSPGRP: 1040 err = -EFAULT; 1041 if (get_user(pid, (int __user *)argp)) 1042 break; 1043 f_setown(sock->file, pid, 1); 1044 err = 0; 1045 break; 1046 case FIOGETOWN: 1047 case SIOCGPGRP: 1048 err = put_user(f_getown(sock->file), 1049 (int __user *)argp); 1050 break; 1051 case SIOCGIFBR: 1052 case SIOCSIFBR: 1053 case SIOCBRADDBR: 1054 case SIOCBRDELBR: 1055 err = -ENOPKG; 1056 if (!br_ioctl_hook) 1057 request_module("bridge"); 1058 1059 mutex_lock(&br_ioctl_mutex); 1060 if (br_ioctl_hook) 1061 err = br_ioctl_hook(net, cmd, argp); 1062 mutex_unlock(&br_ioctl_mutex); 1063 break; 1064 case SIOCGIFVLAN: 1065 case SIOCSIFVLAN: 1066 err = -ENOPKG; 1067 if (!vlan_ioctl_hook) 1068 request_module("8021q"); 1069 1070 mutex_lock(&vlan_ioctl_mutex); 1071 if (vlan_ioctl_hook) 1072 err = vlan_ioctl_hook(net, argp); 1073 mutex_unlock(&vlan_ioctl_mutex); 1074 break; 1075 case SIOCADDDLCI: 1076 case SIOCDELDLCI: 1077 err = -ENOPKG; 1078 if (!dlci_ioctl_hook) 1079 request_module("dlci"); 1080 1081 mutex_lock(&dlci_ioctl_mutex); 1082 if (dlci_ioctl_hook) 1083 err = dlci_ioctl_hook(cmd, argp); 1084 mutex_unlock(&dlci_ioctl_mutex); 1085 break; 1086 default: 1087 err = sock_do_ioctl(net, sock, cmd, arg); 1088 break; 1089 } 1090 return err; 1091 } 1092 1093 int sock_create_lite(int family, int type, int protocol, struct socket **res) 1094 { 1095 int err; 1096 struct socket *sock = NULL; 1097 1098 err = security_socket_create(family, type, protocol, 1); 1099 if (err) 1100 goto out; 1101 1102 sock = sock_alloc(); 1103 if (!sock) { 1104 err = -ENOMEM; 1105 goto out; 1106 } 1107 1108 sock->type = type; 1109 err = security_socket_post_create(sock, family, type, protocol, 1); 1110 if (err) 1111 goto out_release; 1112 1113 out: 1114 *res = sock; 1115 return err; 1116 out_release: 1117 sock_release(sock); 1118 sock = NULL; 1119 goto out; 1120 } 1121 EXPORT_SYMBOL(sock_create_lite); 1122 1123 /* No kernel lock held - perfect */ 1124 static unsigned int sock_poll(struct file *file, poll_table *wait) 1125 { 1126 unsigned int busy_flag = 0; 1127 struct socket *sock; 1128 1129 /* 1130 * We can't return errors to poll, so it's either yes or no. 
1131 */ 1132 sock = file->private_data; 1133 1134 if (sk_can_busy_loop(sock->sk)) { 1135 /* this socket can poll_ll so tell the system call */ 1136 busy_flag = POLL_BUSY_LOOP; 1137 1138 /* once, only if requested by syscall */ 1139 if (wait && (wait->_key & POLL_BUSY_LOOP)) 1140 sk_busy_loop(sock->sk, 1); 1141 } 1142 1143 return busy_flag | sock->ops->poll(file, sock, wait); 1144 } 1145 1146 static int sock_mmap(struct file *file, struct vm_area_struct *vma) 1147 { 1148 struct socket *sock = file->private_data; 1149 1150 return sock->ops->mmap(file, sock, vma); 1151 } 1152 1153 static int sock_close(struct inode *inode, struct file *filp) 1154 { 1155 sock_release(SOCKET_I(inode)); 1156 return 0; 1157 } 1158 1159 /* 1160 * Update the socket async list 1161 * 1162 * Fasync_list locking strategy. 1163 * 1164 * 1. fasync_list is modified only under process context socket lock 1165 * i.e. under semaphore. 1166 * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) 1167 * or under socket lock 1168 */ 1169 1170 static int sock_fasync(int fd, struct file *filp, int on) 1171 { 1172 struct socket *sock = filp->private_data; 1173 struct sock *sk = sock->sk; 1174 struct socket_wq *wq; 1175 1176 if (sk == NULL) 1177 return -EINVAL; 1178 1179 lock_sock(sk); 1180 wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); 1181 fasync_helper(fd, filp, on, &wq->fasync_list); 1182 1183 if (!wq->fasync_list) 1184 sock_reset_flag(sk, SOCK_FASYNC); 1185 else 1186 sock_set_flag(sk, SOCK_FASYNC); 1187 1188 release_sock(sk); 1189 return 0; 1190 } 1191 1192 /* This function may be called only under socket lock or callback_lock or rcu_lock */ 1193 1194 int sock_wake_async(struct socket *sock, int how, int band) 1195 { 1196 struct socket_wq *wq; 1197 1198 if (!sock) 1199 return -1; 1200 rcu_read_lock(); 1201 wq = rcu_dereference(sock->wq); 1202 if (!wq || !wq->fasync_list) { 1203 rcu_read_unlock(); 1204 return -1; 1205 } 1206 switch (how) { 1207 case SOCK_WAKE_WAITD: 1208 if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) 1209 break; 1210 goto call_kill; 1211 case SOCK_WAKE_SPACE: 1212 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) 1213 break; 1214 /* fall through */ 1215 case SOCK_WAKE_IO: 1216 call_kill: 1217 kill_fasync(&wq->fasync_list, SIGIO, band); 1218 break; 1219 case SOCK_WAKE_URG: 1220 kill_fasync(&wq->fasync_list, SIGURG, band); 1221 } 1222 rcu_read_unlock(); 1223 return 0; 1224 } 1225 EXPORT_SYMBOL(sock_wake_async); 1226 1227 int __sock_create(struct net *net, int family, int type, int protocol, 1228 struct socket **res, int kern) 1229 { 1230 int err; 1231 struct socket *sock; 1232 const struct net_proto_family *pf; 1233 1234 /* 1235 * Check protocol is in range 1236 */ 1237 if (family < 0 || family >= NPROTO) 1238 return -EAFNOSUPPORT; 1239 if (type < 0 || type >= SOCK_MAX) 1240 return -EINVAL; 1241 1242 /* Compatibility. 1243 1244 This uglymoron is moved from INET layer to here to avoid 1245 deadlock in module load. 1246 */ 1247 if (family == PF_INET && type == SOCK_PACKET) { 1248 static int warned; 1249 if (!warned) { 1250 warned = 1; 1251 pr_info("%s uses obsolete (PF_INET,SOCK_PACKET)\n", 1252 current->comm); 1253 } 1254 family = PF_PACKET; 1255 } 1256 1257 err = security_socket_create(family, type, protocol, kern); 1258 if (err) 1259 return err; 1260 1261 /* 1262 * Allocate the socket and allow the family to set things up. if 1263 * the protocol is 0, the family is instructed to select an appropriate 1264 * default. 
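	 *	(For example, a plain socket(AF_INET, SOCK_STREAM, 0) call ends
	 *	up with IPPROTO_TCP selected by the address family.)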
	 */
	sock = sock_alloc();
	if (!sock) {
		net_warn_ratelimited("socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but it's the
				   closest posix thing */
	}

	sock->type = type;

#ifdef CONFIG_MODULES
	/* Attempt to load a protocol module if the find failed.
	 *
	 * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
	 * requested real, full-featured networking support upon configuration.
	 * Otherwise module support will break!
	 */
	if (rcu_access_pointer(net_families[family]) == NULL)
		request_module("net-pf-%d", family);
#endif

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	err = -EAFNOSUPPORT;
	if (!pf)
		goto out_release;

	/*
	 * We will call the ->create function, that possibly is in a loadable
	 * module, so we have to bump that loadable module refcnt first.
	 */
	if (!try_module_get(pf->owner))
		goto out_release;

	/* Now protected by module ref count */
	rcu_read_unlock();

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_module_put;

	/*
	 * Now to bump the refcnt of the [loadable] module that owns this
	 * socket at sock_release time we decrement its refcnt.
	 */
	if (!try_module_get(sock->ops->owner))
		goto out_module_busy;

	/*
	 * Now that we're done with the ->create function, the [loadable]
	 * module can have its refcnt decremented
	 */
	module_put(pf->owner);
	err = security_socket_post_create(sock, family, type, protocol, kern);
	if (err)
		goto out_sock_release;
	*res = sock;

	return 0;

out_module_busy:
	err = -EAFNOSUPPORT;
out_module_put:
	sock->ops = NULL;
	module_put(pf->owner);
out_sock_release:
	sock_release(sock);
	return err;

out_release:
	rcu_read_unlock();
	goto out_sock_release;
}
EXPORT_SYMBOL(__sock_create);

int sock_create(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
EXPORT_SYMBOL(sock_create);

int sock_create_kern(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(&init_net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);

SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
	int retval;
	struct socket *sock;
	int flags;

	/* Check the SOCK_* constants for consistency.  */
	BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);

	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		goto out;

	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
	if (retval < 0)
		goto out_release;

out:
	/* It may already be another descriptor 8) Not a kernel problem.
*/ 1382 return retval; 1383 1384 out_release: 1385 sock_release(sock); 1386 return retval; 1387 } 1388 1389 /* 1390 * Create a pair of connected sockets. 1391 */ 1392 1393 SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, 1394 int __user *, usockvec) 1395 { 1396 struct socket *sock1, *sock2; 1397 int fd1, fd2, err; 1398 struct file *newfile1, *newfile2; 1399 int flags; 1400 1401 flags = type & ~SOCK_TYPE_MASK; 1402 if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) 1403 return -EINVAL; 1404 type &= SOCK_TYPE_MASK; 1405 1406 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) 1407 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; 1408 1409 /* 1410 * Obtain the first socket and check if the underlying protocol 1411 * supports the socketpair call. 1412 */ 1413 1414 err = sock_create(family, type, protocol, &sock1); 1415 if (err < 0) 1416 goto out; 1417 1418 err = sock_create(family, type, protocol, &sock2); 1419 if (err < 0) 1420 goto out_release_1; 1421 1422 err = sock1->ops->socketpair(sock1, sock2); 1423 if (err < 0) 1424 goto out_release_both; 1425 1426 fd1 = get_unused_fd_flags(flags); 1427 if (unlikely(fd1 < 0)) { 1428 err = fd1; 1429 goto out_release_both; 1430 } 1431 1432 fd2 = get_unused_fd_flags(flags); 1433 if (unlikely(fd2 < 0)) { 1434 err = fd2; 1435 goto out_put_unused_1; 1436 } 1437 1438 newfile1 = sock_alloc_file(sock1, flags, NULL); 1439 if (unlikely(IS_ERR(newfile1))) { 1440 err = PTR_ERR(newfile1); 1441 goto out_put_unused_both; 1442 } 1443 1444 newfile2 = sock_alloc_file(sock2, flags, NULL); 1445 if (IS_ERR(newfile2)) { 1446 err = PTR_ERR(newfile2); 1447 goto out_fput_1; 1448 } 1449 1450 err = put_user(fd1, &usockvec[0]); 1451 if (err) 1452 goto out_fput_both; 1453 1454 err = put_user(fd2, &usockvec[1]); 1455 if (err) 1456 goto out_fput_both; 1457 1458 audit_fd_pair(fd1, fd2); 1459 1460 fd_install(fd1, newfile1); 1461 fd_install(fd2, newfile2); 1462 /* fd1 and fd2 may be already another descriptors. 1463 * Not kernel problem. 1464 */ 1465 1466 return 0; 1467 1468 out_fput_both: 1469 fput(newfile2); 1470 fput(newfile1); 1471 put_unused_fd(fd2); 1472 put_unused_fd(fd1); 1473 goto out; 1474 1475 out_fput_1: 1476 fput(newfile1); 1477 put_unused_fd(fd2); 1478 put_unused_fd(fd1); 1479 sock_release(sock2); 1480 goto out; 1481 1482 out_put_unused_both: 1483 put_unused_fd(fd2); 1484 out_put_unused_1: 1485 put_unused_fd(fd1); 1486 out_release_both: 1487 sock_release(sock2); 1488 out_release_1: 1489 sock_release(sock1); 1490 out: 1491 return err; 1492 } 1493 1494 /* 1495 * Bind a name to a socket. Nothing much to do here since it's 1496 * the protocol's responsibility to handle the local address. 1497 * 1498 * We move the socket address to kernel space before we call 1499 * the protocol layer (having also checked the address is ok). 1500 */ 1501 1502 SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) 1503 { 1504 struct socket *sock; 1505 struct sockaddr_storage address; 1506 int err, fput_needed; 1507 1508 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1509 if (sock) { 1510 err = move_addr_to_kernel(umyaddr, addrlen, &address); 1511 if (err >= 0) { 1512 err = security_socket_bind(sock, 1513 (struct sockaddr *)&address, 1514 addrlen); 1515 if (!err) 1516 err = sock->ops->bind(sock, 1517 (struct sockaddr *) 1518 &address, addrlen); 1519 } 1520 fput_light(sock->file, fput_needed); 1521 } 1522 return err; 1523 } 1524 1525 /* 1526 * Perform a listen. 
Basically, we allow the protocol to do anything
 *	necessary for a listen, and if that works, we mark the socket as
 *	ready for listening.
 */

SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned int)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	For accept, we attempt to create a new socket, set up the link
 *	with the client, wake up the client, then return the new
 *	connected fd. We collect the address of the connector in kernel
 *	space and move it to user at the very end. This is unclean because
 *	we open the socket then return an error.
 *
 *	1003.1g adds the ability for recvmsg() to query connection pending
 *	status. We need to add that support in a way that's clean when we
 *	restructure accept also.
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out_put;

	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = get_unused_fd_flags(flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}
	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
	if (unlikely(IS_ERR(newfile))) {
		err = PTR_ERR(newfile);
		put_unused_fd(newfd);
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user(&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept() unlike other OSes.
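	 * For example, O_NONBLOCK set on the listening descriptor is not
	 * copied to the accepted socket; a caller that wants a non-blocking
	 * accepted socket must set it again with fcntl() or pass SOCK_NONBLOCK
	 * in the accept4() flags.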
*/ 1631 1632 fd_install(newfd, newfile); 1633 err = newfd; 1634 1635 out_put: 1636 fput_light(sock->file, fput_needed); 1637 out: 1638 return err; 1639 out_fd: 1640 fput(newfile); 1641 put_unused_fd(newfd); 1642 goto out_put; 1643 } 1644 1645 SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr, 1646 int __user *, upeer_addrlen) 1647 { 1648 return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); 1649 } 1650 1651 /* 1652 * Attempt to connect to a socket with the server address. The address 1653 * is in user space so we verify it is OK and move it to kernel space. 1654 * 1655 * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to 1656 * break bindings 1657 * 1658 * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and 1659 * other SEQPACKET protocols that take time to connect() as it doesn't 1660 * include the -EINPROGRESS status for such sockets. 1661 */ 1662 1663 SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, 1664 int, addrlen) 1665 { 1666 struct socket *sock; 1667 struct sockaddr_storage address; 1668 int err, fput_needed; 1669 1670 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1671 if (!sock) 1672 goto out; 1673 err = move_addr_to_kernel(uservaddr, addrlen, &address); 1674 if (err < 0) 1675 goto out_put; 1676 1677 err = 1678 security_socket_connect(sock, (struct sockaddr *)&address, addrlen); 1679 if (err) 1680 goto out_put; 1681 1682 err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, 1683 sock->file->f_flags); 1684 out_put: 1685 fput_light(sock->file, fput_needed); 1686 out: 1687 return err; 1688 } 1689 1690 /* 1691 * Get the local address ('name') of a socket object. Move the obtained 1692 * name to user space. 1693 */ 1694 1695 SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr, 1696 int __user *, usockaddr_len) 1697 { 1698 struct socket *sock; 1699 struct sockaddr_storage address; 1700 int len, err, fput_needed; 1701 1702 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1703 if (!sock) 1704 goto out; 1705 1706 err = security_socket_getsockname(sock); 1707 if (err) 1708 goto out_put; 1709 1710 err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); 1711 if (err) 1712 goto out_put; 1713 err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); 1714 1715 out_put: 1716 fput_light(sock->file, fput_needed); 1717 out: 1718 return err; 1719 } 1720 1721 /* 1722 * Get the remote address ('name') of a socket object. Move the obtained 1723 * name to user space. 1724 */ 1725 1726 SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, 1727 int __user *, usockaddr_len) 1728 { 1729 struct socket *sock; 1730 struct sockaddr_storage address; 1731 int len, err, fput_needed; 1732 1733 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1734 if (sock != NULL) { 1735 err = security_socket_getpeername(sock); 1736 if (err) { 1737 fput_light(sock->file, fput_needed); 1738 return err; 1739 } 1740 1741 err = 1742 sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1743 1); 1744 if (!err) 1745 err = move_addr_to_user(&address, len, usockaddr, 1746 usockaddr_len); 1747 fput_light(sock->file, fput_needed); 1748 } 1749 return err; 1750 } 1751 1752 /* 1753 * Send a datagram to a given address. We move the address into kernel 1754 * space and check the user space data area is readable before invoking 1755 * the protocol. 
1756 */ 1757 1758 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, 1759 unsigned int, flags, struct sockaddr __user *, addr, 1760 int, addr_len) 1761 { 1762 struct socket *sock; 1763 struct sockaddr_storage address; 1764 int err; 1765 struct msghdr msg; 1766 struct iovec iov; 1767 int fput_needed; 1768 1769 if (len > INT_MAX) 1770 len = INT_MAX; 1771 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1772 if (!sock) 1773 goto out; 1774 1775 iov.iov_base = buff; 1776 iov.iov_len = len; 1777 msg.msg_name = NULL; 1778 iov_iter_init(&msg.msg_iter, WRITE, &iov, 1, len); 1779 msg.msg_control = NULL; 1780 msg.msg_controllen = 0; 1781 msg.msg_namelen = 0; 1782 if (addr) { 1783 err = move_addr_to_kernel(addr, addr_len, &address); 1784 if (err < 0) 1785 goto out_put; 1786 msg.msg_name = (struct sockaddr *)&address; 1787 msg.msg_namelen = addr_len; 1788 } 1789 if (sock->file->f_flags & O_NONBLOCK) 1790 flags |= MSG_DONTWAIT; 1791 msg.msg_flags = flags; 1792 err = sock_sendmsg(sock, &msg, len); 1793 1794 out_put: 1795 fput_light(sock->file, fput_needed); 1796 out: 1797 return err; 1798 } 1799 1800 /* 1801 * Send a datagram down a socket. 1802 */ 1803 1804 SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, 1805 unsigned int, flags) 1806 { 1807 return sys_sendto(fd, buff, len, flags, NULL, 0); 1808 } 1809 1810 /* 1811 * Receive a frame from the socket and optionally record the address of the 1812 * sender. We verify the buffers are writable and if needed move the 1813 * sender address from kernel to user space. 1814 */ 1815 1816 SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, 1817 unsigned int, flags, struct sockaddr __user *, addr, 1818 int __user *, addr_len) 1819 { 1820 struct socket *sock; 1821 struct iovec iov; 1822 struct msghdr msg; 1823 struct sockaddr_storage address; 1824 int err, err2; 1825 int fput_needed; 1826 1827 if (size > INT_MAX) 1828 size = INT_MAX; 1829 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1830 if (!sock) 1831 goto out; 1832 1833 msg.msg_control = NULL; 1834 msg.msg_controllen = 0; 1835 iov.iov_len = size; 1836 iov.iov_base = ubuf; 1837 iov_iter_init(&msg.msg_iter, READ, &iov, 1, size); 1838 /* Save some cycles and don't copy the address if not needed */ 1839 msg.msg_name = addr ? (struct sockaddr *)&address : NULL; 1840 /* We assume all kernel code knows the size of sockaddr_storage */ 1841 msg.msg_namelen = 0; 1842 if (sock->file->f_flags & O_NONBLOCK) 1843 flags |= MSG_DONTWAIT; 1844 err = sock_recvmsg(sock, &msg, size, flags); 1845 1846 if (err >= 0 && addr != NULL) { 1847 err2 = move_addr_to_user(&address, 1848 msg.msg_namelen, addr, addr_len); 1849 if (err2 < 0) 1850 err = err2; 1851 } 1852 1853 fput_light(sock->file, fput_needed); 1854 out: 1855 return err; 1856 } 1857 1858 /* 1859 * Receive a datagram from a socket. 1860 */ 1861 1862 SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size, 1863 unsigned int, flags) 1864 { 1865 return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); 1866 } 1867 1868 /* 1869 * Set a socket option. Because we don't know the option lengths we have 1870 * to pass the user mode parameter for the protocols to sort out. 
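 *
 *	Options at the SOL_SOCKET level (e.g. SO_REUSEADDR) are handled
 *	generically by sock_setsockopt(); any other level, e.g. IPPROTO_TCP
 *	with TCP_NODELAY, is passed through to the protocol's ->setsockopt()
 *	handler, as the branch below shows.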
1871 */ 1872 1873 SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, 1874 char __user *, optval, int, optlen) 1875 { 1876 int err, fput_needed; 1877 struct socket *sock; 1878 1879 if (optlen < 0) 1880 return -EINVAL; 1881 1882 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1883 if (sock != NULL) { 1884 err = security_socket_setsockopt(sock, level, optname); 1885 if (err) 1886 goto out_put; 1887 1888 if (level == SOL_SOCKET) 1889 err = 1890 sock_setsockopt(sock, level, optname, optval, 1891 optlen); 1892 else 1893 err = 1894 sock->ops->setsockopt(sock, level, optname, optval, 1895 optlen); 1896 out_put: 1897 fput_light(sock->file, fput_needed); 1898 } 1899 return err; 1900 } 1901 1902 /* 1903 * Get a socket option. Because we don't know the option lengths we have 1904 * to pass a user mode parameter for the protocols to sort out. 1905 */ 1906 1907 SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, 1908 char __user *, optval, int __user *, optlen) 1909 { 1910 int err, fput_needed; 1911 struct socket *sock; 1912 1913 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1914 if (sock != NULL) { 1915 err = security_socket_getsockopt(sock, level, optname); 1916 if (err) 1917 goto out_put; 1918 1919 if (level == SOL_SOCKET) 1920 err = 1921 sock_getsockopt(sock, level, optname, optval, 1922 optlen); 1923 else 1924 err = 1925 sock->ops->getsockopt(sock, level, optname, optval, 1926 optlen); 1927 out_put: 1928 fput_light(sock->file, fput_needed); 1929 } 1930 return err; 1931 } 1932 1933 /* 1934 * Shutdown a socket. 1935 */ 1936 1937 SYSCALL_DEFINE2(shutdown, int, fd, int, how) 1938 { 1939 int err, fput_needed; 1940 struct socket *sock; 1941 1942 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1943 if (sock != NULL) { 1944 err = security_socket_shutdown(sock, how); 1945 if (!err) 1946 err = sock->ops->shutdown(sock, how); 1947 fput_light(sock->file, fput_needed); 1948 } 1949 return err; 1950 } 1951 1952 /* A couple of helpful macros for getting the address of the 32/64 bit 1953 * fields which are the same type (int / unsigned) on our platforms. 1954 */ 1955 #define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? 
&msg##_compat->member : &msg->member) 1956 #define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) 1957 #define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) 1958 1959 struct used_address { 1960 struct sockaddr_storage name; 1961 unsigned int name_len; 1962 }; 1963 1964 static ssize_t copy_msghdr_from_user(struct msghdr *kmsg, 1965 struct user_msghdr __user *umsg, 1966 struct sockaddr __user **save_addr, 1967 struct iovec **iov) 1968 { 1969 struct sockaddr __user *uaddr; 1970 struct iovec __user *uiov; 1971 size_t nr_segs; 1972 ssize_t err; 1973 1974 if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) || 1975 __get_user(uaddr, &umsg->msg_name) || 1976 __get_user(kmsg->msg_namelen, &umsg->msg_namelen) || 1977 __get_user(uiov, &umsg->msg_iov) || 1978 __get_user(nr_segs, &umsg->msg_iovlen) || 1979 __get_user(kmsg->msg_control, &umsg->msg_control) || 1980 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || 1981 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 1982 return -EFAULT; 1983 1984 if (!uaddr) 1985 kmsg->msg_namelen = 0; 1986 1987 if (kmsg->msg_namelen < 0) 1988 return -EINVAL; 1989 1990 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) 1991 kmsg->msg_namelen = sizeof(struct sockaddr_storage); 1992 1993 if (save_addr) 1994 *save_addr = uaddr; 1995 1996 if (uaddr && kmsg->msg_namelen) { 1997 if (!save_addr) { 1998 err = move_addr_to_kernel(uaddr, kmsg->msg_namelen, 1999 kmsg->msg_name); 2000 if (err < 0) 2001 return err; 2002 } 2003 } else { 2004 kmsg->msg_name = NULL; 2005 kmsg->msg_namelen = 0; 2006 } 2007 2008 if (nr_segs > UIO_MAXIOV) 2009 return -EMSGSIZE; 2010 2011 err = rw_copy_check_uvector(save_addr ? READ : WRITE, 2012 uiov, nr_segs, 2013 UIO_FASTIOV, *iov, iov); 2014 if (err >= 0) 2015 iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE, 2016 *iov, nr_segs, err); 2017 return err; 2018 } 2019 2020 static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, 2021 struct msghdr *msg_sys, unsigned int flags, 2022 struct used_address *used_address) 2023 { 2024 struct compat_msghdr __user *msg_compat = 2025 (struct compat_msghdr __user *)msg; 2026 struct sockaddr_storage address; 2027 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; 2028 unsigned char ctl[sizeof(struct cmsghdr) + 20] 2029 __attribute__ ((aligned(sizeof(__kernel_size_t)))); 2030 /* 20 is size of ipv6_pktinfo */ 2031 unsigned char *ctl_buf = ctl; 2032 int ctl_len, total_len; 2033 ssize_t err; 2034 2035 msg_sys->msg_name = &address; 2036 2037 if (MSG_CMSG_COMPAT & flags) 2038 err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov); 2039 else 2040 err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov); 2041 if (err < 0) 2042 goto out_freeiov; 2043 total_len = err; 2044 2045 err = -ENOBUFS; 2046 2047 if (msg_sys->msg_controllen > INT_MAX) 2048 goto out_freeiov; 2049 ctl_len = msg_sys->msg_controllen; 2050 if ((MSG_CMSG_COMPAT & flags) && ctl_len) { 2051 err = 2052 cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, 2053 sizeof(ctl)); 2054 if (err) 2055 goto out_freeiov; 2056 ctl_buf = msg_sys->msg_control; 2057 ctl_len = msg_sys->msg_controllen; 2058 } else if (ctl_len) { 2059 if (ctl_len > sizeof(ctl)) { 2060 ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); 2061 if (ctl_buf == NULL) 2062 goto out_freeiov; 2063 } 2064 err = -EFAULT; 2065 /* 2066 * Careful! Before this, msg_sys->msg_control contains a user pointer. 2067 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted 2068 * checking falls down on this. 
2069 */ 2070 if (copy_from_user(ctl_buf, 2071 (void __user __force *)msg_sys->msg_control, 2072 ctl_len)) 2073 goto out_freectl; 2074 msg_sys->msg_control = ctl_buf; 2075 } 2076 msg_sys->msg_flags = flags; 2077 2078 if (sock->file->f_flags & O_NONBLOCK) 2079 msg_sys->msg_flags |= MSG_DONTWAIT; 2080 /* 2081 * If this is sendmmsg() and current destination address is same as 2082 * previously succeeded address, omit asking LSM's decision. 2083 * used_address->name_len is initialized to UINT_MAX so that the first 2084 * destination address never matches. 2085 */ 2086 if (used_address && msg_sys->msg_name && 2087 used_address->name_len == msg_sys->msg_namelen && 2088 !memcmp(&used_address->name, msg_sys->msg_name, 2089 used_address->name_len)) { 2090 err = sock_sendmsg_nosec(sock, msg_sys, total_len); 2091 goto out_freectl; 2092 } 2093 err = sock_sendmsg(sock, msg_sys, total_len); 2094 /* 2095 * If this is sendmmsg() and sending to current destination address was 2096 * successful, remember it. 2097 */ 2098 if (used_address && err >= 0) { 2099 used_address->name_len = msg_sys->msg_namelen; 2100 if (msg_sys->msg_name) 2101 memcpy(&used_address->name, msg_sys->msg_name, 2102 used_address->name_len); 2103 } 2104 2105 out_freectl: 2106 if (ctl_buf != ctl) 2107 sock_kfree_s(sock->sk, ctl_buf, ctl_len); 2108 out_freeiov: 2109 if (iov != iovstack) 2110 kfree(iov); 2111 return err; 2112 } 2113 2114 /* 2115 * BSD sendmsg interface 2116 */ 2117 2118 long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags) 2119 { 2120 int fput_needed, err; 2121 struct msghdr msg_sys; 2122 struct socket *sock; 2123 2124 sock = sockfd_lookup_light(fd, &err, &fput_needed); 2125 if (!sock) 2126 goto out; 2127 2128 err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL); 2129 2130 fput_light(sock->file, fput_needed); 2131 out: 2132 return err; 2133 } 2134 2135 SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags) 2136 { 2137 if (flags & MSG_CMSG_COMPAT) 2138 return -EINVAL; 2139 return __sys_sendmsg(fd, msg, flags); 2140 } 2141 2142 /* 2143 * Linux sendmmsg interface 2144 */ 2145 2146 int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 2147 unsigned int flags) 2148 { 2149 int fput_needed, err, datagrams; 2150 struct socket *sock; 2151 struct mmsghdr __user *entry; 2152 struct compat_mmsghdr __user *compat_entry; 2153 struct msghdr msg_sys; 2154 struct used_address used_address; 2155 2156 if (vlen > UIO_MAXIOV) 2157 vlen = UIO_MAXIOV; 2158 2159 datagrams = 0; 2160 2161 sock = sockfd_lookup_light(fd, &err, &fput_needed); 2162 if (!sock) 2163 return err; 2164 2165 used_address.name_len = UINT_MAX; 2166 entry = mmsg; 2167 compat_entry = (struct compat_mmsghdr __user *)mmsg; 2168 err = 0; 2169 2170 while (datagrams < vlen) { 2171 if (MSG_CMSG_COMPAT & flags) { 2172 err = ___sys_sendmsg(sock, (struct user_msghdr __user *)compat_entry, 2173 &msg_sys, flags, &used_address); 2174 if (err < 0) 2175 break; 2176 err = __put_user(err, &compat_entry->msg_len); 2177 ++compat_entry; 2178 } else { 2179 err = ___sys_sendmsg(sock, 2180 (struct user_msghdr __user *)entry, 2181 &msg_sys, flags, &used_address); 2182 if (err < 0) 2183 break; 2184 err = put_user(err, &entry->msg_len); 2185 ++entry; 2186 } 2187 2188 if (err) 2189 break; 2190 ++datagrams; 2191 } 2192 2193 fput_light(sock->file, fput_needed); 2194 2195 /* We only return an error if no datagrams were able to be sent */ 2196 if (datagrams != 0) 2197 return datagrams; 2198 2199 return err; 2200 } 2201 2202 
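/*
 * Illustrative sketch only, not part of the kernel build: one way a
 * user-space program might drive the sendmmsg() interface implemented
 * above, on a connected datagram socket.  The return value is the
 * number of datagrams actually queued; each mmsghdr::msg_len is filled
 * in with the byte count for that entry, and an error is only returned
 * if no datagram could be sent at all.  "send_two_datagrams" is a
 * hypothetical helper, not a kernel or libc API.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	static int send_two_datagrams(int fd)
 *	{
 *		struct mmsghdr msgs[2];
 *		struct iovec iov[2];
 *		char *payload[2] = { "first", "second" };
 *		int i, sent;
 *
 *		memset(msgs, 0, sizeof(msgs));
 *		for (i = 0; i < 2; i++) {
 *			iov[i].iov_base = payload[i];
 *			iov[i].iov_len = strlen(payload[i]);
 *			msgs[i].msg_hdr.msg_iov = &iov[i];
 *			msgs[i].msg_hdr.msg_iovlen = 1;
 *		}
 *
 *		sent = sendmmsg(fd, msgs, 2, 0);
 *		if (sent < 0)
 *			return -1;
 *		return sent;
 *	}
 */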
SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, 2203 unsigned int, vlen, unsigned int, flags) 2204 { 2205 if (flags & MSG_CMSG_COMPAT) 2206 return -EINVAL; 2207 return __sys_sendmmsg(fd, mmsg, vlen, flags); 2208 } 2209 2210 static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, 2211 struct msghdr *msg_sys, unsigned int flags, int nosec) 2212 { 2213 struct compat_msghdr __user *msg_compat = 2214 (struct compat_msghdr __user *)msg; 2215 struct iovec iovstack[UIO_FASTIOV]; 2216 struct iovec *iov = iovstack; 2217 unsigned long cmsg_ptr; 2218 int total_len, len; 2219 ssize_t err; 2220 2221 /* kernel mode address */ 2222 struct sockaddr_storage addr; 2223 2224 /* user mode address pointers */ 2225 struct sockaddr __user *uaddr; 2226 int __user *uaddr_len = COMPAT_NAMELEN(msg); 2227 2228 msg_sys->msg_name = &addr; 2229 2230 if (MSG_CMSG_COMPAT & flags) 2231 err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov); 2232 else 2233 err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov); 2234 if (err < 0) 2235 goto out_freeiov; 2236 total_len = err; 2237 2238 cmsg_ptr = (unsigned long)msg_sys->msg_control; 2239 msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); 2240 2241 /* We assume all kernel code knows the size of sockaddr_storage */ 2242 msg_sys->msg_namelen = 0; 2243 2244 if (sock->file->f_flags & O_NONBLOCK) 2245 flags |= MSG_DONTWAIT; 2246 err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, 2247 total_len, flags); 2248 if (err < 0) 2249 goto out_freeiov; 2250 len = err; 2251 2252 if (uaddr != NULL) { 2253 err = move_addr_to_user(&addr, 2254 msg_sys->msg_namelen, uaddr, 2255 uaddr_len); 2256 if (err < 0) 2257 goto out_freeiov; 2258 } 2259 err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), 2260 COMPAT_FLAGS(msg)); 2261 if (err) 2262 goto out_freeiov; 2263 if (MSG_CMSG_COMPAT & flags) 2264 err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, 2265 &msg_compat->msg_controllen); 2266 else 2267 err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, 2268 &msg->msg_controllen); 2269 if (err) 2270 goto out_freeiov; 2271 err = len; 2272 2273 out_freeiov: 2274 if (iov != iovstack) 2275 kfree(iov); 2276 return err; 2277 } 2278 2279 /* 2280 * BSD recvmsg interface 2281 */ 2282 2283 long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags) 2284 { 2285 int fput_needed, err; 2286 struct msghdr msg_sys; 2287 struct socket *sock; 2288 2289 sock = sockfd_lookup_light(fd, &err, &fput_needed); 2290 if (!sock) 2291 goto out; 2292 2293 err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); 2294 2295 fput_light(sock->file, fput_needed); 2296 out: 2297 return err; 2298 } 2299 2300 SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg, 2301 unsigned int, flags) 2302 { 2303 if (flags & MSG_CMSG_COMPAT) 2304 return -EINVAL; 2305 return __sys_recvmsg(fd, msg, flags); 2306 } 2307 2308 /* 2309 * Linux recvmmsg interface 2310 */ 2311 2312 int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 2313 unsigned int flags, struct timespec *timeout) 2314 { 2315 int fput_needed, err, datagrams; 2316 struct socket *sock; 2317 struct mmsghdr __user *entry; 2318 struct compat_mmsghdr __user *compat_entry; 2319 struct msghdr msg_sys; 2320 struct timespec end_time; 2321 2322 if (timeout && 2323 poll_select_set_timeout(&end_time, timeout->tv_sec, 2324 timeout->tv_nsec)) 2325 return -EINVAL; 2326 2327 datagrams = 0; 2328 2329 sock = sockfd_lookup_light(fd, &err, &fput_needed); 2330 if (!sock) 
		return err;

	err = sock_error(sock->sk);
	if (err)
		goto out_put;

	entry = mmsg;
	compat_entry = (struct compat_mmsghdr __user *)mmsg;

	while (datagrams < vlen) {
		/*
		 * No need to ask LSM for more than the first datagram.
		 */
		if (MSG_CMSG_COMPAT & flags) {
			err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry,
					     &msg_sys, flags & ~MSG_WAITFORONE,
					     datagrams);
			if (err < 0)
				break;
			err = __put_user(err, &compat_entry->msg_len);
			++compat_entry;
		} else {
			err = ___sys_recvmsg(sock,
					     (struct user_msghdr __user *)entry,
					     &msg_sys, flags & ~MSG_WAITFORONE,
					     datagrams);
			if (err < 0)
				break;
			err = put_user(err, &entry->msg_len);
			++entry;
		}

		if (err)
			break;
		++datagrams;

		/* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
		if (flags & MSG_WAITFORONE)
			flags |= MSG_DONTWAIT;

		if (timeout) {
			ktime_get_ts(timeout);
			*timeout = timespec_sub(end_time, *timeout);
			if (timeout->tv_sec < 0) {
				timeout->tv_sec = timeout->tv_nsec = 0;
				break;
			}

			/* Timeout, return fewer than vlen datagrams */
			if (timeout->tv_nsec == 0 && timeout->tv_sec == 0)
				break;
		}

		/* Out of band data, return right away */
		if (msg_sys.msg_flags & MSG_OOB)
			break;
	}

out_put:
	fput_light(sock->file, fput_needed);

	if (err == 0)
		return datagrams;

	if (datagrams != 0) {
		/*
		 * We may return fewer entries than requested (vlen) if the
		 * sock is non-blocking and there aren't enough datagrams...
		 */
		if (err != -EAGAIN) {
			/*
			 * ... or if recvmsg returns an error after we
			 * received some datagrams, where we record the
			 * error to return on the next call or if the
			 * app asks about it using getsockopt(SO_ERROR).
			 */
			sock->sk->sk_err = -err;
		}

		return datagrams;
	}

	return err;
}

SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
		unsigned int, vlen, unsigned int, flags,
		struct timespec __user *, timeout)
{
	int datagrams;
	struct timespec timeout_sys;

	if (flags & MSG_CMSG_COMPAT)
		return -EINVAL;

	if (!timeout)
		return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);

	if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys)))
		return -EFAULT;

	datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);

	if (datagrams > 0 &&
	    copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys)))
		datagrams = -EFAULT;

	return datagrams;
}

#ifdef __ARCH_WANT_SYS_SOCKETCALL
/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(unsigned long))
static const unsigned char nargs[21] = {
	AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
	AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
	AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
	AL(4), AL(5), AL(4)
};

#undef AL

/*
 *	System call vectors.
 *
 *	Argument checking cleaned up. Saved 20% in size.
 *	This function doesn't need to set the kernel lock because
 *	it is set by the callees.
2459 */ 2460 2461 SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) 2462 { 2463 unsigned long a[AUDITSC_ARGS]; 2464 unsigned long a0, a1; 2465 int err; 2466 unsigned int len; 2467 2468 if (call < 1 || call > SYS_SENDMMSG) 2469 return -EINVAL; 2470 2471 len = nargs[call]; 2472 if (len > sizeof(a)) 2473 return -EINVAL; 2474 2475 /* copy_from_user should be SMP safe. */ 2476 if (copy_from_user(a, args, len)) 2477 return -EFAULT; 2478 2479 err = audit_socketcall(nargs[call] / sizeof(unsigned long), a); 2480 if (err) 2481 return err; 2482 2483 a0 = a[0]; 2484 a1 = a[1]; 2485 2486 switch (call) { 2487 case SYS_SOCKET: 2488 err = sys_socket(a0, a1, a[2]); 2489 break; 2490 case SYS_BIND: 2491 err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); 2492 break; 2493 case SYS_CONNECT: 2494 err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); 2495 break; 2496 case SYS_LISTEN: 2497 err = sys_listen(a0, a1); 2498 break; 2499 case SYS_ACCEPT: 2500 err = sys_accept4(a0, (struct sockaddr __user *)a1, 2501 (int __user *)a[2], 0); 2502 break; 2503 case SYS_GETSOCKNAME: 2504 err = 2505 sys_getsockname(a0, (struct sockaddr __user *)a1, 2506 (int __user *)a[2]); 2507 break; 2508 case SYS_GETPEERNAME: 2509 err = 2510 sys_getpeername(a0, (struct sockaddr __user *)a1, 2511 (int __user *)a[2]); 2512 break; 2513 case SYS_SOCKETPAIR: 2514 err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); 2515 break; 2516 case SYS_SEND: 2517 err = sys_send(a0, (void __user *)a1, a[2], a[3]); 2518 break; 2519 case SYS_SENDTO: 2520 err = sys_sendto(a0, (void __user *)a1, a[2], a[3], 2521 (struct sockaddr __user *)a[4], a[5]); 2522 break; 2523 case SYS_RECV: 2524 err = sys_recv(a0, (void __user *)a1, a[2], a[3]); 2525 break; 2526 case SYS_RECVFROM: 2527 err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], 2528 (struct sockaddr __user *)a[4], 2529 (int __user *)a[5]); 2530 break; 2531 case SYS_SHUTDOWN: 2532 err = sys_shutdown(a0, a1); 2533 break; 2534 case SYS_SETSOCKOPT: 2535 err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); 2536 break; 2537 case SYS_GETSOCKOPT: 2538 err = 2539 sys_getsockopt(a0, a1, a[2], (char __user *)a[3], 2540 (int __user *)a[4]); 2541 break; 2542 case SYS_SENDMSG: 2543 err = sys_sendmsg(a0, (struct user_msghdr __user *)a1, a[2]); 2544 break; 2545 case SYS_SENDMMSG: 2546 err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); 2547 break; 2548 case SYS_RECVMSG: 2549 err = sys_recvmsg(a0, (struct user_msghdr __user *)a1, a[2]); 2550 break; 2551 case SYS_RECVMMSG: 2552 err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], 2553 (struct timespec __user *)a[4]); 2554 break; 2555 case SYS_ACCEPT4: 2556 err = sys_accept4(a0, (struct sockaddr __user *)a1, 2557 (int __user *)a[2], a[3]); 2558 break; 2559 default: 2560 err = -EINVAL; 2561 break; 2562 } 2563 return err; 2564 } 2565 2566 #endif /* __ARCH_WANT_SYS_SOCKETCALL */ 2567 2568 /** 2569 * sock_register - add a socket protocol handler 2570 * @ops: description of protocol 2571 * 2572 * This function is called by a protocol handler that wants to 2573 * advertise its address family, and have it linked into the 2574 * socket interface. The value ops->family corresponds to the 2575 * socket system call protocol family. 
2576 */ 2577 int sock_register(const struct net_proto_family *ops) 2578 { 2579 int err; 2580 2581 if (ops->family >= NPROTO) { 2582 pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO); 2583 return -ENOBUFS; 2584 } 2585 2586 spin_lock(&net_family_lock); 2587 if (rcu_dereference_protected(net_families[ops->family], 2588 lockdep_is_held(&net_family_lock))) 2589 err = -EEXIST; 2590 else { 2591 rcu_assign_pointer(net_families[ops->family], ops); 2592 err = 0; 2593 } 2594 spin_unlock(&net_family_lock); 2595 2596 pr_info("NET: Registered protocol family %d\n", ops->family); 2597 return err; 2598 } 2599 EXPORT_SYMBOL(sock_register); 2600 2601 /** 2602 * sock_unregister - remove a protocol handler 2603 * @family: protocol family to remove 2604 * 2605 * This function is called by a protocol handler that wants to 2606 * remove its address family, and have it unlinked from the 2607 * new socket creation. 2608 * 2609 * If protocol handler is a module, then it can use module reference 2610 * counts to protect against new references. If protocol handler is not 2611 * a module then it needs to provide its own protection in 2612 * the ops->create routine. 2613 */ 2614 void sock_unregister(int family) 2615 { 2616 BUG_ON(family < 0 || family >= NPROTO); 2617 2618 spin_lock(&net_family_lock); 2619 RCU_INIT_POINTER(net_families[family], NULL); 2620 spin_unlock(&net_family_lock); 2621 2622 synchronize_rcu(); 2623 2624 pr_info("NET: Unregistered protocol family %d\n", family); 2625 } 2626 EXPORT_SYMBOL(sock_unregister); 2627 2628 static int __init sock_init(void) 2629 { 2630 int err; 2631 /* 2632 * Initialize the network sysctl infrastructure. 2633 */ 2634 err = net_sysctl_init(); 2635 if (err) 2636 goto out; 2637 2638 /* 2639 * Initialize skbuff SLAB cache 2640 */ 2641 skb_init(); 2642 2643 /* 2644 * Initialize the protocols module. 2645 */ 2646 2647 init_inodecache(); 2648 2649 err = register_filesystem(&sock_fs_type); 2650 if (err) 2651 goto out_fs; 2652 sock_mnt = kern_mount(&sock_fs_type); 2653 if (IS_ERR(sock_mnt)) { 2654 err = PTR_ERR(sock_mnt); 2655 goto out_mount; 2656 } 2657 2658 /* The real protocol initialization is performed in later initcalls. 2659 */ 2660 2661 #ifdef CONFIG_NETFILTER 2662 err = netfilter_init(); 2663 if (err) 2664 goto out; 2665 #endif 2666 2667 ptp_classifier_init(); 2668 2669 out: 2670 return err; 2671 2672 out_mount: 2673 unregister_filesystem(&sock_fs_type); 2674 out_fs: 2675 goto out; 2676 } 2677 2678 core_initcall(sock_init); /* early initcall */ 2679 2680 #ifdef CONFIG_PROC_FS 2681 void socket_seq_show(struct seq_file *seq) 2682 { 2683 int cpu; 2684 int counter = 0; 2685 2686 for_each_possible_cpu(cpu) 2687 counter += per_cpu(sockets_in_use, cpu); 2688 2689 /* It can be negative, by the way. 
8) */ 2690 if (counter < 0) 2691 counter = 0; 2692 2693 seq_printf(seq, "sockets: used %d\n", counter); 2694 } 2695 #endif /* CONFIG_PROC_FS */ 2696 2697 #ifdef CONFIG_COMPAT 2698 static int do_siocgstamp(struct net *net, struct socket *sock, 2699 unsigned int cmd, void __user *up) 2700 { 2701 mm_segment_t old_fs = get_fs(); 2702 struct timeval ktv; 2703 int err; 2704 2705 set_fs(KERNEL_DS); 2706 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2707 set_fs(old_fs); 2708 if (!err) 2709 err = compat_put_timeval(&ktv, up); 2710 2711 return err; 2712 } 2713 2714 static int do_siocgstampns(struct net *net, struct socket *sock, 2715 unsigned int cmd, void __user *up) 2716 { 2717 mm_segment_t old_fs = get_fs(); 2718 struct timespec kts; 2719 int err; 2720 2721 set_fs(KERNEL_DS); 2722 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2723 set_fs(old_fs); 2724 if (!err) 2725 err = compat_put_timespec(&kts, up); 2726 2727 return err; 2728 } 2729 2730 static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) 2731 { 2732 struct ifreq __user *uifr; 2733 int err; 2734 2735 uifr = compat_alloc_user_space(sizeof(struct ifreq)); 2736 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) 2737 return -EFAULT; 2738 2739 err = dev_ioctl(net, SIOCGIFNAME, uifr); 2740 if (err) 2741 return err; 2742 2743 if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) 2744 return -EFAULT; 2745 2746 return 0; 2747 } 2748 2749 static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) 2750 { 2751 struct compat_ifconf ifc32; 2752 struct ifconf ifc; 2753 struct ifconf __user *uifc; 2754 struct compat_ifreq __user *ifr32; 2755 struct ifreq __user *ifr; 2756 unsigned int i, j; 2757 int err; 2758 2759 if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) 2760 return -EFAULT; 2761 2762 memset(&ifc, 0, sizeof(ifc)); 2763 if (ifc32.ifcbuf == 0) { 2764 ifc32.ifc_len = 0; 2765 ifc.ifc_len = 0; 2766 ifc.ifc_req = NULL; 2767 uifc = compat_alloc_user_space(sizeof(struct ifconf)); 2768 } else { 2769 size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * 2770 sizeof(struct ifreq); 2771 uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); 2772 ifc.ifc_len = len; 2773 ifr = ifc.ifc_req = (void __user *)(uifc + 1); 2774 ifr32 = compat_ptr(ifc32.ifcbuf); 2775 for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { 2776 if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) 2777 return -EFAULT; 2778 ifr++; 2779 ifr32++; 2780 } 2781 } 2782 if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) 2783 return -EFAULT; 2784 2785 err = dev_ioctl(net, SIOCGIFCONF, uifc); 2786 if (err) 2787 return err; 2788 2789 if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) 2790 return -EFAULT; 2791 2792 ifr = ifc.ifc_req; 2793 ifr32 = compat_ptr(ifc32.ifcbuf); 2794 for (i = 0, j = 0; 2795 i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; 2796 i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { 2797 if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) 2798 return -EFAULT; 2799 ifr32++; 2800 ifr++; 2801 } 2802 2803 if (ifc32.ifcbuf == 0) { 2804 /* Translate from 64-bit structure multiple to 2805 * a 32-bit one. 
2806 */ 2807 i = ifc.ifc_len; 2808 i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); 2809 ifc32.ifc_len = i; 2810 } else { 2811 ifc32.ifc_len = i; 2812 } 2813 if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) 2814 return -EFAULT; 2815 2816 return 0; 2817 } 2818 2819 static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) 2820 { 2821 struct compat_ethtool_rxnfc __user *compat_rxnfc; 2822 bool convert_in = false, convert_out = false; 2823 size_t buf_size = ALIGN(sizeof(struct ifreq), 8); 2824 struct ethtool_rxnfc __user *rxnfc; 2825 struct ifreq __user *ifr; 2826 u32 rule_cnt = 0, actual_rule_cnt; 2827 u32 ethcmd; 2828 u32 data; 2829 int ret; 2830 2831 if (get_user(data, &ifr32->ifr_ifru.ifru_data)) 2832 return -EFAULT; 2833 2834 compat_rxnfc = compat_ptr(data); 2835 2836 if (get_user(ethcmd, &compat_rxnfc->cmd)) 2837 return -EFAULT; 2838 2839 /* Most ethtool structures are defined without padding. 2840 * Unfortunately struct ethtool_rxnfc is an exception. 2841 */ 2842 switch (ethcmd) { 2843 default: 2844 break; 2845 case ETHTOOL_GRXCLSRLALL: 2846 /* Buffer size is variable */ 2847 if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) 2848 return -EFAULT; 2849 if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) 2850 return -ENOMEM; 2851 buf_size += rule_cnt * sizeof(u32); 2852 /* fall through */ 2853 case ETHTOOL_GRXRINGS: 2854 case ETHTOOL_GRXCLSRLCNT: 2855 case ETHTOOL_GRXCLSRULE: 2856 case ETHTOOL_SRXCLSRLINS: 2857 convert_out = true; 2858 /* fall through */ 2859 case ETHTOOL_SRXCLSRLDEL: 2860 buf_size += sizeof(struct ethtool_rxnfc); 2861 convert_in = true; 2862 break; 2863 } 2864 2865 ifr = compat_alloc_user_space(buf_size); 2866 rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); 2867 2868 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) 2869 return -EFAULT; 2870 2871 if (put_user(convert_in ? rxnfc : compat_ptr(data), 2872 &ifr->ifr_ifru.ifru_data)) 2873 return -EFAULT; 2874 2875 if (convert_in) { 2876 /* We expect there to be holes between fs.m_ext and 2877 * fs.ring_cookie and at the end of fs, but nowhere else. 
2878 */ 2879 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + 2880 sizeof(compat_rxnfc->fs.m_ext) != 2881 offsetof(struct ethtool_rxnfc, fs.m_ext) + 2882 sizeof(rxnfc->fs.m_ext)); 2883 BUILD_BUG_ON( 2884 offsetof(struct compat_ethtool_rxnfc, fs.location) - 2885 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != 2886 offsetof(struct ethtool_rxnfc, fs.location) - 2887 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 2888 2889 if (copy_in_user(rxnfc, compat_rxnfc, 2890 (void __user *)(&rxnfc->fs.m_ext + 1) - 2891 (void __user *)rxnfc) || 2892 copy_in_user(&rxnfc->fs.ring_cookie, 2893 &compat_rxnfc->fs.ring_cookie, 2894 (void __user *)(&rxnfc->fs.location + 1) - 2895 (void __user *)&rxnfc->fs.ring_cookie) || 2896 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, 2897 sizeof(rxnfc->rule_cnt))) 2898 return -EFAULT; 2899 } 2900 2901 ret = dev_ioctl(net, SIOCETHTOOL, ifr); 2902 if (ret) 2903 return ret; 2904 2905 if (convert_out) { 2906 if (copy_in_user(compat_rxnfc, rxnfc, 2907 (const void __user *)(&rxnfc->fs.m_ext + 1) - 2908 (const void __user *)rxnfc) || 2909 copy_in_user(&compat_rxnfc->fs.ring_cookie, 2910 &rxnfc->fs.ring_cookie, 2911 (const void __user *)(&rxnfc->fs.location + 1) - 2912 (const void __user *)&rxnfc->fs.ring_cookie) || 2913 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, 2914 sizeof(rxnfc->rule_cnt))) 2915 return -EFAULT; 2916 2917 if (ethcmd == ETHTOOL_GRXCLSRLALL) { 2918 /* As an optimisation, we only copy the actual 2919 * number of rules that the underlying 2920 * function returned. Since Mallory might 2921 * change the rule count in user memory, we 2922 * check that it is less than the rule count 2923 * originally given (as the user buffer size), 2924 * which has been range-checked. 2925 */ 2926 if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) 2927 return -EFAULT; 2928 if (actual_rule_cnt < rule_cnt) 2929 rule_cnt = actual_rule_cnt; 2930 if (copy_in_user(&compat_rxnfc->rule_locs[0], 2931 &rxnfc->rule_locs[0], 2932 rule_cnt * sizeof(u32))) 2933 return -EFAULT; 2934 } 2935 } 2936 2937 return 0; 2938 } 2939 2940 static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) 2941 { 2942 void __user *uptr; 2943 compat_uptr_t uptr32; 2944 struct ifreq __user *uifr; 2945 2946 uifr = compat_alloc_user_space(sizeof(*uifr)); 2947 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) 2948 return -EFAULT; 2949 2950 if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) 2951 return -EFAULT; 2952 2953 uptr = compat_ptr(uptr32); 2954 2955 if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) 2956 return -EFAULT; 2957 2958 return dev_ioctl(net, SIOCWANDEV, uifr); 2959 } 2960 2961 static int bond_ioctl(struct net *net, unsigned int cmd, 2962 struct compat_ifreq __user *ifr32) 2963 { 2964 struct ifreq kifr; 2965 mm_segment_t old_fs; 2966 int err; 2967 2968 switch (cmd) { 2969 case SIOCBONDENSLAVE: 2970 case SIOCBONDRELEASE: 2971 case SIOCBONDSETHWADDR: 2972 case SIOCBONDCHANGEACTIVE: 2973 if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) 2974 return -EFAULT; 2975 2976 old_fs = get_fs(); 2977 set_fs(KERNEL_DS); 2978 err = dev_ioctl(net, cmd, 2979 (struct ifreq __user __force *) &kifr); 2980 set_fs(old_fs); 2981 2982 return err; 2983 default: 2984 return -ENOIOCTLCMD; 2985 } 2986 } 2987 2988 /* Handle ioctls that use ifreq::ifr_data and just need struct ifreq converted */ 2989 static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, 2990 struct compat_ifreq __user *u_ifreq32) 2991 { 2992 struct ifreq 
__user *u_ifreq64; 2993 char tmp_buf[IFNAMSIZ]; 2994 void __user *data64; 2995 u32 data32; 2996 2997 if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), 2998 IFNAMSIZ)) 2999 return -EFAULT; 3000 if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) 3001 return -EFAULT; 3002 data64 = compat_ptr(data32); 3003 3004 u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); 3005 3006 if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], 3007 IFNAMSIZ)) 3008 return -EFAULT; 3009 if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) 3010 return -EFAULT; 3011 3012 return dev_ioctl(net, cmd, u_ifreq64); 3013 } 3014 3015 static int dev_ifsioc(struct net *net, struct socket *sock, 3016 unsigned int cmd, struct compat_ifreq __user *uifr32) 3017 { 3018 struct ifreq __user *uifr; 3019 int err; 3020 3021 uifr = compat_alloc_user_space(sizeof(*uifr)); 3022 if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) 3023 return -EFAULT; 3024 3025 err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); 3026 3027 if (!err) { 3028 switch (cmd) { 3029 case SIOCGIFFLAGS: 3030 case SIOCGIFMETRIC: 3031 case SIOCGIFMTU: 3032 case SIOCGIFMEM: 3033 case SIOCGIFHWADDR: 3034 case SIOCGIFINDEX: 3035 case SIOCGIFADDR: 3036 case SIOCGIFBRDADDR: 3037 case SIOCGIFDSTADDR: 3038 case SIOCGIFNETMASK: 3039 case SIOCGIFPFLAGS: 3040 case SIOCGIFTXQLEN: 3041 case SIOCGMIIPHY: 3042 case SIOCGMIIREG: 3043 if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) 3044 err = -EFAULT; 3045 break; 3046 } 3047 } 3048 return err; 3049 } 3050 3051 static int compat_sioc_ifmap(struct net *net, unsigned int cmd, 3052 struct compat_ifreq __user *uifr32) 3053 { 3054 struct ifreq ifr; 3055 struct compat_ifmap __user *uifmap32; 3056 mm_segment_t old_fs; 3057 int err; 3058 3059 uifmap32 = &uifr32->ifr_ifru.ifru_map; 3060 err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); 3061 err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); 3062 err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); 3063 err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); 3064 err |= get_user(ifr.ifr_map.irq, &uifmap32->irq); 3065 err |= get_user(ifr.ifr_map.dma, &uifmap32->dma); 3066 err |= get_user(ifr.ifr_map.port, &uifmap32->port); 3067 if (err) 3068 return -EFAULT; 3069 3070 old_fs = get_fs(); 3071 set_fs(KERNEL_DS); 3072 err = dev_ioctl(net, cmd, (void __user __force *)&ifr); 3073 set_fs(old_fs); 3074 3075 if (cmd == SIOCGIFMAP && !err) { 3076 err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); 3077 err |= put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); 3078 err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); 3079 err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); 3080 err |= put_user(ifr.ifr_map.irq, &uifmap32->irq); 3081 err |= put_user(ifr.ifr_map.dma, &uifmap32->dma); 3082 err |= put_user(ifr.ifr_map.port, &uifmap32->port); 3083 if (err) 3084 err = -EFAULT; 3085 } 3086 return err; 3087 } 3088 3089 struct rtentry32 { 3090 u32 rt_pad1; 3091 struct sockaddr rt_dst; /* target address */ 3092 struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ 3093 struct sockaddr rt_genmask; /* target network mask (IP) */ 3094 unsigned short rt_flags; 3095 short rt_pad2; 3096 u32 rt_pad3; 3097 unsigned char rt_tos; 3098 unsigned char rt_class; 3099 short rt_pad4; 3100 short rt_metric; /* +1 for binary compatibility! 
 */
	/* char * */ u32 rt_dev;	/* forcing the device at add	*/
	u32		rt_mtu;		/* per route MTU/Window		*/
	u32		rt_window;	/* Window clamping		*/
	unsigned short	rt_irtt;	/* Initial RTT			*/
};

struct in6_rtmsg32 {
	struct in6_addr		rtmsg_dst;
	struct in6_addr		rtmsg_src;
	struct in6_addr		rtmsg_gateway;
	u32			rtmsg_type;
	u16			rtmsg_dst_len;
	u16			rtmsg_src_len;
	u32			rtmsg_metric;
	u32			rtmsg_info;
	u32			rtmsg_flags;
	s32			rtmsg_ifindex;
};

static int routing_ioctl(struct net *net, struct socket *sock,
			 unsigned int cmd, void __user *argp)
{
	int ret;
	void *r = NULL;
	struct in6_rtmsg r6;
	struct rtentry r4;
	char devname[16];
	u32 rtdev;
	mm_segment_t old_fs = get_fs();

	if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */
		struct in6_rtmsg32 __user *ur6 = argp;
		ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst),
				     3 * sizeof(struct in6_addr));
		ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
		ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
		ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
		ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
		ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
		ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
		ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));

		r = (void *) &r6;
	} else { /* ipv4 */
		struct rtentry32 __user *ur4 = argp;
		ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst),
				     3 * sizeof(struct sockaddr));
		ret |= get_user(r4.rt_flags, &(ur4->rt_flags));
		ret |= get_user(r4.rt_metric, &(ur4->rt_metric));
		ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu));
		ret |= get_user(r4.rt_window, &(ur4->rt_window));
		ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt));
		ret |= get_user(rtdev, &(ur4->rt_dev));
		if (rtdev) {
			ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
			r4.rt_dev = (char __user __force *)devname;
			devname[15] = 0;
		} else
			r4.rt_dev = NULL;

		r = (void *) &r4;
	}

	if (ret) {
		ret = -EFAULT;
		goto out;
	}

	set_fs(KERNEL_DS);
	ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
	set_fs(old_fs);

out:
	return ret;
}

/* Old-style bridge ioctls end up using SIOCDEVPRIVATE for some
 * operations, so this forces use of the newer bridge-utils that
 * use compatible ioctls.
 */
static int old_bridge_ioctl(compat_ulong_t __user *argp)
{
	compat_ulong_t tmp;

	if (get_user(tmp, argp))
		return -EFAULT;
	if (tmp == BRCTL_GET_VERSION)
		return BRCTL_VERSION + 1;
	return -EINVAL;
}

static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
				   unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);

	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
		return compat_ifr_data_ioctl(net, cmd, argp);

	switch (cmd) {
	case SIOCSIFBR:
	case SIOCGIFBR:
		return old_bridge_ioctl(argp);
	case SIOCGIFNAME:
		return dev_ifname32(net, argp);
	case SIOCGIFCONF:
		return dev_ifconf(net, argp);
	case SIOCETHTOOL:
		return ethtool_ioctl(net, argp);
	case SIOCWANDEV:
		return compat_siocwandev(net, argp);
	case SIOCGIFMAP:
	case SIOCSIFMAP:
		return
compat_sioc_ifmap(net, cmd, argp); 3217 case SIOCBONDENSLAVE: 3218 case SIOCBONDRELEASE: 3219 case SIOCBONDSETHWADDR: 3220 case SIOCBONDCHANGEACTIVE: 3221 return bond_ioctl(net, cmd, argp); 3222 case SIOCADDRT: 3223 case SIOCDELRT: 3224 return routing_ioctl(net, sock, cmd, argp); 3225 case SIOCGSTAMP: 3226 return do_siocgstamp(net, sock, cmd, argp); 3227 case SIOCGSTAMPNS: 3228 return do_siocgstampns(net, sock, cmd, argp); 3229 case SIOCBONDSLAVEINFOQUERY: 3230 case SIOCBONDINFOQUERY: 3231 case SIOCSHWTSTAMP: 3232 case SIOCGHWTSTAMP: 3233 return compat_ifr_data_ioctl(net, cmd, argp); 3234 3235 case FIOSETOWN: 3236 case SIOCSPGRP: 3237 case FIOGETOWN: 3238 case SIOCGPGRP: 3239 case SIOCBRADDBR: 3240 case SIOCBRDELBR: 3241 case SIOCGIFVLAN: 3242 case SIOCSIFVLAN: 3243 case SIOCADDDLCI: 3244 case SIOCDELDLCI: 3245 return sock_ioctl(file, cmd, arg); 3246 3247 case SIOCGIFFLAGS: 3248 case SIOCSIFFLAGS: 3249 case SIOCGIFMETRIC: 3250 case SIOCSIFMETRIC: 3251 case SIOCGIFMTU: 3252 case SIOCSIFMTU: 3253 case SIOCGIFMEM: 3254 case SIOCSIFMEM: 3255 case SIOCGIFHWADDR: 3256 case SIOCSIFHWADDR: 3257 case SIOCADDMULTI: 3258 case SIOCDELMULTI: 3259 case SIOCGIFINDEX: 3260 case SIOCGIFADDR: 3261 case SIOCSIFADDR: 3262 case SIOCSIFHWBROADCAST: 3263 case SIOCDIFADDR: 3264 case SIOCGIFBRDADDR: 3265 case SIOCSIFBRDADDR: 3266 case SIOCGIFDSTADDR: 3267 case SIOCSIFDSTADDR: 3268 case SIOCGIFNETMASK: 3269 case SIOCSIFNETMASK: 3270 case SIOCSIFPFLAGS: 3271 case SIOCGIFPFLAGS: 3272 case SIOCGIFTXQLEN: 3273 case SIOCSIFTXQLEN: 3274 case SIOCBRADDIF: 3275 case SIOCBRDELIF: 3276 case SIOCSIFNAME: 3277 case SIOCGMIIPHY: 3278 case SIOCGMIIREG: 3279 case SIOCSMIIREG: 3280 return dev_ifsioc(net, sock, cmd, argp); 3281 3282 case SIOCSARP: 3283 case SIOCGARP: 3284 case SIOCDARP: 3285 case SIOCATMARK: 3286 return sock_do_ioctl(net, sock, cmd, arg); 3287 } 3288 3289 return -ENOIOCTLCMD; 3290 } 3291 3292 static long compat_sock_ioctl(struct file *file, unsigned int cmd, 3293 unsigned long arg) 3294 { 3295 struct socket *sock = file->private_data; 3296 int ret = -ENOIOCTLCMD; 3297 struct sock *sk; 3298 struct net *net; 3299 3300 sk = sock->sk; 3301 net = sock_net(sk); 3302 3303 if (sock->ops->compat_ioctl) 3304 ret = sock->ops->compat_ioctl(sock, cmd, arg); 3305 3306 if (ret == -ENOIOCTLCMD && 3307 (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) 3308 ret = compat_wext_handle_ioctl(net, cmd, arg); 3309 3310 if (ret == -ENOIOCTLCMD) 3311 ret = compat_sock_ioctl_trans(file, sock, cmd, arg); 3312 3313 return ret; 3314 } 3315 #endif 3316 3317 int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) 3318 { 3319 return sock->ops->bind(sock, addr, addrlen); 3320 } 3321 EXPORT_SYMBOL(kernel_bind); 3322 3323 int kernel_listen(struct socket *sock, int backlog) 3324 { 3325 return sock->ops->listen(sock, backlog); 3326 } 3327 EXPORT_SYMBOL(kernel_listen); 3328 3329 int kernel_accept(struct socket *sock, struct socket **newsock, int flags) 3330 { 3331 struct sock *sk = sock->sk; 3332 int err; 3333 3334 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, 3335 newsock); 3336 if (err < 0) 3337 goto done; 3338 3339 err = sock->ops->accept(sock, *newsock, flags); 3340 if (err < 0) { 3341 sock_release(*newsock); 3342 *newsock = NULL; 3343 goto done; 3344 } 3345 3346 (*newsock)->ops = sock->ops; 3347 __module_get((*newsock)->ops->owner); 3348 3349 done: 3350 return err; 3351 } 3352 EXPORT_SYMBOL(kernel_accept); 3353 3354 int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, 3355 int flags) 3356 { 
	return sock->ops->connect(sock, addr, addrlen, flags);
}
EXPORT_SYMBOL(kernel_connect);

int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
			 int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 0);
}
EXPORT_SYMBOL(kernel_getsockname);

int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
			 int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 1);
}
EXPORT_SYMBOL(kernel_getpeername);

int kernel_getsockopt(struct socket *sock, int level, int optname,
			char *optval, int *optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int __user *uoptlen;
	int err;

	uoptval = (char __user __force *) optval;
	uoptlen = (int __user __force *) optlen;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
	else
		err = sock->ops->getsockopt(sock, level, optname, uoptval,
					    uoptlen);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_getsockopt);

int kernel_setsockopt(struct socket *sock, int level, int optname,
			char *optval, unsigned int optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int err;

	uoptval = (char __user __force *) optval;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, uoptval,
					    optlen);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_setsockopt);

int kernel_sendpage(struct socket *sock, struct page *page, int offset,
		    size_t size, int flags)
{
	if (sock->ops->sendpage)
		return sock->ops->sendpage(sock, page, offset, size, flags);

	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(kernel_sendpage);

int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
{
	mm_segment_t oldfs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	err = sock->ops->ioctl(sock, cmd, arg);
	set_fs(oldfs);

	return err;
}
EXPORT_SYMBOL(kernel_sock_ioctl);

int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
	return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
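/*
 * Illustrative sketch only, not part of the kernel build: how an
 * in-kernel user of the helpers above might tune and then tear down a
 * socket it owns.  The struct socket is assumed to have been obtained
 * elsewhere (for example from kernel_accept()); "example_tune_and_close"
 * is a hypothetical helper name.  Passing kernel pointers is legal here
 * because kernel_setsockopt() temporarily switches to KERNEL_DS around
 * the user-copy.
 *
 *	static int example_tune_and_close(struct socket *sock)
 *	{
 *		int one = 1;
 *		int err;
 *
 *		err = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
 *					(char *)&one, sizeof(one));
 *		if (err)
 *			return err;
 *
 *		err = kernel_sock_shutdown(sock, SHUT_RDWR);
 *		sock_release(sock);
 *		return err;
 *	}
 */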