/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko Eißfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *		Christoph Rohland:	Elegant non-blocking accept/connect
 *					algorithm. Lots of bug fixes.
 *		Alexey Kuznetsov:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli:	If possible we block in connect(2)
 *					when the max backlog of the listening
 *					socket has been reached. This won't
 *					break old apps and it avoids hashing a
 *					huge number of sockets (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *		Alexey Kuznetsov:	Full scale SMP. Lots of bugs are
 *					introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 *
 * Known differences from the reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername -
 *		BSD bug??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with a NUL byte, so that this name space does not
 *		  intersect with BSD pathnames.
 */
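/*
 * Illustrative userspace sketch (not part of this file's build) of the
 * two address forms described above; the names "/tmp/demo.sock" and
 * "demo" are hypothetical:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	// Filesystem name: creates an inode, visible with ls(1).
 *	strcpy(a.sun_path, "/tmp/demo.sock");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	// Abstract name: leading NUL byte, no inode, dies with the socket.
 *	// (An alternative to the bind above; a socket binds only once.)
 *	a.sun_path[0] = '\0';
 *	memcpy(a.sun_path + 1, "demo", 4);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */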

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_long_t unix_nr_socks;

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 * SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

static inline unsigned unix_hash_fold(__wsum n)
{
	unsigned hash = (__force unsigned)n;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 * Check unix socket name:
 *	- should not be zero length.
 *	- if it does not start with a zero byte, it should be
 *	  NUL terminated (an FS object).
 *	- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
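/*
 * What unix_mkname() makes of a few inputs, assuming the usual ABI where
 * offsetof(struct sockaddr_un, sun_path) == sizeof(short) == 2
 * (illustrative, not normative):
 *
 *	{ AF_UNIX, "/tmp/x" }, len covering the string -> pathname form;
 *		a NUL is stored at offset len and the returned len is
 *		recomputed as 2 + strlen(sun_path) + 1
 *	{ AF_UNIX, "\0x" }, len == 4 -> abstract form; all len bytes are
 *		significant and *hashp is set from a checksum fold over
 *		the whole address
 *	len <= 2, or len > sizeof(struct sockaddr_un) -> -EINVAL
 */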

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control to be based solely on wmem_alloc; second, an sk connected to a
 * peer may receive messages only from that peer.
 */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this when
		 * the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	  = u->dentry;
	u->dentry = NULL;
	mnt	  = u->mnt;
	u->mnt	  = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 * What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}
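/*
 * Why the release path may end in unix_gc(): an SCM_RIGHTS message can
 * carry a unix socket through itself, forming a reference cycle that
 * close() alone never breaks. Hypothetical userspace sketch (send_fd()
 * is an assumed helper wrapping sendmsg() with SCM_RIGHTS):
 *
 *	int sv[2];
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	send_fd(sv[0], sv[0]);	// queue sv[0]'s file inside its own queue
 *	close(sv[0]);
 *	close(sv[1]);
 *	// the queued skb still pins the file; only the garbage
 *	// collector can reap the cycle
 */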

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;
	const struct cred *old_cred = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
	if (old_cred)
		put_cred(old_cred);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t, int);

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
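/*
 * The pid/cred recorded by init_peercred()/copy_peercred() above are
 * exactly what the peer reads back with SO_PEERCRED, e.g. (illustrative
 * userspace, error handling omitted):
 *
 *	struct ucred uc;
 *	socklen_t len = sizeof(uc);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &uc, &len);
 *	// uc.pid, uc.uid, uc.gid describe the other end as it was at
 *	// connect()/listen()/socketpair() time
 */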

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 * Believe it or not BSD has AF_UNIX, SOCK_RAW
		 * though nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		/* fall through */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}
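/*
 * The type dispatch in unix_create() as seen from userspace; note the
 * silent SOCK_RAW -> SOCK_DGRAM downgrade:
 *
 *	socket(AF_UNIX, SOCK_STREAM, 0)    -> unix_stream_ops
 *	socket(AF_UNIX, SOCK_DGRAM, 0)     -> unix_dgram_ops
 *	socket(AF_UNIX, SOCK_RAW, 0)       -> behaves as SOCK_DGRAM
 *	socket(AF_UNIX, SOCK_SEQPACKET, 0) -> unix_seqpacket_ops
 *	socket(AF_UNIX, SOCK_RDM, 0)       -> -ESOCKTNOSUPPORT
 */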

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path + 1, "%05x", ordernum) +
		1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum + 1) & 0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(path.mnt, path.dentry);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
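/*
 * unix_autobind() above is reached when userspace binds with nothing
 * but the address family (illustrative sketch):
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(sa_family_t));
 *	// the kernel assigns an abstract name of a NUL byte plus five
 *	// hex digits, shown as "@XXXXX" in /proc/net/unix
 */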

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	struct dentry *dentry = NULL;
	struct path path;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		unsigned int mode;
		err = 0;
		/*
		 * Get the parent directory, calculate the hash for the last
		 * component.
		 */
		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_parent;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = mnt_want_write(path.mnt);
		if (err)
			goto out_mknod_dput;
		err = security_path_mknod(&path, dentry, mode, 0);
		if (err)
			goto out_mknod_drop_write;
		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
out_mknod_drop_write:
		mnt_drop_write(path.mnt);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&path.dentry->d_inode->i_mutex);
		dput(path.dentry);
		path.dentry = dentry;

		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sun_path[0]) {
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE - 1)];
		u->dentry = path.dentry;
		u->mnt	  = path.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);
out_mknod_parent:
	if (err == -EEXIST)
		err = -EADDRINUSE;
	unix_release_addr(addr);
	goto out_up;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 * 1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
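/*
 * How the wait above surfaces in connect(2) on a stream socket whose
 * listener has a full backlog (summary of the logic in
 * unix_stream_connect() below):
 *
 *	O_NONBLOCK set:		fails immediately with -EAGAIN
 *	blocking, no timeout:	sleeps in unix_wait_for_peer() until the
 *				listener accepts or the peer goes away
 *	signal delivered:	returns via sock_intr_errno(timeo)
 */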

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	 * If we were to allocate after the state is locked, we would have
	 * to recheck everything again anyway.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.
	 *
	 * This is a tricky place. We need to grab our state lock and cannot
	 * drop the lock on the peer. This is dangerous because a deadlock is
	 * possible. A connect to self and a simultaneous connect attempt are
	 * eliminated by checking the socket state: other is TCP_LISTEN, and
	 * if sk were TCP_LISTEN we would have checked it before attempting
	 * to grab the lock.
	 *
	 * Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take it and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
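/*
 * Minimal use of the pairing above (userspace sketch): both ends are
 * born connected and, for the connection-oriented types, already carry
 * each other's credentials:
 *
 *	int sv[2];
 *	char buf[8];
 *
 *	if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);
 *		read(sv[1], buf, sizeof(buf));
 *	}
 */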

static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	scm.cred = UNIXCB(skb).cred;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	UNIXCB(skb).cred = get_cred(scm->cred);
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}
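/*
 * The fd attachment done by unix_attach_fds()/unix_scm_to_skb() above
 * corresponds to a userspace SCM_RIGHTS control message, roughly
 * (illustrative; fd_to_pass is hypothetical, error handling omitted):
 *
 *	char dat = 'x', cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { &dat, 1 };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_RIGHTS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	sendmsg(sock, &mh, 0);
 */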

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 * Check with 1003.1g - what should a datagram error
		 * return here?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}


static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 * Optimisation for the fact that under 0.01% of X
		 * messages typically need breaking up.
		 */

		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 * Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 * If you pass two values to sock_alloc_send_skb
		 * it tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily), and if that fails grabs the
		 * fallback-size buffer, which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));


		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
				  struct msghdr *msg, size_t size,
				  int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
		goto out;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable what to do on PEEK. We could:
		 *
		 *	- not return fds (good, but too simple 8))
		 *	- return fds, and not return them on read (the old
		 *	  strategy, apparently wrong)
		 *	- clone fds (chosen for now, the most universal
		 *	  solution)
		 *
		 * POSIX 1003.1g does not actually define this clearly
		 * at all. POSIX 1003.1g doesn't define a lot of things
		 * clearly however!
		 */
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
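/*
 * Consequence of the MSG_PEEK strategy chosen above: peeking at a
 * datagram that carries SCM_RIGHTS installs a fresh set of duplicate
 * fds, and the later real read installs them again (sketch):
 *
 *	recvmsg(fd, &mh, MSG_PEEK);	// fds delivered (first copies)
 *	recvmsg(fd, &mh, 0);		// same datagram, fds again
 *
 * A peeking reader must therefore be prepared to close both sets.
 */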

/*
 * Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}



static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(timeo);
		goto out;
	}

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 * POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)
			    || mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
			    (UNIXCB(skb).cred != siocb->scm->cred)) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode + 1) & (RCV_SHUTDOWN | SEND_SHUTDOWN);

	if (!mode)
		return 0;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode & RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode & SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
	{
		struct sk_buff *skb;

		if (sk->sk_state == TCP_LISTEN) {
			err = -EINVAL;
			break;
		}

		spin_lock(&sk->sk_receive_queue.lock);
		if (sk->sk_type == SOCK_STREAM ||
		    sk->sk_type == SOCK_SEQPACKET) {
			skb_queue_walk(&sk->sk_receive_queue, skb)
				amount += skb->len;
		} else {
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				amount = skb->len;
		}
		spin_unlock(&sk->sk_receive_queue.lock);
		err = put_user(amount, (int __user *)arg);
		break;
	}

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
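/*
 * The SIOCINQ/SIOCOUTQ accounting above, as reachable from userspace
 * (illustrative):
 *
 *	int n;
 *
 *	ioctl(fd, SIOCINQ, &n);		// stream/seqpacket: total bytes
 *					// queued; dgram: length of the
 *					// first queued datagram
 *	ioctl(fd, SIOCOUTQ, &n);	// send-buffer bytes still in flight
 */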

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s;
	     s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
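/*
 * Sample of what unix_seq_show() below emits, one socket per line; the
 * values are made up for illustration, and '@' marks an abstract name:
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	ffff880068dd2d00: 00000002 00000000 00010000 0001 01 17597 @demo
 *	ffff880068dd3600: 00000002 00000000 00010000 0001 01 17604 /tmp/demo.sock
 */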

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};


static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there. */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);