// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko Eißfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */
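/* Illustrative sketch (not part of this file): how userspace binds a socket
 * in the abstract namespace described above. sun_path[0] == 0 selects an
 * abstract name, and the name's length is conveyed by the address length
 * rather than by a terminating NUL. The name "\0example" is hypothetical.
 *
 *	struct sockaddr_un addr = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memcpy(addr.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&addr,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 */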
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bpf-cgroup.h>
#include <linux/btf_ids.h>
#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/net.h>
#include <linux/pidfs.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/sched/signal.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/splice.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <net/af_unix.h>
#include <net/net_namespace.h>
#include <net/scm.h>
#include <net/tcp_states.h>
#include <uapi/linux/sockios.h>
#include <uapi/linux/termios.h>

#include "af_unix.h"

static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];

/* SMP locking strategy:
 *    hash table is protected with spinlock.
 *    each socket state is protected by separate spinlock.
 */
#ifdef CONFIG_PROVE_LOCKING
#define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))

static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
				  const struct lockdep_map *b)
{
	return cmp_ptr(a, b);
}

static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
				  const struct lockdep_map *_b)
{
	const struct unix_sock *a, *b;

	a = container_of(_a, struct unix_sock, lock.dep_map);
	b = container_of(_b, struct unix_sock, lock.dep_map);

	if (a->sk.sk_state == TCP_LISTEN) {
		/* unix_stream_connect(): Before the 2nd unix_state_lock(),
		 *
		 *   1. a is TCP_LISTEN.
		 *   2. b is not a.
		 *   3. concurrent connect(b -> a) must fail.
		 *
		 * Except for 2. & 3., the b's state can be any possible
		 * value due to concurrent connect() or listen().
		 *
		 * 2. is detected in debug_spin_lock_before(), and 3. cannot
		 * be expressed as lock_cmp_fn.
		 */
		switch (b->sk.sk_state) {
		case TCP_CLOSE:
		case TCP_ESTABLISHED:
		case TCP_LISTEN:
			return -1;
		default:
			/* Invalid case. */
			return 0;
		}
	}

	/* Should never happen.  Just to be symmetric. */
	if (b->sk.sk_state == TCP_LISTEN) {
		switch (a->sk.sk_state) {
		case TCP_CLOSE:
		case TCP_ESTABLISHED:
			return 1;
		default:
			return 0;
		}
	}

	/* unix_state_double_lock(): ascending address order. */
	return cmp_ptr(a, b);
}
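/* Illustrative note (not part of this file): a lock_cmp_fn teaches lockdep
 * a partial order between two held locks of the same class. A negative
 * return means "a may be taken while b is held next", positive the
 * reverse, and 0 "no valid nesting". For unix_state_double_lock() further
 * below, two hypothetical unbound sockets sk1 < sk2 (by address) nest as:
 *
 *	unix_state_lock(sk1);	- first lock, lower address
 *	unix_state_lock(sk2);	- cmp_ptr(sk1, sk2) < 0, so lockdep allows it
 */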
static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
				  const struct lockdep_map *_b)
{
	const struct sock *a, *b;

	a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
	b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);

	/* unix_collect_skb(): listener -> embryo order. */
	if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
		return -1;

	/* Should never happen.  Just to be symmetric. */
	if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
		return 1;

	return 0;
}
#endif

static unsigned int unix_unbound_hash(struct sock *sk)
{
	unsigned long hash = (unsigned long)sk;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash ^= sk->sk_type;

	return hash & UNIX_HASH_MOD;
}

static unsigned int unix_bsd_hash(struct inode *i)
{
	return i->i_ino & UNIX_HASH_MOD;
}

static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	__wsum csum = csum_partial(sunaddr, addr_len, 0);
	unsigned int hash;

	hash = (__force unsigned int)csum_fold(csum);
	hash ^= hash >> 8;
	hash ^= type;

	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}

static void unix_table_double_lock(struct net *net,
				   unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_lock(&net->unx.table.locks[hash1]);
		return;
	}

	if (hash1 > hash2)
		swap(hash1, hash2);

	spin_lock(&net->unx.table.locks[hash1]);
	spin_lock(&net->unx.table.locks[hash2]);
}

static void unix_table_double_unlock(struct net *net,
				     unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_unlock(&net->unx.table.locks[hash1]);
		return;
	}

	spin_unlock(&net->unx.table.locks[hash1]);
	spin_unlock(&net->unx.table.locks[hash2]);
}

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return !unix_peer(osk) || unix_peer(osk) == sk;
}

static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
					     int addr_len)
{
	struct unix_address *addr;

	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		return NULL;

	refcount_set(&addr->refcnt, 1);
	addr->len = addr_len;
	memcpy(addr->name, sunaddr, addr_len);

	return addr;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}
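/* Illustrative summary (not part of this file) of the three address forms
 * distinguished below; the pathname shown is hypothetical:
 *
 *	pathname:  sun_path = "/tmp/sock"  (first byte non-zero, NUL-terminated)
 *	abstract:  sun_path = "\0name"     (first byte zero, no NUL needed)
 *	unnamed:   addr_len == offsetof(struct sockaddr_un, sun_path)
 *
 * The hash table is partitioned accordingly: unbound and pathname sockets
 * hash into [0, UNIX_HASH_MOD], abstract sockets into
 * [UNIX_HASH_MOD + 1, 2 * UNIX_HASH_MOD + 1] (see the hash helpers above).
 */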
/*
 * Check unix socket name:
 *	- should not be zero length.
 *	- if it starts with a non-zero byte, it should be NUL-terminated (FS object)
 *	- if it starts with zero, it is an abstract name.
 */

static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
	    addr_len > sizeof(*sunaddr))
		return -EINVAL;

	if (sunaddr->sun_family != AF_UNIX)
		return -EINVAL;

	return 0;
}

static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
	short offset = offsetof(struct sockaddr_storage, __data);

	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));

	/* This may look like an off by one error but it is a bit more
	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
	 * sun_path[108] doesn't as such exist.  However in kernel space
	 * we are guaranteed that it is a valid memory location in our
	 * kernel address buffer because syscall functions always pass
	 * a pointer of struct sockaddr_storage which has a bigger buffer
	 * than 108.  Also, we must terminate sun_path for strlen() in
	 * getname_kernel().
	 */
	addr->__data[addr_len - offset] = 0;

	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
	 * know the actual buffer.
	 */
	return strlen(addr->__data) + offset + 1;
}
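/* Worked example (illustrative, not part of this file): a caller binds with
 * a full, unterminated 108-byte path, so addr_len == offset + 108. The
 * store above writes the NUL one byte past sun_path, which is still inside
 * the sockaddr_storage buffer, and the returned length renormalizes to
 * "path length + offset + 1" so that bytes after an embedded NUL are
 * ignored. The path "/run/x" is hypothetical:
 *
 *	sun_path = "/run/x" "\0" <101 junk bytes>, addr_len = offset + 108
 *	=> strlen(addr->__data) == 6, return value is 6 + offset + 1
 */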
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct net *net, struct sock *sk)
{
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}

static void __unix_set_addr_hash(struct net *net, struct sock *sk,
				 struct unix_address *addr, unsigned int hash)
{
	__unix_remove_socket(sk);
	smp_store_release(&unix_sk(sk)->addr, addr);

	sk->sk_hash = hash;
	__unix_insert_socket(net, sk);
}

static void unix_remove_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_remove_socket(sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_insert_socket(net, sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_bsd_socket(struct sock *sk)
{
	spin_lock(&bsd_socket_locks[sk->sk_hash]);
	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}

static void unix_remove_bsd_socket(struct sock *sk)
{
	if (!hlist_unhashed(&sk->sk_bind_node)) {
		spin_lock(&bsd_socket_locks[sk->sk_hash]);
		__sk_del_bind_node(sk);
		spin_unlock(&bsd_socket_locks[sk->sk_hash]);

		sk_node_init(&sk->sk_bind_node);
	}
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &net->unx.table.buckets[hash]) {
		struct unix_sock *u = unix_sk(s);

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			return s;
	}
	return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, unsigned int hash)
{
	struct sock *s;

	spin_lock(&net->unx.table.locks[hash]);
	s = __unix_find_socket_byname(net, sunname, len, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&net->unx.table.locks[hash]);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	unsigned int hash = unix_bsd_hash(i);
	struct sock *s;

	spin_lock(&bsd_socket_locks[hash]);
	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			spin_unlock(&bsd_socket_locks[hash]);
			return s;
		}
	}
	spin_unlock(&bsd_socket_locks[hash]);
	return NULL;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (eg, /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and broken when the
 * association to the server socket is dissolved or after a wake up
 * was relayed.
 */
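/* Illustrative event chain (not part of this file) for the relay described
 * above, with a hypothetical client C connected to server S:
 *
 *	1. C's sendmsg() finds S's receive queue full and, via
 *	   unix_dgram_peer_wake_me(), hooks C->peer_wake onto S->peer_wait.
 *	2. A reader dequeues a datagram from S and wakes S->peer_wait.
 *	3. unix_dgram_peer_wake_relay() runs as C's wake function, unhooks
 *	   the entry, and wakes C's own socket wait queue, so a sleeping
 *	   poll(POLLOUT) on C returns.
 */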
497 */ 498 499 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, 500 void *key) 501 { 502 struct unix_sock *u; 503 wait_queue_head_t *u_sleep; 504 505 u = container_of(q, struct unix_sock, peer_wake); 506 507 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, 508 q); 509 u->peer_wake.private = NULL; 510 511 /* relaying can only happen while the wq still exists */ 512 u_sleep = sk_sleep(&u->sk); 513 if (u_sleep) 514 wake_up_interruptible_poll(u_sleep, key_to_poll(key)); 515 516 return 0; 517 } 518 519 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) 520 { 521 struct unix_sock *u, *u_other; 522 int rc; 523 524 u = unix_sk(sk); 525 u_other = unix_sk(other); 526 rc = 0; 527 spin_lock(&u_other->peer_wait.lock); 528 529 if (!u->peer_wake.private) { 530 u->peer_wake.private = other; 531 __add_wait_queue(&u_other->peer_wait, &u->peer_wake); 532 533 rc = 1; 534 } 535 536 spin_unlock(&u_other->peer_wait.lock); 537 return rc; 538 } 539 540 static void unix_dgram_peer_wake_disconnect(struct sock *sk, 541 struct sock *other) 542 { 543 struct unix_sock *u, *u_other; 544 545 u = unix_sk(sk); 546 u_other = unix_sk(other); 547 spin_lock(&u_other->peer_wait.lock); 548 549 if (u->peer_wake.private == other) { 550 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); 551 u->peer_wake.private = NULL; 552 } 553 554 spin_unlock(&u_other->peer_wait.lock); 555 } 556 557 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, 558 struct sock *other) 559 { 560 unix_dgram_peer_wake_disconnect(sk, other); 561 wake_up_interruptible_poll(sk_sleep(sk), 562 EPOLLOUT | 563 EPOLLWRNORM | 564 EPOLLWRBAND); 565 } 566 567 /* preconditions: 568 * - unix_peer(sk) == other 569 * - association is stable 570 */ 571 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) 572 { 573 int connected; 574 575 connected = unix_dgram_peer_wake_connect(sk, other); 576 577 /* If other is SOCK_DEAD, we want to make sure we signal 578 * POLLOUT, such that a subsequent write() can get a 579 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs 580 * to other and its full, we will hang waiting for POLLOUT. 581 */ 582 if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD)) 583 return 1; 584 585 if (connected) 586 unix_dgram_peer_wake_disconnect(sk, other); 587 588 return 0; 589 } 590 591 static int unix_writable(const struct sock *sk, unsigned char state) 592 { 593 return state != TCP_LISTEN && 594 (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf); 595 } 596 597 static void unix_write_space(struct sock *sk) 598 { 599 struct socket_wq *wq; 600 601 rcu_read_lock(); 602 if (unix_writable(sk, READ_ONCE(sk->sk_state))) { 603 wq = rcu_dereference(sk->sk_wq); 604 if (skwq_has_sleeper(wq)) 605 wake_up_interruptible_sync_poll(&wq->wait, 606 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); 607 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 608 } 609 rcu_read_unlock(); 610 } 611 612 /* When dgram socket disconnects (or changes its peer), we clear its receive 613 * queue of packets arrived from previous peer. First, it allows to do 614 * flow control based only on wmem_alloc; second, sk connected to peer 615 * may receive messages only from that peer. 
/* When a dgram socket disconnects (or changes its peer), we clear its
 * receive queue of packets that arrived from the previous peer. First,
 * this allows flow control based only on wmem_alloc; second, an sk
 * connected to a peer may receive messages only from that peer.
 */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge_reason(&sk->sk_receive_queue,
				       SKB_DROP_REASON_UNIX_DISCONNECT);

		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of bidirectional dgram pipe is disconnected,
		 * we signal error. Messages are lost. Do not do this when
		 * the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			WRITE_ONCE(other->sk_err, ECONNRESET);
			sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE);

	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (sk->sk_peer_pid)
		pidfs_put_pid(sk->sk_peer_pid);

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		 atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct sock *skpair;
	struct sk_buff *skb;
	struct path path;
	int state;

	unix_remove_socket(sock_net(sk), sk);
	unix_remove_bsd_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	WRITE_ONCE(sk->sk_state, TCP_CLOSE);

	skpair = unix_peer(sk);
	unix_peer(sk) = NULL;

	unix_state_unlock(sk);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	u->oob_skb = NULL;
#endif

	wake_up_interruptible_all(&u->peer_wait);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
				WRITE_ONCE(skpair->sk_err, ECONNRESET);
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);

		/* passed fds are erased in the kfree_skb hook */
		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 * What the above comment does talk about? --ANK(980817)
	 */

	if (READ_ONCE(unix_tot_inflight))
		unix_gc(); /* Garbage collect fds */
}
struct unix_peercred {
	struct pid *peer_pid;
	const struct cred *peer_cred;
};

static inline int prepare_peercred(struct unix_peercred *peercred)
{
	struct pid *pid;
	int err;

	pid = task_tgid(current);
	err = pidfs_register_pid(pid);
	if (likely(!err)) {
		peercred->peer_pid = get_pid(pid);
		peercred->peer_cred = get_current_cred();
	}
	return err;
}

static void drop_peercred(struct unix_peercred *peercred)
{
	const struct cred *cred = NULL;
	struct pid *pid = NULL;

	might_sleep();

	swap(peercred->peer_pid, pid);
	swap(peercred->peer_cred, cred);

	pidfs_put_pid(pid);
	put_pid(pid);
	put_cred(cred);
}

static inline void init_peercred(struct sock *sk,
				 const struct unix_peercred *peercred)
{
	sk->sk_peer_pid = peercred->peer_pid;
	sk->sk_peer_cred = peercred->peer_cred;
}

static void update_peercred(struct sock *sk, struct unix_peercred *peercred)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	spin_lock(&sk->sk_peer_lock);
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	init_peercred(sk, peercred);
	spin_unlock(&sk->sk_peer_lock);

	peercred->peer_pid = old_pid;
	peercred->peer_cred = old_cred;
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	lockdep_assert_held(&unix_sk(peersk)->lock);

	spin_lock(&sk->sk_peer_lock);
	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
	pidfs_get_pid(sk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
	spin_unlock(&sk->sk_peer_lock);
}

static bool unix_may_passcred(const struct sock *sk)
{
	return sk->sk_scm_credentials || sk->sk_scm_pidfd;
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct unix_peercred peercred = {};

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!READ_ONCE(u->addr))
		goto out;	/* No listens on an unbound socket */
	err = prepare_peercred(&peercred);
	if (err)
		goto out;
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog = backlog;
	WRITE_ONCE(sk->sk_state, TCP_LISTEN);

	/* set credentials so connect can copy them */
	update_peercred(sk, &peercred);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	drop_peercred(&peercred);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
	struct sk_buff *skb;
	struct unix_sock *u;
	int nr_fds = 0;

	spin_lock(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	while (skb) {
		u = unix_sk(skb->sk);
		nr_fds += atomic_read(&u->scm_stat.nr_fds);
		skb = skb_peek_next(skb, &sk->sk_receive_queue);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return nr_fds;
}
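/* Illustrative output (not part of this file): unix_show_fdinfo() below
 * contributes the scm_fds line to /proc/<pid>/fdinfo/<fd> for an AF_UNIX
 * socket. With, hypothetically, three file descriptors still in flight:
 *
 *	$ cat /proc/1234/fdinfo/5
 *	...
 *	scm_fds: 3
 */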
static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
	struct sock *sk = sock->sk;
	unsigned char s_state;
	struct unix_sock *u;
	int nr_fds = 0;

	if (sk) {
		s_state = READ_ONCE(sk->sk_state);
		u = unix_sk(sk);

		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
		 * SOCK_DGRAM is ordinary. So, no lock is needed.
		 */
		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
			nr_fds = atomic_read(&u->scm_stat.nr_fds);
		else if (s_state == TCP_LISTEN)
			nr_fds = unix_count_nr_fds(sk);

		seq_printf(m, "scm_fds: %u\n", nr_fds);
	}
}
#else
#define unix_show_fdinfo NULL
#endif

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.read_skb =	unix_stream_read_skb,
	.mmap =		sock_no_mmap,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	sk_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_dgram_sendmsg,
	.read_skb =	unix_read_skb,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.set_peek_off =	sk_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.set_peek_off =	sk_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};
static void unix_close(struct sock *sk, long timeout)
{
	/* Nothing to do here, unix socket does not need a ->close().
	 * This is merely for sockmap.
	 */
}

static bool unix_bpf_bypass_getsockopt(int level, int optname)
{
	if (level == SOL_SOCKET) {
		switch (optname) {
		case SO_PEERPIDFD:
			return true;
		default:
			return false;
		}
	}

	return false;
}

struct proto unix_dgram_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
#endif
};

struct proto unix_stream_proto = {
	.name			= "UNIX-STREAM",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
#endif
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
	struct unix_sock *u;
	struct sock *sk;
	int err;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
		err = -ENFILE;
		goto err;
	}

	if (type == SOCK_STREAM)
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
	else /* dgram and seqpacket */
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);

	if (!sk) {
		err = -ENOMEM;
		goto err;
	}

	sock_init_data(sock, sk);

	sk->sk_scm_rights	= 1;
	sk->sk_hash		= unix_unbound_hash(sk);
	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
	sk->sk_destruct		= unix_sock_destructor;
	lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);

	u = unix_sk(sk);
	u->listener = NULL;
	u->vertex = NULL;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
	unix_insert_unbound_socket(net, sk);

	sock_prot_inuse_add(net, sk->sk_prot, 1);

	return sk;

err:
	atomic_long_dec(&unix_nr_socks);
	return ERR_PTR(err);
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
	/*
	 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
	 * nothing uses it.
	 */
1113 */ 1114 case SOCK_RAW: 1115 sock->type = SOCK_DGRAM; 1116 fallthrough; 1117 case SOCK_DGRAM: 1118 sock->ops = &unix_dgram_ops; 1119 break; 1120 case SOCK_SEQPACKET: 1121 sock->ops = &unix_seqpacket_ops; 1122 break; 1123 default: 1124 return -ESOCKTNOSUPPORT; 1125 } 1126 1127 sk = unix_create1(net, sock, kern, sock->type); 1128 if (IS_ERR(sk)) 1129 return PTR_ERR(sk); 1130 1131 return 0; 1132 } 1133 1134 static int unix_release(struct socket *sock) 1135 { 1136 struct sock *sk = sock->sk; 1137 1138 if (!sk) 1139 return 0; 1140 1141 sk->sk_prot->close(sk, 0); 1142 unix_release_sock(sk, 0); 1143 sock->sk = NULL; 1144 1145 return 0; 1146 } 1147 1148 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len, 1149 int type, int flags) 1150 { 1151 struct inode *inode; 1152 struct path path; 1153 struct sock *sk; 1154 int err; 1155 1156 unix_mkname_bsd(sunaddr, addr_len); 1157 1158 if (flags & SOCK_COREDUMP) { 1159 const struct cred *cred; 1160 struct cred *kcred; 1161 struct path root; 1162 1163 kcred = prepare_kernel_cred(&init_task); 1164 if (!kcred) { 1165 err = -ENOMEM; 1166 goto fail; 1167 } 1168 1169 task_lock(&init_task); 1170 get_fs_root(init_task.fs, &root); 1171 task_unlock(&init_task); 1172 1173 cred = override_creds(kcred); 1174 err = vfs_path_lookup(root.dentry, root.mnt, sunaddr->sun_path, 1175 LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS | 1176 LOOKUP_NO_MAGICLINKS, &path); 1177 put_cred(revert_creds(cred)); 1178 path_put(&root); 1179 if (err) 1180 goto fail; 1181 } else { 1182 err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); 1183 if (err) 1184 goto fail; 1185 1186 err = path_permission(&path, MAY_WRITE); 1187 if (err) 1188 goto path_put; 1189 } 1190 1191 err = -ECONNREFUSED; 1192 inode = d_backing_inode(path.dentry); 1193 if (!S_ISSOCK(inode->i_mode)) 1194 goto path_put; 1195 1196 sk = unix_find_socket_byinode(inode); 1197 if (!sk) 1198 goto path_put; 1199 1200 err = -EPROTOTYPE; 1201 if (sk->sk_type == type) 1202 touch_atime(&path); 1203 else 1204 goto sock_put; 1205 1206 path_put(&path); 1207 1208 return sk; 1209 1210 sock_put: 1211 sock_put(sk); 1212 path_put: 1213 path_put(&path); 1214 fail: 1215 return ERR_PTR(err); 1216 } 1217 1218 static struct sock *unix_find_abstract(struct net *net, 1219 struct sockaddr_un *sunaddr, 1220 int addr_len, int type) 1221 { 1222 unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type); 1223 struct dentry *dentry; 1224 struct sock *sk; 1225 1226 sk = unix_find_socket_byname(net, sunaddr, addr_len, hash); 1227 if (!sk) 1228 return ERR_PTR(-ECONNREFUSED); 1229 1230 dentry = unix_sk(sk)->path.dentry; 1231 if (dentry) 1232 touch_atime(&unix_sk(sk)->path); 1233 1234 return sk; 1235 } 1236 1237 static struct sock *unix_find_other(struct net *net, 1238 struct sockaddr_un *sunaddr, 1239 int addr_len, int type, int flags) 1240 { 1241 struct sock *sk; 1242 1243 if (sunaddr->sun_path[0]) 1244 sk = unix_find_bsd(sunaddr, addr_len, type, flags); 1245 else 1246 sk = unix_find_abstract(net, sunaddr, addr_len, type); 1247 1248 return sk; 1249 } 1250 1251 static int unix_autobind(struct sock *sk) 1252 { 1253 struct unix_sock *u = unix_sk(sk); 1254 unsigned int new_hash, old_hash; 1255 struct net *net = sock_net(sk); 1256 struct unix_address *addr; 1257 u32 lastnum, ordernum; 1258 int err; 1259 1260 err = mutex_lock_interruptible(&u->bindlock); 1261 if (err) 1262 return err; 1263 1264 if (u->addr) 1265 goto out; 1266 1267 err = -ENOMEM; 1268 addr = kzalloc(sizeof(*addr) + 1269 offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL); 
static int unix_autobind(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	u32 lastnum, ordernum;
	int err;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) +
		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

	old_hash = sk->sk_hash;
	ordernum = get_random_u32();
	lastnum = ordernum & 0xFFFFF;
retry:
	ordernum = (ordernum + 1) & 0xFFFFF;
	sprintf(addr->name->sun_path + 1, "%05x", ordernum);

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
		unix_table_double_unlock(net, old_hash, new_hash);

		/* __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();

		if (ordernum == lastnum) {
			/* Give up if all names seem to be in use. */
			err = -ENOSPC;
			unix_release_addr(addr);
			goto out;
		}

		goto retry;
	}

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}

static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
			 int addr_len)
{
	umode_t mode = S_IFSOCK |
	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct mnt_idmap *idmap;
	struct unix_address *addr;
	struct dentry *dentry;
	struct path parent;
	int err;

	addr_len = unix_mkname_bsd(sunaddr, addr_len);
	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out;
	}

	/*
	 * All right, let's create it.
	 */
	idmap = mnt_idmap(parent.mnt);
	err = security_path_mknod(&parent, dentry, mode, 0);
	if (!err)
		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
	if (err)
		goto out_path;
	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_unlink;
	if (u->addr)
		goto out_unlock;

	old_hash = sk->sk_hash;
	new_hash = unix_bsd_hash(d_backing_inode(dentry));
	unix_table_double_lock(net, old_hash, new_hash);
	u->path.mnt = mntget(parent.mnt);
	u->path.dentry = dget(dentry);
	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	unix_insert_bsd_socket(sk);
	mutex_unlock(&u->bindlock);
	done_path_create(&parent, dentry);
	return 0;

out_unlock:
	mutex_unlock(&u->bindlock);
	err = -EINVAL;
out_unlink:
	/* failed after successful mknod?  unlink what we'd created... */
	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
	done_path_create(&parent, dentry);
out:
	unix_release_addr(addr);
	return err == -EEXIST ? -EADDRINUSE : err;
}
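/* Illustrative usage (not part of this file): a pathname bind() travels
 * through unix_bind_bsd() above and creates a socket inode whose mode is
 * S_IFSOCK masked by the caller's umask; a pre-existing path surfaces as
 * EADDRINUSE rather than EEXIST. The path below is hypothetical:
 *
 *	struct sockaddr_un addr = { .sun_family = AF_UNIX };
 *
 *	strcpy(addr.sun_path, "/run/example.sock");
 *	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
 *	    errno == EADDRINUSE)
 *		unlink(addr.sun_path);	// stale socket file from a prior run
 */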
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
			      int addr_len)
{
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	int err;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out;

	if (u->addr) {
		err = -EINVAL;
		goto out_mutex;
	}

	old_hash = sk->sk_hash;
	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
		goto out_spin;

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	mutex_unlock(&u->bindlock);
	return 0;

out_spin:
	unix_table_double_unlock(net, old_hash, new_hash);
	err = -EADDRINUSE;
out_mutex:
	mutex_unlock(&u->bindlock);
out:
	unix_release_addr(addr);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	int err;

	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
	    sunaddr->sun_family == AF_UNIX)
		return unix_autobind(sk);

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		return err;

	if (sunaddr->sun_path[0])
		err = unix_bind_bsd(sk, sunaddr, addr_len);
	else
		err = unix_bind_abstract(sk, sunaddr, addr_len);

	return err;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}

	if (sk1 > sk2)
		swap(sk1, sk2);

	unix_state_lock(sk1);
	unix_state_lock(sk2);
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
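/* Illustrative usage (not part of this file): per 1003.1g, connecting a
 * datagram socket to an address whose family is AF_UNSPEC dissolves the
 * association; unix_dgram_connect() below implements exactly that:
 *
 *	struct sockaddr addr = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &addr, sizeof(addr));	// fd is now disconnected
 */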
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *sk = sock->sk;
	struct sock *other;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_validate_addr(sunaddr, alen);
		if (err)
			goto out;

		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
		if (err)
			goto out;

		if (unix_may_passcred(sk) && !READ_ONCE(unix_sk(sk)->addr)) {
			err = unix_autobind(sk);
			if (err)
				goto out;
		}

restart:
		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type, 0);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			goto out;
		}

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
	} else {
		/*
		 * 1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);

		unix_peer(sk) = other;
		if (!other)
			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer) {
			unix_dgram_disconnected(sk, old_peer);

			unix_state_lock(old_peer);
			if (!unix_peer(old_peer))
				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
			unix_state_unlock(old_peer);
		}

		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}

	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full_lockless(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct unix_peercred peercred = {};
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	unsigned char state;
	long timeo;
	int err;

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		goto out;

	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
	if (err)
		goto out;

	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	 * If we allocate them after the state is locked,
	 * we will have to recheck everything again in any case.
	 */

	/* create new sock for complete connection */
	newsk = unix_create1(net, NULL, 0, sock->type);
	if (IS_ERR(newsk)) {
		err = PTR_ERR(newsk);
		goto out;
	}

	err = prepare_peercred(&peercred);
	if (err)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto out_free_sk;
	}

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, flags);
	if (IS_ERR(other)) {
		err = PTR_ERR(other);
		goto out_free_skb;
	}

	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	if (other->sk_state != TCP_LISTEN ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -ECONNREFUSED;
		goto out_unlock;
	}

	if (unix_recvq_full_lockless(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);
		sock_put(other);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free_skb;

		goto restart;
	}

	/* self connect and simultaneous connect are eliminated
	 * by rejecting TCP_LISTEN socket to avoid deadlock.
	 */
	state = READ_ONCE(sk->sk_state);
	if (unlikely(state != TCP_CLOSE)) {
		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
		goto out_unlock;
	}

	unix_state_lock(sk);

	if (unlikely(sk->sk_state != TCP_CLOSE)) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set up all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	newsk->sk_scm_recv_flags = other->sk_scm_recv_flags;
	init_peercred(newsk, &peercred);

	newu = unix_sk(newsk);
	newu->listener = other;
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock
	 *
	 * The contents of *(otheru->addr) and otheru->path
	 * are seen fully set up here, since we have found
	 * otheru in hash under its lock.  Insertion into the
	 * hash chain we'd found it in had been done in an
	 * earlier critical area protected by the chain's lock,
	 * the same one where we'd set *(otheru->addr) contents,
	 * as well as otheru->path and otheru->addr itself.
	 *
	 * Using smp_store_release() here to set newu->addr
	 * is enough to make those stores, as well as stores
	 * to newu->path visible to anyone who gets newu->addr
	 * by smp_load_acquire().  IOW, the same guarantees
	 * as for unix_sock instances bound in unix_bind() or
	 * in unix_autobind().
	 */
1735 */ 1736 if (otheru->path.dentry) { 1737 path_get(&otheru->path); 1738 newu->path = otheru->path; 1739 } 1740 refcount_inc(&otheru->addr->refcnt); 1741 smp_store_release(&newu->addr, otheru->addr); 1742 1743 /* Set credentials */ 1744 copy_peercred(sk, other); 1745 1746 sock->state = SS_CONNECTED; 1747 WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED); 1748 sock_hold(newsk); 1749 1750 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ 1751 unix_peer(sk) = newsk; 1752 1753 unix_state_unlock(sk); 1754 1755 /* take ten and send info to listening sock */ 1756 spin_lock(&other->sk_receive_queue.lock); 1757 __skb_queue_tail(&other->sk_receive_queue, skb); 1758 spin_unlock(&other->sk_receive_queue.lock); 1759 unix_state_unlock(other); 1760 other->sk_data_ready(other); 1761 sock_put(other); 1762 return 0; 1763 1764 out_unlock: 1765 unix_state_unlock(other); 1766 sock_put(other); 1767 out_free_skb: 1768 consume_skb(skb); 1769 out_free_sk: 1770 unix_release_sock(newsk, 0); 1771 out: 1772 drop_peercred(&peercred); 1773 return err; 1774 } 1775 1776 static int unix_socketpair(struct socket *socka, struct socket *sockb) 1777 { 1778 struct unix_peercred ska_peercred = {}, skb_peercred = {}; 1779 struct sock *ska = socka->sk, *skb = sockb->sk; 1780 int err; 1781 1782 err = prepare_peercred(&ska_peercred); 1783 if (err) 1784 return err; 1785 1786 err = prepare_peercred(&skb_peercred); 1787 if (err) { 1788 drop_peercred(&ska_peercred); 1789 return err; 1790 } 1791 1792 /* Join our sockets back to back */ 1793 sock_hold(ska); 1794 sock_hold(skb); 1795 unix_peer(ska) = skb; 1796 unix_peer(skb) = ska; 1797 init_peercred(ska, &ska_peercred); 1798 init_peercred(skb, &skb_peercred); 1799 1800 ska->sk_state = TCP_ESTABLISHED; 1801 skb->sk_state = TCP_ESTABLISHED; 1802 socka->state = SS_CONNECTED; 1803 sockb->state = SS_CONNECTED; 1804 return 0; 1805 } 1806 1807 static int unix_accept(struct socket *sock, struct socket *newsock, 1808 struct proto_accept_arg *arg) 1809 { 1810 struct sock *sk = sock->sk; 1811 struct sk_buff *skb; 1812 struct sock *tsk; 1813 1814 arg->err = -EOPNOTSUPP; 1815 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) 1816 goto out; 1817 1818 arg->err = -EINVAL; 1819 if (READ_ONCE(sk->sk_state) != TCP_LISTEN) 1820 goto out; 1821 1822 /* If socket state is TCP_LISTEN it cannot change (for now...), 1823 * so that no locks are necessary. 1824 */ 1825 1826 skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0, 1827 &arg->err); 1828 if (!skb) { 1829 /* This means receive shutdown. 
static int unix_accept(struct socket *sock, struct socket *newsock,
		       struct proto_accept_arg *arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct sock *tsk;

	arg->err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	arg->err = -EINVAL;
	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
				&arg->err);
	if (!skb) {
		/* This means receive shutdown. */
		if (arg->err == 0)
			arg->err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	unix_update_edges(unix_sk(tsk));
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return arg->err;
}

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_address *addr;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	addr = smp_load_acquire(&unix_sk(sk)->addr);
	if (!addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		err = offsetof(struct sockaddr_un, sun_path);
	} else {
		err = addr->len;
		memcpy(sunaddr, addr->name, addr->len);

		if (peer)
			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
					       CGROUP_UNIX_GETPEERNAME);
		else
			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
					       CGROUP_UNIX_GETSOCKNAME);
	}
	sock_put(sk);
out:
	return err;
}

/* The "user->unix_inflight" variable is protected by the garbage
 * collection lock, and we just read it locklessly here. If you go
 * over the limit, there might be a tiny race in actually noticing
 * it across threads. Tough.
 */
static inline bool too_many_unix_fds(struct task_struct *p)
{
	struct user_struct *user = current_user();

	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
	return false;
}

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	if (too_many_unix_fds(current))
		return -ETOOMANYREFS;

	UNIXCB(skb).fp = scm->fp;
	scm->fp = NULL;

	if (unix_prepare_fpl(UNIXCB(skb).fp))
		return -ENOMEM;

	return 0;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	unix_destroy_fpl(scm->fp);
}

static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
}
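/* Illustrative usage (not part of this file): the fd-passing path above is
 * what a userspace sender exercises with an SCM_RIGHTS control message.
 * "fd_to_pass" and "sock_fd" below are hypothetical:
 *
 *	char buf[CMSG_SPACE(sizeof(int))] = {};
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = buf, .msg_controllen = sizeof(buf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */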
static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;

	memset(&scm, 0, sizeof(scm));
	scm.pid = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	unix_get_secdata(scm, skb);
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
				 const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;

	if (unix_may_passcred(sk) || unix_may_passcred(other) ||
	    !other->sk_socket) {
		UNIXCB(skb).pid = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}

static bool unix_skb_scm_eq(struct sk_buff *skb,
			    struct scm_cookie *scm)
{
	return UNIXCB(skb).pid == scm->pid &&
	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
	       unix_secdata_eq(scm, skb);
}

static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count)) {
		atomic_add(fp->count, &u->scm_stat.nr_fds);
		unix_add_edges(fp, u);
	}
}

static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count)) {
		atomic_sub(fp->count, &u->scm_stat.nr_fds);
		unix_del_edges(fp);
	}
}
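/* Illustrative usage (not part of this file): a receiver that sets
 * SO_PASSCRED opts in to the implicit credentials attached by
 * unix_maybe_add_creds() above and reads them back as an SCM_CREDENTIALS
 * control message:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	// recvmsg() then yields a cmsg with cmsg_type == SCM_CREDENTIALS
 *	// whose payload is a struct ucred { pid, uid, gid }.
 */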
2014 */ 2015 2016 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, 2017 size_t len) 2018 { 2019 struct sock *sk = sock->sk, *other = NULL; 2020 struct unix_sock *u = unix_sk(sk); 2021 struct scm_cookie scm; 2022 struct sk_buff *skb; 2023 int data_len = 0; 2024 int sk_locked; 2025 long timeo; 2026 int err; 2027 2028 err = scm_send(sock, msg, &scm, false); 2029 if (err < 0) 2030 return err; 2031 2032 wait_for_unix_gc(scm.fp); 2033 2034 if (msg->msg_flags & MSG_OOB) { 2035 err = -EOPNOTSUPP; 2036 goto out; 2037 } 2038 2039 if (msg->msg_namelen) { 2040 err = unix_validate_addr(msg->msg_name, msg->msg_namelen); 2041 if (err) 2042 goto out; 2043 2044 err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, 2045 msg->msg_name, 2046 &msg->msg_namelen, 2047 NULL); 2048 if (err) 2049 goto out; 2050 } 2051 2052 if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) { 2053 err = unix_autobind(sk); 2054 if (err) 2055 goto out; 2056 } 2057 2058 if (len > READ_ONCE(sk->sk_sndbuf) - 32) { 2059 err = -EMSGSIZE; 2060 goto out; 2061 } 2062 2063 if (len > SKB_MAX_ALLOC) { 2064 data_len = min_t(size_t, 2065 len - SKB_MAX_ALLOC, 2066 MAX_SKB_FRAGS * PAGE_SIZE); 2067 data_len = PAGE_ALIGN(data_len); 2068 2069 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE); 2070 } 2071 2072 skb = sock_alloc_send_pskb(sk, len - data_len, data_len, 2073 msg->msg_flags & MSG_DONTWAIT, &err, 2074 PAGE_ALLOC_COSTLY_ORDER); 2075 if (!skb) 2076 goto out; 2077 2078 err = unix_scm_to_skb(&scm, skb, true); 2079 if (err < 0) 2080 goto out_free; 2081 2082 skb_put(skb, len - data_len); 2083 skb->data_len = data_len; 2084 skb->len = len; 2085 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); 2086 if (err) 2087 goto out_free; 2088 2089 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 2090 2091 if (msg->msg_namelen) { 2092 lookup: 2093 other = unix_find_other(sock_net(sk), msg->msg_name, 2094 msg->msg_namelen, sk->sk_type, 0); 2095 if (IS_ERR(other)) { 2096 err = PTR_ERR(other); 2097 goto out_free; 2098 } 2099 } else { 2100 other = unix_peer_get(sk); 2101 if (!other) { 2102 err = -ENOTCONN; 2103 goto out_free; 2104 } 2105 } 2106 2107 if (sk_filter(other, skb) < 0) { 2108 /* Toss the packet but do not return any error to the sender */ 2109 err = len; 2110 goto out_sock_put; 2111 } 2112 2113 restart: 2114 sk_locked = 0; 2115 unix_state_lock(other); 2116 restart_locked: 2117 2118 if (!unix_may_send(sk, other)) { 2119 err = -EPERM; 2120 goto out_unlock; 2121 } 2122 2123 if (unlikely(sock_flag(other, SOCK_DEAD))) { 2124 /* Check with 1003.1g - what should datagram error */ 2125 2126 unix_state_unlock(other); 2127 2128 if (sk->sk_type == SOCK_SEQPACKET) { 2129 /* We are here only when racing with unix_release_sock() 2130 * is clearing @other. Never change state to TCP_CLOSE 2131 * unlike SOCK_DGRAM wants. 
2132 */ 2133 err = -EPIPE; 2134 goto out_sock_put; 2135 } 2136 2137 if (!sk_locked) 2138 unix_state_lock(sk); 2139 2140 if (unix_peer(sk) == other) { 2141 unix_peer(sk) = NULL; 2142 unix_dgram_peer_wake_disconnect_wakeup(sk, other); 2143 2144 WRITE_ONCE(sk->sk_state, TCP_CLOSE); 2145 unix_state_unlock(sk); 2146 2147 unix_dgram_disconnected(sk, other); 2148 sock_put(other); 2149 err = -ECONNREFUSED; 2150 goto out_sock_put; 2151 } 2152 2153 unix_state_unlock(sk); 2154 2155 if (!msg->msg_namelen) { 2156 err = -ECONNRESET; 2157 goto out_sock_put; 2158 } 2159 2160 sock_put(other); 2161 goto lookup; 2162 } 2163 2164 if (other->sk_shutdown & RCV_SHUTDOWN) { 2165 err = -EPIPE; 2166 goto out_unlock; 2167 } 2168 2169 if (UNIXCB(skb).fp && !other->sk_scm_rights) { 2170 err = -EPERM; 2171 goto out_unlock; 2172 } 2173 2174 if (sk->sk_type != SOCK_SEQPACKET) { 2175 err = security_unix_may_send(sk->sk_socket, other->sk_socket); 2176 if (err) 2177 goto out_unlock; 2178 } 2179 2180 /* other == sk && unix_peer(other) != sk if 2181 * - unix_peer(sk) == NULL, destination address bound to sk 2182 * - unix_peer(sk) == sk by time of get but disconnected before lock 2183 */ 2184 if (other != sk && 2185 unlikely(unix_peer(other) != sk && 2186 unix_recvq_full_lockless(other))) { 2187 if (timeo) { 2188 timeo = unix_wait_for_peer(other, timeo); 2189 2190 err = sock_intr_errno(timeo); 2191 if (signal_pending(current)) 2192 goto out_sock_put; 2193 2194 goto restart; 2195 } 2196 2197 if (!sk_locked) { 2198 unix_state_unlock(other); 2199 unix_state_double_lock(sk, other); 2200 } 2201 2202 if (unix_peer(sk) != other || 2203 unix_dgram_peer_wake_me(sk, other)) { 2204 err = -EAGAIN; 2205 sk_locked = 1; 2206 goto out_unlock; 2207 } 2208 2209 if (!sk_locked) { 2210 sk_locked = 1; 2211 goto restart_locked; 2212 } 2213 } 2214 2215 if (unlikely(sk_locked)) 2216 unix_state_unlock(sk); 2217 2218 if (sock_flag(other, SOCK_RCVTSTAMP)) 2219 __net_timestamp(skb); 2220 2221 unix_maybe_add_creds(skb, sk, other); 2222 scm_stat_add(other, skb); 2223 skb_queue_tail(&other->sk_receive_queue, skb); 2224 unix_state_unlock(other); 2225 other->sk_data_ready(other); 2226 sock_put(other); 2227 scm_destroy(&scm); 2228 return len; 2229 2230 out_unlock: 2231 if (sk_locked) 2232 unix_state_unlock(sk); 2233 unix_state_unlock(other); 2234 out_sock_put: 2235 sock_put(other); 2236 out_free: 2237 consume_skb(skb); 2238 out: 2239 scm_destroy(&scm); 2240 return err; 2241 } 2242 2243 /* We use paged skbs for stream sockets, and limit occupancy to 32768 2244 * bytes, and a minimum of a full page. 
2245 */ 2246 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) 2247 2248 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2249 static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other, 2250 struct scm_cookie *scm, bool fds_sent) 2251 { 2252 struct unix_sock *ousk = unix_sk(other); 2253 struct sk_buff *skb; 2254 int err; 2255 2256 skb = sock_alloc_send_skb(sk, 1, msg->msg_flags & MSG_DONTWAIT, &err); 2257 2258 if (!skb) 2259 return err; 2260 2261 err = unix_scm_to_skb(scm, skb, !fds_sent); 2262 if (err < 0) 2263 goto out; 2264 2265 skb_put(skb, 1); 2266 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1); 2267 2268 if (err) 2269 goto out; 2270 2271 unix_state_lock(other); 2272 2273 if (sock_flag(other, SOCK_DEAD) || 2274 (other->sk_shutdown & RCV_SHUTDOWN)) { 2275 err = -EPIPE; 2276 goto out_unlock; 2277 } 2278 2279 if (UNIXCB(skb).fp && !other->sk_scm_rights) { 2280 err = -EPERM; 2281 goto out_unlock; 2282 } 2283 2284 unix_maybe_add_creds(skb, sk, other); 2285 scm_stat_add(other, skb); 2286 2287 spin_lock(&other->sk_receive_queue.lock); 2288 WRITE_ONCE(ousk->oob_skb, skb); 2289 __skb_queue_tail(&other->sk_receive_queue, skb); 2290 spin_unlock(&other->sk_receive_queue.lock); 2291 2292 sk_send_sigurg(other); 2293 unix_state_unlock(other); 2294 other->sk_data_ready(other); 2295 2296 return 0; 2297 out_unlock: 2298 unix_state_unlock(other); 2299 out: 2300 consume_skb(skb); 2301 return err; 2302 } 2303 #endif 2304 2305 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, 2306 size_t len) 2307 { 2308 struct sock *sk = sock->sk; 2309 struct sk_buff *skb = NULL; 2310 struct sock *other = NULL; 2311 struct scm_cookie scm; 2312 bool fds_sent = false; 2313 int err, sent = 0; 2314 2315 err = scm_send(sock, msg, &scm, false); 2316 if (err < 0) 2317 return err; 2318 2319 wait_for_unix_gc(scm.fp); 2320 2321 if (msg->msg_flags & MSG_OOB) { 2322 err = -EOPNOTSUPP; 2323 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2324 if (len) 2325 len--; 2326 else 2327 #endif 2328 goto out_err; 2329 } 2330 2331 if (msg->msg_namelen) { 2332 err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? 
-EISCONN : -EOPNOTSUPP; 2333 goto out_err; 2334 } else { 2335 other = unix_peer(sk); 2336 if (!other) { 2337 err = -ENOTCONN; 2338 goto out_err; 2339 } 2340 } 2341 2342 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2343 goto out_pipe; 2344 2345 while (sent < len) { 2346 int size = len - sent; 2347 int data_len; 2348 2349 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { 2350 skb = sock_alloc_send_pskb(sk, 0, 0, 2351 msg->msg_flags & MSG_DONTWAIT, 2352 &err, 0); 2353 } else { 2354 /* Keep two messages in the pipe so it schedules better */ 2355 size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64); 2356 2357 /* allow fallback to order-0 allocations */ 2358 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); 2359 2360 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); 2361 2362 data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); 2363 2364 skb = sock_alloc_send_pskb(sk, size - data_len, data_len, 2365 msg->msg_flags & MSG_DONTWAIT, &err, 2366 get_order(UNIX_SKB_FRAGS_SZ)); 2367 } 2368 if (!skb) 2369 goto out_err; 2370 2371 /* Only send the fds in the first buffer */ 2372 err = unix_scm_to_skb(&scm, skb, !fds_sent); 2373 if (err < 0) 2374 goto out_free; 2375 2376 fds_sent = true; 2377 2378 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { 2379 skb->ip_summed = CHECKSUM_UNNECESSARY; 2380 err = skb_splice_from_iter(skb, &msg->msg_iter, size, 2381 sk->sk_allocation); 2382 if (err < 0) 2383 goto out_free; 2384 2385 size = err; 2386 refcount_add(size, &sk->sk_wmem_alloc); 2387 } else { 2388 skb_put(skb, size - data_len); 2389 skb->data_len = data_len; 2390 skb->len = size; 2391 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); 2392 if (err) 2393 goto out_free; 2394 } 2395 2396 unix_state_lock(other); 2397 2398 if (sock_flag(other, SOCK_DEAD) || 2399 (other->sk_shutdown & RCV_SHUTDOWN)) 2400 goto out_pipe_unlock; 2401 2402 if (UNIXCB(skb).fp && !other->sk_scm_rights) { 2403 unix_state_unlock(other); 2404 err = -EPERM; 2405 goto out_free; 2406 } 2407 2408 unix_maybe_add_creds(skb, sk, other); 2409 scm_stat_add(other, skb); 2410 skb_queue_tail(&other->sk_receive_queue, skb); 2411 unix_state_unlock(other); 2412 other->sk_data_ready(other); 2413 sent += size; 2414 } 2415 2416 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2417 if (msg->msg_flags & MSG_OOB) { 2418 err = queue_oob(sk, msg, other, &scm, fds_sent); 2419 if (err) 2420 goto out_err; 2421 sent++; 2422 } 2423 #endif 2424 2425 scm_destroy(&scm); 2426 2427 return sent; 2428 2429 out_pipe_unlock: 2430 unix_state_unlock(other); 2431 out_pipe: 2432 if (!sent && !(msg->msg_flags & MSG_NOSIGNAL)) 2433 send_sig(SIGPIPE, current, 0); 2434 err = -EPIPE; 2435 out_free: 2436 consume_skb(skb); 2437 out_err: 2438 scm_destroy(&scm); 2439 return sent ? 
: err; 2440 } 2441 2442 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, 2443 size_t len) 2444 { 2445 int err; 2446 struct sock *sk = sock->sk; 2447 2448 err = sock_error(sk); 2449 if (err) 2450 return err; 2451 2452 if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED) 2453 return -ENOTCONN; 2454 2455 if (msg->msg_namelen) 2456 msg->msg_namelen = 0; 2457 2458 return unix_dgram_sendmsg(sock, msg, len); 2459 } 2460 2461 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, 2462 size_t size, int flags) 2463 { 2464 struct sock *sk = sock->sk; 2465 2466 if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED) 2467 return -ENOTCONN; 2468 2469 return unix_dgram_recvmsg(sock, msg, size, flags); 2470 } 2471 2472 static void unix_copy_addr(struct msghdr *msg, struct sock *sk) 2473 { 2474 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); 2475 2476 if (addr) { 2477 msg->msg_namelen = addr->len; 2478 memcpy(msg->msg_name, addr->name, addr->len); 2479 } 2480 } 2481 2482 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, 2483 int flags) 2484 { 2485 struct scm_cookie scm; 2486 struct socket *sock = sk->sk_socket; 2487 struct unix_sock *u = unix_sk(sk); 2488 struct sk_buff *skb, *last; 2489 long timeo; 2490 int skip; 2491 int err; 2492 2493 err = -EOPNOTSUPP; 2494 if (flags&MSG_OOB) 2495 goto out; 2496 2497 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2498 2499 do { 2500 mutex_lock(&u->iolock); 2501 2502 skip = sk_peek_offset(sk, flags); 2503 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags, 2504 &skip, &err, &last); 2505 if (skb) { 2506 if (!(flags & MSG_PEEK)) 2507 scm_stat_del(sk, skb); 2508 break; 2509 } 2510 2511 mutex_unlock(&u->iolock); 2512 2513 if (err != -EAGAIN) 2514 break; 2515 } while (timeo && 2516 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue, 2517 &err, &timeo, last)); 2518 2519 if (!skb) { /* implies iolock unlocked */ 2520 unix_state_lock(sk); 2521 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ 2522 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && 2523 (sk->sk_shutdown & RCV_SHUTDOWN)) 2524 err = 0; 2525 unix_state_unlock(sk); 2526 goto out; 2527 } 2528 2529 if (wq_has_sleeper(&u->peer_wait)) 2530 wake_up_interruptible_sync_poll(&u->peer_wait, 2531 EPOLLOUT | EPOLLWRNORM | 2532 EPOLLWRBAND); 2533 2534 if (msg->msg_name) { 2535 unix_copy_addr(msg, skb->sk); 2536 2537 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, 2538 msg->msg_name, 2539 &msg->msg_namelen); 2540 } 2541 2542 if (size > skb->len - skip) 2543 size = skb->len - skip; 2544 else if (size < skb->len - skip) 2545 msg->msg_flags |= MSG_TRUNC; 2546 2547 err = skb_copy_datagram_msg(skb, skip, msg, size); 2548 if (err) 2549 goto out_free; 2550 2551 if (sock_flag(sk, SOCK_RCVTSTAMP)) 2552 __sock_recv_timestamp(msg, sk, skb); 2553 2554 memset(&scm, 0, sizeof(scm)); 2555 2556 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); 2557 unix_set_secdata(&scm, skb); 2558 2559 if (!(flags & MSG_PEEK)) { 2560 if (UNIXCB(skb).fp) 2561 unix_detach_fds(&scm, skb); 2562 2563 sk_peek_offset_bwd(sk, skb->len); 2564 } else { 2565 /* It is questionable: on PEEK we could: 2566 - do not return fds - good, but too simple 8) 2567 - return fds, and do not return them on read (old strategy, 2568 apparently wrong) 2569 - clone fds (I chose it for now, it is the most universal 2570 solution) 2571 2572 POSIX 1003.1g does not actually define this clearly 2573 at all. 
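	   (With the clone strategy, each MSG_PEEK of the same message
	   installs a fresh set of duplicate fds in the receiver.)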
POSIX 1003.1g doesn't define a lot of things 2574 clearly however! 2575 2576 */ 2577 2578 sk_peek_offset_fwd(sk, size); 2579 2580 if (UNIXCB(skb).fp) 2581 unix_peek_fds(&scm, skb); 2582 } 2583 err = (flags & MSG_TRUNC) ? skb->len - skip : size; 2584 2585 scm_recv_unix(sock, msg, &scm, flags); 2586 2587 out_free: 2588 skb_free_datagram(sk, skb); 2589 mutex_unlock(&u->iolock); 2590 out: 2591 return err; 2592 } 2593 2594 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 2595 int flags) 2596 { 2597 struct sock *sk = sock->sk; 2598 2599 #ifdef CONFIG_BPF_SYSCALL 2600 const struct proto *prot = READ_ONCE(sk->sk_prot); 2601 2602 if (prot != &unix_dgram_proto) 2603 return prot->recvmsg(sk, msg, size, flags, NULL); 2604 #endif 2605 return __unix_dgram_recvmsg(sk, msg, size, flags); 2606 } 2607 2608 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 2609 { 2610 struct unix_sock *u = unix_sk(sk); 2611 struct sk_buff *skb; 2612 int err; 2613 2614 mutex_lock(&u->iolock); 2615 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); 2616 mutex_unlock(&u->iolock); 2617 if (!skb) 2618 return err; 2619 2620 return recv_actor(sk, skb); 2621 } 2622 2623 /* 2624 * Sleep until more data has arrived. But check for races.. 2625 */ 2626 static long unix_stream_data_wait(struct sock *sk, long timeo, 2627 struct sk_buff *last, unsigned int last_len, 2628 bool freezable) 2629 { 2630 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE; 2631 struct sk_buff *tail; 2632 DEFINE_WAIT(wait); 2633 2634 unix_state_lock(sk); 2635 2636 for (;;) { 2637 prepare_to_wait(sk_sleep(sk), &wait, state); 2638 2639 tail = skb_peek_tail(&sk->sk_receive_queue); 2640 if (tail != last || 2641 (tail && tail->len != last_len) || 2642 sk->sk_err || 2643 (sk->sk_shutdown & RCV_SHUTDOWN) || 2644 signal_pending(current) || 2645 !timeo) 2646 break; 2647 2648 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2649 unix_state_unlock(sk); 2650 timeo = schedule_timeout(timeo); 2651 unix_state_lock(sk); 2652 2653 if (sock_flag(sk, SOCK_DEAD)) 2654 break; 2655 2656 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2657 } 2658 2659 finish_wait(sk_sleep(sk), &wait); 2660 unix_state_unlock(sk); 2661 return timeo; 2662 } 2663 2664 static unsigned int unix_skb_len(const struct sk_buff *skb) 2665 { 2666 return skb->len - UNIXCB(skb).consumed; 2667 } 2668 2669 struct unix_stream_read_state { 2670 int (*recv_actor)(struct sk_buff *, int, int, 2671 struct unix_stream_read_state *); 2672 struct socket *socket; 2673 struct msghdr *msg; 2674 struct pipe_inode_info *pipe; 2675 size_t size; 2676 int flags; 2677 unsigned int splice_flags; 2678 }; 2679 2680 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2681 static int unix_stream_recv_urg(struct unix_stream_read_state *state) 2682 { 2683 struct socket *sock = state->socket; 2684 struct sock *sk = sock->sk; 2685 struct unix_sock *u = unix_sk(sk); 2686 int chunk = 1; 2687 struct sk_buff *oob_skb; 2688 2689 mutex_lock(&u->iolock); 2690 unix_state_lock(sk); 2691 spin_lock(&sk->sk_receive_queue.lock); 2692 2693 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) { 2694 spin_unlock(&sk->sk_receive_queue.lock); 2695 unix_state_unlock(sk); 2696 mutex_unlock(&u->iolock); 2697 return -EINVAL; 2698 } 2699 2700 oob_skb = u->oob_skb; 2701 2702 if (!(state->flags & MSG_PEEK)) 2703 WRITE_ONCE(u->oob_skb, NULL); 2704 2705 spin_unlock(&sk->sk_receive_queue.lock); 2706 unix_state_unlock(sk); 2707 2708 chunk = state->recv_actor(oob_skb, 0, chunk, state); 2709 2710 if (!(state->flags & MSG_PEEK)) 2711 
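		/* Consume the one OOB byte so unix_skb_len() sees
		 * this skb as empty from now on.
		 */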
UNIXCB(oob_skb).consumed += 1; 2712 2713 mutex_unlock(&u->iolock); 2714 2715 if (chunk < 0) 2716 return -EFAULT; 2717 2718 state->msg->msg_flags |= MSG_OOB; 2719 return 1; 2720 } 2721 2722 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, 2723 int flags, int copied) 2724 { 2725 struct sk_buff *read_skb = NULL, *unread_skb = NULL; 2726 struct unix_sock *u = unix_sk(sk); 2727 2728 if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb))) 2729 return skb; 2730 2731 spin_lock(&sk->sk_receive_queue.lock); 2732 2733 if (!unix_skb_len(skb)) { 2734 if (copied && (!u->oob_skb || skb == u->oob_skb)) { 2735 skb = NULL; 2736 } else if (flags & MSG_PEEK) { 2737 skb = skb_peek_next(skb, &sk->sk_receive_queue); 2738 } else { 2739 read_skb = skb; 2740 skb = skb_peek_next(skb, &sk->sk_receive_queue); 2741 __skb_unlink(read_skb, &sk->sk_receive_queue); 2742 } 2743 2744 if (!skb) 2745 goto unlock; 2746 } 2747 2748 if (skb != u->oob_skb) 2749 goto unlock; 2750 2751 if (copied) { 2752 skb = NULL; 2753 } else if (!(flags & MSG_PEEK)) { 2754 WRITE_ONCE(u->oob_skb, NULL); 2755 2756 if (!sock_flag(sk, SOCK_URGINLINE)) { 2757 __skb_unlink(skb, &sk->sk_receive_queue); 2758 unread_skb = skb; 2759 skb = skb_peek(&sk->sk_receive_queue); 2760 } 2761 } else if (!sock_flag(sk, SOCK_URGINLINE)) { 2762 skb = skb_peek_next(skb, &sk->sk_receive_queue); 2763 } 2764 2765 unlock: 2766 spin_unlock(&sk->sk_receive_queue.lock); 2767 2768 consume_skb(read_skb); 2769 kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB); 2770 2771 return skb; 2772 } 2773 #endif 2774 2775 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 2776 { 2777 struct unix_sock *u = unix_sk(sk); 2778 struct sk_buff *skb; 2779 int err; 2780 2781 if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) 2782 return -ENOTCONN; 2783 2784 mutex_lock(&u->iolock); 2785 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); 2786 mutex_unlock(&u->iolock); 2787 if (!skb) 2788 return err; 2789 2790 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2791 if (unlikely(skb == READ_ONCE(u->oob_skb))) { 2792 bool drop = false; 2793 2794 unix_state_lock(sk); 2795 2796 if (sock_flag(sk, SOCK_DEAD)) { 2797 unix_state_unlock(sk); 2798 kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE); 2799 return -ECONNRESET; 2800 } 2801 2802 spin_lock(&sk->sk_receive_queue.lock); 2803 if (likely(skb == u->oob_skb)) { 2804 WRITE_ONCE(u->oob_skb, NULL); 2805 drop = true; 2806 } 2807 spin_unlock(&sk->sk_receive_queue.lock); 2808 2809 unix_state_unlock(sk); 2810 2811 if (drop) { 2812 kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB); 2813 return -EAGAIN; 2814 } 2815 } 2816 #endif 2817 2818 return recv_actor(sk, skb); 2819 } 2820 2821 static int unix_stream_read_generic(struct unix_stream_read_state *state, 2822 bool freezable) 2823 { 2824 struct scm_cookie scm; 2825 struct socket *sock = state->socket; 2826 struct sock *sk = sock->sk; 2827 struct unix_sock *u = unix_sk(sk); 2828 int copied = 0; 2829 int flags = state->flags; 2830 int noblock = flags & MSG_DONTWAIT; 2831 bool check_creds = false; 2832 int target; 2833 int err = 0; 2834 long timeo; 2835 int skip; 2836 size_t size = state->size; 2837 unsigned int last_len; 2838 2839 if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) { 2840 err = -EINVAL; 2841 goto out; 2842 } 2843 2844 if (unlikely(flags & MSG_OOB)) { 2845 err = -EOPNOTSUPP; 2846 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2847 err = unix_stream_recv_urg(state); 2848 #endif 2849 goto out; 2850 } 2851 2852 target = sock_rcvlowat(sk, flags & 
MSG_WAITALL, size); 2853 timeo = sock_rcvtimeo(sk, noblock); 2854 2855 memset(&scm, 0, sizeof(scm)); 2856 2857 /* Lock the socket to prevent queue disordering 2858 * while sleeps in memcpy_tomsg 2859 */ 2860 mutex_lock(&u->iolock); 2861 2862 skip = max(sk_peek_offset(sk, flags), 0); 2863 2864 do { 2865 struct sk_buff *skb, *last; 2866 int chunk; 2867 2868 redo: 2869 unix_state_lock(sk); 2870 if (sock_flag(sk, SOCK_DEAD)) { 2871 err = -ECONNRESET; 2872 goto unlock; 2873 } 2874 last = skb = skb_peek(&sk->sk_receive_queue); 2875 last_len = last ? last->len : 0; 2876 2877 again: 2878 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2879 if (skb) { 2880 skb = manage_oob(skb, sk, flags, copied); 2881 if (!skb && copied) { 2882 unix_state_unlock(sk); 2883 break; 2884 } 2885 } 2886 #endif 2887 if (skb == NULL) { 2888 if (copied >= target) 2889 goto unlock; 2890 2891 /* 2892 * POSIX 1003.1g mandates this order. 2893 */ 2894 2895 err = sock_error(sk); 2896 if (err) 2897 goto unlock; 2898 if (sk->sk_shutdown & RCV_SHUTDOWN) 2899 goto unlock; 2900 2901 unix_state_unlock(sk); 2902 if (!timeo) { 2903 err = -EAGAIN; 2904 break; 2905 } 2906 2907 mutex_unlock(&u->iolock); 2908 2909 timeo = unix_stream_data_wait(sk, timeo, last, 2910 last_len, freezable); 2911 2912 if (signal_pending(current)) { 2913 err = sock_intr_errno(timeo); 2914 scm_destroy(&scm); 2915 goto out; 2916 } 2917 2918 mutex_lock(&u->iolock); 2919 goto redo; 2920 unlock: 2921 unix_state_unlock(sk); 2922 break; 2923 } 2924 2925 while (skip >= unix_skb_len(skb)) { 2926 skip -= unix_skb_len(skb); 2927 last = skb; 2928 last_len = skb->len; 2929 skb = skb_peek_next(skb, &sk->sk_receive_queue); 2930 if (!skb) 2931 goto again; 2932 } 2933 2934 unix_state_unlock(sk); 2935 2936 if (check_creds) { 2937 /* Never glue messages from different writers */ 2938 if (!unix_skb_scm_eq(skb, &scm)) 2939 break; 2940 } else if (unix_may_passcred(sk)) { 2941 /* Copy credentials */ 2942 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); 2943 unix_set_secdata(&scm, skb); 2944 check_creds = true; 2945 } 2946 2947 /* Copy address just once */ 2948 if (state->msg && state->msg->msg_name) { 2949 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, 2950 state->msg->msg_name); 2951 unix_copy_addr(state->msg, skb->sk); 2952 2953 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, 2954 state->msg->msg_name, 2955 &state->msg->msg_namelen); 2956 2957 sunaddr = NULL; 2958 } 2959 2960 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); 2961 chunk = state->recv_actor(skb, skip, chunk, state); 2962 if (chunk < 0) { 2963 if (copied == 0) 2964 copied = -EFAULT; 2965 break; 2966 } 2967 copied += chunk; 2968 size -= chunk; 2969 2970 /* Mark read part of skb as used */ 2971 if (!(flags & MSG_PEEK)) { 2972 UNIXCB(skb).consumed += chunk; 2973 2974 sk_peek_offset_bwd(sk, chunk); 2975 2976 if (UNIXCB(skb).fp) { 2977 scm_stat_del(sk, skb); 2978 unix_detach_fds(&scm, skb); 2979 } 2980 2981 if (unix_skb_len(skb)) 2982 break; 2983 2984 skb_unlink(skb, &sk->sk_receive_queue); 2985 consume_skb(skb); 2986 2987 if (scm.fp) 2988 break; 2989 } else { 2990 /* It is questionable, see note in unix_dgram_recvmsg. 
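			 * As there, fds are cloned on MSG_PEEK rather than
			 * detached, so peeking never steals them from a
			 * subsequent read.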
2991 */ 2992 if (UNIXCB(skb).fp) 2993 unix_peek_fds(&scm, skb); 2994 2995 sk_peek_offset_fwd(sk, chunk); 2996 2997 if (UNIXCB(skb).fp) 2998 break; 2999 3000 skip = 0; 3001 last = skb; 3002 last_len = skb->len; 3003 unix_state_lock(sk); 3004 skb = skb_peek_next(skb, &sk->sk_receive_queue); 3005 if (skb) 3006 goto again; 3007 unix_state_unlock(sk); 3008 break; 3009 } 3010 } while (size); 3011 3012 mutex_unlock(&u->iolock); 3013 if (state->msg) 3014 scm_recv_unix(sock, state->msg, &scm, flags); 3015 else 3016 scm_destroy(&scm); 3017 out: 3018 return copied ? : err; 3019 } 3020 3021 static int unix_stream_read_actor(struct sk_buff *skb, 3022 int skip, int chunk, 3023 struct unix_stream_read_state *state) 3024 { 3025 int ret; 3026 3027 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip, 3028 state->msg, chunk); 3029 return ret ?: chunk; 3030 } 3031 3032 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, 3033 size_t size, int flags) 3034 { 3035 struct unix_stream_read_state state = { 3036 .recv_actor = unix_stream_read_actor, 3037 .socket = sk->sk_socket, 3038 .msg = msg, 3039 .size = size, 3040 .flags = flags 3041 }; 3042 3043 return unix_stream_read_generic(&state, true); 3044 } 3045 3046 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, 3047 size_t size, int flags) 3048 { 3049 struct unix_stream_read_state state = { 3050 .recv_actor = unix_stream_read_actor, 3051 .socket = sock, 3052 .msg = msg, 3053 .size = size, 3054 .flags = flags 3055 }; 3056 3057 #ifdef CONFIG_BPF_SYSCALL 3058 struct sock *sk = sock->sk; 3059 const struct proto *prot = READ_ONCE(sk->sk_prot); 3060 3061 if (prot != &unix_stream_proto) 3062 return prot->recvmsg(sk, msg, size, flags, NULL); 3063 #endif 3064 return unix_stream_read_generic(&state, true); 3065 } 3066 3067 static int unix_stream_splice_actor(struct sk_buff *skb, 3068 int skip, int chunk, 3069 struct unix_stream_read_state *state) 3070 { 3071 return skb_splice_bits(skb, state->socket->sk, 3072 UNIXCB(skb).consumed + skip, 3073 state->pipe, chunk, state->splice_flags); 3074 } 3075 3076 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, 3077 struct pipe_inode_info *pipe, 3078 size_t size, unsigned int flags) 3079 { 3080 struct unix_stream_read_state state = { 3081 .recv_actor = unix_stream_splice_actor, 3082 .socket = sock, 3083 .pipe = pipe, 3084 .size = size, 3085 .splice_flags = flags, 3086 }; 3087 3088 if (unlikely(*ppos)) 3089 return -ESPIPE; 3090 3091 if (sock->file->f_flags & O_NONBLOCK || 3092 flags & SPLICE_F_NONBLOCK) 3093 state.flags = MSG_DONTWAIT; 3094 3095 return unix_stream_read_generic(&state, false); 3096 } 3097 3098 static int unix_shutdown(struct socket *sock, int mode) 3099 { 3100 struct sock *sk = sock->sk; 3101 struct sock *other; 3102 3103 if (mode < SHUT_RD || mode > SHUT_RDWR) 3104 return -EINVAL; 3105 /* This maps: 3106 * SHUT_RD (0) -> RCV_SHUTDOWN (1) 3107 * SHUT_WR (1) -> SEND_SHUTDOWN (2) 3108 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3) 3109 */ 3110 ++mode; 3111 3112 unix_state_lock(sk); 3113 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode); 3114 other = unix_peer(sk); 3115 if (other) 3116 sock_hold(other); 3117 unix_state_unlock(sk); 3118 sk->sk_state_change(sk); 3119 3120 if (other && 3121 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { 3122 3123 int peer_mode = 0; 3124 const struct proto *prot = READ_ONCE(other->sk_prot); 3125 3126 if (prot->unhash) 3127 prot->unhash(other); 3128 if (mode&RCV_SHUTDOWN) 3129 peer_mode |= SEND_SHUTDOWN; 3130 if 
(mode&SEND_SHUTDOWN) 3131 peer_mode |= RCV_SHUTDOWN; 3132 unix_state_lock(other); 3133 WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode); 3134 unix_state_unlock(other); 3135 other->sk_state_change(other); 3136 if (peer_mode == SHUTDOWN_MASK) 3137 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); 3138 else if (peer_mode & RCV_SHUTDOWN) 3139 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); 3140 } 3141 if (other) 3142 sock_put(other); 3143 3144 return 0; 3145 } 3146 3147 long unix_inq_len(struct sock *sk) 3148 { 3149 struct sk_buff *skb; 3150 long amount = 0; 3151 3152 if (READ_ONCE(sk->sk_state) == TCP_LISTEN) 3153 return -EINVAL; 3154 3155 spin_lock(&sk->sk_receive_queue.lock); 3156 if (sk->sk_type == SOCK_STREAM || 3157 sk->sk_type == SOCK_SEQPACKET) { 3158 skb_queue_walk(&sk->sk_receive_queue, skb) 3159 amount += unix_skb_len(skb); 3160 } else { 3161 skb = skb_peek(&sk->sk_receive_queue); 3162 if (skb) 3163 amount = skb->len; 3164 } 3165 spin_unlock(&sk->sk_receive_queue.lock); 3166 3167 return amount; 3168 } 3169 EXPORT_SYMBOL_GPL(unix_inq_len); 3170 3171 long unix_outq_len(struct sock *sk) 3172 { 3173 return sk_wmem_alloc_get(sk); 3174 } 3175 EXPORT_SYMBOL_GPL(unix_outq_len); 3176 3177 static int unix_open_file(struct sock *sk) 3178 { 3179 struct path path; 3180 struct file *f; 3181 int fd; 3182 3183 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 3184 return -EPERM; 3185 3186 if (!smp_load_acquire(&unix_sk(sk)->addr)) 3187 return -ENOENT; 3188 3189 path = unix_sk(sk)->path; 3190 if (!path.dentry) 3191 return -ENOENT; 3192 3193 path_get(&path); 3194 3195 fd = get_unused_fd_flags(O_CLOEXEC); 3196 if (fd < 0) 3197 goto out; 3198 3199 f = dentry_open(&path, O_PATH, current_cred()); 3200 if (IS_ERR(f)) { 3201 put_unused_fd(fd); 3202 fd = PTR_ERR(f); 3203 goto out; 3204 } 3205 3206 fd_install(fd, f); 3207 out: 3208 path_put(&path); 3209 3210 return fd; 3211 } 3212 3213 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3214 { 3215 struct sock *sk = sock->sk; 3216 long amount = 0; 3217 int err; 3218 3219 switch (cmd) { 3220 case SIOCOUTQ: 3221 amount = unix_outq_len(sk); 3222 err = put_user(amount, (int __user *)arg); 3223 break; 3224 case SIOCINQ: 3225 amount = unix_inq_len(sk); 3226 if (amount < 0) 3227 err = amount; 3228 else 3229 err = put_user(amount, (int __user *)arg); 3230 break; 3231 case SIOCUNIXFILE: 3232 err = unix_open_file(sk); 3233 break; 3234 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 3235 case SIOCATMARK: 3236 { 3237 struct unix_sock *u = unix_sk(sk); 3238 struct sk_buff *skb; 3239 int answ = 0; 3240 3241 mutex_lock(&u->iolock); 3242 3243 skb = skb_peek(&sk->sk_receive_queue); 3244 if (skb) { 3245 struct sk_buff *oob_skb = READ_ONCE(u->oob_skb); 3246 struct sk_buff *next_skb; 3247 3248 next_skb = skb_peek_next(skb, &sk->sk_receive_queue); 3249 3250 if (skb == oob_skb || 3251 (!unix_skb_len(skb) && 3252 (!oob_skb || next_skb == oob_skb))) 3253 answ = 1; 3254 } 3255 3256 mutex_unlock(&u->iolock); 3257 3258 err = put_user(answ, (int __user *)arg); 3259 } 3260 break; 3261 #endif 3262 default: 3263 err = -ENOIOCTLCMD; 3264 break; 3265 } 3266 return err; 3267 } 3268 3269 #ifdef CONFIG_COMPAT 3270 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3271 { 3272 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); 3273 } 3274 #endif 3275 3276 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) 3277 { 3278 struct sock *sk = sock->sk; 3279 unsigned char state; 3280 __poll_t 
	mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;
	shutdown = READ_ONCE(sk->sk_shutdown);
	state = READ_ONCE(sk->sk_state);

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (READ_ONCE(unix_sk(sk)->oob_skb))
		mask |= EPOLLPRI;
#endif

	/* Connection-based sockets need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/* We also set writable when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk, state))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}

static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	unsigned char state;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;
	shutdown = READ_ONCE(sk->sk_shutdown);
	state = READ_ONCE(sk->sk_state);

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/* No write status requested, avoid expensive OUT tests.
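	 * poll_requested_events() lets us bail out here and skip the
	 * unix_state_lock() and peer wake-queue checks below when the
	 * caller never asked for EPOLLOUT.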
*/ 3357 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) 3358 return mask; 3359 3360 writable = unix_writable(sk, state); 3361 if (writable) { 3362 unix_state_lock(sk); 3363 3364 other = unix_peer(sk); 3365 if (other && unix_peer(other) != sk && 3366 unix_recvq_full_lockless(other) && 3367 unix_dgram_peer_wake_me(sk, other)) 3368 writable = 0; 3369 3370 unix_state_unlock(sk); 3371 } 3372 3373 if (writable) 3374 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 3375 else 3376 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 3377 3378 return mask; 3379 } 3380 3381 #ifdef CONFIG_PROC_FS 3382 3383 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1) 3384 3385 #define get_bucket(x) ((x) >> BUCKET_SPACE) 3386 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1)) 3387 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) 3388 3389 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos) 3390 { 3391 unsigned long offset = get_offset(*pos); 3392 unsigned long bucket = get_bucket(*pos); 3393 unsigned long count = 0; 3394 struct sock *sk; 3395 3396 for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]); 3397 sk; sk = sk_next(sk)) { 3398 if (++count == offset) 3399 break; 3400 } 3401 3402 return sk; 3403 } 3404 3405 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos) 3406 { 3407 unsigned long bucket = get_bucket(*pos); 3408 struct net *net = seq_file_net(seq); 3409 struct sock *sk; 3410 3411 while (bucket < UNIX_HASH_SIZE) { 3412 spin_lock(&net->unx.table.locks[bucket]); 3413 3414 sk = unix_from_bucket(seq, pos); 3415 if (sk) 3416 return sk; 3417 3418 spin_unlock(&net->unx.table.locks[bucket]); 3419 3420 *pos = set_bucket_offset(++bucket, 1); 3421 } 3422 3423 return NULL; 3424 } 3425 3426 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk, 3427 loff_t *pos) 3428 { 3429 unsigned long bucket = get_bucket(*pos); 3430 3431 sk = sk_next(sk); 3432 if (sk) 3433 return sk; 3434 3435 3436 spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]); 3437 3438 *pos = set_bucket_offset(++bucket, 1); 3439 3440 return unix_get_first(seq, pos); 3441 } 3442 3443 static void *unix_seq_start(struct seq_file *seq, loff_t *pos) 3444 { 3445 if (!*pos) 3446 return SEQ_START_TOKEN; 3447 3448 return unix_get_first(seq, pos); 3449 } 3450 3451 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3452 { 3453 ++*pos; 3454 3455 if (v == SEQ_START_TOKEN) 3456 return unix_get_first(seq, pos); 3457 3458 return unix_get_next(seq, v, pos); 3459 } 3460 3461 static void unix_seq_stop(struct seq_file *seq, void *v) 3462 { 3463 struct sock *sk = v; 3464 3465 if (sk) 3466 spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]); 3467 } 3468 3469 static int unix_seq_show(struct seq_file *seq, void *v) 3470 { 3471 3472 if (v == SEQ_START_TOKEN) 3473 seq_puts(seq, "Num RefCount Protocol Flags Type St " 3474 "Inode Path\n"); 3475 else { 3476 struct sock *s = v; 3477 struct unix_sock *u = unix_sk(s); 3478 unix_state_lock(s); 3479 3480 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", 3481 s, 3482 refcount_read(&s->sk_refcnt), 3483 0, 3484 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0, 3485 s->sk_type, 3486 s->sk_socket ? 3487 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) : 3488 (s->sk_state == TCP_ESTABLISHED ? 
SS_CONNECTING : SS_DISCONNECTING), 3489 sock_i_ino(s)); 3490 3491 if (u->addr) { // under a hash table lock here 3492 int i, len; 3493 seq_putc(seq, ' '); 3494 3495 i = 0; 3496 len = u->addr->len - 3497 offsetof(struct sockaddr_un, sun_path); 3498 if (u->addr->name->sun_path[0]) { 3499 len--; 3500 } else { 3501 seq_putc(seq, '@'); 3502 i++; 3503 } 3504 for ( ; i < len; i++) 3505 seq_putc(seq, u->addr->name->sun_path[i] ?: 3506 '@'); 3507 } 3508 unix_state_unlock(s); 3509 seq_putc(seq, '\n'); 3510 } 3511 3512 return 0; 3513 } 3514 3515 static const struct seq_operations unix_seq_ops = { 3516 .start = unix_seq_start, 3517 .next = unix_seq_next, 3518 .stop = unix_seq_stop, 3519 .show = unix_seq_show, 3520 }; 3521 3522 #ifdef CONFIG_BPF_SYSCALL 3523 struct bpf_unix_iter_state { 3524 struct seq_net_private p; 3525 unsigned int cur_sk; 3526 unsigned int end_sk; 3527 unsigned int max_sk; 3528 struct sock **batch; 3529 bool st_bucket_done; 3530 }; 3531 3532 struct bpf_iter__unix { 3533 __bpf_md_ptr(struct bpf_iter_meta *, meta); 3534 __bpf_md_ptr(struct unix_sock *, unix_sk); 3535 uid_t uid __aligned(8); 3536 }; 3537 3538 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, 3539 struct unix_sock *unix_sk, uid_t uid) 3540 { 3541 struct bpf_iter__unix ctx; 3542 3543 meta->seq_num--; /* skip SEQ_START_TOKEN */ 3544 ctx.meta = meta; 3545 ctx.unix_sk = unix_sk; 3546 ctx.uid = uid; 3547 return bpf_iter_run_prog(prog, &ctx); 3548 } 3549 3550 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk) 3551 3552 { 3553 struct bpf_unix_iter_state *iter = seq->private; 3554 unsigned int expected = 1; 3555 struct sock *sk; 3556 3557 sock_hold(start_sk); 3558 iter->batch[iter->end_sk++] = start_sk; 3559 3560 for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) { 3561 if (iter->end_sk < iter->max_sk) { 3562 sock_hold(sk); 3563 iter->batch[iter->end_sk++] = sk; 3564 } 3565 3566 expected++; 3567 } 3568 3569 spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]); 3570 3571 return expected; 3572 } 3573 3574 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter) 3575 { 3576 while (iter->cur_sk < iter->end_sk) 3577 sock_put(iter->batch[iter->cur_sk++]); 3578 } 3579 3580 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter, 3581 unsigned int new_batch_sz) 3582 { 3583 struct sock **new_batch; 3584 3585 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz, 3586 GFP_USER | __GFP_NOWARN); 3587 if (!new_batch) 3588 return -ENOMEM; 3589 3590 bpf_iter_unix_put_batch(iter); 3591 kvfree(iter->batch); 3592 iter->batch = new_batch; 3593 iter->max_sk = new_batch_sz; 3594 3595 return 0; 3596 } 3597 3598 static struct sock *bpf_iter_unix_batch(struct seq_file *seq, 3599 loff_t *pos) 3600 { 3601 struct bpf_unix_iter_state *iter = seq->private; 3602 unsigned int expected; 3603 bool resized = false; 3604 struct sock *sk; 3605 3606 if (iter->st_bucket_done) 3607 *pos = set_bucket_offset(get_bucket(*pos) + 1, 1); 3608 3609 again: 3610 /* Get a new batch */ 3611 iter->cur_sk = 0; 3612 iter->end_sk = 0; 3613 3614 sk = unix_get_first(seq, pos); 3615 if (!sk) 3616 return NULL; /* Done */ 3617 3618 expected = bpf_iter_unix_hold_batch(seq, sk); 3619 3620 if (iter->end_sk == expected) { 3621 iter->st_bucket_done = true; 3622 return sk; 3623 } 3624 3625 if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) { 3626 resized = true; 3627 goto again; 3628 } 3629 3630 return sk; 3631 } 3632 3633 static void *bpf_iter_unix_seq_start(struct 
seq_file *seq, loff_t *pos) 3634 { 3635 if (!*pos) 3636 return SEQ_START_TOKEN; 3637 3638 /* bpf iter does not support lseek, so it always 3639 * continue from where it was stop()-ped. 3640 */ 3641 return bpf_iter_unix_batch(seq, pos); 3642 } 3643 3644 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3645 { 3646 struct bpf_unix_iter_state *iter = seq->private; 3647 struct sock *sk; 3648 3649 /* Whenever seq_next() is called, the iter->cur_sk is 3650 * done with seq_show(), so advance to the next sk in 3651 * the batch. 3652 */ 3653 if (iter->cur_sk < iter->end_sk) 3654 sock_put(iter->batch[iter->cur_sk++]); 3655 3656 ++*pos; 3657 3658 if (iter->cur_sk < iter->end_sk) 3659 sk = iter->batch[iter->cur_sk]; 3660 else 3661 sk = bpf_iter_unix_batch(seq, pos); 3662 3663 return sk; 3664 } 3665 3666 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v) 3667 { 3668 struct bpf_iter_meta meta; 3669 struct bpf_prog *prog; 3670 struct sock *sk = v; 3671 uid_t uid; 3672 bool slow; 3673 int ret; 3674 3675 if (v == SEQ_START_TOKEN) 3676 return 0; 3677 3678 slow = lock_sock_fast(sk); 3679 3680 if (unlikely(sk_unhashed(sk))) { 3681 ret = SEQ_SKIP; 3682 goto unlock; 3683 } 3684 3685 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); 3686 meta.seq = seq; 3687 prog = bpf_iter_get_info(&meta, false); 3688 ret = unix_prog_seq_show(prog, &meta, v, uid); 3689 unlock: 3690 unlock_sock_fast(sk, slow); 3691 return ret; 3692 } 3693 3694 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v) 3695 { 3696 struct bpf_unix_iter_state *iter = seq->private; 3697 struct bpf_iter_meta meta; 3698 struct bpf_prog *prog; 3699 3700 if (!v) { 3701 meta.seq = seq; 3702 prog = bpf_iter_get_info(&meta, true); 3703 if (prog) 3704 (void)unix_prog_seq_show(prog, &meta, v, 0); 3705 } 3706 3707 if (iter->cur_sk < iter->end_sk) 3708 bpf_iter_unix_put_batch(iter); 3709 } 3710 3711 static const struct seq_operations bpf_iter_unix_seq_ops = { 3712 .start = bpf_iter_unix_seq_start, 3713 .next = bpf_iter_unix_seq_next, 3714 .stop = bpf_iter_unix_seq_stop, 3715 .show = bpf_iter_unix_seq_show, 3716 }; 3717 #endif 3718 #endif 3719 3720 static const struct net_proto_family unix_family_ops = { 3721 .family = PF_UNIX, 3722 .create = unix_create, 3723 .owner = THIS_MODULE, 3724 }; 3725 3726 3727 static int __net_init unix_net_init(struct net *net) 3728 { 3729 int i; 3730 3731 net->unx.sysctl_max_dgram_qlen = 10; 3732 if (unix_sysctl_register(net)) 3733 goto out; 3734 3735 #ifdef CONFIG_PROC_FS 3736 if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops, 3737 sizeof(struct seq_net_private))) 3738 goto err_sysctl; 3739 #endif 3740 3741 net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE, 3742 sizeof(spinlock_t), GFP_KERNEL); 3743 if (!net->unx.table.locks) 3744 goto err_proc; 3745 3746 net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE, 3747 sizeof(struct hlist_head), 3748 GFP_KERNEL); 3749 if (!net->unx.table.buckets) 3750 goto free_locks; 3751 3752 for (i = 0; i < UNIX_HASH_SIZE; i++) { 3753 spin_lock_init(&net->unx.table.locks[i]); 3754 lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL); 3755 INIT_HLIST_HEAD(&net->unx.table.buckets[i]); 3756 } 3757 3758 return 0; 3759 3760 free_locks: 3761 kvfree(net->unx.table.locks); 3762 err_proc: 3763 #ifdef CONFIG_PROC_FS 3764 remove_proc_entry("unix", net->proc_net); 3765 err_sysctl: 3766 #endif 3767 unix_sysctl_unregister(net); 3768 out: 3769 return -ENOMEM; 3770 } 3771 3772 static void __net_exit unix_net_exit(struct net 
*net) 3773 { 3774 kvfree(net->unx.table.buckets); 3775 kvfree(net->unx.table.locks); 3776 unix_sysctl_unregister(net); 3777 remove_proc_entry("unix", net->proc_net); 3778 } 3779 3780 static struct pernet_operations unix_net_ops = { 3781 .init = unix_net_init, 3782 .exit = unix_net_exit, 3783 }; 3784 3785 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 3786 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta, 3787 struct unix_sock *unix_sk, uid_t uid) 3788 3789 #define INIT_BATCH_SZ 16 3790 3791 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux) 3792 { 3793 struct bpf_unix_iter_state *iter = priv_data; 3794 int err; 3795 3796 err = bpf_iter_init_seq_net(priv_data, aux); 3797 if (err) 3798 return err; 3799 3800 err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ); 3801 if (err) { 3802 bpf_iter_fini_seq_net(priv_data); 3803 return err; 3804 } 3805 3806 return 0; 3807 } 3808 3809 static void bpf_iter_fini_unix(void *priv_data) 3810 { 3811 struct bpf_unix_iter_state *iter = priv_data; 3812 3813 bpf_iter_fini_seq_net(priv_data); 3814 kvfree(iter->batch); 3815 } 3816 3817 static const struct bpf_iter_seq_info unix_seq_info = { 3818 .seq_ops = &bpf_iter_unix_seq_ops, 3819 .init_seq_private = bpf_iter_init_unix, 3820 .fini_seq_private = bpf_iter_fini_unix, 3821 .seq_priv_size = sizeof(struct bpf_unix_iter_state), 3822 }; 3823 3824 static const struct bpf_func_proto * 3825 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id, 3826 const struct bpf_prog *prog) 3827 { 3828 switch (func_id) { 3829 case BPF_FUNC_setsockopt: 3830 return &bpf_sk_setsockopt_proto; 3831 case BPF_FUNC_getsockopt: 3832 return &bpf_sk_getsockopt_proto; 3833 default: 3834 return NULL; 3835 } 3836 } 3837 3838 static struct bpf_iter_reg unix_reg_info = { 3839 .target = "unix", 3840 .ctx_arg_info_size = 1, 3841 .ctx_arg_info = { 3842 { offsetof(struct bpf_iter__unix, unix_sk), 3843 PTR_TO_BTF_ID_OR_NULL }, 3844 }, 3845 .get_func_proto = bpf_iter_unix_get_func_proto, 3846 .seq_info = &unix_seq_info, 3847 }; 3848 3849 static void __init bpf_iter_register(void) 3850 { 3851 unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX]; 3852 if (bpf_iter_reg_target(&unix_reg_info)) 3853 pr_warn("Warning: could not register bpf iterator unix\n"); 3854 } 3855 #endif 3856 3857 static int __init af_unix_init(void) 3858 { 3859 int i, rc = -1; 3860 3861 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb)); 3862 3863 for (i = 0; i < UNIX_HASH_SIZE / 2; i++) { 3864 spin_lock_init(&bsd_socket_locks[i]); 3865 INIT_HLIST_HEAD(&bsd_socket_buckets[i]); 3866 } 3867 3868 rc = proto_register(&unix_dgram_proto, 1); 3869 if (rc != 0) { 3870 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); 3871 goto out; 3872 } 3873 3874 rc = proto_register(&unix_stream_proto, 1); 3875 if (rc != 0) { 3876 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); 3877 proto_unregister(&unix_dgram_proto); 3878 goto out; 3879 } 3880 3881 sock_register(&unix_family_ops); 3882 register_pernet_subsys(&unix_net_ops); 3883 unix_bpf_build_proto(); 3884 3885 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 3886 bpf_iter_register(); 3887 #endif 3888 3889 out: 3890 return rc; 3891 } 3892 3893 /* Later than subsys_initcall() because we depend on stuff initialised there */ 3894 fs_initcall(af_unix_init); 3895
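
/* Illustrative only (not part of the kernel build): a minimal userspace
 * sketch of the SCM_RIGHTS fd passing that unix_attach_fds() and
 * unix_detach_fds() above implement.  Error handling is omitted and the
 * helper name send_fd() is made up for the example.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *
 *	static void send_fd(int sock, int fd)
 *	{
 *		char data = 'x';
 *		char ctrl[CMSG_SPACE(sizeof(int))];
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *		sendmsg(sock, &msg, 0);
 *	}
 */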