// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko EiBfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect
 *					algorithm. Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this is for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair.
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+).
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername -
 *		BSD bug ??).
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
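 *
 *		  For illustration (an addition, not part of the original
 *		  changelog): a user-space bind to such an abstract name
 *		  might look roughly like
 *
 *			struct sockaddr_un a = { .sun_family = AF_UNIX };
 *			memcpy(a.sun_path, "\0test", 5);
 *			bind(fd, (struct sockaddr *)&a,
 *			     offsetof(struct sockaddr_un, sun_path) + 5);
 *
 *		  where the explicit address length is what delimits the
 *		  name, since the leading 0 byte rules out NUL termination.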
76 */ 77 78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 79 80 #include <linux/module.h> 81 #include <linux/kernel.h> 82 #include <linux/signal.h> 83 #include <linux/sched/signal.h> 84 #include <linux/errno.h> 85 #include <linux/string.h> 86 #include <linux/stat.h> 87 #include <linux/dcache.h> 88 #include <linux/namei.h> 89 #include <linux/socket.h> 90 #include <linux/un.h> 91 #include <linux/fcntl.h> 92 #include <linux/filter.h> 93 #include <linux/termios.h> 94 #include <linux/sockios.h> 95 #include <linux/net.h> 96 #include <linux/in.h> 97 #include <linux/fs.h> 98 #include <linux/slab.h> 99 #include <linux/uaccess.h> 100 #include <linux/skbuff.h> 101 #include <linux/netdevice.h> 102 #include <net/net_namespace.h> 103 #include <net/sock.h> 104 #include <net/tcp_states.h> 105 #include <net/af_unix.h> 106 #include <linux/proc_fs.h> 107 #include <linux/seq_file.h> 108 #include <net/scm.h> 109 #include <linux/init.h> 110 #include <linux/poll.h> 111 #include <linux/rtnetlink.h> 112 #include <linux/mount.h> 113 #include <net/checksum.h> 114 #include <linux/security.h> 115 #include <linux/splice.h> 116 #include <linux/freezer.h> 117 #include <linux/file.h> 118 #include <linux/btf_ids.h> 119 #include <linux/bpf-cgroup.h> 120 121 static atomic_long_t unix_nr_socks; 122 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2]; 123 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; 124 125 /* SMP locking strategy: 126 * hash table is protected with spinlock. 127 * each socket state is protected by separate spinlock. 128 */ 129 130 static unsigned int unix_unbound_hash(struct sock *sk) 131 { 132 unsigned long hash = (unsigned long)sk; 133 134 hash ^= hash >> 16; 135 hash ^= hash >> 8; 136 hash ^= sk->sk_type; 137 138 return hash & UNIX_HASH_MOD; 139 } 140 141 static unsigned int unix_bsd_hash(struct inode *i) 142 { 143 return i->i_ino & UNIX_HASH_MOD; 144 } 145 146 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr, 147 int addr_len, int type) 148 { 149 __wsum csum = csum_partial(sunaddr, addr_len, 0); 150 unsigned int hash; 151 152 hash = (__force unsigned int)csum_fold(csum); 153 hash ^= hash >> 8; 154 hash ^= type; 155 156 return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD); 157 } 158 159 static void unix_table_double_lock(struct net *net, 160 unsigned int hash1, unsigned int hash2) 161 { 162 if (hash1 == hash2) { 163 spin_lock(&net->unx.table.locks[hash1]); 164 return; 165 } 166 167 if (hash1 > hash2) 168 swap(hash1, hash2); 169 170 spin_lock(&net->unx.table.locks[hash1]); 171 spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING); 172 } 173 174 static void unix_table_double_unlock(struct net *net, 175 unsigned int hash1, unsigned int hash2) 176 { 177 if (hash1 == hash2) { 178 spin_unlock(&net->unx.table.locks[hash1]); 179 return; 180 } 181 182 spin_unlock(&net->unx.table.locks[hash1]); 183 spin_unlock(&net->unx.table.locks[hash2]); 184 } 185 186 #ifdef CONFIG_SECURITY_NETWORK 187 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 188 { 189 UNIXCB(skb).secid = scm->secid; 190 } 191 192 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 193 { 194 scm->secid = UNIXCB(skb).secid; 195 } 196 197 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) 198 { 199 return (scm->secid == UNIXCB(skb).secid); 200 } 201 #else 202 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 203 { } 204 205 static inline void unix_set_secdata(struct scm_cookie *scm, struct 
sk_buff *skb) 206 { } 207 208 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) 209 { 210 return true; 211 } 212 #endif /* CONFIG_SECURITY_NETWORK */ 213 214 static inline int unix_our_peer(struct sock *sk, struct sock *osk) 215 { 216 return unix_peer(osk) == sk; 217 } 218 219 static inline int unix_may_send(struct sock *sk, struct sock *osk) 220 { 221 return unix_peer(osk) == NULL || unix_our_peer(sk, osk); 222 } 223 224 static inline int unix_recvq_full(const struct sock *sk) 225 { 226 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; 227 } 228 229 static inline int unix_recvq_full_lockless(const struct sock *sk) 230 { 231 return skb_queue_len_lockless(&sk->sk_receive_queue) > 232 READ_ONCE(sk->sk_max_ack_backlog); 233 } 234 235 struct sock *unix_peer_get(struct sock *s) 236 { 237 struct sock *peer; 238 239 unix_state_lock(s); 240 peer = unix_peer(s); 241 if (peer) 242 sock_hold(peer); 243 unix_state_unlock(s); 244 return peer; 245 } 246 EXPORT_SYMBOL_GPL(unix_peer_get); 247 248 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr, 249 int addr_len) 250 { 251 struct unix_address *addr; 252 253 addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL); 254 if (!addr) 255 return NULL; 256 257 refcount_set(&addr->refcnt, 1); 258 addr->len = addr_len; 259 memcpy(addr->name, sunaddr, addr_len); 260 261 return addr; 262 } 263 264 static inline void unix_release_addr(struct unix_address *addr) 265 { 266 if (refcount_dec_and_test(&addr->refcnt)) 267 kfree(addr); 268 } 269 270 /* 271 * Check unix socket name: 272 * - should be not zero length. 273 * - if started by not zero, should be NULL terminated (FS object) 274 * - if started by zero, it is abstract name. 275 */ 276 277 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len) 278 { 279 if (addr_len <= offsetof(struct sockaddr_un, sun_path) || 280 addr_len > sizeof(*sunaddr)) 281 return -EINVAL; 282 283 if (sunaddr->sun_family != AF_UNIX) 284 return -EINVAL; 285 286 return 0; 287 } 288 289 static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len) 290 { 291 struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr; 292 short offset = offsetof(struct sockaddr_storage, __data); 293 294 BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path)); 295 296 /* This may look like an off by one error but it is a bit more 297 * subtle. 108 is the longest valid AF_UNIX path for a binding. 298 * sun_path[108] doesn't as such exist. However in kernel space 299 * we are guaranteed that it is a valid memory location in our 300 * kernel address buffer because syscall functions always pass 301 * a pointer of struct sockaddr_storage which has a bigger buffer 302 * than 108. Also, we must terminate sun_path for strlen() in 303 * getname_kernel(). 304 */ 305 addr->__data[addr_len - offset] = 0; 306 307 /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will 308 * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen() 309 * know the actual buffer. 
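	 *
	 * Worked example (assuming the usual 2-byte sun_family and a
	 * 108-byte sun_path): an unterminated, fully used path arrives
	 * with addr_len == sizeof(struct sockaddr_un), the NUL is written
	 * to __data[108], and the value returned below is 108 + 2 + 1,
	 * i.e. the stored address always includes the terminating NUL.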
310 */ 311 return strlen(addr->__data) + offset + 1; 312 } 313 314 static void __unix_remove_socket(struct sock *sk) 315 { 316 sk_del_node_init(sk); 317 } 318 319 static void __unix_insert_socket(struct net *net, struct sock *sk) 320 { 321 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk)); 322 sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]); 323 } 324 325 static void __unix_set_addr_hash(struct net *net, struct sock *sk, 326 struct unix_address *addr, unsigned int hash) 327 { 328 __unix_remove_socket(sk); 329 smp_store_release(&unix_sk(sk)->addr, addr); 330 331 sk->sk_hash = hash; 332 __unix_insert_socket(net, sk); 333 } 334 335 static void unix_remove_socket(struct net *net, struct sock *sk) 336 { 337 spin_lock(&net->unx.table.locks[sk->sk_hash]); 338 __unix_remove_socket(sk); 339 spin_unlock(&net->unx.table.locks[sk->sk_hash]); 340 } 341 342 static void unix_insert_unbound_socket(struct net *net, struct sock *sk) 343 { 344 spin_lock(&net->unx.table.locks[sk->sk_hash]); 345 __unix_insert_socket(net, sk); 346 spin_unlock(&net->unx.table.locks[sk->sk_hash]); 347 } 348 349 static void unix_insert_bsd_socket(struct sock *sk) 350 { 351 spin_lock(&bsd_socket_locks[sk->sk_hash]); 352 sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]); 353 spin_unlock(&bsd_socket_locks[sk->sk_hash]); 354 } 355 356 static void unix_remove_bsd_socket(struct sock *sk) 357 { 358 if (!hlist_unhashed(&sk->sk_bind_node)) { 359 spin_lock(&bsd_socket_locks[sk->sk_hash]); 360 __sk_del_bind_node(sk); 361 spin_unlock(&bsd_socket_locks[sk->sk_hash]); 362 363 sk_node_init(&sk->sk_bind_node); 364 } 365 } 366 367 static struct sock *__unix_find_socket_byname(struct net *net, 368 struct sockaddr_un *sunname, 369 int len, unsigned int hash) 370 { 371 struct sock *s; 372 373 sk_for_each(s, &net->unx.table.buckets[hash]) { 374 struct unix_sock *u = unix_sk(s); 375 376 if (u->addr->len == len && 377 !memcmp(u->addr->name, sunname, len)) 378 return s; 379 } 380 return NULL; 381 } 382 383 static inline struct sock *unix_find_socket_byname(struct net *net, 384 struct sockaddr_un *sunname, 385 int len, unsigned int hash) 386 { 387 struct sock *s; 388 389 spin_lock(&net->unx.table.locks[hash]); 390 s = __unix_find_socket_byname(net, sunname, len, hash); 391 if (s) 392 sock_hold(s); 393 spin_unlock(&net->unx.table.locks[hash]); 394 return s; 395 } 396 397 static struct sock *unix_find_socket_byinode(struct inode *i) 398 { 399 unsigned int hash = unix_bsd_hash(i); 400 struct sock *s; 401 402 spin_lock(&bsd_socket_locks[hash]); 403 sk_for_each_bound(s, &bsd_socket_buckets[hash]) { 404 struct dentry *dentry = unix_sk(s)->path.dentry; 405 406 if (dentry && d_backing_inode(dentry) == i) { 407 sock_hold(s); 408 spin_unlock(&bsd_socket_locks[hash]); 409 return s; 410 } 411 } 412 spin_unlock(&bsd_socket_locks[hash]); 413 return NULL; 414 } 415 416 /* Support code for asymmetrically connected dgram sockets 417 * 418 * If a datagram socket is connected to a socket not itself connected 419 * to the first socket (eg, /dev/log), clients may only enqueue more 420 * messages if the present receive queue of the server socket is not 421 * "too large". This means there's a second writeability condition 422 * poll and sendmsg need to test. The dgram recv code will do a wake 423 * up on the peer_wait wait queue of a socket upon reception of a 424 * datagram which needs to be propagated to sleeping would-be writers 425 * since these might not have sent anything so far. 
This can't be 426 * accomplished via poll_wait because the lifetime of the server 427 * socket might be less than that of its clients if these break their 428 * association with it or if the server socket is closed while clients 429 * are still connected to it and there's no way to inform "a polling 430 * implementation" that it should let go of a certain wait queue 431 * 432 * In order to propagate a wake up, a wait_queue_entry_t of the client 433 * socket is enqueued on the peer_wait queue of the server socket 434 * whose wake function does a wake_up on the ordinary client socket 435 * wait queue. This connection is established whenever a write (or 436 * poll for write) hit the flow control condition and broken when the 437 * association to the server socket is dissolved or after a wake up 438 * was relayed. 439 */ 440 441 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, 442 void *key) 443 { 444 struct unix_sock *u; 445 wait_queue_head_t *u_sleep; 446 447 u = container_of(q, struct unix_sock, peer_wake); 448 449 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, 450 q); 451 u->peer_wake.private = NULL; 452 453 /* relaying can only happen while the wq still exists */ 454 u_sleep = sk_sleep(&u->sk); 455 if (u_sleep) 456 wake_up_interruptible_poll(u_sleep, key_to_poll(key)); 457 458 return 0; 459 } 460 461 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) 462 { 463 struct unix_sock *u, *u_other; 464 int rc; 465 466 u = unix_sk(sk); 467 u_other = unix_sk(other); 468 rc = 0; 469 spin_lock(&u_other->peer_wait.lock); 470 471 if (!u->peer_wake.private) { 472 u->peer_wake.private = other; 473 __add_wait_queue(&u_other->peer_wait, &u->peer_wake); 474 475 rc = 1; 476 } 477 478 spin_unlock(&u_other->peer_wait.lock); 479 return rc; 480 } 481 482 static void unix_dgram_peer_wake_disconnect(struct sock *sk, 483 struct sock *other) 484 { 485 struct unix_sock *u, *u_other; 486 487 u = unix_sk(sk); 488 u_other = unix_sk(other); 489 spin_lock(&u_other->peer_wait.lock); 490 491 if (u->peer_wake.private == other) { 492 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); 493 u->peer_wake.private = NULL; 494 } 495 496 spin_unlock(&u_other->peer_wait.lock); 497 } 498 499 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, 500 struct sock *other) 501 { 502 unix_dgram_peer_wake_disconnect(sk, other); 503 wake_up_interruptible_poll(sk_sleep(sk), 504 EPOLLOUT | 505 EPOLLWRNORM | 506 EPOLLWRBAND); 507 } 508 509 /* preconditions: 510 * - unix_peer(sk) == other 511 * - association is stable 512 */ 513 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) 514 { 515 int connected; 516 517 connected = unix_dgram_peer_wake_connect(sk, other); 518 519 /* If other is SOCK_DEAD, we want to make sure we signal 520 * POLLOUT, such that a subsequent write() can get a 521 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs 522 * to other and its full, we will hang waiting for POLLOUT. 
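	 *
	 * For reference: a non-zero return here means the peer's queue is
	 * still full (and the peer is alive), so the caller should report
	 * the socket as not writable and stay hooked on the peer's wait
	 * queue; zero means we have unhooked and the caller may treat the
	 * socket as writable again.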
523 */ 524 if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD)) 525 return 1; 526 527 if (connected) 528 unix_dgram_peer_wake_disconnect(sk, other); 529 530 return 0; 531 } 532 533 static int unix_writable(const struct sock *sk) 534 { 535 return sk->sk_state != TCP_LISTEN && 536 (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; 537 } 538 539 static void unix_write_space(struct sock *sk) 540 { 541 struct socket_wq *wq; 542 543 rcu_read_lock(); 544 if (unix_writable(sk)) { 545 wq = rcu_dereference(sk->sk_wq); 546 if (skwq_has_sleeper(wq)) 547 wake_up_interruptible_sync_poll(&wq->wait, 548 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); 549 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 550 } 551 rcu_read_unlock(); 552 } 553 554 /* When dgram socket disconnects (or changes its peer), we clear its receive 555 * queue of packets arrived from previous peer. First, it allows to do 556 * flow control based only on wmem_alloc; second, sk connected to peer 557 * may receive messages only from that peer. */ 558 static void unix_dgram_disconnected(struct sock *sk, struct sock *other) 559 { 560 if (!skb_queue_empty(&sk->sk_receive_queue)) { 561 skb_queue_purge(&sk->sk_receive_queue); 562 wake_up_interruptible_all(&unix_sk(sk)->peer_wait); 563 564 /* If one link of bidirectional dgram pipe is disconnected, 565 * we signal error. Messages are lost. Do not make this, 566 * when peer was not connected to us. 567 */ 568 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) { 569 WRITE_ONCE(other->sk_err, ECONNRESET); 570 sk_error_report(other); 571 } 572 } 573 other->sk_state = TCP_CLOSE; 574 } 575 576 static void unix_sock_destructor(struct sock *sk) 577 { 578 struct unix_sock *u = unix_sk(sk); 579 580 skb_queue_purge(&sk->sk_receive_queue); 581 582 DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc)); 583 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk)); 584 DEBUG_NET_WARN_ON_ONCE(sk->sk_socket); 585 if (!sock_flag(sk, SOCK_DEAD)) { 586 pr_info("Attempt to release alive unix socket: %p\n", sk); 587 return; 588 } 589 590 if (u->addr) 591 unix_release_addr(u->addr); 592 593 atomic_long_dec(&unix_nr_socks); 594 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 595 #ifdef UNIX_REFCNT_DEBUG 596 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk, 597 atomic_long_read(&unix_nr_socks)); 598 #endif 599 } 600 601 static void unix_release_sock(struct sock *sk, int embrion) 602 { 603 struct unix_sock *u = unix_sk(sk); 604 struct sock *skpair; 605 struct sk_buff *skb; 606 struct path path; 607 int state; 608 609 unix_remove_socket(sock_net(sk), sk); 610 unix_remove_bsd_socket(sk); 611 612 /* Clear state */ 613 unix_state_lock(sk); 614 sock_orphan(sk); 615 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 616 path = u->path; 617 u->path.dentry = NULL; 618 u->path.mnt = NULL; 619 state = sk->sk_state; 620 sk->sk_state = TCP_CLOSE; 621 622 skpair = unix_peer(sk); 623 unix_peer(sk) = NULL; 624 625 unix_state_unlock(sk); 626 627 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 628 if (u->oob_skb) { 629 kfree_skb(u->oob_skb); 630 u->oob_skb = NULL; 631 } 632 #endif 633 634 wake_up_interruptible_all(&u->peer_wait); 635 636 if (skpair != NULL) { 637 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { 638 unix_state_lock(skpair); 639 /* No more writes */ 640 WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK); 641 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) 642 WRITE_ONCE(skpair->sk_err, ECONNRESET); 643 unix_state_unlock(skpair); 644 skpair->sk_state_change(skpair); 645 sk_wake_async(skpair, 
SOCK_WAKE_WAITD, POLL_HUP); 646 } 647 648 unix_dgram_peer_wake_disconnect(sk, skpair); 649 sock_put(skpair); /* It may now die */ 650 } 651 652 /* Try to flush out this socket. Throw out buffers at least */ 653 654 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 655 if (state == TCP_LISTEN) 656 unix_release_sock(skb->sk, 1); 657 /* passed fds are erased in the kfree_skb hook */ 658 UNIXCB(skb).consumed = skb->len; 659 kfree_skb(skb); 660 } 661 662 if (path.dentry) 663 path_put(&path); 664 665 sock_put(sk); 666 667 /* ---- Socket is dead now and most probably destroyed ---- */ 668 669 /* 670 * Fixme: BSD difference: In BSD all sockets connected to us get 671 * ECONNRESET and we die on the spot. In Linux we behave 672 * like files and pipes do and wait for the last 673 * dereference. 674 * 675 * Can't we simply set sock->err? 676 * 677 * What the above comment does talk about? --ANK(980817) 678 */ 679 680 if (READ_ONCE(unix_tot_inflight)) 681 unix_gc(); /* Garbage collect fds */ 682 } 683 684 static void init_peercred(struct sock *sk) 685 { 686 const struct cred *old_cred; 687 struct pid *old_pid; 688 689 spin_lock(&sk->sk_peer_lock); 690 old_pid = sk->sk_peer_pid; 691 old_cred = sk->sk_peer_cred; 692 sk->sk_peer_pid = get_pid(task_tgid(current)); 693 sk->sk_peer_cred = get_current_cred(); 694 spin_unlock(&sk->sk_peer_lock); 695 696 put_pid(old_pid); 697 put_cred(old_cred); 698 } 699 700 static void copy_peercred(struct sock *sk, struct sock *peersk) 701 { 702 const struct cred *old_cred; 703 struct pid *old_pid; 704 705 if (sk < peersk) { 706 spin_lock(&sk->sk_peer_lock); 707 spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING); 708 } else { 709 spin_lock(&peersk->sk_peer_lock); 710 spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING); 711 } 712 old_pid = sk->sk_peer_pid; 713 old_cred = sk->sk_peer_cred; 714 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); 715 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); 716 717 spin_unlock(&sk->sk_peer_lock); 718 spin_unlock(&peersk->sk_peer_lock); 719 720 put_pid(old_pid); 721 put_cred(old_cred); 722 } 723 724 static int unix_listen(struct socket *sock, int backlog) 725 { 726 int err; 727 struct sock *sk = sock->sk; 728 struct unix_sock *u = unix_sk(sk); 729 730 err = -EOPNOTSUPP; 731 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) 732 goto out; /* Only stream/seqpacket sockets accept */ 733 err = -EINVAL; 734 if (!u->addr) 735 goto out; /* No listens on an unbound socket */ 736 unix_state_lock(sk); 737 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) 738 goto out_unlock; 739 if (backlog > sk->sk_max_ack_backlog) 740 wake_up_interruptible_all(&u->peer_wait); 741 sk->sk_max_ack_backlog = backlog; 742 sk->sk_state = TCP_LISTEN; 743 /* set credentials so connect can copy them */ 744 init_peercred(sk); 745 err = 0; 746 747 out_unlock: 748 unix_state_unlock(sk); 749 out: 750 return err; 751 } 752 753 static int unix_release(struct socket *); 754 static int unix_bind(struct socket *, struct sockaddr *, int); 755 static int unix_stream_connect(struct socket *, struct sockaddr *, 756 int addr_len, int flags); 757 static int unix_socketpair(struct socket *, struct socket *); 758 static int unix_accept(struct socket *, struct socket *, int, bool); 759 static int unix_getname(struct socket *, struct sockaddr *, int); 760 static __poll_t unix_poll(struct file *, struct socket *, poll_table *); 761 static __poll_t unix_dgram_poll(struct file *, struct socket *, 762 poll_table *); 763 static int 
unix_ioctl(struct socket *, unsigned int, unsigned long); 764 #ifdef CONFIG_COMPAT 765 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 766 #endif 767 static int unix_shutdown(struct socket *, int); 768 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); 769 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); 770 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos, 771 struct pipe_inode_info *, size_t size, 772 unsigned int flags); 773 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t); 774 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int); 775 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor); 776 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor); 777 static int unix_dgram_connect(struct socket *, struct sockaddr *, 778 int, int); 779 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t); 780 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t, 781 int); 782 783 #ifdef CONFIG_PROC_FS 784 static int unix_count_nr_fds(struct sock *sk) 785 { 786 struct sk_buff *skb; 787 struct unix_sock *u; 788 int nr_fds = 0; 789 790 spin_lock(&sk->sk_receive_queue.lock); 791 skb = skb_peek(&sk->sk_receive_queue); 792 while (skb) { 793 u = unix_sk(skb->sk); 794 nr_fds += atomic_read(&u->scm_stat.nr_fds); 795 skb = skb_peek_next(skb, &sk->sk_receive_queue); 796 } 797 spin_unlock(&sk->sk_receive_queue.lock); 798 799 return nr_fds; 800 } 801 802 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock) 803 { 804 struct sock *sk = sock->sk; 805 unsigned char s_state; 806 struct unix_sock *u; 807 int nr_fds = 0; 808 809 if (sk) { 810 s_state = READ_ONCE(sk->sk_state); 811 u = unix_sk(sk); 812 813 /* SOCK_STREAM and SOCK_SEQPACKET sockets never change their 814 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN. 815 * SOCK_DGRAM is ordinary. So, no lock is needed. 
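		 *
		 * The counter ends up in procfs: reading
		 * /proc/<pid>/fdinfo/<fd> for a unix socket shows a line
		 * such as "scm_fds: 2" (value illustrative).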
816 */ 817 if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED) 818 nr_fds = atomic_read(&u->scm_stat.nr_fds); 819 else if (s_state == TCP_LISTEN) 820 nr_fds = unix_count_nr_fds(sk); 821 822 seq_printf(m, "scm_fds: %u\n", nr_fds); 823 } 824 } 825 #else 826 #define unix_show_fdinfo NULL 827 #endif 828 829 static const struct proto_ops unix_stream_ops = { 830 .family = PF_UNIX, 831 .owner = THIS_MODULE, 832 .release = unix_release, 833 .bind = unix_bind, 834 .connect = unix_stream_connect, 835 .socketpair = unix_socketpair, 836 .accept = unix_accept, 837 .getname = unix_getname, 838 .poll = unix_poll, 839 .ioctl = unix_ioctl, 840 #ifdef CONFIG_COMPAT 841 .compat_ioctl = unix_compat_ioctl, 842 #endif 843 .listen = unix_listen, 844 .shutdown = unix_shutdown, 845 .sendmsg = unix_stream_sendmsg, 846 .recvmsg = unix_stream_recvmsg, 847 .read_skb = unix_stream_read_skb, 848 .mmap = sock_no_mmap, 849 .splice_read = unix_stream_splice_read, 850 .set_peek_off = sk_set_peek_off, 851 .show_fdinfo = unix_show_fdinfo, 852 }; 853 854 static const struct proto_ops unix_dgram_ops = { 855 .family = PF_UNIX, 856 .owner = THIS_MODULE, 857 .release = unix_release, 858 .bind = unix_bind, 859 .connect = unix_dgram_connect, 860 .socketpair = unix_socketpair, 861 .accept = sock_no_accept, 862 .getname = unix_getname, 863 .poll = unix_dgram_poll, 864 .ioctl = unix_ioctl, 865 #ifdef CONFIG_COMPAT 866 .compat_ioctl = unix_compat_ioctl, 867 #endif 868 .listen = sock_no_listen, 869 .shutdown = unix_shutdown, 870 .sendmsg = unix_dgram_sendmsg, 871 .read_skb = unix_read_skb, 872 .recvmsg = unix_dgram_recvmsg, 873 .mmap = sock_no_mmap, 874 .set_peek_off = sk_set_peek_off, 875 .show_fdinfo = unix_show_fdinfo, 876 }; 877 878 static const struct proto_ops unix_seqpacket_ops = { 879 .family = PF_UNIX, 880 .owner = THIS_MODULE, 881 .release = unix_release, 882 .bind = unix_bind, 883 .connect = unix_stream_connect, 884 .socketpair = unix_socketpair, 885 .accept = unix_accept, 886 .getname = unix_getname, 887 .poll = unix_dgram_poll, 888 .ioctl = unix_ioctl, 889 #ifdef CONFIG_COMPAT 890 .compat_ioctl = unix_compat_ioctl, 891 #endif 892 .listen = unix_listen, 893 .shutdown = unix_shutdown, 894 .sendmsg = unix_seqpacket_sendmsg, 895 .recvmsg = unix_seqpacket_recvmsg, 896 .mmap = sock_no_mmap, 897 .set_peek_off = sk_set_peek_off, 898 .show_fdinfo = unix_show_fdinfo, 899 }; 900 901 static void unix_close(struct sock *sk, long timeout) 902 { 903 /* Nothing to do here, unix socket does not need a ->close(). 904 * This is merely for sockmap. 905 */ 906 } 907 908 static void unix_unhash(struct sock *sk) 909 { 910 /* Nothing to do here, unix socket does not need a ->unhash(). 911 * This is merely for sockmap. 
912 */ 913 } 914 915 static bool unix_bpf_bypass_getsockopt(int level, int optname) 916 { 917 if (level == SOL_SOCKET) { 918 switch (optname) { 919 case SO_PEERPIDFD: 920 return true; 921 default: 922 return false; 923 } 924 } 925 926 return false; 927 } 928 929 struct proto unix_dgram_proto = { 930 .name = "UNIX", 931 .owner = THIS_MODULE, 932 .obj_size = sizeof(struct unix_sock), 933 .close = unix_close, 934 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt, 935 #ifdef CONFIG_BPF_SYSCALL 936 .psock_update_sk_prot = unix_dgram_bpf_update_proto, 937 #endif 938 }; 939 940 struct proto unix_stream_proto = { 941 .name = "UNIX-STREAM", 942 .owner = THIS_MODULE, 943 .obj_size = sizeof(struct unix_sock), 944 .close = unix_close, 945 .unhash = unix_unhash, 946 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt, 947 #ifdef CONFIG_BPF_SYSCALL 948 .psock_update_sk_prot = unix_stream_bpf_update_proto, 949 #endif 950 }; 951 952 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type) 953 { 954 struct unix_sock *u; 955 struct sock *sk; 956 int err; 957 958 atomic_long_inc(&unix_nr_socks); 959 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) { 960 err = -ENFILE; 961 goto err; 962 } 963 964 if (type == SOCK_STREAM) 965 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern); 966 else /*dgram and seqpacket */ 967 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern); 968 969 if (!sk) { 970 err = -ENOMEM; 971 goto err; 972 } 973 974 sock_init_data(sock, sk); 975 976 sk->sk_hash = unix_unbound_hash(sk); 977 sk->sk_allocation = GFP_KERNEL_ACCOUNT; 978 sk->sk_write_space = unix_write_space; 979 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; 980 sk->sk_destruct = unix_sock_destructor; 981 u = unix_sk(sk); 982 u->listener = NULL; 983 u->vertex = NULL; 984 u->path.dentry = NULL; 985 u->path.mnt = NULL; 986 spin_lock_init(&u->lock); 987 mutex_init(&u->iolock); /* single task reading lock */ 988 mutex_init(&u->bindlock); /* single task binding lock */ 989 init_waitqueue_head(&u->peer_wait); 990 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); 991 memset(&u->scm_stat, 0, sizeof(struct scm_stat)); 992 unix_insert_unbound_socket(net, sk); 993 994 sock_prot_inuse_add(net, sk->sk_prot, 1); 995 996 return sk; 997 998 err: 999 atomic_long_dec(&unix_nr_socks); 1000 return ERR_PTR(err); 1001 } 1002 1003 static int unix_create(struct net *net, struct socket *sock, int protocol, 1004 int kern) 1005 { 1006 struct sock *sk; 1007 1008 if (protocol && protocol != PF_UNIX) 1009 return -EPROTONOSUPPORT; 1010 1011 sock->state = SS_UNCONNECTED; 1012 1013 switch (sock->type) { 1014 case SOCK_STREAM: 1015 sock->ops = &unix_stream_ops; 1016 break; 1017 /* 1018 * Believe it or not BSD has AF_UNIX, SOCK_RAW though 1019 * nothing uses it. 
1020 */ 1021 case SOCK_RAW: 1022 sock->type = SOCK_DGRAM; 1023 fallthrough; 1024 case SOCK_DGRAM: 1025 sock->ops = &unix_dgram_ops; 1026 break; 1027 case SOCK_SEQPACKET: 1028 sock->ops = &unix_seqpacket_ops; 1029 break; 1030 default: 1031 return -ESOCKTNOSUPPORT; 1032 } 1033 1034 sk = unix_create1(net, sock, kern, sock->type); 1035 if (IS_ERR(sk)) 1036 return PTR_ERR(sk); 1037 1038 return 0; 1039 } 1040 1041 static int unix_release(struct socket *sock) 1042 { 1043 struct sock *sk = sock->sk; 1044 1045 if (!sk) 1046 return 0; 1047 1048 sk->sk_prot->close(sk, 0); 1049 unix_release_sock(sk, 0); 1050 sock->sk = NULL; 1051 1052 return 0; 1053 } 1054 1055 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len, 1056 int type) 1057 { 1058 struct inode *inode; 1059 struct path path; 1060 struct sock *sk; 1061 int err; 1062 1063 unix_mkname_bsd(sunaddr, addr_len); 1064 err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); 1065 if (err) 1066 goto fail; 1067 1068 err = path_permission(&path, MAY_WRITE); 1069 if (err) 1070 goto path_put; 1071 1072 err = -ECONNREFUSED; 1073 inode = d_backing_inode(path.dentry); 1074 if (!S_ISSOCK(inode->i_mode)) 1075 goto path_put; 1076 1077 sk = unix_find_socket_byinode(inode); 1078 if (!sk) 1079 goto path_put; 1080 1081 err = -EPROTOTYPE; 1082 if (sk->sk_type == type) 1083 touch_atime(&path); 1084 else 1085 goto sock_put; 1086 1087 path_put(&path); 1088 1089 return sk; 1090 1091 sock_put: 1092 sock_put(sk); 1093 path_put: 1094 path_put(&path); 1095 fail: 1096 return ERR_PTR(err); 1097 } 1098 1099 static struct sock *unix_find_abstract(struct net *net, 1100 struct sockaddr_un *sunaddr, 1101 int addr_len, int type) 1102 { 1103 unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type); 1104 struct dentry *dentry; 1105 struct sock *sk; 1106 1107 sk = unix_find_socket_byname(net, sunaddr, addr_len, hash); 1108 if (!sk) 1109 return ERR_PTR(-ECONNREFUSED); 1110 1111 dentry = unix_sk(sk)->path.dentry; 1112 if (dentry) 1113 touch_atime(&unix_sk(sk)->path); 1114 1115 return sk; 1116 } 1117 1118 static struct sock *unix_find_other(struct net *net, 1119 struct sockaddr_un *sunaddr, 1120 int addr_len, int type) 1121 { 1122 struct sock *sk; 1123 1124 if (sunaddr->sun_path[0]) 1125 sk = unix_find_bsd(sunaddr, addr_len, type); 1126 else 1127 sk = unix_find_abstract(net, sunaddr, addr_len, type); 1128 1129 return sk; 1130 } 1131 1132 static int unix_autobind(struct sock *sk) 1133 { 1134 unsigned int new_hash, old_hash = sk->sk_hash; 1135 struct unix_sock *u = unix_sk(sk); 1136 struct net *net = sock_net(sk); 1137 struct unix_address *addr; 1138 u32 lastnum, ordernum; 1139 int err; 1140 1141 err = mutex_lock_interruptible(&u->bindlock); 1142 if (err) 1143 return err; 1144 1145 if (u->addr) 1146 goto out; 1147 1148 err = -ENOMEM; 1149 addr = kzalloc(sizeof(*addr) + 1150 offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL); 1151 if (!addr) 1152 goto out; 1153 1154 addr->len = offsetof(struct sockaddr_un, sun_path) + 6; 1155 addr->name->sun_family = AF_UNIX; 1156 refcount_set(&addr->refcnt, 1); 1157 1158 ordernum = get_random_u32(); 1159 lastnum = ordernum & 0xFFFFF; 1160 retry: 1161 ordernum = (ordernum + 1) & 0xFFFFF; 1162 sprintf(addr->name->sun_path + 1, "%05x", ordernum); 1163 1164 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); 1165 unix_table_double_lock(net, old_hash, new_hash); 1166 1167 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) { 1168 unix_table_double_unlock(net, old_hash, new_hash); 1169 1170 /* 
__unix_find_socket_byname() may take long time if many names 1171 * are already in use. 1172 */ 1173 cond_resched(); 1174 1175 if (ordernum == lastnum) { 1176 /* Give up if all names seems to be in use. */ 1177 err = -ENOSPC; 1178 unix_release_addr(addr); 1179 goto out; 1180 } 1181 1182 goto retry; 1183 } 1184 1185 __unix_set_addr_hash(net, sk, addr, new_hash); 1186 unix_table_double_unlock(net, old_hash, new_hash); 1187 err = 0; 1188 1189 out: mutex_unlock(&u->bindlock); 1190 return err; 1191 } 1192 1193 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr, 1194 int addr_len) 1195 { 1196 umode_t mode = S_IFSOCK | 1197 (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask()); 1198 unsigned int new_hash, old_hash = sk->sk_hash; 1199 struct unix_sock *u = unix_sk(sk); 1200 struct net *net = sock_net(sk); 1201 struct mnt_idmap *idmap; 1202 struct unix_address *addr; 1203 struct dentry *dentry; 1204 struct path parent; 1205 int err; 1206 1207 addr_len = unix_mkname_bsd(sunaddr, addr_len); 1208 addr = unix_create_addr(sunaddr, addr_len); 1209 if (!addr) 1210 return -ENOMEM; 1211 1212 /* 1213 * Get the parent directory, calculate the hash for last 1214 * component. 1215 */ 1216 dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0); 1217 if (IS_ERR(dentry)) { 1218 err = PTR_ERR(dentry); 1219 goto out; 1220 } 1221 1222 /* 1223 * All right, let's create it. 1224 */ 1225 idmap = mnt_idmap(parent.mnt); 1226 err = security_path_mknod(&parent, dentry, mode, 0); 1227 if (!err) 1228 err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0); 1229 if (err) 1230 goto out_path; 1231 err = mutex_lock_interruptible(&u->bindlock); 1232 if (err) 1233 goto out_unlink; 1234 if (u->addr) 1235 goto out_unlock; 1236 1237 new_hash = unix_bsd_hash(d_backing_inode(dentry)); 1238 unix_table_double_lock(net, old_hash, new_hash); 1239 u->path.mnt = mntget(parent.mnt); 1240 u->path.dentry = dget(dentry); 1241 __unix_set_addr_hash(net, sk, addr, new_hash); 1242 unix_table_double_unlock(net, old_hash, new_hash); 1243 unix_insert_bsd_socket(sk); 1244 mutex_unlock(&u->bindlock); 1245 done_path_create(&parent, dentry); 1246 return 0; 1247 1248 out_unlock: 1249 mutex_unlock(&u->bindlock); 1250 err = -EINVAL; 1251 out_unlink: 1252 /* failed after successful mknod? unlink what we'd created... */ 1253 vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL); 1254 out_path: 1255 done_path_create(&parent, dentry); 1256 out: 1257 unix_release_addr(addr); 1258 return err == -EEXIST ? 
-EADDRINUSE : err; 1259 } 1260 1261 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr, 1262 int addr_len) 1263 { 1264 unsigned int new_hash, old_hash = sk->sk_hash; 1265 struct unix_sock *u = unix_sk(sk); 1266 struct net *net = sock_net(sk); 1267 struct unix_address *addr; 1268 int err; 1269 1270 addr = unix_create_addr(sunaddr, addr_len); 1271 if (!addr) 1272 return -ENOMEM; 1273 1274 err = mutex_lock_interruptible(&u->bindlock); 1275 if (err) 1276 goto out; 1277 1278 if (u->addr) { 1279 err = -EINVAL; 1280 goto out_mutex; 1281 } 1282 1283 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); 1284 unix_table_double_lock(net, old_hash, new_hash); 1285 1286 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) 1287 goto out_spin; 1288 1289 __unix_set_addr_hash(net, sk, addr, new_hash); 1290 unix_table_double_unlock(net, old_hash, new_hash); 1291 mutex_unlock(&u->bindlock); 1292 return 0; 1293 1294 out_spin: 1295 unix_table_double_unlock(net, old_hash, new_hash); 1296 err = -EADDRINUSE; 1297 out_mutex: 1298 mutex_unlock(&u->bindlock); 1299 out: 1300 unix_release_addr(addr); 1301 return err; 1302 } 1303 1304 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 1305 { 1306 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; 1307 struct sock *sk = sock->sk; 1308 int err; 1309 1310 if (addr_len == offsetof(struct sockaddr_un, sun_path) && 1311 sunaddr->sun_family == AF_UNIX) 1312 return unix_autobind(sk); 1313 1314 err = unix_validate_addr(sunaddr, addr_len); 1315 if (err) 1316 return err; 1317 1318 if (sunaddr->sun_path[0]) 1319 err = unix_bind_bsd(sk, sunaddr, addr_len); 1320 else 1321 err = unix_bind_abstract(sk, sunaddr, addr_len); 1322 1323 return err; 1324 } 1325 1326 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) 1327 { 1328 if (unlikely(sk1 == sk2) || !sk2) { 1329 unix_state_lock(sk1); 1330 return; 1331 } 1332 if (sk1 > sk2) 1333 swap(sk1, sk2); 1334 1335 unix_state_lock(sk1); 1336 unix_state_lock_nested(sk2, U_LOCK_SECOND); 1337 } 1338 1339 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) 1340 { 1341 if (unlikely(sk1 == sk2) || !sk2) { 1342 unix_state_unlock(sk1); 1343 return; 1344 } 1345 unix_state_unlock(sk1); 1346 unix_state_unlock(sk2); 1347 } 1348 1349 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, 1350 int alen, int flags) 1351 { 1352 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; 1353 struct sock *sk = sock->sk; 1354 struct sock *other; 1355 int err; 1356 1357 err = -EINVAL; 1358 if (alen < offsetofend(struct sockaddr, sa_family)) 1359 goto out; 1360 1361 if (addr->sa_family != AF_UNSPEC) { 1362 err = unix_validate_addr(sunaddr, alen); 1363 if (err) 1364 goto out; 1365 1366 err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen); 1367 if (err) 1368 goto out; 1369 1370 if ((test_bit(SOCK_PASSCRED, &sock->flags) || 1371 test_bit(SOCK_PASSPIDFD, &sock->flags)) && 1372 !unix_sk(sk)->addr) { 1373 err = unix_autobind(sk); 1374 if (err) 1375 goto out; 1376 } 1377 1378 restart: 1379 other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type); 1380 if (IS_ERR(other)) { 1381 err = PTR_ERR(other); 1382 goto out; 1383 } 1384 1385 unix_state_double_lock(sk, other); 1386 1387 /* Apparently VFS overslept socket death. Retry. 
*/ 1388 if (sock_flag(other, SOCK_DEAD)) { 1389 unix_state_double_unlock(sk, other); 1390 sock_put(other); 1391 goto restart; 1392 } 1393 1394 err = -EPERM; 1395 if (!unix_may_send(sk, other)) 1396 goto out_unlock; 1397 1398 err = security_unix_may_send(sk->sk_socket, other->sk_socket); 1399 if (err) 1400 goto out_unlock; 1401 1402 sk->sk_state = other->sk_state = TCP_ESTABLISHED; 1403 } else { 1404 /* 1405 * 1003.1g breaking connected state with AF_UNSPEC 1406 */ 1407 other = NULL; 1408 unix_state_double_lock(sk, other); 1409 } 1410 1411 /* 1412 * If it was connected, reconnect. 1413 */ 1414 if (unix_peer(sk)) { 1415 struct sock *old_peer = unix_peer(sk); 1416 1417 unix_peer(sk) = other; 1418 if (!other) 1419 sk->sk_state = TCP_CLOSE; 1420 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); 1421 1422 unix_state_double_unlock(sk, other); 1423 1424 if (other != old_peer) 1425 unix_dgram_disconnected(sk, old_peer); 1426 sock_put(old_peer); 1427 } else { 1428 unix_peer(sk) = other; 1429 unix_state_double_unlock(sk, other); 1430 } 1431 1432 return 0; 1433 1434 out_unlock: 1435 unix_state_double_unlock(sk, other); 1436 sock_put(other); 1437 out: 1438 return err; 1439 } 1440 1441 static long unix_wait_for_peer(struct sock *other, long timeo) 1442 __releases(&unix_sk(other)->lock) 1443 { 1444 struct unix_sock *u = unix_sk(other); 1445 int sched; 1446 DEFINE_WAIT(wait); 1447 1448 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE); 1449 1450 sched = !sock_flag(other, SOCK_DEAD) && 1451 !(other->sk_shutdown & RCV_SHUTDOWN) && 1452 unix_recvq_full_lockless(other); 1453 1454 unix_state_unlock(other); 1455 1456 if (sched) 1457 timeo = schedule_timeout(timeo); 1458 1459 finish_wait(&u->peer_wait, &wait); 1460 return timeo; 1461 } 1462 1463 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, 1464 int addr_len, int flags) 1465 { 1466 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; 1467 struct sock *sk = sock->sk, *newsk = NULL, *other = NULL; 1468 struct unix_sock *u = unix_sk(sk), *newu, *otheru; 1469 struct net *net = sock_net(sk); 1470 struct sk_buff *skb = NULL; 1471 long timeo; 1472 int err; 1473 int st; 1474 1475 err = unix_validate_addr(sunaddr, addr_len); 1476 if (err) 1477 goto out; 1478 1479 err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len); 1480 if (err) 1481 goto out; 1482 1483 if ((test_bit(SOCK_PASSCRED, &sock->flags) || 1484 test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) { 1485 err = unix_autobind(sk); 1486 if (err) 1487 goto out; 1488 } 1489 1490 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 1491 1492 /* First of all allocate resources. 1493 If we will make it after state is locked, 1494 we will have to recheck all again in any case. 1495 */ 1496 1497 /* create new sock for complete connection */ 1498 newsk = unix_create1(net, NULL, 0, sock->type); 1499 if (IS_ERR(newsk)) { 1500 err = PTR_ERR(newsk); 1501 newsk = NULL; 1502 goto out; 1503 } 1504 1505 err = -ENOMEM; 1506 1507 /* Allocate skb for sending to listening sock */ 1508 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); 1509 if (skb == NULL) 1510 goto out; 1511 1512 restart: 1513 /* Find listening sock. */ 1514 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type); 1515 if (IS_ERR(other)) { 1516 err = PTR_ERR(other); 1517 other = NULL; 1518 goto out; 1519 } 1520 1521 /* Latch state of peer */ 1522 unix_state_lock(other); 1523 1524 /* Apparently VFS overslept socket death. Retry. 
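	 *
	 * (The lookup raced with unix_release_sock(): the peer we found is
	 * already marked SOCK_DEAD, so drop our reference and repeat the
	 * lookup.)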
*/ 1525 if (sock_flag(other, SOCK_DEAD)) { 1526 unix_state_unlock(other); 1527 sock_put(other); 1528 goto restart; 1529 } 1530 1531 err = -ECONNREFUSED; 1532 if (other->sk_state != TCP_LISTEN) 1533 goto out_unlock; 1534 if (other->sk_shutdown & RCV_SHUTDOWN) 1535 goto out_unlock; 1536 1537 if (unix_recvq_full(other)) { 1538 err = -EAGAIN; 1539 if (!timeo) 1540 goto out_unlock; 1541 1542 timeo = unix_wait_for_peer(other, timeo); 1543 1544 err = sock_intr_errno(timeo); 1545 if (signal_pending(current)) 1546 goto out; 1547 sock_put(other); 1548 goto restart; 1549 } 1550 1551 /* Latch our state. 1552 1553 It is tricky place. We need to grab our state lock and cannot 1554 drop lock on peer. It is dangerous because deadlock is 1555 possible. Connect to self case and simultaneous 1556 attempt to connect are eliminated by checking socket 1557 state. other is TCP_LISTEN, if sk is TCP_LISTEN we 1558 check this before attempt to grab lock. 1559 1560 Well, and we have to recheck the state after socket locked. 1561 */ 1562 st = sk->sk_state; 1563 1564 switch (st) { 1565 case TCP_CLOSE: 1566 /* This is ok... continue with connect */ 1567 break; 1568 case TCP_ESTABLISHED: 1569 /* Socket is already connected */ 1570 err = -EISCONN; 1571 goto out_unlock; 1572 default: 1573 err = -EINVAL; 1574 goto out_unlock; 1575 } 1576 1577 unix_state_lock_nested(sk, U_LOCK_SECOND); 1578 1579 if (sk->sk_state != st) { 1580 unix_state_unlock(sk); 1581 unix_state_unlock(other); 1582 sock_put(other); 1583 goto restart; 1584 } 1585 1586 err = security_unix_stream_connect(sk, other, newsk); 1587 if (err) { 1588 unix_state_unlock(sk); 1589 goto out_unlock; 1590 } 1591 1592 /* The way is open! Fastly set all the necessary fields... */ 1593 1594 sock_hold(sk); 1595 unix_peer(newsk) = sk; 1596 newsk->sk_state = TCP_ESTABLISHED; 1597 newsk->sk_type = sk->sk_type; 1598 init_peercred(newsk); 1599 newu = unix_sk(newsk); 1600 newu->listener = other; 1601 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); 1602 otheru = unix_sk(other); 1603 1604 /* copy address information from listening to new sock 1605 * 1606 * The contents of *(otheru->addr) and otheru->path 1607 * are seen fully set up here, since we have found 1608 * otheru in hash under its lock. Insertion into the 1609 * hash chain we'd found it in had been done in an 1610 * earlier critical area protected by the chain's lock, 1611 * the same one where we'd set *(otheru->addr) contents, 1612 * as well as otheru->path and otheru->addr itself. 1613 * 1614 * Using smp_store_release() here to set newu->addr 1615 * is enough to make those stores, as well as stores 1616 * to newu->path visible to anyone who gets newu->addr 1617 * by smp_load_acquire(). IOW, the same warranties 1618 * as for unix_sock instances bound in unix_bind() or 1619 * in unix_autobind(). 
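	 *
	 * The matching acquire side is what readers such as unix_getname()
	 * do, roughly:
	 *
	 *	addr = smp_load_acquire(&unix_sk(sk)->addr);
	 *	if (addr)
	 *		memcpy(sunaddr, addr->name, addr->len);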
1620 */ 1621 if (otheru->path.dentry) { 1622 path_get(&otheru->path); 1623 newu->path = otheru->path; 1624 } 1625 refcount_inc(&otheru->addr->refcnt); 1626 smp_store_release(&newu->addr, otheru->addr); 1627 1628 /* Set credentials */ 1629 copy_peercred(sk, other); 1630 1631 sock->state = SS_CONNECTED; 1632 sk->sk_state = TCP_ESTABLISHED; 1633 sock_hold(newsk); 1634 1635 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ 1636 unix_peer(sk) = newsk; 1637 1638 unix_state_unlock(sk); 1639 1640 /* take ten and send info to listening sock */ 1641 spin_lock(&other->sk_receive_queue.lock); 1642 __skb_queue_tail(&other->sk_receive_queue, skb); 1643 spin_unlock(&other->sk_receive_queue.lock); 1644 unix_state_unlock(other); 1645 other->sk_data_ready(other); 1646 sock_put(other); 1647 return 0; 1648 1649 out_unlock: 1650 if (other) 1651 unix_state_unlock(other); 1652 1653 out: 1654 kfree_skb(skb); 1655 if (newsk) 1656 unix_release_sock(newsk, 0); 1657 if (other) 1658 sock_put(other); 1659 return err; 1660 } 1661 1662 static int unix_socketpair(struct socket *socka, struct socket *sockb) 1663 { 1664 struct sock *ska = socka->sk, *skb = sockb->sk; 1665 1666 /* Join our sockets back to back */ 1667 sock_hold(ska); 1668 sock_hold(skb); 1669 unix_peer(ska) = skb; 1670 unix_peer(skb) = ska; 1671 init_peercred(ska); 1672 init_peercred(skb); 1673 1674 ska->sk_state = TCP_ESTABLISHED; 1675 skb->sk_state = TCP_ESTABLISHED; 1676 socka->state = SS_CONNECTED; 1677 sockb->state = SS_CONNECTED; 1678 return 0; 1679 } 1680 1681 static void unix_sock_inherit_flags(const struct socket *old, 1682 struct socket *new) 1683 { 1684 if (test_bit(SOCK_PASSCRED, &old->flags)) 1685 set_bit(SOCK_PASSCRED, &new->flags); 1686 if (test_bit(SOCK_PASSPIDFD, &old->flags)) 1687 set_bit(SOCK_PASSPIDFD, &new->flags); 1688 if (test_bit(SOCK_PASSSEC, &old->flags)) 1689 set_bit(SOCK_PASSSEC, &new->flags); 1690 } 1691 1692 static int unix_accept(struct socket *sock, struct socket *newsock, int flags, 1693 bool kern) 1694 { 1695 struct sock *sk = sock->sk; 1696 struct sk_buff *skb; 1697 struct sock *tsk; 1698 int err; 1699 1700 err = -EOPNOTSUPP; 1701 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) 1702 goto out; 1703 1704 err = -EINVAL; 1705 if (sk->sk_state != TCP_LISTEN) 1706 goto out; 1707 1708 /* If socket state is TCP_LISTEN it cannot change (for now...), 1709 * so that no locks are necessary. 1710 */ 1711 1712 skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0, 1713 &err); 1714 if (!skb) { 1715 /* This means receive shutdown. 
*/ 1716 if (err == 0) 1717 err = -EINVAL; 1718 goto out; 1719 } 1720 1721 tsk = skb->sk; 1722 unix_update_edges(unix_sk(tsk)); 1723 skb_free_datagram(sk, skb); 1724 wake_up_interruptible(&unix_sk(sk)->peer_wait); 1725 1726 /* attach accepted sock to socket */ 1727 unix_state_lock(tsk); 1728 newsock->state = SS_CONNECTED; 1729 unix_sock_inherit_flags(sock, newsock); 1730 sock_graft(tsk, newsock); 1731 unix_state_unlock(tsk); 1732 return 0; 1733 1734 out: 1735 return err; 1736 } 1737 1738 1739 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) 1740 { 1741 struct sock *sk = sock->sk; 1742 struct unix_address *addr; 1743 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); 1744 int err = 0; 1745 1746 if (peer) { 1747 sk = unix_peer_get(sk); 1748 1749 err = -ENOTCONN; 1750 if (!sk) 1751 goto out; 1752 err = 0; 1753 } else { 1754 sock_hold(sk); 1755 } 1756 1757 addr = smp_load_acquire(&unix_sk(sk)->addr); 1758 if (!addr) { 1759 sunaddr->sun_family = AF_UNIX; 1760 sunaddr->sun_path[0] = 0; 1761 err = offsetof(struct sockaddr_un, sun_path); 1762 } else { 1763 err = addr->len; 1764 memcpy(sunaddr, addr->name, addr->len); 1765 1766 if (peer) 1767 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err, 1768 CGROUP_UNIX_GETPEERNAME); 1769 else 1770 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err, 1771 CGROUP_UNIX_GETSOCKNAME); 1772 } 1773 sock_put(sk); 1774 out: 1775 return err; 1776 } 1777 1778 /* The "user->unix_inflight" variable is protected by the garbage 1779 * collection lock, and we just read it locklessly here. If you go 1780 * over the limit, there might be a tiny race in actually noticing 1781 * it across threads. Tough. 1782 */ 1783 static inline bool too_many_unix_fds(struct task_struct *p) 1784 { 1785 struct user_struct *user = current_user(); 1786 1787 if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE))) 1788 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); 1789 return false; 1790 } 1791 1792 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) 1793 { 1794 if (too_many_unix_fds(current)) 1795 return -ETOOMANYREFS; 1796 1797 /* Need to duplicate file references for the sake of garbage 1798 * collection. Otherwise a socket in the fps might become a 1799 * candidate for GC while the skb is not yet queued. 1800 */ 1801 UNIXCB(skb).fp = scm_fp_dup(scm->fp); 1802 if (!UNIXCB(skb).fp) 1803 return -ENOMEM; 1804 1805 if (unix_prepare_fpl(UNIXCB(skb).fp)) 1806 return -ENOMEM; 1807 1808 return 0; 1809 } 1810 1811 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) 1812 { 1813 scm->fp = UNIXCB(skb).fp; 1814 UNIXCB(skb).fp = NULL; 1815 1816 unix_destroy_fpl(scm->fp); 1817 } 1818 1819 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) 1820 { 1821 scm->fp = scm_fp_dup(UNIXCB(skb).fp); 1822 1823 /* 1824 * Garbage collection of unix sockets starts by selecting a set of 1825 * candidate sockets which have reference only from being in flight 1826 * (total_refs == inflight_refs). This condition is checked once during 1827 * the candidate collection phase, and candidates are marked as such, so 1828 * that non-candidates can later be ignored. While inflight_refs is 1829 * protected by unix_gc_lock, total_refs (file count) is not, hence this 1830 * is an instantaneous decision. 1831 * 1832 * Once a candidate, however, the socket must not be reinstalled into a 1833 * file descriptor while the garbage collection is in progress. 
1834 * 1835 * If the above conditions are met, then the directed graph of 1836 * candidates (*) does not change while unix_gc_lock is held. 1837 * 1838 * Any operations that changes the file count through file descriptors 1839 * (dup, close, sendmsg) does not change the graph since candidates are 1840 * not installed in fds. 1841 * 1842 * Dequeing a candidate via recvmsg would install it into an fd, but 1843 * that takes unix_gc_lock to decrement the inflight count, so it's 1844 * serialized with garbage collection. 1845 * 1846 * MSG_PEEK is special in that it does not change the inflight count, 1847 * yet does install the socket into an fd. The following lock/unlock 1848 * pair is to ensure serialization with garbage collection. It must be 1849 * done between incrementing the file count and installing the file into 1850 * an fd. 1851 * 1852 * If garbage collection starts after the barrier provided by the 1853 * lock/unlock, then it will see the elevated refcount and not mark this 1854 * as a candidate. If a garbage collection is already in progress 1855 * before the file count was incremented, then the lock/unlock pair will 1856 * ensure that garbage collection is finished before progressing to 1857 * installing the fd. 1858 * 1859 * (*) A -> B where B is on the queue of A or B is on the queue of C 1860 * which is on the queue of listening socket A. 1861 */ 1862 spin_lock(&unix_gc_lock); 1863 spin_unlock(&unix_gc_lock); 1864 } 1865 1866 static void unix_destruct_scm(struct sk_buff *skb) 1867 { 1868 struct scm_cookie scm; 1869 1870 memset(&scm, 0, sizeof(scm)); 1871 scm.pid = UNIXCB(skb).pid; 1872 if (UNIXCB(skb).fp) 1873 unix_detach_fds(&scm, skb); 1874 1875 /* Alas, it calls VFS */ 1876 /* So fscking what? fput() had been SMP-safe since the last Summer */ 1877 scm_destroy(&scm); 1878 sock_wfree(skb); 1879 } 1880 1881 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) 1882 { 1883 int err = 0; 1884 1885 UNIXCB(skb).pid = get_pid(scm->pid); 1886 UNIXCB(skb).uid = scm->creds.uid; 1887 UNIXCB(skb).gid = scm->creds.gid; 1888 UNIXCB(skb).fp = NULL; 1889 unix_get_secdata(scm, skb); 1890 if (scm->fp && send_fds) 1891 err = unix_attach_fds(scm, skb); 1892 1893 skb->destructor = unix_destruct_scm; 1894 return err; 1895 } 1896 1897 static bool unix_passcred_enabled(const struct socket *sock, 1898 const struct sock *other) 1899 { 1900 return test_bit(SOCK_PASSCRED, &sock->flags) || 1901 test_bit(SOCK_PASSPIDFD, &sock->flags) || 1902 !other->sk_socket || 1903 test_bit(SOCK_PASSCRED, &other->sk_socket->flags) || 1904 test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags); 1905 } 1906 1907 /* 1908 * Some apps rely on write() giving SCM_CREDENTIALS 1909 * We include credentials if source or destination socket 1910 * asserted SOCK_PASSCRED. 
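 *
 * A receiver-side sketch of the effect (illustrative only):
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *
 * recvmsg() will then yield an SCM_CREDENTIALS cmsg carrying the
 * sender's pid/uid/gid even when the sender used plain write().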
1911 */ 1912 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, 1913 const struct sock *other) 1914 { 1915 if (UNIXCB(skb).pid) 1916 return; 1917 if (unix_passcred_enabled(sock, other)) { 1918 UNIXCB(skb).pid = get_pid(task_tgid(current)); 1919 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); 1920 } 1921 } 1922 1923 static bool unix_skb_scm_eq(struct sk_buff *skb, 1924 struct scm_cookie *scm) 1925 { 1926 return UNIXCB(skb).pid == scm->pid && 1927 uid_eq(UNIXCB(skb).uid, scm->creds.uid) && 1928 gid_eq(UNIXCB(skb).gid, scm->creds.gid) && 1929 unix_secdata_eq(scm, skb); 1930 } 1931 1932 static void scm_stat_add(struct sock *sk, struct sk_buff *skb) 1933 { 1934 struct scm_fp_list *fp = UNIXCB(skb).fp; 1935 struct unix_sock *u = unix_sk(sk); 1936 1937 if (unlikely(fp && fp->count)) { 1938 atomic_add(fp->count, &u->scm_stat.nr_fds); 1939 unix_add_edges(fp, u); 1940 } 1941 } 1942 1943 static void scm_stat_del(struct sock *sk, struct sk_buff *skb) 1944 { 1945 struct scm_fp_list *fp = UNIXCB(skb).fp; 1946 struct unix_sock *u = unix_sk(sk); 1947 1948 if (unlikely(fp && fp->count)) { 1949 atomic_sub(fp->count, &u->scm_stat.nr_fds); 1950 unix_del_edges(fp); 1951 } 1952 } 1953 1954 /* 1955 * Send AF_UNIX data. 1956 */ 1957 1958 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, 1959 size_t len) 1960 { 1961 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); 1962 struct sock *sk = sock->sk, *other = NULL; 1963 struct unix_sock *u = unix_sk(sk); 1964 struct scm_cookie scm; 1965 struct sk_buff *skb; 1966 int data_len = 0; 1967 int sk_locked; 1968 long timeo; 1969 int err; 1970 1971 err = scm_send(sock, msg, &scm, false); 1972 if (err < 0) 1973 return err; 1974 1975 wait_for_unix_gc(scm.fp); 1976 1977 err = -EOPNOTSUPP; 1978 if (msg->msg_flags&MSG_OOB) 1979 goto out; 1980 1981 if (msg->msg_namelen) { 1982 err = unix_validate_addr(sunaddr, msg->msg_namelen); 1983 if (err) 1984 goto out; 1985 1986 err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, 1987 msg->msg_name, 1988 &msg->msg_namelen, 1989 NULL); 1990 if (err) 1991 goto out; 1992 } else { 1993 sunaddr = NULL; 1994 err = -ENOTCONN; 1995 other = unix_peer_get(sk); 1996 if (!other) 1997 goto out; 1998 } 1999 2000 if ((test_bit(SOCK_PASSCRED, &sock->flags) || 2001 test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) { 2002 err = unix_autobind(sk); 2003 if (err) 2004 goto out; 2005 } 2006 2007 err = -EMSGSIZE; 2008 if (len > sk->sk_sndbuf - 32) 2009 goto out; 2010 2011 if (len > SKB_MAX_ALLOC) { 2012 data_len = min_t(size_t, 2013 len - SKB_MAX_ALLOC, 2014 MAX_SKB_FRAGS * PAGE_SIZE); 2015 data_len = PAGE_ALIGN(data_len); 2016 2017 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE); 2018 } 2019 2020 skb = sock_alloc_send_pskb(sk, len - data_len, data_len, 2021 msg->msg_flags & MSG_DONTWAIT, &err, 2022 PAGE_ALLOC_COSTLY_ORDER); 2023 if (skb == NULL) 2024 goto out; 2025 2026 err = unix_scm_to_skb(&scm, skb, true); 2027 if (err < 0) 2028 goto out_free; 2029 2030 skb_put(skb, len - data_len); 2031 skb->data_len = data_len; 2032 skb->len = len; 2033 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); 2034 if (err) 2035 goto out_free; 2036 2037 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 2038 2039 restart: 2040 if (!other) { 2041 err = -ECONNRESET; 2042 if (sunaddr == NULL) 2043 goto out_free; 2044 2045 other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen, 2046 sk->sk_type); 2047 if (IS_ERR(other)) { 2048 err = PTR_ERR(other); 2049 other = NULL; 2050 goto out_free; 2051 } 2052 } 
2053 2054 if (sk_filter(other, skb) < 0) { 2055 /* Toss the packet but do not return any error to the sender */ 2056 err = len; 2057 goto out_free; 2058 } 2059 2060 sk_locked = 0; 2061 unix_state_lock(other); 2062 restart_locked: 2063 err = -EPERM; 2064 if (!unix_may_send(sk, other)) 2065 goto out_unlock; 2066 2067 if (unlikely(sock_flag(other, SOCK_DEAD))) { 2068 /* 2069 * Check with 1003.1g - what should 2070 * datagram error 2071 */ 2072 unix_state_unlock(other); 2073 sock_put(other); 2074 2075 if (!sk_locked) 2076 unix_state_lock(sk); 2077 2078 err = 0; 2079 if (sk->sk_type == SOCK_SEQPACKET) { 2080 /* We are here only when racing with unix_release_sock() 2081 * is clearing @other. Never change state to TCP_CLOSE 2082 * unlike SOCK_DGRAM wants. 2083 */ 2084 unix_state_unlock(sk); 2085 err = -EPIPE; 2086 } else if (unix_peer(sk) == other) { 2087 unix_peer(sk) = NULL; 2088 unix_dgram_peer_wake_disconnect_wakeup(sk, other); 2089 2090 sk->sk_state = TCP_CLOSE; 2091 unix_state_unlock(sk); 2092 2093 unix_dgram_disconnected(sk, other); 2094 sock_put(other); 2095 err = -ECONNREFUSED; 2096 } else { 2097 unix_state_unlock(sk); 2098 } 2099 2100 other = NULL; 2101 if (err) 2102 goto out_free; 2103 goto restart; 2104 } 2105 2106 err = -EPIPE; 2107 if (other->sk_shutdown & RCV_SHUTDOWN) 2108 goto out_unlock; 2109 2110 if (sk->sk_type != SOCK_SEQPACKET) { 2111 err = security_unix_may_send(sk->sk_socket, other->sk_socket); 2112 if (err) 2113 goto out_unlock; 2114 } 2115 2116 /* other == sk && unix_peer(other) != sk if 2117 * - unix_peer(sk) == NULL, destination address bound to sk 2118 * - unix_peer(sk) == sk by time of get but disconnected before lock 2119 */ 2120 if (other != sk && 2121 unlikely(unix_peer(other) != sk && 2122 unix_recvq_full_lockless(other))) { 2123 if (timeo) { 2124 timeo = unix_wait_for_peer(other, timeo); 2125 2126 err = sock_intr_errno(timeo); 2127 if (signal_pending(current)) 2128 goto out_free; 2129 2130 goto restart; 2131 } 2132 2133 if (!sk_locked) { 2134 unix_state_unlock(other); 2135 unix_state_double_lock(sk, other); 2136 } 2137 2138 if (unix_peer(sk) != other || 2139 unix_dgram_peer_wake_me(sk, other)) { 2140 err = -EAGAIN; 2141 sk_locked = 1; 2142 goto out_unlock; 2143 } 2144 2145 if (!sk_locked) { 2146 sk_locked = 1; 2147 goto restart_locked; 2148 } 2149 } 2150 2151 if (unlikely(sk_locked)) 2152 unix_state_unlock(sk); 2153 2154 if (sock_flag(other, SOCK_RCVTSTAMP)) 2155 __net_timestamp(skb); 2156 maybe_add_creds(skb, sock, other); 2157 scm_stat_add(other, skb); 2158 skb_queue_tail(&other->sk_receive_queue, skb); 2159 unix_state_unlock(other); 2160 other->sk_data_ready(other); 2161 sock_put(other); 2162 scm_destroy(&scm); 2163 return len; 2164 2165 out_unlock: 2166 if (sk_locked) 2167 unix_state_unlock(sk); 2168 unix_state_unlock(other); 2169 out_free: 2170 kfree_skb(skb); 2171 out: 2172 if (other) 2173 sock_put(other); 2174 scm_destroy(&scm); 2175 return err; 2176 } 2177 2178 /* We use paged skbs for stream sockets, and limit occupancy to 32768 2179 * bytes, and a minimum of a full page. 
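 * (For example, with 4 KiB pages get_order(32768) is 3, so the macro
 * below works out to 4096 << 3 == 32768 bytes; with 64 KiB pages it is
 * 65536 << 0, i.e. one full page.)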
2180 */ 2181 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) 2182 2183 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2184 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other, 2185 struct scm_cookie *scm, bool fds_sent) 2186 { 2187 struct unix_sock *ousk = unix_sk(other); 2188 struct sk_buff *skb; 2189 int err = 0; 2190 2191 skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err); 2192 2193 if (!skb) 2194 return err; 2195 2196 err = unix_scm_to_skb(scm, skb, !fds_sent); 2197 if (err < 0) { 2198 kfree_skb(skb); 2199 return err; 2200 } 2201 skb_put(skb, 1); 2202 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1); 2203 2204 if (err) { 2205 kfree_skb(skb); 2206 return err; 2207 } 2208 2209 unix_state_lock(other); 2210 2211 if (sock_flag(other, SOCK_DEAD) || 2212 (other->sk_shutdown & RCV_SHUTDOWN)) { 2213 unix_state_unlock(other); 2214 kfree_skb(skb); 2215 return -EPIPE; 2216 } 2217 2218 maybe_add_creds(skb, sock, other); 2219 skb_get(skb); 2220 2221 if (ousk->oob_skb) 2222 consume_skb(ousk->oob_skb); 2223 2224 WRITE_ONCE(ousk->oob_skb, skb); 2225 2226 scm_stat_add(other, skb); 2227 skb_queue_tail(&other->sk_receive_queue, skb); 2228 sk_send_sigurg(other); 2229 unix_state_unlock(other); 2230 other->sk_data_ready(other); 2231 2232 return err; 2233 } 2234 #endif 2235 2236 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, 2237 size_t len) 2238 { 2239 struct sock *sk = sock->sk; 2240 struct sock *other = NULL; 2241 int err, size; 2242 struct sk_buff *skb; 2243 int sent = 0; 2244 struct scm_cookie scm; 2245 bool fds_sent = false; 2246 int data_len; 2247 2248 err = scm_send(sock, msg, &scm, false); 2249 if (err < 0) 2250 return err; 2251 2252 wait_for_unix_gc(scm.fp); 2253 2254 err = -EOPNOTSUPP; 2255 if (msg->msg_flags & MSG_OOB) { 2256 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2257 if (len) 2258 len--; 2259 else 2260 #endif 2261 goto out_err; 2262 } 2263 2264 if (msg->msg_namelen) { 2265 err = sk->sk_state == TCP_ESTABLISHED ? 
-EISCONN : -EOPNOTSUPP; 2266 goto out_err; 2267 } else { 2268 err = -ENOTCONN; 2269 other = unix_peer(sk); 2270 if (!other) 2271 goto out_err; 2272 } 2273 2274 if (sk->sk_shutdown & SEND_SHUTDOWN) 2275 goto pipe_err; 2276 2277 while (sent < len) { 2278 size = len - sent; 2279 2280 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { 2281 skb = sock_alloc_send_pskb(sk, 0, 0, 2282 msg->msg_flags & MSG_DONTWAIT, 2283 &err, 0); 2284 } else { 2285 /* Keep two messages in the pipe so it schedules better */ 2286 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); 2287 2288 /* allow fallback to order-0 allocations */ 2289 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); 2290 2291 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); 2292 2293 data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); 2294 2295 skb = sock_alloc_send_pskb(sk, size - data_len, data_len, 2296 msg->msg_flags & MSG_DONTWAIT, &err, 2297 get_order(UNIX_SKB_FRAGS_SZ)); 2298 } 2299 if (!skb) 2300 goto out_err; 2301 2302 /* Only send the fds in the first buffer */ 2303 err = unix_scm_to_skb(&scm, skb, !fds_sent); 2304 if (err < 0) { 2305 kfree_skb(skb); 2306 goto out_err; 2307 } 2308 fds_sent = true; 2309 2310 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { 2311 err = skb_splice_from_iter(skb, &msg->msg_iter, size, 2312 sk->sk_allocation); 2313 if (err < 0) { 2314 kfree_skb(skb); 2315 goto out_err; 2316 } 2317 size = err; 2318 refcount_add(size, &sk->sk_wmem_alloc); 2319 } else { 2320 skb_put(skb, size - data_len); 2321 skb->data_len = data_len; 2322 skb->len = size; 2323 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); 2324 if (err) { 2325 kfree_skb(skb); 2326 goto out_err; 2327 } 2328 } 2329 2330 unix_state_lock(other); 2331 2332 if (sock_flag(other, SOCK_DEAD) || 2333 (other->sk_shutdown & RCV_SHUTDOWN)) 2334 goto pipe_err_free; 2335 2336 maybe_add_creds(skb, sock, other); 2337 scm_stat_add(other, skb); 2338 skb_queue_tail(&other->sk_receive_queue, skb); 2339 unix_state_unlock(other); 2340 other->sk_data_ready(other); 2341 sent += size; 2342 } 2343 2344 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2345 if (msg->msg_flags & MSG_OOB) { 2346 err = queue_oob(sock, msg, other, &scm, fds_sent); 2347 if (err) 2348 goto out_err; 2349 sent++; 2350 } 2351 #endif 2352 2353 scm_destroy(&scm); 2354 2355 return sent; 2356 2357 pipe_err_free: 2358 unix_state_unlock(other); 2359 kfree_skb(skb); 2360 pipe_err: 2361 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL)) 2362 send_sig(SIGPIPE, current, 0); 2363 err = -EPIPE; 2364 out_err: 2365 scm_destroy(&scm); 2366 return sent ? 
: err; 2367 } 2368 2369 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, 2370 size_t len) 2371 { 2372 int err; 2373 struct sock *sk = sock->sk; 2374 2375 err = sock_error(sk); 2376 if (err) 2377 return err; 2378 2379 if (sk->sk_state != TCP_ESTABLISHED) 2380 return -ENOTCONN; 2381 2382 if (msg->msg_namelen) 2383 msg->msg_namelen = 0; 2384 2385 return unix_dgram_sendmsg(sock, msg, len); 2386 } 2387 2388 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, 2389 size_t size, int flags) 2390 { 2391 struct sock *sk = sock->sk; 2392 2393 if (sk->sk_state != TCP_ESTABLISHED) 2394 return -ENOTCONN; 2395 2396 return unix_dgram_recvmsg(sock, msg, size, flags); 2397 } 2398 2399 static void unix_copy_addr(struct msghdr *msg, struct sock *sk) 2400 { 2401 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); 2402 2403 if (addr) { 2404 msg->msg_namelen = addr->len; 2405 memcpy(msg->msg_name, addr->name, addr->len); 2406 } 2407 } 2408 2409 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, 2410 int flags) 2411 { 2412 struct scm_cookie scm; 2413 struct socket *sock = sk->sk_socket; 2414 struct unix_sock *u = unix_sk(sk); 2415 struct sk_buff *skb, *last; 2416 long timeo; 2417 int skip; 2418 int err; 2419 2420 err = -EOPNOTSUPP; 2421 if (flags&MSG_OOB) 2422 goto out; 2423 2424 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2425 2426 do { 2427 mutex_lock(&u->iolock); 2428 2429 skip = sk_peek_offset(sk, flags); 2430 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags, 2431 &skip, &err, &last); 2432 if (skb) { 2433 if (!(flags & MSG_PEEK)) 2434 scm_stat_del(sk, skb); 2435 break; 2436 } 2437 2438 mutex_unlock(&u->iolock); 2439 2440 if (err != -EAGAIN) 2441 break; 2442 } while (timeo && 2443 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue, 2444 &err, &timeo, last)); 2445 2446 if (!skb) { /* implies iolock unlocked */ 2447 unix_state_lock(sk); 2448 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ 2449 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && 2450 (sk->sk_shutdown & RCV_SHUTDOWN)) 2451 err = 0; 2452 unix_state_unlock(sk); 2453 goto out; 2454 } 2455 2456 if (wq_has_sleeper(&u->peer_wait)) 2457 wake_up_interruptible_sync_poll(&u->peer_wait, 2458 EPOLLOUT | EPOLLWRNORM | 2459 EPOLLWRBAND); 2460 2461 if (msg->msg_name) { 2462 unix_copy_addr(msg, skb->sk); 2463 2464 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, 2465 msg->msg_name, 2466 &msg->msg_namelen); 2467 } 2468 2469 if (size > skb->len - skip) 2470 size = skb->len - skip; 2471 else if (size < skb->len - skip) 2472 msg->msg_flags |= MSG_TRUNC; 2473 2474 err = skb_copy_datagram_msg(skb, skip, msg, size); 2475 if (err) 2476 goto out_free; 2477 2478 if (sock_flag(sk, SOCK_RCVTSTAMP)) 2479 __sock_recv_timestamp(msg, sk, skb); 2480 2481 memset(&scm, 0, sizeof(scm)); 2482 2483 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); 2484 unix_set_secdata(&scm, skb); 2485 2486 if (!(flags & MSG_PEEK)) { 2487 if (UNIXCB(skb).fp) 2488 unix_detach_fds(&scm, skb); 2489 2490 sk_peek_offset_bwd(sk, skb->len); 2491 } else { 2492 /* It is questionable: on PEEK we could: 2493 - do not return fds - good, but too simple 8) 2494 - return fds, and do not return them on read (old strategy, 2495 apparently wrong) 2496 - clone fds (I chose it for now, it is the most universal 2497 solution) 2498 2499 POSIX 1003.1g does not actually define this clearly 2500 at all. POSIX 1003.1g doesn't define a lot of things 2501 clearly however! 
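		   A practical consequence of the "clone" choice: every
		   MSG_PEEK of a message carrying SCM_RIGHTS installs a
		   fresh set of duplicate descriptors in the receiving
		   process, which the application must close itself, in
		   addition to the set delivered by the final non-PEEK
		   read.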
2502 2503 */ 2504 2505 sk_peek_offset_fwd(sk, size); 2506 2507 if (UNIXCB(skb).fp) 2508 unix_peek_fds(&scm, skb); 2509 } 2510 err = (flags & MSG_TRUNC) ? skb->len - skip : size; 2511 2512 scm_recv_unix(sock, msg, &scm, flags); 2513 2514 out_free: 2515 skb_free_datagram(sk, skb); 2516 mutex_unlock(&u->iolock); 2517 out: 2518 return err; 2519 } 2520 2521 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 2522 int flags) 2523 { 2524 struct sock *sk = sock->sk; 2525 2526 #ifdef CONFIG_BPF_SYSCALL 2527 const struct proto *prot = READ_ONCE(sk->sk_prot); 2528 2529 if (prot != &unix_dgram_proto) 2530 return prot->recvmsg(sk, msg, size, flags, NULL); 2531 #endif 2532 return __unix_dgram_recvmsg(sk, msg, size, flags); 2533 } 2534 2535 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 2536 { 2537 struct unix_sock *u = unix_sk(sk); 2538 struct sk_buff *skb; 2539 int err; 2540 2541 mutex_lock(&u->iolock); 2542 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); 2543 mutex_unlock(&u->iolock); 2544 if (!skb) 2545 return err; 2546 2547 return recv_actor(sk, skb); 2548 } 2549 2550 /* 2551 * Sleep until more data has arrived. But check for races.. 2552 */ 2553 static long unix_stream_data_wait(struct sock *sk, long timeo, 2554 struct sk_buff *last, unsigned int last_len, 2555 bool freezable) 2556 { 2557 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE; 2558 struct sk_buff *tail; 2559 DEFINE_WAIT(wait); 2560 2561 unix_state_lock(sk); 2562 2563 for (;;) { 2564 prepare_to_wait(sk_sleep(sk), &wait, state); 2565 2566 tail = skb_peek_tail(&sk->sk_receive_queue); 2567 if (tail != last || 2568 (tail && tail->len != last_len) || 2569 sk->sk_err || 2570 (sk->sk_shutdown & RCV_SHUTDOWN) || 2571 signal_pending(current) || 2572 !timeo) 2573 break; 2574 2575 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2576 unix_state_unlock(sk); 2577 timeo = schedule_timeout(timeo); 2578 unix_state_lock(sk); 2579 2580 if (sock_flag(sk, SOCK_DEAD)) 2581 break; 2582 2583 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2584 } 2585 2586 finish_wait(sk_sleep(sk), &wait); 2587 unix_state_unlock(sk); 2588 return timeo; 2589 } 2590 2591 static unsigned int unix_skb_len(const struct sk_buff *skb) 2592 { 2593 return skb->len - UNIXCB(skb).consumed; 2594 } 2595 2596 struct unix_stream_read_state { 2597 int (*recv_actor)(struct sk_buff *, int, int, 2598 struct unix_stream_read_state *); 2599 struct socket *socket; 2600 struct msghdr *msg; 2601 struct pipe_inode_info *pipe; 2602 size_t size; 2603 int flags; 2604 unsigned int splice_flags; 2605 }; 2606 2607 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2608 static int unix_stream_recv_urg(struct unix_stream_read_state *state) 2609 { 2610 struct socket *sock = state->socket; 2611 struct sock *sk = sock->sk; 2612 struct unix_sock *u = unix_sk(sk); 2613 int chunk = 1; 2614 struct sk_buff *oob_skb; 2615 2616 mutex_lock(&u->iolock); 2617 unix_state_lock(sk); 2618 2619 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) { 2620 unix_state_unlock(sk); 2621 mutex_unlock(&u->iolock); 2622 return -EINVAL; 2623 } 2624 2625 oob_skb = u->oob_skb; 2626 2627 if (!(state->flags & MSG_PEEK)) 2628 WRITE_ONCE(u->oob_skb, NULL); 2629 else 2630 skb_get(oob_skb); 2631 unix_state_unlock(sk); 2632 2633 chunk = state->recv_actor(oob_skb, 0, chunk, state); 2634 2635 if (!(state->flags & MSG_PEEK)) 2636 UNIXCB(oob_skb).consumed += 1; 2637 2638 consume_skb(oob_skb); 2639 2640 mutex_unlock(&u->iolock); 2641 2642 if (chunk < 0) 2643 return -EFAULT; 2644 2645 state->msg->msg_flags |= 
MSG_OOB; 2646 return 1; 2647 } 2648 2649 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, 2650 int flags, int copied) 2651 { 2652 struct unix_sock *u = unix_sk(sk); 2653 2654 if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) { 2655 skb_unlink(skb, &sk->sk_receive_queue); 2656 consume_skb(skb); 2657 skb = NULL; 2658 } else { 2659 if (skb == u->oob_skb) { 2660 if (copied) { 2661 skb = NULL; 2662 } else if (sock_flag(sk, SOCK_URGINLINE)) { 2663 if (!(flags & MSG_PEEK)) { 2664 WRITE_ONCE(u->oob_skb, NULL); 2665 consume_skb(skb); 2666 } 2667 } else if (!(flags & MSG_PEEK)) { 2668 skb_unlink(skb, &sk->sk_receive_queue); 2669 consume_skb(skb); 2670 skb = skb_peek(&sk->sk_receive_queue); 2671 } 2672 } 2673 } 2674 return skb; 2675 } 2676 #endif 2677 2678 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 2679 { 2680 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) 2681 return -ENOTCONN; 2682 2683 return unix_read_skb(sk, recv_actor); 2684 } 2685 2686 static int unix_stream_read_generic(struct unix_stream_read_state *state, 2687 bool freezable) 2688 { 2689 struct scm_cookie scm; 2690 struct socket *sock = state->socket; 2691 struct sock *sk = sock->sk; 2692 struct unix_sock *u = unix_sk(sk); 2693 int copied = 0; 2694 int flags = state->flags; 2695 int noblock = flags & MSG_DONTWAIT; 2696 bool check_creds = false; 2697 int target; 2698 int err = 0; 2699 long timeo; 2700 int skip; 2701 size_t size = state->size; 2702 unsigned int last_len; 2703 2704 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) { 2705 err = -EINVAL; 2706 goto out; 2707 } 2708 2709 if (unlikely(flags & MSG_OOB)) { 2710 err = -EOPNOTSUPP; 2711 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2712 err = unix_stream_recv_urg(state); 2713 #endif 2714 goto out; 2715 } 2716 2717 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); 2718 timeo = sock_rcvtimeo(sk, noblock); 2719 2720 memset(&scm, 0, sizeof(scm)); 2721 2722 /* Lock the socket to prevent queue disordering 2723 * while sleeps in memcpy_tomsg 2724 */ 2725 mutex_lock(&u->iolock); 2726 2727 skip = max(sk_peek_offset(sk, flags), 0); 2728 2729 do { 2730 int chunk; 2731 bool drop_skb; 2732 struct sk_buff *skb, *last; 2733 2734 redo: 2735 unix_state_lock(sk); 2736 if (sock_flag(sk, SOCK_DEAD)) { 2737 err = -ECONNRESET; 2738 goto unlock; 2739 } 2740 last = skb = skb_peek(&sk->sk_receive_queue); 2741 last_len = last ? last->len : 0; 2742 2743 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 2744 if (skb) { 2745 skb = manage_oob(skb, sk, flags, copied); 2746 if (!skb) { 2747 unix_state_unlock(sk); 2748 if (copied) 2749 break; 2750 goto redo; 2751 } 2752 } 2753 #endif 2754 again: 2755 if (skb == NULL) { 2756 if (copied >= target) 2757 goto unlock; 2758 2759 /* 2760 * POSIX 1003.1g mandates this order. 
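			 * (i.e. a pending socket error must be reported to
			 * the reader before the EOF that results from a
			 * receive-side shutdown.)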
2761 */ 2762 2763 err = sock_error(sk); 2764 if (err) 2765 goto unlock; 2766 if (sk->sk_shutdown & RCV_SHUTDOWN) 2767 goto unlock; 2768 2769 unix_state_unlock(sk); 2770 if (!timeo) { 2771 err = -EAGAIN; 2772 break; 2773 } 2774 2775 mutex_unlock(&u->iolock); 2776 2777 timeo = unix_stream_data_wait(sk, timeo, last, 2778 last_len, freezable); 2779 2780 if (signal_pending(current)) { 2781 err = sock_intr_errno(timeo); 2782 scm_destroy(&scm); 2783 goto out; 2784 } 2785 2786 mutex_lock(&u->iolock); 2787 goto redo; 2788 unlock: 2789 unix_state_unlock(sk); 2790 break; 2791 } 2792 2793 while (skip >= unix_skb_len(skb)) { 2794 skip -= unix_skb_len(skb); 2795 last = skb; 2796 last_len = skb->len; 2797 skb = skb_peek_next(skb, &sk->sk_receive_queue); 2798 if (!skb) 2799 goto again; 2800 } 2801 2802 unix_state_unlock(sk); 2803 2804 if (check_creds) { 2805 /* Never glue messages from different writers */ 2806 if (!unix_skb_scm_eq(skb, &scm)) 2807 break; 2808 } else if (test_bit(SOCK_PASSCRED, &sock->flags) || 2809 test_bit(SOCK_PASSPIDFD, &sock->flags)) { 2810 /* Copy credentials */ 2811 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); 2812 unix_set_secdata(&scm, skb); 2813 check_creds = true; 2814 } 2815 2816 /* Copy address just once */ 2817 if (state->msg && state->msg->msg_name) { 2818 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, 2819 state->msg->msg_name); 2820 unix_copy_addr(state->msg, skb->sk); 2821 2822 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, 2823 state->msg->msg_name, 2824 &state->msg->msg_namelen); 2825 2826 sunaddr = NULL; 2827 } 2828 2829 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); 2830 skb_get(skb); 2831 chunk = state->recv_actor(skb, skip, chunk, state); 2832 drop_skb = !unix_skb_len(skb); 2833 /* skb is only safe to use if !drop_skb */ 2834 consume_skb(skb); 2835 if (chunk < 0) { 2836 if (copied == 0) 2837 copied = -EFAULT; 2838 break; 2839 } 2840 copied += chunk; 2841 size -= chunk; 2842 2843 if (drop_skb) { 2844 /* the skb was touched by a concurrent reader; 2845 * we should not expect anything from this skb 2846 * anymore and assume it invalid - we can be 2847 * sure it was dropped from the socket queue 2848 * 2849 * let's report a short read 2850 */ 2851 err = 0; 2852 break; 2853 } 2854 2855 /* Mark read part of skb as used */ 2856 if (!(flags & MSG_PEEK)) { 2857 UNIXCB(skb).consumed += chunk; 2858 2859 sk_peek_offset_bwd(sk, chunk); 2860 2861 if (UNIXCB(skb).fp) { 2862 scm_stat_del(sk, skb); 2863 unix_detach_fds(&scm, skb); 2864 } 2865 2866 if (unix_skb_len(skb)) 2867 break; 2868 2869 skb_unlink(skb, &sk->sk_receive_queue); 2870 consume_skb(skb); 2871 2872 if (scm.fp) 2873 break; 2874 } else { 2875 /* It is questionable, see note in unix_dgram_recvmsg. 2876 */ 2877 if (UNIXCB(skb).fp) 2878 unix_peek_fds(&scm, skb); 2879 2880 sk_peek_offset_fwd(sk, chunk); 2881 2882 if (UNIXCB(skb).fp) 2883 break; 2884 2885 skip = 0; 2886 last = skb; 2887 last_len = skb->len; 2888 unix_state_lock(sk); 2889 skb = skb_peek_next(skb, &sk->sk_receive_queue); 2890 if (skb) 2891 goto again; 2892 unix_state_unlock(sk); 2893 break; 2894 } 2895 } while (size); 2896 2897 mutex_unlock(&u->iolock); 2898 if (state->msg) 2899 scm_recv_unix(sock, state->msg, &scm, flags); 2900 else 2901 scm_destroy(&scm); 2902 out: 2903 return copied ? 
: err; 2904 } 2905 2906 static int unix_stream_read_actor(struct sk_buff *skb, 2907 int skip, int chunk, 2908 struct unix_stream_read_state *state) 2909 { 2910 int ret; 2911 2912 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip, 2913 state->msg, chunk); 2914 return ret ?: chunk; 2915 } 2916 2917 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, 2918 size_t size, int flags) 2919 { 2920 struct unix_stream_read_state state = { 2921 .recv_actor = unix_stream_read_actor, 2922 .socket = sk->sk_socket, 2923 .msg = msg, 2924 .size = size, 2925 .flags = flags 2926 }; 2927 2928 return unix_stream_read_generic(&state, true); 2929 } 2930 2931 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, 2932 size_t size, int flags) 2933 { 2934 struct unix_stream_read_state state = { 2935 .recv_actor = unix_stream_read_actor, 2936 .socket = sock, 2937 .msg = msg, 2938 .size = size, 2939 .flags = flags 2940 }; 2941 2942 #ifdef CONFIG_BPF_SYSCALL 2943 struct sock *sk = sock->sk; 2944 const struct proto *prot = READ_ONCE(sk->sk_prot); 2945 2946 if (prot != &unix_stream_proto) 2947 return prot->recvmsg(sk, msg, size, flags, NULL); 2948 #endif 2949 return unix_stream_read_generic(&state, true); 2950 } 2951 2952 static int unix_stream_splice_actor(struct sk_buff *skb, 2953 int skip, int chunk, 2954 struct unix_stream_read_state *state) 2955 { 2956 return skb_splice_bits(skb, state->socket->sk, 2957 UNIXCB(skb).consumed + skip, 2958 state->pipe, chunk, state->splice_flags); 2959 } 2960 2961 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, 2962 struct pipe_inode_info *pipe, 2963 size_t size, unsigned int flags) 2964 { 2965 struct unix_stream_read_state state = { 2966 .recv_actor = unix_stream_splice_actor, 2967 .socket = sock, 2968 .pipe = pipe, 2969 .size = size, 2970 .splice_flags = flags, 2971 }; 2972 2973 if (unlikely(*ppos)) 2974 return -ESPIPE; 2975 2976 if (sock->file->f_flags & O_NONBLOCK || 2977 flags & SPLICE_F_NONBLOCK) 2978 state.flags = MSG_DONTWAIT; 2979 2980 return unix_stream_read_generic(&state, false); 2981 } 2982 2983 static int unix_shutdown(struct socket *sock, int mode) 2984 { 2985 struct sock *sk = sock->sk; 2986 struct sock *other; 2987 2988 if (mode < SHUT_RD || mode > SHUT_RDWR) 2989 return -EINVAL; 2990 /* This maps: 2991 * SHUT_RD (0) -> RCV_SHUTDOWN (1) 2992 * SHUT_WR (1) -> SEND_SHUTDOWN (2) 2993 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3) 2994 */ 2995 ++mode; 2996 2997 unix_state_lock(sk); 2998 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode); 2999 other = unix_peer(sk); 3000 if (other) 3001 sock_hold(other); 3002 unix_state_unlock(sk); 3003 sk->sk_state_change(sk); 3004 3005 if (other && 3006 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { 3007 3008 int peer_mode = 0; 3009 const struct proto *prot = READ_ONCE(other->sk_prot); 3010 3011 if (prot->unhash) 3012 prot->unhash(other); 3013 if (mode&RCV_SHUTDOWN) 3014 peer_mode |= SEND_SHUTDOWN; 3015 if (mode&SEND_SHUTDOWN) 3016 peer_mode |= RCV_SHUTDOWN; 3017 unix_state_lock(other); 3018 WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode); 3019 unix_state_unlock(other); 3020 other->sk_state_change(other); 3021 if (peer_mode == SHUTDOWN_MASK) 3022 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); 3023 else if (peer_mode & RCV_SHUTDOWN) 3024 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); 3025 } 3026 if (other) 3027 sock_put(other); 3028 3029 return 0; 3030 } 3031 3032 long unix_inq_len(struct sock *sk) 3033 { 3034 struct sk_buff *skb; 3035 long amount = 
0; 3036 3037 if (sk->sk_state == TCP_LISTEN) 3038 return -EINVAL; 3039 3040 spin_lock(&sk->sk_receive_queue.lock); 3041 if (sk->sk_type == SOCK_STREAM || 3042 sk->sk_type == SOCK_SEQPACKET) { 3043 skb_queue_walk(&sk->sk_receive_queue, skb) 3044 amount += unix_skb_len(skb); 3045 } else { 3046 skb = skb_peek(&sk->sk_receive_queue); 3047 if (skb) 3048 amount = skb->len; 3049 } 3050 spin_unlock(&sk->sk_receive_queue.lock); 3051 3052 return amount; 3053 } 3054 EXPORT_SYMBOL_GPL(unix_inq_len); 3055 3056 long unix_outq_len(struct sock *sk) 3057 { 3058 return sk_wmem_alloc_get(sk); 3059 } 3060 EXPORT_SYMBOL_GPL(unix_outq_len); 3061 3062 static int unix_open_file(struct sock *sk) 3063 { 3064 struct path path; 3065 struct file *f; 3066 int fd; 3067 3068 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 3069 return -EPERM; 3070 3071 if (!smp_load_acquire(&unix_sk(sk)->addr)) 3072 return -ENOENT; 3073 3074 path = unix_sk(sk)->path; 3075 if (!path.dentry) 3076 return -ENOENT; 3077 3078 path_get(&path); 3079 3080 fd = get_unused_fd_flags(O_CLOEXEC); 3081 if (fd < 0) 3082 goto out; 3083 3084 f = dentry_open(&path, O_PATH, current_cred()); 3085 if (IS_ERR(f)) { 3086 put_unused_fd(fd); 3087 fd = PTR_ERR(f); 3088 goto out; 3089 } 3090 3091 fd_install(fd, f); 3092 out: 3093 path_put(&path); 3094 3095 return fd; 3096 } 3097 3098 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3099 { 3100 struct sock *sk = sock->sk; 3101 long amount = 0; 3102 int err; 3103 3104 switch (cmd) { 3105 case SIOCOUTQ: 3106 amount = unix_outq_len(sk); 3107 err = put_user(amount, (int __user *)arg); 3108 break; 3109 case SIOCINQ: 3110 amount = unix_inq_len(sk); 3111 if (amount < 0) 3112 err = amount; 3113 else 3114 err = put_user(amount, (int __user *)arg); 3115 break; 3116 case SIOCUNIXFILE: 3117 err = unix_open_file(sk); 3118 break; 3119 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 3120 case SIOCATMARK: 3121 { 3122 struct sk_buff *skb; 3123 int answ = 0; 3124 3125 skb = skb_peek(&sk->sk_receive_queue); 3126 if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb)) 3127 answ = 1; 3128 err = put_user(answ, (int __user *)arg); 3129 } 3130 break; 3131 #endif 3132 default: 3133 err = -ENOIOCTLCMD; 3134 break; 3135 } 3136 return err; 3137 } 3138 3139 #ifdef CONFIG_COMPAT 3140 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3141 { 3142 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); 3143 } 3144 #endif 3145 3146 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) 3147 { 3148 struct sock *sk = sock->sk; 3149 __poll_t mask; 3150 u8 shutdown; 3151 3152 sock_poll_wait(file, sock, wait); 3153 mask = 0; 3154 shutdown = READ_ONCE(sk->sk_shutdown); 3155 3156 /* exceptional events? */ 3157 if (READ_ONCE(sk->sk_err)) 3158 mask |= EPOLLERR; 3159 if (shutdown == SHUTDOWN_MASK) 3160 mask |= EPOLLHUP; 3161 if (shutdown & RCV_SHUTDOWN) 3162 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 3163 3164 /* readable? 
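	 * (sk_is_readable() below lets the protocol report data that is
	 *  not on sk_receive_queue, e.g. when the socket has been added
	 *  to a BPF sockmap.)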
 */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (READ_ONCE(unix_sk(sk)->oob_skb))
		mask |= EPOLLPRI;
#endif

	/* Connection-based sockets need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/*
	 * We also set writable when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}

static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;
	shutdown = READ_ONCE(sk->sk_shutdown);

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests.
*/ 3228 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) 3229 return mask; 3230 3231 writable = unix_writable(sk); 3232 if (writable) { 3233 unix_state_lock(sk); 3234 3235 other = unix_peer(sk); 3236 if (other && unix_peer(other) != sk && 3237 unix_recvq_full_lockless(other) && 3238 unix_dgram_peer_wake_me(sk, other)) 3239 writable = 0; 3240 3241 unix_state_unlock(sk); 3242 } 3243 3244 if (writable) 3245 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 3246 else 3247 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 3248 3249 return mask; 3250 } 3251 3252 #ifdef CONFIG_PROC_FS 3253 3254 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1) 3255 3256 #define get_bucket(x) ((x) >> BUCKET_SPACE) 3257 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1)) 3258 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) 3259 3260 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos) 3261 { 3262 unsigned long offset = get_offset(*pos); 3263 unsigned long bucket = get_bucket(*pos); 3264 unsigned long count = 0; 3265 struct sock *sk; 3266 3267 for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]); 3268 sk; sk = sk_next(sk)) { 3269 if (++count == offset) 3270 break; 3271 } 3272 3273 return sk; 3274 } 3275 3276 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos) 3277 { 3278 unsigned long bucket = get_bucket(*pos); 3279 struct net *net = seq_file_net(seq); 3280 struct sock *sk; 3281 3282 while (bucket < UNIX_HASH_SIZE) { 3283 spin_lock(&net->unx.table.locks[bucket]); 3284 3285 sk = unix_from_bucket(seq, pos); 3286 if (sk) 3287 return sk; 3288 3289 spin_unlock(&net->unx.table.locks[bucket]); 3290 3291 *pos = set_bucket_offset(++bucket, 1); 3292 } 3293 3294 return NULL; 3295 } 3296 3297 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk, 3298 loff_t *pos) 3299 { 3300 unsigned long bucket = get_bucket(*pos); 3301 3302 sk = sk_next(sk); 3303 if (sk) 3304 return sk; 3305 3306 3307 spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]); 3308 3309 *pos = set_bucket_offset(++bucket, 1); 3310 3311 return unix_get_first(seq, pos); 3312 } 3313 3314 static void *unix_seq_start(struct seq_file *seq, loff_t *pos) 3315 { 3316 if (!*pos) 3317 return SEQ_START_TOKEN; 3318 3319 return unix_get_first(seq, pos); 3320 } 3321 3322 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3323 { 3324 ++*pos; 3325 3326 if (v == SEQ_START_TOKEN) 3327 return unix_get_first(seq, pos); 3328 3329 return unix_get_next(seq, v, pos); 3330 } 3331 3332 static void unix_seq_stop(struct seq_file *seq, void *v) 3333 { 3334 struct sock *sk = v; 3335 3336 if (sk) 3337 spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]); 3338 } 3339 3340 static int unix_seq_show(struct seq_file *seq, void *v) 3341 { 3342 3343 if (v == SEQ_START_TOKEN) 3344 seq_puts(seq, "Num RefCount Protocol Flags Type St " 3345 "Inode Path\n"); 3346 else { 3347 struct sock *s = v; 3348 struct unix_sock *u = unix_sk(s); 3349 unix_state_lock(s); 3350 3351 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", 3352 s, 3353 refcount_read(&s->sk_refcnt), 3354 0, 3355 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0, 3356 s->sk_type, 3357 s->sk_socket ? 3358 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) : 3359 (s->sk_state == TCP_ESTABLISHED ? 
SS_CONNECTING : SS_DISCONNECTING), 3360 sock_i_ino(s)); 3361 3362 if (u->addr) { // under a hash table lock here 3363 int i, len; 3364 seq_putc(seq, ' '); 3365 3366 i = 0; 3367 len = u->addr->len - 3368 offsetof(struct sockaddr_un, sun_path); 3369 if (u->addr->name->sun_path[0]) { 3370 len--; 3371 } else { 3372 seq_putc(seq, '@'); 3373 i++; 3374 } 3375 for ( ; i < len; i++) 3376 seq_putc(seq, u->addr->name->sun_path[i] ?: 3377 '@'); 3378 } 3379 unix_state_unlock(s); 3380 seq_putc(seq, '\n'); 3381 } 3382 3383 return 0; 3384 } 3385 3386 static const struct seq_operations unix_seq_ops = { 3387 .start = unix_seq_start, 3388 .next = unix_seq_next, 3389 .stop = unix_seq_stop, 3390 .show = unix_seq_show, 3391 }; 3392 3393 #ifdef CONFIG_BPF_SYSCALL 3394 struct bpf_unix_iter_state { 3395 struct seq_net_private p; 3396 unsigned int cur_sk; 3397 unsigned int end_sk; 3398 unsigned int max_sk; 3399 struct sock **batch; 3400 bool st_bucket_done; 3401 }; 3402 3403 struct bpf_iter__unix { 3404 __bpf_md_ptr(struct bpf_iter_meta *, meta); 3405 __bpf_md_ptr(struct unix_sock *, unix_sk); 3406 uid_t uid __aligned(8); 3407 }; 3408 3409 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, 3410 struct unix_sock *unix_sk, uid_t uid) 3411 { 3412 struct bpf_iter__unix ctx; 3413 3414 meta->seq_num--; /* skip SEQ_START_TOKEN */ 3415 ctx.meta = meta; 3416 ctx.unix_sk = unix_sk; 3417 ctx.uid = uid; 3418 return bpf_iter_run_prog(prog, &ctx); 3419 } 3420 3421 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk) 3422 3423 { 3424 struct bpf_unix_iter_state *iter = seq->private; 3425 unsigned int expected = 1; 3426 struct sock *sk; 3427 3428 sock_hold(start_sk); 3429 iter->batch[iter->end_sk++] = start_sk; 3430 3431 for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) { 3432 if (iter->end_sk < iter->max_sk) { 3433 sock_hold(sk); 3434 iter->batch[iter->end_sk++] = sk; 3435 } 3436 3437 expected++; 3438 } 3439 3440 spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]); 3441 3442 return expected; 3443 } 3444 3445 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter) 3446 { 3447 while (iter->cur_sk < iter->end_sk) 3448 sock_put(iter->batch[iter->cur_sk++]); 3449 } 3450 3451 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter, 3452 unsigned int new_batch_sz) 3453 { 3454 struct sock **new_batch; 3455 3456 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz, 3457 GFP_USER | __GFP_NOWARN); 3458 if (!new_batch) 3459 return -ENOMEM; 3460 3461 bpf_iter_unix_put_batch(iter); 3462 kvfree(iter->batch); 3463 iter->batch = new_batch; 3464 iter->max_sk = new_batch_sz; 3465 3466 return 0; 3467 } 3468 3469 static struct sock *bpf_iter_unix_batch(struct seq_file *seq, 3470 loff_t *pos) 3471 { 3472 struct bpf_unix_iter_state *iter = seq->private; 3473 unsigned int expected; 3474 bool resized = false; 3475 struct sock *sk; 3476 3477 if (iter->st_bucket_done) 3478 *pos = set_bucket_offset(get_bucket(*pos) + 1, 1); 3479 3480 again: 3481 /* Get a new batch */ 3482 iter->cur_sk = 0; 3483 iter->end_sk = 0; 3484 3485 sk = unix_get_first(seq, pos); 3486 if (!sk) 3487 return NULL; /* Done */ 3488 3489 expected = bpf_iter_unix_hold_batch(seq, sk); 3490 3491 if (iter->end_sk == expected) { 3492 iter->st_bucket_done = true; 3493 return sk; 3494 } 3495 3496 if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) { 3497 resized = true; 3498 goto again; 3499 } 3500 3501 return sk; 3502 } 3503 3504 static void *bpf_iter_unix_seq_start(struct 
seq_file *seq, loff_t *pos) 3505 { 3506 if (!*pos) 3507 return SEQ_START_TOKEN; 3508 3509 /* bpf iter does not support lseek, so it always 3510 * continue from where it was stop()-ped. 3511 */ 3512 return bpf_iter_unix_batch(seq, pos); 3513 } 3514 3515 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3516 { 3517 struct bpf_unix_iter_state *iter = seq->private; 3518 struct sock *sk; 3519 3520 /* Whenever seq_next() is called, the iter->cur_sk is 3521 * done with seq_show(), so advance to the next sk in 3522 * the batch. 3523 */ 3524 if (iter->cur_sk < iter->end_sk) 3525 sock_put(iter->batch[iter->cur_sk++]); 3526 3527 ++*pos; 3528 3529 if (iter->cur_sk < iter->end_sk) 3530 sk = iter->batch[iter->cur_sk]; 3531 else 3532 sk = bpf_iter_unix_batch(seq, pos); 3533 3534 return sk; 3535 } 3536 3537 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v) 3538 { 3539 struct bpf_iter_meta meta; 3540 struct bpf_prog *prog; 3541 struct sock *sk = v; 3542 uid_t uid; 3543 bool slow; 3544 int ret; 3545 3546 if (v == SEQ_START_TOKEN) 3547 return 0; 3548 3549 slow = lock_sock_fast(sk); 3550 3551 if (unlikely(sk_unhashed(sk))) { 3552 ret = SEQ_SKIP; 3553 goto unlock; 3554 } 3555 3556 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); 3557 meta.seq = seq; 3558 prog = bpf_iter_get_info(&meta, false); 3559 ret = unix_prog_seq_show(prog, &meta, v, uid); 3560 unlock: 3561 unlock_sock_fast(sk, slow); 3562 return ret; 3563 } 3564 3565 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v) 3566 { 3567 struct bpf_unix_iter_state *iter = seq->private; 3568 struct bpf_iter_meta meta; 3569 struct bpf_prog *prog; 3570 3571 if (!v) { 3572 meta.seq = seq; 3573 prog = bpf_iter_get_info(&meta, true); 3574 if (prog) 3575 (void)unix_prog_seq_show(prog, &meta, v, 0); 3576 } 3577 3578 if (iter->cur_sk < iter->end_sk) 3579 bpf_iter_unix_put_batch(iter); 3580 } 3581 3582 static const struct seq_operations bpf_iter_unix_seq_ops = { 3583 .start = bpf_iter_unix_seq_start, 3584 .next = bpf_iter_unix_seq_next, 3585 .stop = bpf_iter_unix_seq_stop, 3586 .show = bpf_iter_unix_seq_show, 3587 }; 3588 #endif 3589 #endif 3590 3591 static const struct net_proto_family unix_family_ops = { 3592 .family = PF_UNIX, 3593 .create = unix_create, 3594 .owner = THIS_MODULE, 3595 }; 3596 3597 3598 static int __net_init unix_net_init(struct net *net) 3599 { 3600 int i; 3601 3602 net->unx.sysctl_max_dgram_qlen = 10; 3603 if (unix_sysctl_register(net)) 3604 goto out; 3605 3606 #ifdef CONFIG_PROC_FS 3607 if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops, 3608 sizeof(struct seq_net_private))) 3609 goto err_sysctl; 3610 #endif 3611 3612 net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE, 3613 sizeof(spinlock_t), GFP_KERNEL); 3614 if (!net->unx.table.locks) 3615 goto err_proc; 3616 3617 net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE, 3618 sizeof(struct hlist_head), 3619 GFP_KERNEL); 3620 if (!net->unx.table.buckets) 3621 goto free_locks; 3622 3623 for (i = 0; i < UNIX_HASH_SIZE; i++) { 3624 spin_lock_init(&net->unx.table.locks[i]); 3625 INIT_HLIST_HEAD(&net->unx.table.buckets[i]); 3626 } 3627 3628 return 0; 3629 3630 free_locks: 3631 kvfree(net->unx.table.locks); 3632 err_proc: 3633 #ifdef CONFIG_PROC_FS 3634 remove_proc_entry("unix", net->proc_net); 3635 err_sysctl: 3636 #endif 3637 unix_sysctl_unregister(net); 3638 out: 3639 return -ENOMEM; 3640 } 3641 3642 static void __net_exit unix_net_exit(struct net *net) 3643 { 3644 kvfree(net->unx.table.buckets); 3645 
kvfree(net->unx.table.locks); 3646 unix_sysctl_unregister(net); 3647 remove_proc_entry("unix", net->proc_net); 3648 } 3649 3650 static struct pernet_operations unix_net_ops = { 3651 .init = unix_net_init, 3652 .exit = unix_net_exit, 3653 }; 3654 3655 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 3656 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta, 3657 struct unix_sock *unix_sk, uid_t uid) 3658 3659 #define INIT_BATCH_SZ 16 3660 3661 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux) 3662 { 3663 struct bpf_unix_iter_state *iter = priv_data; 3664 int err; 3665 3666 err = bpf_iter_init_seq_net(priv_data, aux); 3667 if (err) 3668 return err; 3669 3670 err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ); 3671 if (err) { 3672 bpf_iter_fini_seq_net(priv_data); 3673 return err; 3674 } 3675 3676 return 0; 3677 } 3678 3679 static void bpf_iter_fini_unix(void *priv_data) 3680 { 3681 struct bpf_unix_iter_state *iter = priv_data; 3682 3683 bpf_iter_fini_seq_net(priv_data); 3684 kvfree(iter->batch); 3685 } 3686 3687 static const struct bpf_iter_seq_info unix_seq_info = { 3688 .seq_ops = &bpf_iter_unix_seq_ops, 3689 .init_seq_private = bpf_iter_init_unix, 3690 .fini_seq_private = bpf_iter_fini_unix, 3691 .seq_priv_size = sizeof(struct bpf_unix_iter_state), 3692 }; 3693 3694 static const struct bpf_func_proto * 3695 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id, 3696 const struct bpf_prog *prog) 3697 { 3698 switch (func_id) { 3699 case BPF_FUNC_setsockopt: 3700 return &bpf_sk_setsockopt_proto; 3701 case BPF_FUNC_getsockopt: 3702 return &bpf_sk_getsockopt_proto; 3703 default: 3704 return NULL; 3705 } 3706 } 3707 3708 static struct bpf_iter_reg unix_reg_info = { 3709 .target = "unix", 3710 .ctx_arg_info_size = 1, 3711 .ctx_arg_info = { 3712 { offsetof(struct bpf_iter__unix, unix_sk), 3713 PTR_TO_BTF_ID_OR_NULL }, 3714 }, 3715 .get_func_proto = bpf_iter_unix_get_func_proto, 3716 .seq_info = &unix_seq_info, 3717 }; 3718 3719 static void __init bpf_iter_register(void) 3720 { 3721 unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX]; 3722 if (bpf_iter_reg_target(&unix_reg_info)) 3723 pr_warn("Warning: could not register bpf iterator unix\n"); 3724 } 3725 #endif 3726 3727 static int __init af_unix_init(void) 3728 { 3729 int i, rc = -1; 3730 3731 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb)); 3732 3733 for (i = 0; i < UNIX_HASH_SIZE / 2; i++) { 3734 spin_lock_init(&bsd_socket_locks[i]); 3735 INIT_HLIST_HEAD(&bsd_socket_buckets[i]); 3736 } 3737 3738 rc = proto_register(&unix_dgram_proto, 1); 3739 if (rc != 0) { 3740 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); 3741 goto out; 3742 } 3743 3744 rc = proto_register(&unix_stream_proto, 1); 3745 if (rc != 0) { 3746 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); 3747 proto_unregister(&unix_dgram_proto); 3748 goto out; 3749 } 3750 3751 sock_register(&unix_family_ops); 3752 register_pernet_subsys(&unix_net_ops); 3753 unix_bpf_build_proto(); 3754 3755 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 3756 bpf_iter_register(); 3757 #endif 3758 3759 out: 3760 return rc; 3761 } 3762 3763 /* Later than subsys_initcall() because we depend on stuff initialised there */ 3764 fs_initcall(af_unix_init); 3765
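
/*
 * Illustration only (a userspace sketch, not part of this module;
 * assumes CONFIG_AF_UNIX_OOB): the MSG_OOB/SIOCATMARK paths above can
 * be exercised on a stream socketpair roughly like this.  The in-band
 * read stops short of the out-of-band byte, SIOCATMARK then reports 1,
 * and recv(MSG_OOB) returns that byte:
 *
 *	int sv[2], atmark = 0;
 *	char buf[2], c;
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	send(sv[0], "ab", 2, 0);
 *	send(sv[0], "c", 1, MSG_OOB);
 *
 *	recv(sv[1], buf, sizeof(buf), 0);
 *	ioctl(sv[1], SIOCATMARK, &atmark);
 *	recv(sv[1], &c, 1, MSG_OOB);
 */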