// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/plist.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>
#include <linux/prctl.h>
#include <linux/rcuref.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	unsigned long		hashmask;
	unsigned int		hashshift;
	struct futex_hash_bucket *queues[MAX_NUMNODES];
} __futex_data __read_mostly __aligned(2*sizeof(long));

#define futex_hashmask	(__futex_data.hashmask)
#define futex_hashshift	(__futex_data.hashshift)
#define futex_queues	(__futex_data.queues)

struct futex_private_hash {
	rcuref_t	users;
	unsigned int	hash_mask;
	struct rcu_head	rcu;
	void		*mm;
	bool		custom;
	bool		immutable;
	struct futex_hash_bucket queues[];
};

/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#endif /* CONFIG_FAIL_FUTEX */

static struct futex_hash_bucket *
__futex_hash(union futex_key *key, struct futex_private_hash *fph);

#ifdef CONFIG_FUTEX_PRIVATE_HASH
static inline bool futex_key_is_private(union futex_key *key)
{
	/*
	 * Relies on get_futex_key() to set either bit for shared
	 * futexes -- see comment with union futex_key.
	 */
	return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED));
}

bool futex_private_hash_get(struct futex_private_hash *fph)
{
	if (fph->immutable)
		return true;
	return rcuref_get(&fph->users);
}

void futex_private_hash_put(struct futex_private_hash *fph)
{
	/* Ignore return value, last put is verified via rcuref_is_dead() */
	if (fph->immutable)
		return;
	if (rcuref_put(&fph->users))
		wake_up_var(fph->mm);
}

/**
 * futex_hash_get - Get an additional reference for the local hash.
 * @hb:		ptr to the private local hash.
 *
 * Obtain an additional reference for the already obtained hash bucket. The
 * caller must already own a reference.
 */
void futex_hash_get(struct futex_hash_bucket *hb)
{
	struct futex_private_hash *fph = hb->priv;

	if (!fph)
		return;
	WARN_ON_ONCE(!futex_private_hash_get(fph));
}

void futex_hash_put(struct futex_hash_bucket *hb)
{
	struct futex_private_hash *fph = hb->priv;

	if (!fph)
		return;
	futex_private_hash_put(fph);
}

static struct futex_hash_bucket *
__futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
{
	u32 hash;

	if (!futex_key_is_private(key))
		return NULL;

	if (!fph)
		fph = rcu_dereference(key->private.mm->futex_phash);
	if (!fph || !fph->hash_mask)
		return NULL;

	hash = jhash2((void *)&key->private.address,
		      sizeof(key->private.address) / 4,
		      key->both.offset);
	return &fph->queues[hash & fph->hash_mask];
}

static void futex_rehash_private(struct futex_private_hash *old,
				 struct futex_private_hash *new)
{
	struct futex_hash_bucket *hb_old, *hb_new;
	unsigned int slots = old->hash_mask + 1;
	unsigned int i;

	for (i = 0; i < slots; i++) {
		struct futex_q *this, *tmp;

		hb_old = &old->queues[i];

		spin_lock(&hb_old->lock);
		plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) {

			plist_del(&this->list, &hb_old->chain);
			futex_hb_waiters_dec(hb_old);

			WARN_ON_ONCE(this->lock_ptr != &hb_old->lock);

			hb_new = __futex_hash(&this->key, new);
			futex_hb_waiters_inc(hb_new);
			/*
			 * The new pointer isn't published yet but an already
			 * moved user can be unqueued due to timeout or signal.
			 */
			spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING);
			plist_add(&this->list, &hb_new->chain);
			this->lock_ptr = &hb_new->lock;
			spin_unlock(&hb_new->lock);
		}
		spin_unlock(&hb_old->lock);
	}
}

static bool __futex_pivot_hash(struct mm_struct *mm,
			       struct futex_private_hash *new)
{
	struct futex_private_hash *fph;

	WARN_ON_ONCE(mm->futex_phash_new);

	fph = rcu_dereference_protected(mm->futex_phash,
					lockdep_is_held(&mm->futex_hash_lock));
	if (fph) {
		if (!rcuref_is_dead(&fph->users)) {
			mm->futex_phash_new = new;
			return false;
		}

		futex_rehash_private(fph, new);
	}
	rcu_assign_pointer(mm->futex_phash, new);
	kvfree_rcu(fph, rcu);
	return true;
}

static void futex_pivot_hash(struct mm_struct *mm)
{
	scoped_guard(mutex, &mm->futex_hash_lock) {
		struct futex_private_hash *fph;

		fph = mm->futex_phash_new;
		if (fph) {
			mm->futex_phash_new = NULL;
			__futex_pivot_hash(mm, fph);
		}
	}
}

struct futex_private_hash *futex_private_hash(void)
{
	struct mm_struct *mm = current->mm;
	/*
	 * Ideally we don't loop. If there is a replacement in progress
	 * then a new private hash is already prepared and a reference can't be
	 * obtained once the last user dropped its reference.
	 * In that case we block on mm_struct::futex_hash_lock and either have
	 * to perform the replacement or wait while someone else is doing the
	 * job. Either way, on the second iteration we acquire a reference on the
	 * new private hash or loop again because a new replacement has been
	 * requested.
	 */
again:
	scoped_guard(rcu) {
		struct futex_private_hash *fph;

		fph = rcu_dereference(mm->futex_phash);
		if (!fph)
			return NULL;

		if (fph->immutable)
			return fph;
		if (rcuref_get(&fph->users))
			return fph;
	}
	futex_pivot_hash(mm);
	goto again;
}

struct futex_hash_bucket *futex_hash(union futex_key *key)
{
	struct futex_private_hash *fph;
	struct futex_hash_bucket *hb;

again:
	scoped_guard(rcu) {
		hb = __futex_hash(key, NULL);
		fph = hb->priv;

		if (!fph || futex_private_hash_get(fph))
			return hb;
	}
	futex_pivot_hash(key->private.mm);
	goto again;
}

#else /* !CONFIG_FUTEX_PRIVATE_HASH */

static struct futex_hash_bucket *
__futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
{
	return NULL;
}

struct futex_hash_bucket *futex_hash(union futex_key *key)
{
	return __futex_hash(key, NULL);
}

#endif /* CONFIG_FUTEX_PRIVATE_HASH */

/**
 * __futex_hash - Return the hash bucket
 * @key:	Pointer to the futex key for which the hash is calculated
 * @fph:	Pointer to private hash if known
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket.
 * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the
 * private hash) is returned if existing. Otherwise a hash bucket from the
 * global hash is returned.
 */
static struct futex_hash_bucket *
__futex_hash(union futex_key *key, struct futex_private_hash *fph)
{
	struct futex_hash_bucket *hb;
	u32 hash;
	int node;

	hb = __futex_hash_private(key, fph);
	if (hb)
		return hb;

	hash = jhash2((u32 *)key,
		      offsetof(typeof(*key), both.offset) / sizeof(u32),
		      key->both.offset);
	node = key->both.node;

	if (node == FUTEX_NO_NODE) {
		/*
		 * In case of !FLAGS_NUMA, use some unused hash bits to pick a
		 * node -- this ensures regular futexes are interleaved across
		 * the nodes and avoids having to allocate multiple
		 * hash-tables.
		 *
		 * NOTE: this isn't perfectly uniform, but it is fast and
		 * handles sparse node masks.
		 */
		node = (hash >> futex_hashshift) % nr_node_ids;
		if (!node_possible(node)) {
			node = find_next_bit_wrap(node_possible_map.bits,
						  nr_node_ids, node);
		}
	}

	return &futex_queues[node][hash & futex_hashmask];
}

/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_setup_sleeper_on_stack(timeout,
				       (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC,
				       HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}

/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that false-negatives
 * are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_inc_return(&i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = 0;
		if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
			return old;
		return new;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @flags:	FLAGS_*
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when @fshared), the key is:
 *
 *   ( inode->i_sequence, page->index, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
 *
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
		  enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	struct folio *folio;
	struct address_space *mapping;
	int node, err, size, ro = 0;
	bool fshared;

	fshared = flags & FLAGS_SHARED;
	size = futex_size(flags);
	if (flags & FLAGS_NUMA)
		size *= 2;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % size) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, size)))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	if (flags & FLAGS_NUMA) {
		u32 __user *naddr = (void *)uaddr + size / 2;

		if (futex_get_value(&node, naddr))
			return -EFAULT;

		if (node == FUTEX_NO_NODE) {
			node = numa_node_id();
			if (futex_put_value(node, naddr))
				return -EFAULT;

		} else if (node >= MAX_NUMNODES || !node_possible(node)) {
			return -EINVAL;
		}

		key->both.node = node;

	} else {
		key->both.node = FUTEX_NO_NODE;
	}

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		/*
		 * On no-MMU, shared futexes are treated as private, therefore
		 * we must not include the current process in the key. Since
		 * there is only one address space, the address is a unique key
		 * on its own.
		 */
		if (IS_ENABLED(CONFIG_MMU))
			key->private.mm = mm;
		else
			key->private.mm = NULL;

		key->private.address = address;
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (e.g. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The folio
	 * lock protects many things but in this context the folio lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the folio lock is not needed in all cases being
	 * considered here and folio lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * folio lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the folio so it is looked up now. For
	 * anonymous pages, it does not matter if the folio is split
	 * in the future as the key is based on the address. For
	 * filesystem-backed pages, the precise page is required as the
	 * index of the page determines the key.
	 */
	folio = page_folio(page);
	mapping = READ_ONCE(folio->mapping);

	/*
	 * If folio->mapping is NULL, then it cannot be an anonymous
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the folio lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_inode_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for folio->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Folio lock is required to identify which special case above
		 * applies. If this is really a shmem page then the folio lock
		 * will prevent unexpected transitions.
		 */
		folio_lock(folio);
		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
		folio_unlock(folio);
		folio_put(folio);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored in anonymous memory, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (folio_test_anon(folio)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(true)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the folio->mapping must be traversed. Ordinarily this should
		 * be stabilised under folio lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update i_pages or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(folio->mapping) != mapping) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = page_pgoff(folio, page);
		rcu_read_unlock();
	}

out:
	folio_put(folio);
	return err;
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(mm);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (futex_match(&this->key, key))
			return this;
	}
	return NULL;
}

/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret:	owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}

/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);
}

/* The key must be already stored in q->key. */
void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb)
	__acquires(&hb->lock)
{
	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all futex_q_lock()
	 * users end up calling futex_queue(). Similarly, for housekeeping,
	 * decrement the counter at futex_q_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
}

void futex_q_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	futex_hb_waiters_dec(hb);
	spin_unlock(&hb->lock);
}

void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
		   struct task_struct *task)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 *   (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = task;
}

/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
 * be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we removed/unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* RCU so lock_ptr is not going away during locking. */
	guard(rcu)();
	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__futex_unqueue(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}

void futex_q_lockptr_lock(struct futex_q *q)
{
	spinlock_t *lock_ptr;

	/*
	 * See futex_unqueue() why lock_ptr can change.
	 */
	guard(rcu)();
retry:
	lock_ptr = READ_ONCE(q->lock_ptr);
	spin_lock(lock_ptr);

	if (unlikely(lock_ptr != q->lock_ptr)) {
		spin_unlock(lock_ptr);
		goto retry;
	}
}

/*
 * PI futexes can not be requeued and must remove themselves from the hash
 * bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
	/*
	 * If the lock was not acquired (due to timeout or signal) then the
	 * rt_waiter is removed before futex_q is. If this is observed by
	 * an unlocker after dropping the rtmutex wait lock and before
	 * acquiring the hash bucket lock, then the unlocker dequeues the
	 * futex_q from the hash bucket list to guarantee consistent state
	 * vs. userspace. Therefore the dequeue here must be conditional.
	 */
	if (!plist_node_empty(&q->list))
		__futex_unqueue(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, nval, mval;
	pid_t owner;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In the second case, the wake up notification could be generated
	 * by the unlock path in user space after setting the futex value
	 * to zero or by the kernel after setting the OWNER_DIED bit below.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) The owner part of user space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. If the futex value is zero,
	 * the rest of the user space mutex state is consistent, so a woken
	 * waiter will just take over the uncontended futex. Setting the
	 * OWNER_DIED bit would create inconsistent state and malfunction
	 * of the user space owner died handling. Otherwise, the OWNER_DIED
	 * bit is already set, and the woken waiter is expected to deal with
	 * this.
	 */
	owner = uval & FUTEX_TID_MASK;

	if (pending_op && !pi && !owner) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	if (owner != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS)) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
	}

	return 0;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	unsigned long futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}

#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
			  compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
					       (compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}
#endif

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	union futex_key key = FUTEX_KEY_INIT;

	/*
	 * The mutex mm_struct::futex_hash_lock might be acquired.
	 */
	might_sleep();
	/*
	 * Ensure the hash remains stable (no resize) during the while loop
	 * below. The hb pointer is acquired under the pi_lock so we can't block
	 * on the mutex.
	 */
	WARN_ON(curr != current);
	guard(private_hash)();
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		if (1) {
			CLASS(hb, hb)(&key);

			/*
			 * We can race against put_pi_state() removing itself from the
			 * list (a waiter going away). put_pi_state() will first
			 * decrement the reference count and then modify the list, so
			 * it's possible to see the list entry but fail this reference
			 * acquire.
			 *
			 * In that case; drop the locks to let put_pi_state() make
			 * progress and retry the loop.
			 */
			if (!refcount_inc_not_zero(&pi_state->refcount)) {
				raw_spin_unlock_irq(&curr->pi_lock);
				cpu_relax();
				raw_spin_lock_irq(&curr->pi_lock);
				continue;
			}
			raw_spin_unlock_irq(&curr->pi_lock);

			spin_lock(&hb->lock);
			raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
			raw_spin_lock(&curr->pi_lock);
			/*
			 * We dropped the pi-lock, so re-check whether this
			 * task still owns the PI-state:
			 */
			if (head->next != next) {
				/* retain curr->pi_lock for the loop invariant */
				raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
				spin_unlock(&hb->lock);
				put_pi_state(pi_state);
				continue;
			}

			WARN_ON(pi_state->owner != curr);
			WARN_ON(list_empty(&pi_state->list));
			list_del_init(&pi_state->list);
			pi_state->owner = NULL;

			raw_spin_unlock(&curr->pi_lock);
			raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
		}

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in make_task_dead().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It guarantees also that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
	 * exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
				   struct futex_private_hash *fph)
{
#ifdef CONFIG_FUTEX_PRIVATE_HASH
	fhb->priv = fph;
#endif
	atomic_set(&fhb->waiters, 0);
	plist_head_init(&fhb->chain);
	spin_lock_init(&fhb->lock);
}

#define FH_CUSTOM	0x01
#define FH_IMMUTABLE	0x02

#ifdef CONFIG_FUTEX_PRIVATE_HASH
void futex_hash_free(struct mm_struct *mm)
{
	struct futex_private_hash *fph;

	kvfree(mm->futex_phash_new);
	fph = rcu_dereference_raw(mm->futex_phash);
	if (fph) {
		WARN_ON_ONCE(rcuref_read(&fph->users) > 1);
		kvfree(fph);
	}
}

static bool futex_pivot_pending(struct mm_struct *mm)
{
	struct futex_private_hash *fph;

	guard(rcu)();

	if (!mm->futex_phash_new)
		return true;

	fph = rcu_dereference(mm->futex_phash);
	return rcuref_is_dead(&fph->users);
}

static bool futex_hash_less(struct futex_private_hash *a,
			    struct futex_private_hash *b)
{
	/* user provided always wins */
	if (!a->custom && b->custom)
		return true;
	if (a->custom && !b->custom)
		return false;

	/* zero-sized hash wins */
	if (!b->hash_mask)
		return true;
	if (!a->hash_mask)
		return false;

	/* keep the biggest */
	if (a->hash_mask < b->hash_mask)
		return true;
	if (a->hash_mask > b->hash_mask)
		return false;

	return false; /* equal */
}

static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
{
	struct mm_struct *mm = current->mm;
	struct futex_private_hash *fph;
	bool custom = flags & FH_CUSTOM;
	int i;

	if (hash_slots && (hash_slots == 1 ||
			   !is_power_of_2(hash_slots)))
		return -EINVAL;

	/*
	 * Once we've disabled the global hash there is no way back.
	 */
	scoped_guard(rcu) {
		fph = rcu_dereference(mm->futex_phash);
		if (fph && (!fph->hash_mask || fph->immutable)) {
			if (custom)
				return -EBUSY;
			return 0;
		}
	}

	fph = kvzalloc(struct_size(fph, queues, hash_slots), GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (!fph)
		return -ENOMEM;

	rcuref_init(&fph->users, 1);
	fph->hash_mask = hash_slots ? hash_slots - 1 : 0;
	fph->custom = custom;
	fph->immutable = !!(flags & FH_IMMUTABLE);
	fph->mm = mm;

	for (i = 0; i < hash_slots; i++)
		futex_hash_bucket_init(&fph->queues[i], fph);

	if (custom) {
		/*
		 * Only let prctl() wait / retry; don't unduly delay clone().
		 */
again:
		wait_var_event(mm, futex_pivot_pending(mm));
	}

	scoped_guard(mutex, &mm->futex_hash_lock) {
		struct futex_private_hash *free __free(kvfree) = NULL;
		struct futex_private_hash *cur, *new;

		cur = rcu_dereference_protected(mm->futex_phash,
						lockdep_is_held(&mm->futex_hash_lock));
		new = mm->futex_phash_new;
		mm->futex_phash_new = NULL;

		if (fph) {
			if (cur && !new) {
				/*
				 * If we have an existing hash, but have not yet
				 * allocated a replacement hash, drop the initial
				 * reference on the existing hash.
				 */
				futex_private_hash_put(cur);
			}

			if (new) {
				/*
				 * Two updates raced; throw out the lesser one.
				 */
				if (futex_hash_less(new, fph)) {
					free = new;
					new = fph;
				} else {
					free = fph;
				}
			} else {
				new = fph;
			}
			fph = NULL;
		}

		if (new) {
			/*
			 * Will set mm->futex_phash_new on failure;
			 * futex_private_hash_get() will try again.
			 */
			if (!__futex_pivot_hash(mm, new) && custom)
				goto again;
		}
	}
	return 0;
}

int futex_hash_allocate_default(void)
{
	unsigned int threads, buckets, current_buckets = 0;
	struct futex_private_hash *fph;

	if (!current->mm)
		return 0;

	scoped_guard(rcu) {
		threads = min_t(unsigned int,
				get_nr_threads(current),
				num_online_cpus());

		fph = rcu_dereference(current->mm->futex_phash);
		if (fph) {
			if (fph->custom)
				return 0;

			current_buckets = fph->hash_mask + 1;
		}
	}

	/*
	 * The default allocation will remain within
	 *   16 <= threads * 4 <= global hash size
	 */
	buckets = roundup_pow_of_two(4 * threads);
	buckets = clamp(buckets, 16, futex_hashmask + 1);

	if (current_buckets >= buckets)
		return 0;

	return futex_hash_allocate(buckets, 0);
}

static int futex_hash_get_slots(void)
{
	struct futex_private_hash *fph;

	guard(rcu)();
	fph = rcu_dereference(current->mm->futex_phash);
	if (fph && fph->hash_mask)
		return fph->hash_mask + 1;
	return 0;
}

static int futex_hash_get_immutable(void)
{
	struct futex_private_hash *fph;

	guard(rcu)();
	fph = rcu_dereference(current->mm->futex_phash);
	if (fph && fph->immutable)
		return 1;
	if (fph && !fph->hash_mask)
		return 1;
	return 0;
}

#else

static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
{
	return -EINVAL;
}

static int futex_hash_get_slots(void)
{
	return 0;
}

static int futex_hash_get_immutable(void)
{
	return 0;
}
#endif

int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	unsigned int flags = FH_CUSTOM;
	int ret;

	switch (arg2) {
	case PR_FUTEX_HASH_SET_SLOTS:
		if (arg4 & ~FH_FLAG_IMMUTABLE)
			return -EINVAL;
		if (arg4 & FH_FLAG_IMMUTABLE)
			flags |= FH_IMMUTABLE;
		ret = futex_hash_allocate(arg3, flags);
		break;

	case PR_FUTEX_HASH_GET_SLOTS:
		ret = futex_hash_get_slots();
		break;

	case PR_FUTEX_HASH_GET_IMMUTABLE:
		ret = futex_hash_get_immutable();
		break;

	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int __init futex_init(void)
{
	unsigned long hashsize, i;
	unsigned int order, n;
	unsigned long size;

#ifdef CONFIG_BASE_SMALL
	hashsize = 16;
#else
	hashsize = 256 * num_possible_cpus();
	hashsize /= num_possible_nodes();
	hashsize = max(4, hashsize);
	hashsize = roundup_pow_of_two(hashsize);
#endif
	futex_hashshift = ilog2(hashsize);
	size = sizeof(struct futex_hash_bucket) * hashsize;
	order = get_order(size);

	for_each_node(n) {
		struct futex_hash_bucket *table;

		if (order > MAX_PAGE_ORDER)
			table = vmalloc_huge_node(size, GFP_KERNEL, n);
		else
			table = alloc_pages_exact_nid(n, size, GFP_KERNEL);

		BUG_ON(!table);

		for (i = 0; i < hashsize; i++)
			futex_hash_bucket_init(&table[i], NULL);

		futex_queues[n] = table;
	}

	futex_hashmask = hashsize - 1;
	pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n",
		hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024,
		order > MAX_PAGE_ORDER ? "vmalloc" : "linear");
	return 0;
}
core_initcall(futex_init);
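
/*
 * Illustrative only, not part of the kernel build: the per-mm private hash
 * above is sized from userspace via the prctl() path that ends up in
 * futex_hash_prctl(). A minimal userspace sketch, assuming a kernel with
 * CONFIG_FUTEX_PRIVATE_HASH and the PR_FUTEX_HASH option exposed through
 * <linux/prctl.h> (the PR_FUTEX_HASH_* sub-commands and FH_FLAG_IMMUTABLE
 * names mirror the switch in futex_hash_prctl() above):
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		// Request 16 private hash buckets for this process. The slot
 *		// count must be 0 or a power of two larger than 1, otherwise
 *		// futex_hash_allocate() returns -EINVAL. Passing 0 for the
 *		// flags argument keeps the hash resizable (no FH_FLAG_IMMUTABLE).
 *		if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16, 0))
 *			perror("PR_FUTEX_HASH_SET_SLOTS");
 *
 *		// Read back the currently active slot count; 0 means the
 *		// process is still using the global hash.
 *		printf("private futex hash slots: %d\n",
 *		       (int)prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0));
 *		return 0;
 *	}
 */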