// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/plist.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues	(__futex_data.queues)
#define futex_hashsize	(__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#endif /* CONFIG_FAIL_FUTEX */
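
/*
 * Illustrative usage note (not part of the original source): the fail_futex
 * knobs above hook into the generic fault-injection framework, so with
 * CONFIG_FAIL_FUTEX and CONFIG_FAULT_INJECTION_DEBUG_FS they can be driven
 * either from the boot command line, e.g.
 *
 *	fail_futex=<interval>,<probability>,<space>,<times>
 *
 * or at run time through /sys/kernel/debug/fail_futex/ (probability, times,
 * and the "ignore-private" switch created above). See
 * Documentation/fault-injection/fault-injection.rst for the full attribute
 * set; the exact values shown here are examples only.
 */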

/**
 * futex_hash - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
			  key->both.offset);

	return &futex_queues[hash & (futex_hashsize - 1)];
}


/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
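
/*
 * Illustrative sketch of the assumed caller pattern (modelled on the
 * futex_wait() path; the hrtimer helpers named here live in the hrtimer API,
 * not in this file). The on-stack sleeper returned above is started and torn
 * down by the caller roughly like this:
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *	... block until woken, signalled or timed out ...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */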

/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that
 * false-negatives are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_add_return(1, &i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
		if (old)
			return old;
		return new;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @flags:	FLAGS_*
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when FLAGS_SHARED is set in @flags), the key is:
 *
 *   ( inode->i_sequence, page->index, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (when FLAGS_SHARED is not set), the key is:
 *
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
		  enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	struct folio *folio;
	struct address_space *mapping;
	int err, ro = 0;
	bool fshared;

	fshared = flags & FLAGS_SHARED;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		/*
		 * On no-MMU, shared futexes are treated as private, therefore
		 * we must not include the current process in the key. Since
		 * there is only one address space, the address is a unique key
		 * on its own.
		 */
		if (IS_ENABLED(CONFIG_MMU))
			key->private.mm = mm;
		else
			key->private.mm = NULL;

		key->private.address = address;
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The folio
	 * lock protects many things but in this context the folio lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the folio lock is not needed in all cases being
	 * considered here and the folio lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the folio lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the folio so it is looked up now. For
	 * anonymous pages, it does not matter if the folio is split
	 * in the future as the key is based on the address. For
	 * filesystem-backed pages, the precise page is required as the
	 * index of the page determines the key.
	 */
	folio = page_folio(page);
	mapping = READ_ONCE(folio->mapping);

	/*
	 * If folio->mapping is NULL, then it cannot be an anonymous
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the folio lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_inode_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for folio->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Folio lock is required to identify which special case above
		 * applies. If this is really a shmem page then the folio lock
		 * will prevent unexpected transitions.
		 */
		folio_lock(folio);
		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
		folio_unlock(folio);
		folio_put(folio);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored in anonymous memory, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (folio_test_anon(folio)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(true)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the folio->mapping must be traversed. Ordinarily this should
		 * be stabilised under folio lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update i_pages or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(folio->mapping) != mapping) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = folio->index + folio_page_idx(folio, page);
		rcu_read_unlock();
	}

out:
	folio_put(folio);
	return err;
}
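
/*
 * Illustrative example (not from the original source): two cooperating
 * processes that mmap() the same file MAP_SHARED end up with the same
 * (i_seq, pgoff, offset) key above and therefore meet in the same hash
 * bucket, even though their virtual addresses differ. In each process:
 *
 *	u32 *futex_word = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *	syscall(SYS_futex, futex_word, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 * A process-private futex instead keys on (current->mm, address), which by
 * construction cannot collide with a key from any other process.
 */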

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(mm);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (futex_match(&this->key, key))
			return this;
	}
	return NULL;
}

int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

int futex_get_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret:	owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex: OK if still running, EXITING or DEAD if it has reached
	 * exit() already. Highly unlikely and not a problem. Just one more
	 * round through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}

/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);
}

/* The key must be already stored in q->key. */
struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = futex_hash(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all futex_q_lock()
	 * users end up calling futex_queue(). Similarly, for housekeeping,
	 * decrement the counter at futex_q_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

void futex_q_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	futex_hb_waiters_dec(hb);
}
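
/*
 * Illustrative sketch of the expected calling pattern (modelled on the
 * futex_wait() path; futex_queue() lives in futex.h, so this is a sketch of
 * the contract rather than code from this file):
 *
 *	hb = futex_q_lock(&q);			// count the waiter, take hb->lock
 *	ret = futex_get_value_locked(&uval, uaddr);
 *	if (ret || uval != val) {
 *		futex_q_unlock(hb);		// error/mismatch: undo the count
 *		return ret ? ret : -EWOULDBLOCK;
 *	}
 *	futex_queue(&q, hb);			// success: enqueue, drop hb->lock
 *
 * Every futex_q_lock() is followed by either futex_queue() or
 * futex_q_unlock(), which is what keeps the waiter count balanced.
 */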

void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 *   (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}

/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
 * be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we removed it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__futex_unqueue(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the hash
 * bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
	/*
	 * If the lock was not acquired (due to timeout or signal) then the
	 * rt_waiter is removed before futex_q is. If this is observed by
	 * an unlocker after dropping the rtmutex wait lock and before
	 * acquiring the hash bucket lock, then the unlocker dequeues the
	 * futex_q from the hash bucket list to guarantee consistent state
	 * vs. userspace. Therefore the dequeue here must be conditional.
	 */
	if (!plist_node_empty(&q->list))
		__futex_unqueue(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, nval, mval;
	pid_t owner;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In the second case, the wake up notification could be generated
	 * by the unlock path in user space after setting the futex value
	 * to zero or by the kernel after setting the OWNER_DIED bit below.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) The owner part of user space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. If the futex value is zero,
	 * the rest of the user space mutex state is consistent, so a woken
	 * waiter will just take over the uncontended futex. Setting the
	 * OWNER_DIED bit would create inconsistent state and malfunction
	 * of the user space owner died handling. Otherwise, the OWNER_DIED
	 * bit is already set, and the woken waiter is expected to deal with
	 * this.
	 */
	owner = uval & FUTEX_TID_MASK;

	if (pending_op && !pi && !owner) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	if (owner != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS)) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
	}

	return 0;
}
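
/*
 * Worked example (illustrative only): suppose the dead owner's TID is 1234
 * and a waiter is queued on the futex. handle_futex_death() then computes:
 *
 *	uval = 1234 | FUTEX_WAITERS		    = 0x800004d2
 *	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED = 0xc0000000
 *
 * The cmpxchg publishes mval, futex_wake() wakes one waiter, and that waiter
 * observes FUTEX_OWNER_DIED in the futex word, which tells it the state
 * protected by the robust mutex must be recovered before the lock is reused.
 */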

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}
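
/*
 * For orientation, a minimal (hypothetical) sketch of the userspace side that
 * the walker below consumes; the real layout is owned by the C library and
 * "struct user_mutex" is an invented name:
 *
 *	struct user_mutex {
 *		struct robust_list	list;	// links held mutexes together
 *		u32			futex;	// the futex word itself
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { &head.list },	// self-pointing: empty
 *		.futex_offset	 = offsetof(struct user_mutex, futex),
 *		.list_op_pending = NULL,
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Every mutex the thread holds is linked into head.list, so at exit time the
 * kernel can find each futex word at (entry + futex_offset).
 */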

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	unsigned long futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}

#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
			  compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
					       (compat_uptr_t __user *)&entry->next,
					       &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}
#endif

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = futex_hash(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in make_task_dead().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It also guarantees that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
	 * exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#ifdef CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0, 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);