1 // SPDX-License-Identifier: GPL-2.0-or-later
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
29 * Kirkwood for proof-of-concept implementation.
42 #include <linux/fault-inject.h>
115 debugfs_create_bool("ignore-private", mode, dir, in fail_futex_debugfs()
140 * futexes -- see comment with union futex_key. in futex_key_is_private()
142 return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED)); in futex_key_is_private()
153 wake_up_var(fph->mm); in futex_private_hash_put()
157 * futex_hash_get - Get an additional reference for the local hash.
165 struct futex_private_hash *fph = hb->priv; in futex_hash_get()
174 struct futex_private_hash *fph = hb->priv; in futex_hash_put()
190 fph = rcu_dereference(key->private.mm->futex_phash); in __futex_hash_private()
191 if (!fph || !fph->hash_mask) in __futex_hash_private()
194 hash = jhash2((void *)&key->private.address, in __futex_hash_private()
195 sizeof(key->private.address) / 4, in __futex_hash_private()
196 key->both.offset); in __futex_hash_private()
197 return &fph->queues[hash & fph->hash_mask]; in __futex_hash_private()
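
The private-hash lookup above boils down to "Jenkins-hash the aligned futex address, then mask into a power-of-two bucket array". A small user-space illustration of that indexing idea; the mixer below is only a stand-in for jhash2(), and all names and sizes are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for jhash2(): any decent integer mixer demonstrates the
     * bucket-indexing step; the kernel seeds the hash with the in-page
     * offset of the futex word (key->both.offset). */
    static uint32_t mix(uint64_t addr, uint32_t seed)
    {
            uint64_t h = addr * 0x9E3779B97F4A7C15ULL + seed;
            return (uint32_t)(h ^ (h >> 32));
    }

    int main(void)
    {
            unsigned int hash_mask = 16 - 1;              /* power-of-two slots, minus one */
            uint64_t uaddr = 0x7ffd12345678ULL & ~(uint64_t)3;  /* aligned futex word */
            unsigned int slot = mix(uaddr, (uint32_t)(uaddr % 4096)) & hash_mask;

            printf("futex word 0x%llx -> bucket %u of %u\n",
                   (unsigned long long)uaddr, slot, hash_mask + 1);
            return 0;
    }
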
204 unsigned int slots = old->hash_mask + 1; in futex_rehash_private()
210 hb_old = &old->queues[i]; in futex_rehash_private()
212 spin_lock(&hb_old->lock); in futex_rehash_private()
213 plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) { in futex_rehash_private()
215 plist_del(&this->list, &hb_old->chain); in futex_rehash_private()
218 WARN_ON_ONCE(this->lock_ptr != &hb_old->lock); in futex_rehash_private()
220 hb_new = __futex_hash(&this->key, new); in futex_rehash_private()
226 spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING); in futex_rehash_private()
227 plist_add(&this->list, &hb_new->chain); in futex_rehash_private()
228 this->lock_ptr = &hb_new->lock; in futex_rehash_private()
229 spin_unlock(&hb_new->lock); in futex_rehash_private()
231 spin_unlock(&hb_old->lock); in futex_rehash_private()
240 WARN_ON_ONCE(mm->futex_phash_new); in __futex_pivot_hash()
242 fph = rcu_dereference_protected(mm->futex_phash, in __futex_pivot_hash()
243 lockdep_is_held(&mm->futex_hash_lock)); in __futex_pivot_hash()
246 mm->futex_phash_new = new; in __futex_pivot_hash()
252 new->state = FR_PERCPU; in __futex_pivot_hash()
254 mm->futex_batches = get_state_synchronize_rcu(); in __futex_pivot_hash()
255 rcu_assign_pointer(mm->futex_phash, new); in __futex_pivot_hash()
263 scoped_guard(mutex, &mm->futex_hash_lock) { in futex_pivot_hash()
266 fph = mm->futex_phash_new; in futex_pivot_hash()
268 mm->futex_phash_new = NULL; in futex_pivot_hash()
276 struct mm_struct *mm = current->mm; in futex_private_hash()
291 fph = rcu_dereference(mm->futex_phash); in futex_private_hash()
310 fph = hb->priv; in futex_hash()
315 futex_pivot_hash(key->private.mm); in futex_hash()
349 switch (mpol->mode) { in __futex_key_to_node()
351 node = first_node(mpol->nodes); in __futex_key_to_node()
355 if (mpol->home_node != NUMA_NO_NODE) in __futex_key_to_node()
356 node = mpol->home_node; in __futex_key_to_node()
372 return -EBUSY; in futex_key_to_node_opt()
377 return -EAGAIN; in futex_key_to_node_opt()
404 * __futex_hash - Return the hash bucket
410 * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the private hash) is returned if it exists; otherwise a bucket from the global hash is returned.
417 int node = key->both.node; in __futex_hash()
430 key->both.offset); in __futex_hash()
434 * In case of !FLAGS_NUMA, use some unused hash bits to pick a in __futex_hash()
435 * node -- this ensures regular futexes are interleaved across the in __futex_hash()
437 * nodes and avoids having to allocate multiple hash-tables. in __futex_hash()
444 node = find_next_bit_wrap(node_possible_map.bits, in __futex_hash()
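
The comment and the find_next_bit_wrap() call above describe the fallback when user space did not ask for a specific node (no FLAGS_NUMA): spare hash bits nominate a node, and the result is wrapped onto a node that actually exists. A toy user-space model of the wrap step; the four-node mask and helper names are invented for illustration:

    #include <stdio.h>

    /* Toy possible-node mask: nodes 0 and 2 exist, 1 and 3 do not. */
    static const int node_possible[4] = { 1, 0, 1, 0 };

    static int wrap_to_possible_node(int node, int nr_nodes)
    {
            for (int i = 0; i < nr_nodes; i++) {
                    int candidate = (node + i) % nr_nodes;   /* wrap around */

                    if (node_possible[candidate])
                            return candidate;
            }
            return 0;
    }

    int main(void)
    {
            for (unsigned int hash = 0; hash < 8; hash++) {
                    int node = hash % 4;     /* "spare hash bits" nominate a node */

                    printf("hash %u -> node %d\n", hash,
                           wrap_to_possible_node(node, 4));
            }
            return 0;
    }
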
453 * futex_setup_timer - set up the sleeping hrtimer.
476 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns); in futex_setup_timer()
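
For context, here is a sketch of how the wait paths typically drive this helper. It is modelled on the common caller pattern rather than copied from any one call site, so treat the surrounding function as an assumption:

    static void example_timed_wait(ktime_t *abs_time, unsigned int flags)
    {
            struct hrtimer_sleeper timeout, *to;

            /* A NULL @abs_time means "no timeout"; futex_setup_timer() then
             * returns NULL and everything below becomes a no-op. */
            to = futex_setup_timer(abs_time, &timeout, flags,
                                   current->timer_slack_ns);
            if (to)
                    hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);

            /* ... queue ourselves and block until woken or the timer fires ... */

            if (to) {
                    hrtimer_cancel(&to->timer);
                    destroy_hrtimer_on_stack(&to->timer);
            }
    }
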
484 * This relies on u64 not wrapping in the life-time of the machine; which with 1ns resolution means almost 585 years.
495 * It is important that futex_match() will never have a false-positive, esp.
496 * for PI futexes that can mess up the state. The above argues that false-negatives are only possible for malformed programs.
505 old = atomic64_read(&inode->i_sequence); in get_inode_sequence_number()
515 if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new)) in get_inode_sequence_number()
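
The fragment above implements the usual "lazily hand out a unique, never-zero sequence number" pattern: read the field, and only if it is still zero allocate a fresh number from a global counter and install it with a compare-and-exchange, adopting the winner's value on a race. A minimal user-space rendering of the same pattern with C11 atomics; the names are mine, not the kernel's:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t global_seq;               /* counterpart of i_seq */

    static uint64_t get_sequence_number(_Atomic uint64_t *seq)
    {
            uint64_t old = atomic_load_explicit(seq, memory_order_relaxed);

            if (old)                         /* fast path: already assigned */
                    return old;

            for (;;) {
                    uint64_t new = atomic_fetch_add(&global_seq, 1) + 1;

                    if (!new)                /* never hand out 0: it means "unset" */
                            continue;

                    /* First assigner wins; a racing loser adopts the winner's value. */
                    if (atomic_compare_exchange_strong(seq, &old, new))
                            return new;
                    return old;
            }
    }
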
522 * get_futex_key() - Get parameters which are the keys for a futex
535 * For shared mappings: ( inode->i_sequence, page offset within mapping, offset_within_page )
541 * For private mappings: ( current->mm, address, 0 )
552 struct mm_struct *mm = current->mm; in get_futex_key()
568 key->both.offset = address % PAGE_SIZE; in get_futex_key()
570 return -EINVAL; in get_futex_key()
571 address -= key->both.offset; in get_futex_key()
574 return -EFAULT; in get_futex_key()
577 return -EFAULT; in get_futex_key()
585 return -EFAULT; in get_futex_key()
589 return -EINVAL; in get_futex_key()
605 return -EFAULT; in get_futex_key()
608 key->both.node = node; in get_futex_key()
619 * On no-MMU, shared futexes are treated as private, therefore we must not include the current process in the key. in get_futex_key()
625 key->private.mm = mm; in get_futex_key()
627 key->private.mm = NULL; in get_futex_key()
629 key->private.address = address; in get_futex_key()
636 return -EFAULT; in get_futex_key()
641 * and get read-only access. in get_futex_key()
643 if (err == -EFAULT && rw == FUTEX_READ) { in get_futex_key()
656 * file-backed region case and guards against movement to swap cache. in get_futex_key()
660 * From this point on, mapping will be re-verified if necessary and in get_futex_key()
666 * filesystem-backed pages, the precise page is required as the in get_futex_key()
667 * index of the page determines the key. in get_futex_key()
670 mapping = READ_ONCE(folio->mapping); in get_futex_key()
673 * If folio->mapping is NULL, then it cannot be an anonymous in get_futex_key()
685 * an unlikely race, but we do need to retry for folio->mapping. in get_futex_key()
696 shmem_swizzled = folio_test_swapcache(folio) || folio->mapping; in get_futex_key()
703 return -EFAULT; in get_futex_key()
713 * it's a read-only handle, it's expected that futexes attach to in get_futex_key()
722 err = -EFAULT; in get_futex_key()
726 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ in get_futex_key()
727 key->private.mm = mm; in get_futex_key()
728 key->private.address = address; in get_futex_key()
735 * the folio->mapping must be traversed. Ordinarily this should in get_futex_key()
742 * mapping->host can be safely accessed as being a valid inode. in get_futex_key()
746 if (READ_ONCE(folio->mapping) != mapping) { in get_futex_key()
753 inode = READ_ONCE(mapping->host); in get_futex_key()
761 key->both.offset |= FUT_OFF_INODE; /* inode-based key */ in get_futex_key()
762 key->shared.i_seq = get_inode_sequence_number(inode); in get_futex_key()
763 key->shared.pgoff = page_pgoff(folio, page); in get_futex_key()
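
The two key shapes documented above are what makes a futex "private" or "shared": a private key only needs (mm, address), while a shared key survives the page being mapped at different addresses in different processes. A minimal runnable demonstration of a process-shared futex (error handling trimmed); the kernel keys it by inode sequence and page offset because the mapping is MAP_SHARED and the op does not use FUTEX_PRIVATE_FLAG:

    #include <linux/futex.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static long futex(uint32_t *uaddr, int op, uint32_t val)
    {
            return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    int main(void)
    {
            uint32_t *word = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                  MAP_SHARED | MAP_ANONYMOUS, -1, 0);

            *word = 0;
            if (fork() == 0) {
                    /* Child: sleep as long as the futex word still reads 0. */
                    futex(word, FUTEX_WAIT, 0);
                    printf("child woke, word = %u\n", *word);
                    _exit(0);
            }
            sleep(1);
            *word = 1;
            futex(word, FUTEX_WAKE, 1);   /* wake one waiter on the shared key */
            wait(NULL);
            return 0;
    }
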
773 * fault_in_user_writeable() - Fault in user address and verify RW access
779 * We have no generic implementation of a non-destructive write to the user address.
786 struct mm_struct *mm = current->mm; in fault_in_user_writeable()
798 * futex_top_waiter() - Return the highest priority waiter on a futex
808 plist_for_each_entry(this, &hb->chain, list) { in futex_top_waiter()
809 if (futex_match(&this->key, key)) in futex_top_waiter()
816 * wait_for_owner_exiting - Block until the owner has exited
824 if (ret != -EBUSY) { in wait_for_owner_exiting()
829 if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) in wait_for_owner_exiting()
832 mutex_lock(&exiting->futex_exit_mutex); in wait_for_owner_exiting()
835 * while the task was in exec()->exec_futex_release() then it can in wait_for_owner_exiting()
841 mutex_unlock(&exiting->futex_exit_mutex); in wait_for_owner_exiting()
847 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
850 * The q->lock_ptr must not be NULL and must be held by the caller.
856 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) in __futex_unqueue()
858 lockdep_assert_held(q->lock_ptr); in __futex_unqueue()
860 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); in __futex_unqueue()
861 plist_del(&q->list, &hb->chain); in __futex_unqueue()
865 /* The key must be already stored in q->key. */
867 __acquires(&hb->lock) in futex_q_lock()
871 * a potential waker won't miss a to-be-slept task that is in futex_q_lock()
879 q->lock_ptr = &hb->lock; in futex_q_lock()
881 spin_lock(&hb->lock); in futex_q_lock()
885 __releases(&hb->lock) in futex_q_unlock()
888 spin_unlock(&hb->lock); in futex_q_unlock()
898 * - either the real thread-priority for the real-time threads in __futex_queue()
900 * - or MAX_RT_PRIO for non-RT threads. in __futex_queue()
901 * Thus, all RT-threads are woken first in priority order, and the others are woken last, in FIFO order. in __futex_queue()
904 prio = min(current->normal_prio, MAX_RT_PRIO); in __futex_queue()
906 plist_node_init(&q->list, prio); in __futex_queue()
907 plist_add(&q->list, &hb->chain); in __futex_queue()
908 q->task = task; in __futex_queue()
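
To make the clamp above concrete, here is the mapping it produces (MAX_RT_PRIO is 100, and lower plist values are dequeued first):

    SCHED_FIFO,  rtprio 90  ->  normal_prio   9  ->  plist priority   9
    SCHED_FIFO,  rtprio  1  ->  normal_prio  98  ->  plist priority  98
    SCHED_OTHER, nice    0  ->  normal_prio 120  ->  clamped to     100

So RT waiters sort ahead of everything else in priority order, while all non-RT waiters tie at 100 and, since plist keeps insertion order within a priority, are woken FIFO among themselves.
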
912 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
915 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must be paired with exactly one earlier call to futex_queue().
919 * - 1 - if the futex_q was still queued (and we removed it);
920 * - 0 - if the futex_q was already removed by the waking thread
932 * q->lock_ptr can change between this read and the following spin_lock. in futex_unqueue()
933 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and in futex_unqueue()
936 lock_ptr = READ_ONCE(q->lock_ptr); in futex_unqueue()
940 * q->lock_ptr can change between reading it and in futex_unqueue()
945 * q->lock_ptr must have changed (maybe several times) in futex_unqueue()
952 if (unlikely(lock_ptr != q->lock_ptr)) { in futex_unqueue()
958 BUG_ON(q->pi_state); in futex_unqueue()
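
The retry loop above is the generic "snapshot the lock pointer, take it, then re-check" idiom: only the holder of the old lock is allowed to switch q->lock_ptr (for example on a requeue), so the snapshot is only trusted once it has been re-read under the lock it named. A user-space sketch of the same idiom with pthreads; the types and names are invented for illustration:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct elem {
            pthread_mutex_t *_Atomic lock_ptr;   /* may be switched by the lock holder */
    };

    pthread_mutex_t *elem_lock(struct elem *e)
    {
            for (;;) {
                    pthread_mutex_t *lock = atomic_load(&e->lock_ptr);

                    if (!lock)                    /* already dequeued and idle */
                            return NULL;
                    pthread_mutex_lock(lock);
                    if (lock == atomic_load(&e->lock_ptr))
                            return lock;          /* still the right lock: done */
                    pthread_mutex_unlock(lock);   /* it moved under us: retry */
            }
    }
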
976 lock_ptr = READ_ONCE(q->lock_ptr); in futex_q_lockptr_lock()
979 if (unlikely(lock_ptr != q->lock_ptr)) { in futex_q_lockptr_lock()
999 if (!plist_node_empty(&q->list)) in futex_unqueue_pi()
1002 BUG_ON(!q->pi_state); in futex_unqueue_pi()
1003 put_pi_state(q->pi_state); in futex_unqueue_pi()
1004 q->pi_state = NULL; in futex_unqueue_pi()
1012 * Process a futex-list entry, check whether it's owned by the dying task, and do notification if so:
1024 return -1; in handle_futex_death()
1028 return -1; in handle_futex_death()
1051 * 1) task->robust_list->list_op_pending != NULL in handle_futex_death()
1082 * futex_wake() even if OWNER_DIED is already set - in handle_futex_death()
1084 * thread-death.) The rest of the cleanup is done in in handle_futex_death()
1100 case -EFAULT: in handle_futex_death()
1102 return -1; in handle_futex_death()
1105 case -EAGAIN: in handle_futex_death()
1119 * Wake robust non-PI futexes here. The wakeup of PI futexes happens in exit_pi_state_list(): in handle_futex_death()
1131 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1140 return -EFAULT; in fetch_robust_entry()
1149 * Walk curr->robust_list (very carefully, it's a userspace list!)
1152 * We silently return on any sign of list-walking problem.
1156 struct robust_list_head __user *head = curr->robust_list; in exit_robust_list()
1167 if (fetch_robust_entry(&entry, &head->list.next, &pi)) in exit_robust_list()
1172 if (get_user(futex_offset, &head->futex_offset)) in exit_robust_list()
1175 * Fetch any possibly pending lock-add first, and handle it if it exists: in exit_robust_list()
1178 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) in exit_robust_list()
1182 while (entry != &head->list) { in exit_robust_list()
1187 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); in exit_robust_list()
1204 if (!--limit) in exit_robust_list()
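
What exit_robust_list() walks here is a plain user-space structure that each thread registers once; glibc does this internally for robust pthread mutexes. A sketch of the registration side (list entries would later be linked in by the lock/unlock fast paths):

    #include <linux/futex.h>
    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static struct robust_list_head robust_head = {
            .list            = { &robust_head.list },  /* empty circular list */
            .futex_offset    = 0,   /* offset from a list entry to its futex word */
            .list_op_pending = NULL,
    };

    int register_robust_list(void)
    {
            /* If this thread dies, the kernel walks the list, sets
             * FUTEX_OWNER_DIED in each listed futex word and wakes one waiter. */
            return syscall(SYS_set_robust_list, &robust_head, sizeof(robust_head));
    }
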
1227 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1234 return -EFAULT; in compat_fetch_robust_entry()
1243 * Walk curr->robust_list (very carefully, it's a userspace list!)
1246 * We silently return on any sign of list-walking problem.
1250 struct compat_robust_list_head __user *head = curr->compat_robust_list; in compat_exit_robust_list()
1262 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) in compat_exit_robust_list()
1267 if (get_user(futex_offset, &head->futex_offset)) in compat_exit_robust_list()
1270 * Fetch any possibly pending lock-add first, and handle it if it exists: in compat_exit_robust_list()
1274 &head->list_op_pending, &pip)) in compat_exit_robust_list()
1278 while (entry != (struct robust_list __user *) &head->list) { in compat_exit_robust_list()
1284 (compat_uptr_t __user *)&entry->next, &next_pi); in compat_exit_robust_list()
1304 if (!--limit) in compat_exit_robust_list()
1321 * Kernel cleans up PI-state, but userspace is likely hosed.
1322 * (Robust-futex cleanup is separate and might save the day for userspace.)
1326 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list()
1346 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
1348 next = head->next; in exit_pi_state_list()
1350 key = pi_state->key; in exit_pi_state_list()
1364 if (!refcount_inc_not_zero(&pi_state->refcount)) { in exit_pi_state_list()
1365 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
1367 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
1370 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
1372 spin_lock(&hb->lock); in exit_pi_state_list()
1373 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1374 raw_spin_lock(&curr->pi_lock); in exit_pi_state_list()
1376 * We dropped the pi-lock, so re-check whether this in exit_pi_state_list()
1377 * task still owns the PI-state: in exit_pi_state_list()
1379 if (head->next != next) { in exit_pi_state_list()
1380 /* retain curr->pi_lock for the loop invariant */ in exit_pi_state_list()
1381 raw_spin_unlock(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1382 spin_unlock(&hb->lock); in exit_pi_state_list()
1387 WARN_ON(pi_state->owner != curr); in exit_pi_state_list()
1388 WARN_ON(list_empty(&pi_state->list)); in exit_pi_state_list()
1389 list_del_init(&pi_state->list); in exit_pi_state_list()
1390 pi_state->owner = NULL; in exit_pi_state_list()
1392 raw_spin_unlock(&curr->pi_lock); in exit_pi_state_list()
1393 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1394 spin_unlock(&hb->lock); in exit_pi_state_list()
1397 rt_mutex_futex_unlock(&pi_state->pi_mutex); in exit_pi_state_list()
1400 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
1402 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
1410 if (unlikely(tsk->robust_list)) { in futex_cleanup()
1412 tsk->robust_list = NULL; in futex_cleanup()
1416 if (unlikely(tsk->compat_robust_list)) { in futex_cleanup()
1418 tsk->compat_robust_list = NULL; in futex_cleanup()
1422 if (unlikely(!list_empty(&tsk->pi_state_list))) in futex_cleanup()
1427 * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1446 if (tsk->futex_state == FUTEX_STATE_EXITING) in futex_exit_recursive()
1447 mutex_unlock(&tsk->futex_exit_mutex); in futex_exit_recursive()
1448 tsk->futex_state = FUTEX_STATE_DEAD; in futex_exit_recursive()
1456 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in in futex_cleanup_begin()
1459 mutex_lock(&tsk->futex_exit_mutex); in futex_cleanup_begin()
1462 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. in futex_cleanup_begin()
1464 * This ensures that all subsequent checks of tsk->futex_state in in futex_cleanup_begin()
1466 * tsk->pi_lock held. in futex_cleanup_begin()
1469 * the state change under tsk->pi_lock by a concurrent waiter must in futex_cleanup_begin()
1472 raw_spin_lock_irq(&tsk->pi_lock); in futex_cleanup_begin()
1473 tsk->futex_state = FUTEX_STATE_EXITING; in futex_cleanup_begin()
1474 raw_spin_unlock_irq(&tsk->pi_lock); in futex_cleanup_begin()
1483 tsk->futex_state = state; in futex_cleanup_end()
1488 mutex_unlock(&tsk->futex_exit_mutex); in futex_cleanup_end()
1520 fhb->priv = fph; in futex_hash_bucket_init()
1522 atomic_set(&fhb->waiters, 0); in futex_hash_bucket_init()
1523 plist_head_init(&fhb->chain); in futex_hash_bucket_init()
1524 spin_lock_init(&fhb->lock); in futex_hash_bucket_init()
1532 * futex-ref
1534 * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
1537 * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
1538 * re-initializes the state automatically, such that the fph swizzle is also a
1539 * transition back to per-cpu.
1546 struct mm_struct *mm = fph->mm; in __futex_ref_atomic_begin()
1553 WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0); in __futex_ref_atomic_begin()
1560 atomic_long_set(&mm->futex_atomic, LONG_MAX); in __futex_ref_atomic_begin()
1561 smp_store_release(&fph->state, FR_ATOMIC); in __futex_ref_atomic_begin()
1563 call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu); in __futex_ref_atomic_begin()
1568 struct mm_struct *mm = fph->mm; in __futex_ref_atomic_end()
1578 WARN_ON_ONCE(fph->state != FR_ATOMIC); in __futex_ref_atomic_end()
1581 * Therefore the per-cpu counter is now stable, sum and reset. in __futex_ref_atomic_end()
1584 unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu); in __futex_ref_atomic_end()
1590 * Re-init for the next cycle. in __futex_ref_atomic_end()
1592 this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */ in __futex_ref_atomic_end()
1600 ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic); in __futex_ref_atomic_end()
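
The LONG_MAX arithmetic in __futex_ref_atomic_begin()/_end() is the familiar "bias" trick: park a huge value in the atomic so concurrent futex_ref_put() calls cannot drive it to zero while the per-CPU counts are still being folded in. As I read the accounting, the extra -1 drops the initial 0 -> 1 seed reference taken when the hash was allocated (visible further down in futex_hash_allocate()). A worked example with 1000 standing in for LONG_MAX, under that reading:

    begin:  futex_atomic = 1000             (bias parked, state -> FR_ATOMIC)
            per-CPU counts still hold 3     (the seed 1 plus two live references)
    window: one holder drops its reference in atomic mode: 1000 - 1 = 999
    end:    999 + count(3) - 1000 - 1 = 1   -> one reference still outstanding

When that last holder calls futex_ref_put(), the atomic reaches zero, the drop path sees the hash as dead, and the stale table can be disposed of.
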
1611 struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash); in futex_ref_rcu()
1613 if (fph->state == FR_PERCPU) { in futex_ref_rcu()
1615 * Per this extra grace-period, everybody must now observe in futex_ref_rcu()
1617 * are in-flight. in futex_ref_rcu()
1621 * migration of the per-cpu counter into the atomic. in futex_ref_rcu()
1635 struct mm_struct *mm = fph->mm; in futex_ref_drop()
1640 WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph); in futex_ref_drop()
1651 * guard(rcu); guard(mm->futex_hash_lock); in futex_ref_drop()
1652 * fph = mm->futex_phash; in futex_ref_drop()
1653 * rcu_assign_pointer(&mm->futex_phash, new); in futex_ref_drop()
1656 * fph->state = FR_ATOMIC; in futex_ref_drop()
1665 * There must be at least one full grace-period between publishing a in futex_ref_drop()
1668 if (poll_state_synchronize_rcu(mm->futex_batches)) { in futex_ref_drop()
1670 * There was a grace-period, we can begin now. in futex_ref_drop()
1676 call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu); in futex_ref_drop()
1681 struct mm_struct *mm = fph->mm; in futex_ref_get()
1685 if (smp_load_acquire(&fph->state) == FR_PERCPU) { in futex_ref_get()
1686 this_cpu_inc(*mm->futex_ref); in futex_ref_get()
1690 return atomic_long_inc_not_zero(&mm->futex_atomic); in futex_ref_get()
1695 struct mm_struct *mm = fph->mm; in futex_ref_put()
1699 if (smp_load_acquire(&fph->state) == FR_PERCPU) { in futex_ref_put()
1700 this_cpu_dec(*mm->futex_ref); in futex_ref_put()
1704 return atomic_long_dec_and_test(&mm->futex_atomic); in futex_ref_put()
1709 struct mm_struct *mm = fph->mm; in futex_ref_is_dead()
1713 if (smp_load_acquire(&fph->state) == FR_PERCPU) in futex_ref_is_dead()
1716 return atomic_long_read(&mm->futex_atomic) == 0; in futex_ref_is_dead()
1721 mutex_init(&mm->futex_hash_lock); in futex_mm_init()
1722 RCU_INIT_POINTER(mm->futex_phash, NULL); in futex_mm_init()
1723 mm->futex_phash_new = NULL; in futex_mm_init()
1724 /* futex-ref */ in futex_mm_init()
1725 mm->futex_ref = NULL; in futex_mm_init()
1726 atomic_long_set(&mm->futex_atomic, 0); in futex_mm_init()
1727 mm->futex_batches = get_state_synchronize_rcu(); in futex_mm_init()
1735 free_percpu(mm->futex_ref); in futex_hash_free()
1736 kvfree(mm->futex_phash_new); in futex_hash_free()
1737 fph = rcu_dereference_raw(mm->futex_phash); in futex_hash_free()
1748 if (!mm->futex_phash_new) in futex_pivot_pending()
1751 fph = rcu_dereference(mm->futex_phash); in futex_pivot_pending()
1759 if (!a->custom && b->custom) in futex_hash_less()
1761 if (a->custom && !b->custom) in futex_hash_less()
1764 /* zero-sized hash wins */ in futex_hash_less()
1765 if (!b->hash_mask) in futex_hash_less()
1767 if (!a->hash_mask) in futex_hash_less()
1771 if (a->hash_mask < b->hash_mask) in futex_hash_less()
1773 if (a->hash_mask > b->hash_mask) in futex_hash_less()
1781 struct mm_struct *mm = current->mm; in futex_hash_allocate()
1787 return -EINVAL; in futex_hash_allocate()
1793 fph = rcu_dereference(mm->futex_phash); in futex_hash_allocate()
1794 if (fph && !fph->hash_mask) { in futex_hash_allocate()
1796 return -EBUSY; in futex_hash_allocate()
1801 if (!mm->futex_ref) { in futex_hash_allocate()
1806 mm->futex_ref = alloc_percpu(unsigned int); in futex_hash_allocate()
1807 if (!mm->futex_ref) in futex_hash_allocate()
1808 return -ENOMEM; in futex_hash_allocate()
1809 this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */ in futex_hash_allocate()
1815 return -ENOMEM; in futex_hash_allocate()
1817 fph->hash_mask = hash_slots ? hash_slots - 1 : 0; in futex_hash_allocate()
1818 fph->custom = custom; in futex_hash_allocate()
1819 fph->mm = mm; in futex_hash_allocate()
1822 futex_hash_bucket_init(&fph->queues[i], fph); in futex_hash_allocate()
1832 scoped_guard(mutex, &mm->futex_hash_lock) { in futex_hash_allocate()
1836 cur = rcu_dereference_protected(mm->futex_phash, in futex_hash_allocate()
1837 lockdep_is_held(&mm->futex_hash_lock)); in futex_hash_allocate()
1838 new = mm->futex_phash_new; in futex_hash_allocate()
1839 mm->futex_phash_new = NULL; in futex_hash_allocate()
1842 if (cur && !cur->hash_mask) { in futex_hash_allocate()
1849 mm->futex_phash_new = new; in futex_hash_allocate()
1850 return -EBUSY; in futex_hash_allocate()
1879 * Will set mm->futex_phash_new on failure; in futex_hash_allocate()
1894 if (!current->mm) in futex_hash_allocate_default()
1902 fph = rcu_dereference(current->mm->futex_phash); in futex_hash_allocate_default()
1904 if (fph->custom) in futex_hash_allocate_default()
1907 current_buckets = fph->hash_mask + 1; in futex_hash_allocate_default()
1929 fph = rcu_dereference(current->mm->futex_phash); in futex_hash_get_slots()
1930 if (fph && fph->hash_mask) in futex_hash_get_slots()
1931 return fph->hash_mask + 1; in futex_hash_get_slots()
1939 return -EINVAL; in futex_hash_allocate()
1957 return -EINVAL; in futex_hash_prctl()
1966 ret = -EINVAL; in futex_hash_prctl()
2006 futex_hashmask = hashsize - 1; in futex_init()
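
Finally, the resizing machinery above is driven from user space through prctl(). A minimal sketch: the PR_FUTEX_HASH constants come from <linux/prctl.h> on kernels that carry this feature, and the fallback values below are my assumption and should be checked against your headers:

    #include <linux/prctl.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_FUTEX_HASH
    #define PR_FUTEX_HASH              78   /* assumed value, verify locally */
    #define PR_FUTEX_HASH_SET_SLOTS     1
    #define PR_FUTEX_HASH_GET_SLOTS     2
    #endif

    int main(void)
    {
            /* Ask for a private futex hash with 16 buckets for this process. */
            if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16, 0))
                    perror("PR_FUTEX_HASH_SET_SLOTS");

            printf("buckets now: %ld\n",
                   (long)prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0));
            return 0;
    }
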