xref: /linux/kernel/futex/core.c (revision 56180dd20c19e5b0fa34822997a9ac66b517e7b3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Fast Userspace Mutexes (which I call "Futexes!").
4  *  (C) Rusty Russell, IBM 2002
5  *
6  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8  *
9  *  Removed page pinning, fix privately mapped COW pages and other cleanups
10  *  (C) Copyright 2003, 2004 Jamie Lokier
11  *
12  *  Robust futex support started by Ingo Molnar
13  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15  *
16  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
17  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19  *
20  *  PRIVATE futexes by Eric Dumazet
21  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22  *
23  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24  *  Copyright (C) IBM Corporation, 2009
25  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
26  *
27  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28  *  enough at me, Linus for the original (flawed) idea, Matthew
29  *  Kirkwood for proof-of-concept implementation.
30  *
31  *  "The futexes are also cursed."
32  *  "But they come in a choice of three flavours!"
33  */
34 #include <linux/compat.h>
35 #include <linux/jhash.h>
36 #include <linux/pagemap.h>
37 #include <linux/debugfs.h>
38 #include <linux/plist.h>
39 #include <linux/gfp.h>
40 #include <linux/vmalloc.h>
41 #include <linux/memblock.h>
42 #include <linux/fault-inject.h>
43 #include <linux/slab.h>
44 #include <linux/prctl.h>
45 #include <linux/mempolicy.h>
46 #include <linux/mmap_lock.h>
47 
48 #include "futex.h"
49 #include "../locking/rtmutex_common.h"
50 
51 /*
52  * The base of the bucket array and its size are always used together
53  * (after initialization only in futex_hash()), so ensure that they
54  * reside in the same cacheline.
55  */
56 static struct {
57 	unsigned long            hashmask;
58 	unsigned int		 hashshift;
59 	struct futex_hash_bucket *queues[MAX_NUMNODES];
60 } __futex_data __read_mostly __aligned(2*sizeof(long));
61 
62 #define futex_hashmask	(__futex_data.hashmask)
63 #define futex_hashshift	(__futex_data.hashshift)
64 #define futex_queues	(__futex_data.queues)
65 
66 struct futex_private_hash {
67 	int		state;
68 	unsigned int	hash_mask;
69 	struct rcu_head	rcu;
70 	void		*mm;
71 	bool		custom;
72 	bool		immutable;
73 	struct futex_hash_bucket queues[];
74 };
75 
76 /*
77  * Fault injections for futexes.
78  */
79 #ifdef CONFIG_FAIL_FUTEX
80 
81 static struct {
82 	struct fault_attr attr;
83 
84 	bool ignore_private;
85 } fail_futex = {
86 	.attr = FAULT_ATTR_INITIALIZER,
87 	.ignore_private = false,
88 };
89 
90 static int __init setup_fail_futex(char *str)
91 {
92 	return setup_fault_attr(&fail_futex.attr, str);
93 }
94 __setup("fail_futex=", setup_fail_futex);
95 
96 bool should_fail_futex(bool fshared)
97 {
98 	if (fail_futex.ignore_private && !fshared)
99 		return false;
100 
101 	return should_fail(&fail_futex.attr, 1);
102 }
103 
104 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
105 
106 static int __init fail_futex_debugfs(void)
107 {
108 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
109 	struct dentry *dir;
110 
111 	dir = fault_create_debugfs_attr("fail_futex", NULL,
112 					&fail_futex.attr);
113 	if (IS_ERR(dir))
114 		return PTR_ERR(dir);
115 
116 	debugfs_create_bool("ignore-private", mode, dir,
117 			    &fail_futex.ignore_private);
118 	return 0;
119 }
120 
121 late_initcall(fail_futex_debugfs);
122 
123 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
124 
125 #endif /* CONFIG_FAIL_FUTEX */
126 
127 static struct futex_hash_bucket *
128 __futex_hash(union futex_key *key, struct futex_private_hash *fph);
129 
130 #ifdef CONFIG_FUTEX_PRIVATE_HASH
131 static bool futex_ref_get(struct futex_private_hash *fph);
132 static bool futex_ref_put(struct futex_private_hash *fph);
133 static bool futex_ref_is_dead(struct futex_private_hash *fph);
134 
135 enum { FR_PERCPU = 0, FR_ATOMIC };
136 
137 static inline bool futex_key_is_private(union futex_key *key)
138 {
139 	/*
140 	 * Relies on get_futex_key() to set either bit for shared
141 	 * futexes -- see comment with union futex_key.
142 	 */
143 	return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED));
144 }
145 
146 bool futex_private_hash_get(struct futex_private_hash *fph)
147 {
148 	if (fph->immutable)
149 		return true;
150 	return futex_ref_get(fph);
151 }
152 
153 void futex_private_hash_put(struct futex_private_hash *fph)
154 {
155 	if (fph->immutable)
156 		return;
157 	if (futex_ref_put(fph))
158 		wake_up_var(fph->mm);
159 }
160 
161 /**
162  * futex_hash_get - Get an additional reference for the local hash.
163  * @hb:                    ptr to the private local hash.
164  *
165  * Obtain an additional reference for the already obtained hash bucket. The
166  * caller must already own a reference.
167  */
168 void futex_hash_get(struct futex_hash_bucket *hb)
169 {
170 	struct futex_private_hash *fph = hb->priv;
171 
172 	if (!fph)
173 		return;
174 	WARN_ON_ONCE(!futex_private_hash_get(fph));
175 }
176 
177 void futex_hash_put(struct futex_hash_bucket *hb)
178 {
179 	struct futex_private_hash *fph = hb->priv;
180 
181 	if (!fph)
182 		return;
183 	futex_private_hash_put(fph);
184 }
185 
186 static struct futex_hash_bucket *
187 __futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
188 {
189 	u32 hash;
190 
191 	if (!futex_key_is_private(key))
192 		return NULL;
193 
194 	if (!fph)
195 		fph = rcu_dereference(key->private.mm->futex_phash);
196 	if (!fph || !fph->hash_mask)
197 		return NULL;
198 
199 	hash = jhash2((void *)&key->private.address,
200 		      sizeof(key->private.address) / 4,
201 		      key->both.offset);
202 	return &fph->queues[hash & fph->hash_mask];
203 }
204 
205 static void futex_rehash_private(struct futex_private_hash *old,
206 				 struct futex_private_hash *new)
207 {
208 	struct futex_hash_bucket *hb_old, *hb_new;
209 	unsigned int slots = old->hash_mask + 1;
210 	unsigned int i;
211 
212 	for (i = 0; i < slots; i++) {
213 		struct futex_q *this, *tmp;
214 
215 		hb_old = &old->queues[i];
216 
217 		spin_lock(&hb_old->lock);
218 		plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) {
219 
220 			plist_del(&this->list, &hb_old->chain);
221 			futex_hb_waiters_dec(hb_old);
222 
223 			WARN_ON_ONCE(this->lock_ptr != &hb_old->lock);
224 
225 			hb_new = __futex_hash(&this->key, new);
226 			futex_hb_waiters_inc(hb_new);
227 			/*
228 			 * The new pointer isn't published yet but an already
229 			 * moved user can be unqueued due to timeout or signal.
230 			 */
231 			spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING);
232 			plist_add(&this->list, &hb_new->chain);
233 			this->lock_ptr = &hb_new->lock;
234 			spin_unlock(&hb_new->lock);
235 		}
236 		spin_unlock(&hb_old->lock);
237 	}
238 }
239 
240 static bool __futex_pivot_hash(struct mm_struct *mm,
241 			       struct futex_private_hash *new)
242 {
243 	struct futex_private_hash *fph;
244 
245 	WARN_ON_ONCE(mm->futex_phash_new);
246 
247 	fph = rcu_dereference_protected(mm->futex_phash,
248 					lockdep_is_held(&mm->futex_hash_lock));
249 	if (fph) {
250 		if (!futex_ref_is_dead(fph)) {
251 			mm->futex_phash_new = new;
252 			return false;
253 		}
254 
255 		futex_rehash_private(fph, new);
256 	}
257 	new->state = FR_PERCPU;
258 	scoped_guard(rcu) {
259 		mm->futex_batches = get_state_synchronize_rcu();
260 		rcu_assign_pointer(mm->futex_phash, new);
261 	}
262 	kvfree_rcu(fph, rcu);
263 	return true;
264 }
265 
266 static void futex_pivot_hash(struct mm_struct *mm)
267 {
268 	scoped_guard(mutex, &mm->futex_hash_lock) {
269 		struct futex_private_hash *fph;
270 
271 		fph = mm->futex_phash_new;
272 		if (fph) {
273 			mm->futex_phash_new = NULL;
274 			__futex_pivot_hash(mm, fph);
275 		}
276 	}
277 }
278 
279 struct futex_private_hash *futex_private_hash(void)
280 {
281 	struct mm_struct *mm = current->mm;
282 	/*
283 	 * Ideally we don't loop. If there is a replacement in progress
284 	 * then a new private hash is already prepared and a reference can't be
285 	 * obtained once the last user has dropped it.
286 	 * In that case we block on mm_struct::futex_hash_lock and either have
287 	 * to perform the replacement or wait while someone else is doing the
288 	 * job. Either way, on the second iteration we acquire a reference on the
289 	 * new private hash or loop again because a new replacement has been
290 	 * requested.
291 	 */
292 again:
293 	scoped_guard(rcu) {
294 		struct futex_private_hash *fph;
295 
296 		fph = rcu_dereference(mm->futex_phash);
297 		if (!fph)
298 			return NULL;
299 
300 		if (futex_private_hash_get(fph))
301 			return fph;
302 	}
303 	futex_pivot_hash(mm);
304 	goto again;
305 }
306 
307 struct futex_hash_bucket *futex_hash(union futex_key *key)
308 {
309 	struct futex_private_hash *fph;
310 	struct futex_hash_bucket *hb;
311 
312 again:
313 	scoped_guard(rcu) {
314 		hb = __futex_hash(key, NULL);
315 		fph = hb->priv;
316 
317 		if (!fph || futex_private_hash_get(fph))
318 			return hb;
319 	}
320 	futex_pivot_hash(key->private.mm);
321 	goto again;
322 }
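/*
 * Usage sketch (illustrative, not lifted verbatim from a caller): the
 * reference returned by futex_hash() is usually managed with the cleanup
 * guards from futex.h, so a lookup roughly pairs up as
 *
 *	CLASS(hb, hb)(&q->key);		// futex_hash(); put on scope exit
 *	futex_q_lock(q, hb);
 *	...
 *	futex_q_unlock(hb);
 *
 * If the private hash is concurrently being replaced, futex_hash() above
 * simply helps with the pivot and retries until it gets a reference on the
 * current hash.
 */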
323 
324 #else /* !CONFIG_FUTEX_PRIVATE_HASH */
325 
326 static struct futex_hash_bucket *
327 __futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
328 {
329 	return NULL;
330 }
331 
332 struct futex_hash_bucket *futex_hash(union futex_key *key)
333 {
334 	return __futex_hash(key, NULL);
335 }
336 
337 #endif /* CONFIG_FUTEX_PRIVATE_HASH */
338 
339 #ifdef CONFIG_FUTEX_MPOL
340 
341 static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr)
342 {
343 	struct vm_area_struct *vma = vma_lookup(mm, addr);
344 	struct mempolicy *mpol;
345 	int node = FUTEX_NO_NODE;
346 
347 	if (!vma)
348 		return FUTEX_NO_NODE;
349 
350 	mpol = vma_policy(vma);
351 	if (!mpol)
352 		return FUTEX_NO_NODE;
353 
354 	switch (mpol->mode) {
355 	case MPOL_PREFERRED:
356 		node = first_node(mpol->nodes);
357 		break;
358 	case MPOL_PREFERRED_MANY:
359 	case MPOL_BIND:
360 		if (mpol->home_node != NUMA_NO_NODE)
361 			node = mpol->home_node;
362 		break;
363 	default:
364 		break;
365 	}
366 
367 	return node;
368 }
369 
370 static int futex_key_to_node_opt(struct mm_struct *mm, unsigned long addr)
371 {
372 	int seq, node;
373 
374 	guard(rcu)();
375 
376 	if (!mmap_lock_speculate_try_begin(mm, &seq))
377 		return -EBUSY;
378 
379 	node = __futex_key_to_node(mm, addr);
380 
381 	if (mmap_lock_speculate_retry(mm, seq))
382 		return -EAGAIN;
383 
384 	return node;
385 }
386 
387 static int futex_mpol(struct mm_struct *mm, unsigned long addr)
388 {
389 	int node;
390 
391 	node = futex_key_to_node_opt(mm, addr);
392 	if (node >= FUTEX_NO_NODE)
393 		return node;
394 
395 	guard(mmap_read_lock)(mm);
396 	return __futex_key_to_node(mm, addr);
397 }
398 
399 #else /* !CONFIG_FUTEX_MPOL */
400 
401 static int futex_mpol(struct mm_struct *mm, unsigned long addr)
402 {
403 	return FUTEX_NO_NODE;
404 }
405 
406 #endif /* CONFIG_FUTEX_MPOL */
407 
408 /**
409  * __futex_hash - Return the hash bucket
410  * @key:	Pointer to the futex key for which the hash is calculated
411  * @fph:	Pointer to private hash if known
412  *
413  * We hash on the keys returned from get_futex_key (see below) and return the
414  * corresponding hash bucket.
415  * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the
416  * private hash) is returned if one exists. Otherwise a hash bucket from the
417  * global hash is returned.
418  */
419 static struct futex_hash_bucket *
420 __futex_hash(union futex_key *key, struct futex_private_hash *fph)
421 {
422 	int node = key->both.node;
423 	u32 hash;
424 
425 	if (node == FUTEX_NO_NODE) {
426 		struct futex_hash_bucket *hb;
427 
428 		hb = __futex_hash_private(key, fph);
429 		if (hb)
430 			return hb;
431 	}
432 
433 	hash = jhash2((u32 *)key,
434 		      offsetof(typeof(*key), both.offset) / sizeof(u32),
435 		      key->both.offset);
436 
437 	if (node == FUTEX_NO_NODE) {
438 		/*
439 		 * In case of !FLAGS_NUMA, use some unused hash bits to pick a
440 		 * node -- this ensures regular futexes are interleaved across
441 		 * the nodes and avoids having to allocate multiple
442 		 * hash-tables.
443 		 *
444 		 * NOTE: this isn't perfectly uniform, but it is fast and
445 		 * handles sparse node masks.
446 		 */
447 		node = (hash >> futex_hashshift) % nr_node_ids;
448 		if (!node_possible(node)) {
449 			node = find_next_bit_wrap(node_possible_map.bits,
450 						  nr_node_ids, node);
451 		}
452 	}
453 
454 	return &futex_queues[node][hash & futex_hashmask];
455 }
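/*
 * Worked example (numbers are made up): with futex_hashshift == 11,
 * nr_node_ids == 4 and a key hashing to 0x12345678, the !FLAGS_NUMA path
 * picks node (0x12345678 >> 11) % 4 == 2 and bucket
 * 0x12345678 & futex_hashmask == 0x678 within that node's table, provided
 * node 2 is possible; otherwise find_next_bit_wrap() picks the next
 * possible node.
 */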
456 
457 /**
458  * futex_setup_timer - set up the sleeping hrtimer.
459  * @time:	ptr to the given timeout value
460  * @timeout:	the hrtimer_sleeper structure to be set up
461  * @flags:	futex flags
462  * @range_ns:	optional range in ns
463  *
464  * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
465  *	   value given
466  */
467 struct hrtimer_sleeper *
468 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
469 		  int flags, u64 range_ns)
470 {
471 	if (!time)
472 		return NULL;
473 
474 	hrtimer_setup_sleeper_on_stack(timeout,
475 				       (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC,
476 				       HRTIMER_MODE_ABS);
477 	/*
478 	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
479 	 * effectively the same as calling hrtimer_set_expires().
480 	 */
481 	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
482 
483 	return timeout;
484 }
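/*
 * Usage sketch, condensed from the wait paths (details such as error
 * handling are omitted): the sleeper is started after queueing and torn
 * down before returning.
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags, current->timer_slack_ns);
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *	...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */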
485 
486 /*
487  * Generate a machine wide unique identifier for this inode.
488  *
489  * This relies on u64 not wrapping in the life-time of the machine; which with
490  * 1ns resolution means almost 585 years.
491  *
492  * This further relies on the fact that a well formed program will not unmap
493  * the file while it has a (shared) futex waiting on it. This mapping will have
494  * a file reference which pins the mount and inode.
495  *
496  * If for some reason an inode gets evicted and read back in again, it will get
497  * a new sequence number and will _NOT_ match, even though it is the exact same
498  * file.
499  *
500  * It is important that futex_match() will never have a false-positive, esp.
501  * for PI futexes that can mess up the state. The above argues that false-negatives
502  * are only possible for malformed programs.
503  */
504 static u64 get_inode_sequence_number(struct inode *inode)
505 {
506 	static atomic64_t i_seq;
507 	u64 old;
508 
509 	/* Does the inode already have a sequence number? */
510 	old = atomic64_read(&inode->i_sequence);
511 	if (likely(old))
512 		return old;
513 
514 	for (;;) {
515 		u64 new = atomic64_inc_return(&i_seq);
516 		if (WARN_ON_ONCE(!new))
517 			continue;
518 
519 		old = 0;
520 		if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
521 			return old;
522 		return new;
523 	}
524 }
525 
526 /**
527  * get_futex_key() - Get parameters which are the keys for a futex
528  * @uaddr:	virtual address of the futex
529  * @flags:	FLAGS_*
530  * @key:	address where result is stored.
531  * @rw:		mapping needs to be read/write (values: FUTEX_READ,
532  *              FUTEX_WRITE)
533  *
534  * Return: a negative error code or 0
535  *
536  * The key words are stored in @key on success.
537  *
538  * For shared mappings (when @fshared), the key is:
539  *
540  *   ( inode->i_sequence, page offset within mapping, offset_within_page )
541  *
542  * [ also see get_inode_sequence_number() ]
543  *
544  * For private mappings (or when !@fshared), the key is:
545  *
546  *   ( current->mm, address, 0 )
547  *
548  * This allows (cross process, where applicable) identification of the futex
549  * without keeping the page pinned for the duration of the FUTEX_WAIT.
550  *
551  * lock_page() might sleep, the caller should not hold a spinlock.
552  * lock_page() might sleep, the caller should not hold a spinlock.
 */
553 int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
554 		  enum futex_access rw)
555 {
556 	unsigned long address = (unsigned long)uaddr;
557 	struct mm_struct *mm = current->mm;
558 	struct page *page;
559 	struct folio *folio;
560 	struct address_space *mapping;
561 	int node, err, size, ro = 0;
562 	bool node_updated = false;
563 	bool fshared;
564 
565 	fshared = flags & FLAGS_SHARED;
566 	size = futex_size(flags);
567 	if (flags & FLAGS_NUMA)
568 		size *= 2;
569 
570 	/*
571 	 * The futex address must be "naturally" aligned.
572 	 */
573 	key->both.offset = address % PAGE_SIZE;
574 	if (unlikely((address % size) != 0))
575 		return -EINVAL;
576 	address -= key->both.offset;
577 
578 	if (unlikely(!access_ok(uaddr, size)))
579 		return -EFAULT;
580 
581 	if (unlikely(should_fail_futex(fshared)))
582 		return -EFAULT;
583 
584 	node = FUTEX_NO_NODE;
585 
586 	if (flags & FLAGS_NUMA) {
587 		u32 __user *naddr = (void *)uaddr + size / 2;
588 
589 		if (futex_get_value(&node, naddr))
590 			return -EFAULT;
591 
592 		if ((node != FUTEX_NO_NODE) &&
593 		    ((unsigned int)node >= MAX_NUMNODES || !node_possible(node)))
594 			return -EINVAL;
595 	}
596 
597 	if (node == FUTEX_NO_NODE && (flags & FLAGS_MPOL)) {
598 		node = futex_mpol(mm, address);
599 		node_updated = true;
600 	}
601 
602 	if (flags & FLAGS_NUMA) {
603 		u32 __user *naddr = (void *)uaddr + size / 2;
604 
605 		if (node == FUTEX_NO_NODE) {
606 			node = numa_node_id();
607 			node_updated = true;
608 		}
609 		if (node_updated && futex_put_value(node, naddr))
610 			return -EFAULT;
611 	}
612 
613 	key->both.node = node;
614 
615 	/*
616 	 * PROCESS_PRIVATE futexes are fast.
617 	 * As the mm cannot disappear under us and the 'key' only needs
618 	 * virtual address, we don't even have to find the underlying vma.
619 	 * Note: We do have to check 'uaddr' is a valid user address,
620 	 *       but access_ok() should be faster than find_vma().
621 	 */
622 	if (!fshared) {
623 		/*
624 		 * On no-MMU, shared futexes are treated as private, therefore
625 		 * we must not include the current process in the key. Since
626 		 * there is only one address space, the address is a unique key
627 		 * on its own.
628 		 */
629 		if (IS_ENABLED(CONFIG_MMU))
630 			key->private.mm = mm;
631 		else
632 			key->private.mm = NULL;
633 
634 		key->private.address = address;
635 		return 0;
636 	}
637 
638 again:
639 	/* Ignore any VERIFY_READ mapping (futex common case) */
640 	if (unlikely(should_fail_futex(true)))
641 		return -EFAULT;
642 
643 	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
644 	/*
645 	 * If write access is not required (eg. FUTEX_WAIT), try
646 	 * and get read-only access.
647 	 */
648 	if (err == -EFAULT && rw == FUTEX_READ) {
649 		err = get_user_pages_fast(address, 1, 0, &page);
650 		ro = 1;
651 	}
652 	if (err < 0)
653 		return err;
654 	else
655 		err = 0;
656 
657 	/*
658 	 * The treatment of mapping from this point on is critical. The folio
659 	 * lock protects many things but in this context the folio lock
660 	 * stabilizes mapping, prevents inode freeing in the shared
661 	 * file-backed region case and guards against movement to swap cache.
662 	 *
663 	 * Strictly speaking the folio lock is not needed in all cases being
664 	 * considered here and the folio lock forces unnecessary serialization.
665 	 * From this point on, mapping will be re-verified if necessary and
666 	 * folio lock will be acquired only if it is unavoidable.
667 	 *
668 	 * Mapping checks require the folio so it is looked up now. For
669 	 * anonymous pages, it does not matter if the folio is split
670 	 * in the future as the key is based on the address. For
671 	 * filesystem-backed pages, the precise page is required as the
672 	 * index of the page determines the key.
673 	 */
674 	folio = page_folio(page);
675 	mapping = READ_ONCE(folio->mapping);
676 
677 	/*
678 	 * If folio->mapping is NULL, then it cannot be an anonymous
679 	 * page; but it might be the ZERO_PAGE or in the gate area or
680 	 * in a special mapping (all cases which we are happy to fail);
681 	 * or it may have been a good file page when get_user_pages_fast
682 	 * found it, but truncated or holepunched or subjected to
683 	 * invalidate_complete_page2 before we got the folio lock (also
684 	 * cases which we are happy to fail).  And we hold a reference,
685 	 * so refcount care in invalidate_inode_page's remove_mapping
686 	 * prevents drop_caches from setting mapping to NULL beneath us.
687 	 *
688 	 * The case we do have to guard against is when memory pressure made
689 	 * shmem_writepage move it from filecache to swapcache beneath us:
690 	 * an unlikely race, but we do need to retry for folio->mapping.
691 	 */
692 	if (unlikely(!mapping)) {
693 		int shmem_swizzled;
694 
695 		/*
696 		 * Folio lock is required to identify which special case above
697 		 * applies. If this is really a shmem page then the folio lock
698 		 * will prevent unexpected transitions.
699 		 */
700 		folio_lock(folio);
701 		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
702 		folio_unlock(folio);
703 		folio_put(folio);
704 
705 		if (shmem_swizzled)
706 			goto again;
707 
708 		return -EFAULT;
709 	}
710 
711 	/*
712 	 * Private mappings are handled in a simple way.
713 	 *
714 	 * If the futex key is stored in anonymous memory, then the associated
715 	 * object is the mm which is implicitly pinned by the calling process.
716 	 *
717 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
718 	 * it's a read-only handle, it's expected that futexes attach to
719 	 * the object not the particular process.
720 	 */
721 	if (folio_test_anon(folio)) {
722 		/*
723 		 * A RO anonymous page will never change and thus doesn't make
724 		 * sense for futex operations.
725 		 */
726 		if (unlikely(should_fail_futex(true)) || ro) {
727 			err = -EFAULT;
728 			goto out;
729 		}
730 
731 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
732 		key->private.mm = mm;
733 		key->private.address = address;
734 
735 	} else {
736 		struct inode *inode;
737 
738 		/*
739 		 * The associated futex object in this case is the inode and
740 		 * the folio->mapping must be traversed. Ordinarily this should
741 		 * be stabilised under folio lock but it's not strictly
742 		 * necessary in this case as we just want to pin the inode, not
743 		 * update i_pages or anything like that.
744 		 *
745 		 * The RCU read lock is taken as the inode is finally freed
746 		 * under RCU. If the mapping still matches expectations then the
747 		 * mapping->host can be safely accessed as being a valid inode.
748 		 */
749 		rcu_read_lock();
750 
751 		if (READ_ONCE(folio->mapping) != mapping) {
752 			rcu_read_unlock();
753 			folio_put(folio);
754 
755 			goto again;
756 		}
757 
758 		inode = READ_ONCE(mapping->host);
759 		if (!inode) {
760 			rcu_read_unlock();
761 			folio_put(folio);
762 
763 			goto again;
764 		}
765 
766 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
767 		key->shared.i_seq = get_inode_sequence_number(inode);
768 		key->shared.pgoff = page_pgoff(folio, page);
769 		rcu_read_unlock();
770 	}
771 
772 out:
773 	folio_put(folio);
774 	return err;
775 }
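/*
 * Illustration of the resulting keys (address is hypothetical, 4 KiB pages
 * assumed): a PROCESS_PRIVATE futex at uaddr 0x7f00deadb004 takes the fast
 * path above and ends up with
 *
 *	key->private.mm      = current->mm
 *	key->private.address = 0x7f00deadb000
 *	key->both.offset     = 0x4
 *
 * a shared, file-backed futex at the same page offset instead gets
 *
 *	key->shared.i_seq    = get_inode_sequence_number(inode)
 *	key->shared.pgoff    = page_pgoff(folio, page)
 *	key->both.offset     = 0x4 | FUT_OFF_INODE
 *
 * and a shared anonymous mapping keeps the mm-based key with
 * FUT_OFF_MMSHARED set in the offset.
 */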
776 
777 /**
778  * fault_in_user_writeable() - Fault in user address and verify RW access
779  * @uaddr:	pointer to faulting user space address
780  *
781  * Slow path to fixup the fault we just took in the atomic write
782  * access to @uaddr.
783  *
784  * We have no generic implementation of a non-destructive write to the
785  * user address. We know that we faulted in the atomic pagefault
786  * disabled section so we can as well avoid the #PF overhead by
787  * calling get_user_pages() right away.
788  */
789 int fault_in_user_writeable(u32 __user *uaddr)
790 {
791 	struct mm_struct *mm = current->mm;
792 	int ret;
793 
794 	mmap_read_lock(mm);
795 	ret = fixup_user_fault(mm, (unsigned long)uaddr,
796 			       FAULT_FLAG_WRITE, NULL);
797 	mmap_read_unlock(mm);
798 
799 	return ret < 0 ? ret : 0;
800 }
801 
802 /**
803  * futex_top_waiter() - Return the highest priority waiter on a futex
804  * @hb:		the hash bucket the futex_q's reside in
805  * @key:	the futex key (to distinguish it from other futex futex_q's)
806  *
807  * Must be called with the hb lock held.
808  */
809 struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
810 {
811 	struct futex_q *this;
812 
813 	plist_for_each_entry(this, &hb->chain, list) {
814 		if (futex_match(&this->key, key))
815 			return this;
816 	}
817 	return NULL;
818 }
819 
820 /**
821  * wait_for_owner_exiting - Block until the owner has exited
822  * @ret: owner's current futex lock status
823  * @exiting:	Pointer to the exiting task
824  *
825  * Caller must hold a refcount on @exiting.
826  */
827 void wait_for_owner_exiting(int ret, struct task_struct *exiting)
828 {
829 	if (ret != -EBUSY) {
830 		WARN_ON_ONCE(exiting);
831 		return;
832 	}
833 
834 	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
835 		return;
836 
837 	mutex_lock(&exiting->futex_exit_mutex);
838 	/*
839 	 * No point in doing state checking here. If the waiter got here
840 	 * while the task was in exec()->exec_futex_release() then it can
841 	 * have any FUTEX_STATE_* value when the waiter has acquired the
842 	 * mutex: OK if still running, EXITING or DEAD if it has reached exit()
843 	 * already. Highly unlikely and not a problem. Just one more round
844 	 * through the futex maze.
845 	 */
846 	mutex_unlock(&exiting->futex_exit_mutex);
847 
848 	put_task_struct(exiting);
849 }
850 
851 /**
852  * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
853  * @q:	The futex_q to unqueue
854  *
855  * The q->lock_ptr must not be NULL and must be held by the caller.
856  */
857 void __futex_unqueue(struct futex_q *q)
858 {
859 	struct futex_hash_bucket *hb;
860 
861 	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
862 		return;
863 	lockdep_assert_held(q->lock_ptr);
864 
865 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
866 	plist_del(&q->list, &hb->chain);
867 	futex_hb_waiters_dec(hb);
868 }
869 
870 /* The key must be already stored in q->key. */
871 void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb)
872 	__acquires(&hb->lock)
873 {
874 	/*
875 	 * Increment the counter before taking the lock so that
876 	 * a potential waker won't miss a task that is about to sleep and is
877 	 * waiting for the spinlock. This is safe as all futex_q_lock()
878 	 * users end up calling futex_queue(). Similarly, for housekeeping,
879 	 * decrement the counter at futex_q_unlock() when some error has
880 	 * occurred and we don't end up adding the task to the list.
881 	 */
882 	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
883 
884 	q->lock_ptr = &hb->lock;
885 
886 	spin_lock(&hb->lock);
887 }
888 
889 void futex_q_unlock(struct futex_hash_bucket *hb)
890 	__releases(&hb->lock)
891 {
892 	futex_hb_waiters_dec(hb);
893 	spin_unlock(&hb->lock);
894 }
895 
896 void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
897 		   struct task_struct *task)
898 {
899 	int prio;
900 
901 	/*
902 	 * The priority used to register this element is
903 	 * - either the real thread-priority for the real-time threads
904 	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
905 	 * - or MAX_RT_PRIO for non-RT threads.
906 	 * Thus, all RT-threads are woken first in priority order, and
907 	 * the others are woken last, in FIFO order.
908 	 */
909 	prio = min(current->normal_prio, MAX_RT_PRIO);
910 
911 	plist_node_init(&q->list, prio);
912 	plist_add(&q->list, &hb->chain);
913 	q->task = task;
914 }
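/*
 * Lifecycle sketch (condensed; the real wait paths in futex/waitwake.c add
 * the user space value check and error handling): a waiter pairs these
 * helpers roughly as
 *
 *	futex_q_lock(q, hb);		// hb->lock held, waiter accounted
 *	...				// re-read *uaddr; bail via futex_q_unlock() on mismatch
 *	futex_queue(q, hb, current);	// enqueue and drop hb->lock
 *	schedule();
 *	futex_unqueue(q);		// returns 0 if a waker already removed us
 */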
915 
916 /**
917  * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
918  * @q:	The futex_q to unqueue
919  *
920  * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
921  * be paired with exactly one earlier call to futex_queue().
922  *
923  * Return:
924  *  - 1 - if the futex_q was still queued (and we unqueued it);
925  *  - 0 - if the futex_q was already removed by the waking thread
926  */
927 int futex_unqueue(struct futex_q *q)
928 {
929 	spinlock_t *lock_ptr;
930 	int ret = 0;
931 
932 	/* RCU so lock_ptr is not going away during locking. */
933 	guard(rcu)();
934 	/* In the common case we don't take the spinlock, which is nice. */
935 retry:
936 	/*
937 	 * q->lock_ptr can change between this read and the following spin_lock.
938 	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
939 	 * optimizing lock_ptr out of the logic below.
940 	 */
941 	lock_ptr = READ_ONCE(q->lock_ptr);
942 	if (lock_ptr != NULL) {
943 		spin_lock(lock_ptr);
944 		/*
945 		 * q->lock_ptr can change between reading it and
946 		 * spin_lock(), causing us to take the wrong lock.  This
947 		 * corrects the race condition.
948 		 *
949 		 * Reasoning goes like this: if we have the wrong lock,
950 		 * q->lock_ptr must have changed (maybe several times)
951 		 * between reading it and the spin_lock().  It can
952 		 * change again after the spin_lock() but only if it was
953 		 * already changed before the spin_lock().  It cannot,
954 		 * however, change back to the original value.  Therefore
955 		 * we can detect whether we acquired the correct lock.
956 		 */
957 		if (unlikely(lock_ptr != q->lock_ptr)) {
958 			spin_unlock(lock_ptr);
959 			goto retry;
960 		}
961 		__futex_unqueue(q);
962 
963 		BUG_ON(q->pi_state);
964 
965 		spin_unlock(lock_ptr);
966 		ret = 1;
967 	}
968 
969 	return ret;
970 }
971 
972 void futex_q_lockptr_lock(struct futex_q *q)
973 {
974 	spinlock_t *lock_ptr;
975 
976 	/*
977 	 * See futex_unqueue() why lock_ptr can change.
978 	 */
979 	guard(rcu)();
980 retry:
981 	lock_ptr = READ_ONCE(q->lock_ptr);
982 	spin_lock(lock_ptr);
983 
984 	if (unlikely(lock_ptr != q->lock_ptr)) {
985 		spin_unlock(lock_ptr);
986 		goto retry;
987 	}
988 }
989 
990 /*
991  * PI futexes can not be requeued and must remove themselves from the hash
992  * bucket. The hash bucket lock (i.e. lock_ptr) is held.
993  */
994 void futex_unqueue_pi(struct futex_q *q)
995 {
996 	/*
997 	 * If the lock was not acquired (due to timeout or signal) then the
998 	 * rt_waiter is removed before futex_q is. If this is observed by
999 	 * an unlocker after dropping the rtmutex wait lock and before
1000 	 * acquiring the hash bucket lock, then the unlocker dequeues the
1001 	 * futex_q from the hash bucket list to guarantee consistent state
1002 	 * vs. userspace. Therefore the dequeue here must be conditional.
1003 	 */
1004 	if (!plist_node_empty(&q->list))
1005 		__futex_unqueue(q);
1006 
1007 	BUG_ON(!q->pi_state);
1008 	put_pi_state(q->pi_state);
1009 	q->pi_state = NULL;
1010 }
1011 
1012 /* Constants for the pending_op argument of handle_futex_death */
1013 #define HANDLE_DEATH_PENDING	true
1014 #define HANDLE_DEATH_LIST	false
1015 
1016 /*
1017  * Process a futex-list entry, check whether it's owned by the
1018  * dying task, and do notification if so:
1019  */
1020 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
1021 			      bool pi, bool pending_op)
1022 {
1023 	u32 uval, nval, mval;
1024 	pid_t owner;
1025 	int err;
1026 
1027 	/* Futex address must be 32bit aligned */
1028 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
1029 		return -1;
1030 
1031 retry:
1032 	if (get_user(uval, uaddr))
1033 		return -1;
1034 
1035 	/*
1036 	 * Special case for regular (non PI) futexes. The unlock path in
1037 	 * user space has two race scenarios:
1038 	 *
1039 	 * 1. The unlock path releases the user space futex value and
1040 	 *    before it can execute the futex() syscall to wake up
1041 	 *    waiters it is killed.
1042 	 *
1043 	 * 2. A woken up waiter is killed before it can acquire the
1044 	 *    futex in user space.
1045 	 *
1046 	 * In the second case, the wake up notification could be generated
1047 	 * by the unlock path in user space after setting the futex value
1048 	 * to zero or by the kernel after setting the OWNER_DIED bit below.
1049 	 *
1050 	 * In both cases the TID validation below prevents a wakeup of
1051 	 * potential waiters which can cause these waiters to block
1052 	 * forever.
1053 	 *
1054 	 * In both cases the following conditions are met:
1055 	 *
1056 	 *	1) task->robust_list->list_op_pending != NULL
1057 	 *	   @pending_op == true
1058 	 *	2) The owner part of user space futex value == 0
1059 	 *	3) Regular futex: @pi == false
1060 	 *
1061 	 * If these conditions are met, it is safe to attempt waking up a
1062 	 * potential waiter without touching the user space futex value and
1063 	 * trying to set the OWNER_DIED bit. If the futex value is zero,
1064 	 * the rest of the user space mutex state is consistent, so a woken
1065 	 * waiter will just take over the uncontended futex. Setting the
1066 	 * OWNER_DIED bit would create inconsistent state and malfunction
1067 	 * of the user space owner died handling. Otherwise, the OWNER_DIED
1068 	 * bit is already set, and the woken waiter is expected to deal with
1069 	 * this.
1070 	 */
1071 	owner = uval & FUTEX_TID_MASK;
1072 
1073 	if (pending_op && !pi && !owner) {
1074 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
1075 			   FUTEX_BITSET_MATCH_ANY);
1076 		return 0;
1077 	}
1078 
1079 	if (owner != task_pid_vnr(curr))
1080 		return 0;
1081 
1082 	/*
1083 	 * Ok, this dying thread is truly holding a futex
1084 	 * of interest. Set the OWNER_DIED bit atomically
1085 	 * via cmpxchg, and if the value had FUTEX_WAITERS
1086 	 * set, wake up a waiter (if any). (We have to do a
1087 	 * futex_wake() even if OWNER_DIED is already set -
1088 	 * to handle the rare but possible case of recursive
1089 	 * thread-death.) The rest of the cleanup is done in
1090 	 * userspace.
1091 	 */
1092 	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1093 
1094 	/*
1095 	 * We are not holding a lock here, but we want to have
1096 	 * the pagefault_disable/enable() protection because
1097 	 * we want to handle the fault gracefully. If the
1098 	 * access fails we try to fault in the futex with R/W
1099 	 * verification via get_user_pages. get_user() above
1100 	 * does not guarantee R/W access. If that fails we
1101 	 * give up and leave the futex locked.
1102 	 */
1103 	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
1104 		switch (err) {
1105 		case -EFAULT:
1106 			if (fault_in_user_writeable(uaddr))
1107 				return -1;
1108 			goto retry;
1109 
1110 		case -EAGAIN:
1111 			cond_resched();
1112 			goto retry;
1113 
1114 		default:
1115 			WARN_ON_ONCE(1);
1116 			return err;
1117 		}
1118 	}
1119 
1120 	if (nval != uval)
1121 		goto retry;
1122 
1123 	/*
1124 	 * Wake robust non-PI futexes here. The wakeup of
1125 	 * PI futexes happens in exit_pi_state():
1126 	 */
1127 	if (!pi && (uval & FUTEX_WAITERS)) {
1128 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
1129 			   FUTEX_BITSET_MATCH_ANY);
1130 	}
1131 
1132 	return 0;
1133 }
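/*
 * Example of the transformation above (TID is made up): if the dying task's
 * TID is 1234 and the futex word contains
 *
 *	uval = 1234 | FUTEX_WAITERS
 *
 * the cmpxchg rewrites it to
 *
 *	mval = FUTEX_WAITERS | FUTEX_OWNER_DIED
 *
 * i.e. the owner TID is cleared and, because FUTEX_WAITERS was set, one
 * waiter is woken to take over the now ownerless lock.
 */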
1134 
1135 /*
1136  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1137  */
1138 static inline int fetch_robust_entry(struct robust_list __user **entry,
1139 				     struct robust_list __user * __user *head,
1140 				     unsigned int *pi)
1141 {
1142 	unsigned long uentry;
1143 
1144 	if (get_user(uentry, (unsigned long __user *)head))
1145 		return -EFAULT;
1146 
1147 	*entry = (void __user *)(uentry & ~1UL);
1148 	*pi = uentry & 1;
1149 
1150 	return 0;
1151 }
1152 
1153 /*
1154  * Walk curr->robust_list (very carefully, it's a userspace list!)
1155  * and mark any locks found there dead, and notify any waiters.
1156  *
1157  * We silently return on any sign of a list-walking problem.
1158  */
1159 static void exit_robust_list(struct task_struct *curr)
1160 {
1161 	struct robust_list_head __user *head = curr->robust_list;
1162 	struct robust_list __user *entry, *next_entry, *pending;
1163 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1164 	unsigned int next_pi;
1165 	unsigned long futex_offset;
1166 	int rc;
1167 
1168 	/*
1169 	 * Fetch the list head (which was registered earlier, via
1170 	 * sys_set_robust_list()):
1171 	 */
1172 	if (fetch_robust_entry(&entry, &head->list.next, &pi))
1173 		return;
1174 	/*
1175 	 * Fetch the relative futex offset:
1176 	 */
1177 	if (get_user(futex_offset, &head->futex_offset))
1178 		return;
1179 	/*
1180 	 * Fetch any possibly pending lock-add first, and handle it
1181 	 * if it exists:
1182 	 */
1183 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1184 		return;
1185 
1186 	next_entry = NULL;	/* avoid warning with gcc */
1187 	while (entry != &head->list) {
1188 		/*
1189 		 * Fetch the next entry in the list before calling
1190 		 * handle_futex_death:
1191 		 */
1192 		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1193 		/*
1194 		 * A pending lock might already be on the list, so
1195 		 * don't process it twice:
1196 		 */
1197 		if (entry != pending) {
1198 			if (handle_futex_death((void __user *)entry + futex_offset,
1199 						curr, pi, HANDLE_DEATH_LIST))
1200 				return;
1201 		}
1202 		if (rc)
1203 			return;
1204 		entry = next_entry;
1205 		pi = next_pi;
1206 		/*
1207 		 * Avoid excessively long or circular lists:
1208 		 */
1209 		if (!--limit)
1210 			break;
1211 
1212 		cond_resched();
1213 	}
1214 
1215 	if (pending) {
1216 		handle_futex_death((void __user *)pending + futex_offset,
1217 				   curr, pip, HANDLE_DEATH_PENDING);
1218 	}
1219 }
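/*
 * For reference, a minimal illustrative userspace registration feeding this
 * walker (real implementations live in libc) looks roughly like:
 *
 *	static struct robust_list_head head = {
 *		.list.next	 = &head.list,
 *		.futex_offset	 = FUTEX_WORD_OFFSET,
 *		.list_op_pending = NULL,
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * where .list.next == &head.list denotes an empty circular list and
 * FUTEX_WORD_OFFSET is a made-up name for the signed distance from each list
 * entry to its futex word. Userspace links held locks into head.list; bit 0
 * of each pointer marks a PI futex, which fetch_robust_entry() masks off.
 */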
1220 
1221 #ifdef CONFIG_COMPAT
1222 static void __user *futex_uaddr(struct robust_list __user *entry,
1223 				compat_long_t futex_offset)
1224 {
1225 	compat_uptr_t base = ptr_to_compat(entry);
1226 	void __user *uaddr = compat_ptr(base + futex_offset);
1227 
1228 	return uaddr;
1229 }
1230 
1231 /*
1232  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1233  */
1234 static inline int
1235 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
1236 		   compat_uptr_t __user *head, unsigned int *pi)
1237 {
1238 	if (get_user(*uentry, head))
1239 		return -EFAULT;
1240 
1241 	*entry = compat_ptr((*uentry) & ~1);
1242 	*pi = (unsigned int)(*uentry) & 1;
1243 
1244 	return 0;
1245 }
1246 
1247 /*
1248  * Walk curr->robust_list (very carefully, it's a userspace list!)
1249  * and mark any locks found there dead, and notify any waiters.
1250  *
1251  * We silently return on any sign of a list-walking problem.
1252  */
1253 static void compat_exit_robust_list(struct task_struct *curr)
1254 {
1255 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
1256 	struct robust_list __user *entry, *next_entry, *pending;
1257 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1258 	unsigned int next_pi;
1259 	compat_uptr_t uentry, next_uentry, upending;
1260 	compat_long_t futex_offset;
1261 	int rc;
1262 
1263 	/*
1264 	 * Fetch the list head (which was registered earlier, via
1265 	 * sys_set_robust_list()):
1266 	 */
1267 	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
1268 		return;
1269 	/*
1270 	 * Fetch the relative futex offset:
1271 	 */
1272 	if (get_user(futex_offset, &head->futex_offset))
1273 		return;
1274 	/*
1275 	 * Fetch any possibly pending lock-add first, and handle it
1276 	 * if it exists:
1277 	 */
1278 	if (compat_fetch_robust_entry(&upending, &pending,
1279 			       &head->list_op_pending, &pip))
1280 		return;
1281 
1282 	next_entry = NULL;	/* avoid warning with gcc */
1283 	while (entry != (struct robust_list __user *) &head->list) {
1284 		/*
1285 		 * Fetch the next entry in the list before calling
1286 		 * handle_futex_death:
1287 		 */
1288 		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
1289 			(compat_uptr_t __user *)&entry->next, &next_pi);
1290 		/*
1291 		 * A pending lock might already be on the list, so
1292 		 * don't process it twice:
1293 		 */
1294 		if (entry != pending) {
1295 			void __user *uaddr = futex_uaddr(entry, futex_offset);
1296 
1297 			if (handle_futex_death(uaddr, curr, pi,
1298 					       HANDLE_DEATH_LIST))
1299 				return;
1300 		}
1301 		if (rc)
1302 			return;
1303 		uentry = next_uentry;
1304 		entry = next_entry;
1305 		pi = next_pi;
1306 		/*
1307 		 * Avoid excessively long or circular lists:
1308 		 */
1309 		if (!--limit)
1310 			break;
1311 
1312 		cond_resched();
1313 	}
1314 	if (pending) {
1315 		void __user *uaddr = futex_uaddr(pending, futex_offset);
1316 
1317 		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
1318 	}
1319 }
1320 #endif
1321 
1322 #ifdef CONFIG_FUTEX_PI
1323 
1324 /*
1325  * This task is holding PI mutexes at exit time => bad.
1326  * Kernel cleans up PI-state, but userspace is likely hosed.
1327  * (Robust-futex cleanup is separate and might save the day for userspace.)
1328  */
1329 static void exit_pi_state_list(struct task_struct *curr)
1330 {
1331 	struct list_head *next, *head = &curr->pi_state_list;
1332 	struct futex_pi_state *pi_state;
1333 	union futex_key key = FUTEX_KEY_INIT;
1334 
1335 	/*
1336 	 * The mutex mm_struct::futex_hash_lock might be acquired.
1337 	 */
1338 	might_sleep();
1339 	/*
1340 	 * Ensure the hash remains stable (no resize) during the while loop
1341 	 * below. The hb pointer is acquired under the pi_lock so we can't block
1342 	 * on the mutex.
1343 	 */
1344 	WARN_ON(curr != current);
1345 	guard(private_hash)();
1346 	/*
1347 	 * We are a ZOMBIE and nobody can enqueue itself on
1348 	 * pi_state_list anymore, but we have to be careful
1349 	 * versus waiters unqueueing themselves:
1350 	 */
1351 	raw_spin_lock_irq(&curr->pi_lock);
1352 	while (!list_empty(head)) {
1353 		next = head->next;
1354 		pi_state = list_entry(next, struct futex_pi_state, list);
1355 		key = pi_state->key;
1356 		if (1) {
1357 			CLASS(hb, hb)(&key);
1358 
1359 			/*
1360 			 * We can race against put_pi_state() removing itself from the
1361 			 * list (a waiter going away). put_pi_state() will first
1362 			 * decrement the reference count and then modify the list, so
1363 			 * it's possible to see the list entry but fail this reference
1364 			 * acquire.
1365 			 *
1366 			 * In that case; drop the locks to let put_pi_state() make
1367 			 * progress and retry the loop.
1368 			 */
1369 			if (!refcount_inc_not_zero(&pi_state->refcount)) {
1370 				raw_spin_unlock_irq(&curr->pi_lock);
1371 				cpu_relax();
1372 				raw_spin_lock_irq(&curr->pi_lock);
1373 				continue;
1374 			}
1375 			raw_spin_unlock_irq(&curr->pi_lock);
1376 
1377 			spin_lock(&hb->lock);
1378 			raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1379 			raw_spin_lock(&curr->pi_lock);
1380 			/*
1381 			 * We dropped the pi-lock, so re-check whether this
1382 			 * task still owns the PI-state:
1383 			 */
1384 			if (head->next != next) {
1385 				/* retain curr->pi_lock for the loop invariant */
1386 				raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1387 				spin_unlock(&hb->lock);
1388 				put_pi_state(pi_state);
1389 				continue;
1390 			}
1391 
1392 			WARN_ON(pi_state->owner != curr);
1393 			WARN_ON(list_empty(&pi_state->list));
1394 			list_del_init(&pi_state->list);
1395 			pi_state->owner = NULL;
1396 
1397 			raw_spin_unlock(&curr->pi_lock);
1398 			raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1399 			spin_unlock(&hb->lock);
1400 		}
1401 
1402 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
1403 		put_pi_state(pi_state);
1404 
1405 		raw_spin_lock_irq(&curr->pi_lock);
1406 	}
1407 	raw_spin_unlock_irq(&curr->pi_lock);
1408 }
1409 #else
1410 static inline void exit_pi_state_list(struct task_struct *curr) { }
1411 #endif
1412 
1413 static void futex_cleanup(struct task_struct *tsk)
1414 {
1415 	if (unlikely(tsk->robust_list)) {
1416 		exit_robust_list(tsk);
1417 		tsk->robust_list = NULL;
1418 	}
1419 
1420 #ifdef CONFIG_COMPAT
1421 	if (unlikely(tsk->compat_robust_list)) {
1422 		compat_exit_robust_list(tsk);
1423 		tsk->compat_robust_list = NULL;
1424 	}
1425 #endif
1426 
1427 	if (unlikely(!list_empty(&tsk->pi_state_list)))
1428 		exit_pi_state_list(tsk);
1429 }
1430 
1431 /**
1432  * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1433  * @tsk:	task to set the state on
1434  *
1435  * Set the futex exit state of the task lockless. The futex waiter code
1436  * observes that state when a task is exiting and loops until the task has
1437  * actually finished the futex cleanup. The worst case for this is that the
1438  * waiter runs through the wait loop until the state becomes visible.
1439  *
1440  * This is called from the recursive fault handling path in make_task_dead().
1441  *
1442  * This is best effort. Either the futex exit code has run already or
1443  * not. If the OWNER_DIED bit has been set on the futex then the waiter can
1444  * take it over. If not, the problem is pushed back to user space. If the
1445  * futex exit code did not run yet, then an already queued waiter might
1446  * block forever, but there is nothing which can be done about that.
1447  */
1448 void futex_exit_recursive(struct task_struct *tsk)
1449 {
1450 	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
1451 	if (tsk->futex_state == FUTEX_STATE_EXITING)
1452 		mutex_unlock(&tsk->futex_exit_mutex);
1453 	tsk->futex_state = FUTEX_STATE_DEAD;
1454 }
1455 
1456 static void futex_cleanup_begin(struct task_struct *tsk)
1457 {
1458 	/*
1459 	 * Prevent various race issues against a concurrent incoming waiter
1460 	 * including live locks by forcing the waiter to block on
1461 	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
1462 	 * attach_to_pi_owner().
1463 	 */
1464 	mutex_lock(&tsk->futex_exit_mutex);
1465 
1466 	/*
1467 	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
1468 	 *
1469 	 * This ensures that all subsequent checks of tsk->futex_state in
1470 	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
1471 	 * tsk->pi_lock held.
1472 	 *
1473 	 * It guarantees also that a pi_state which was queued right before
1474 	 * the state change under tsk->pi_lock by a concurrent waiter must
1475 	 * be observed in exit_pi_state_list().
1476 	 */
1477 	raw_spin_lock_irq(&tsk->pi_lock);
1478 	tsk->futex_state = FUTEX_STATE_EXITING;
1479 	raw_spin_unlock_irq(&tsk->pi_lock);
1480 }
1481 
1482 static void futex_cleanup_end(struct task_struct *tsk, int state)
1483 {
1484 	/*
1485 	 * Lockless store. The only side effect is that an observer might
1486 	 * take another loop until it becomes visible.
1487 	 */
1488 	tsk->futex_state = state;
1489 	/*
1490 	 * Drop the exit protection. This unblocks waiters which observed
1491 	 * FUTEX_STATE_EXITING to reevaluate the state.
1492 	 */
1493 	mutex_unlock(&tsk->futex_exit_mutex);
1494 }
1495 
1496 void futex_exec_release(struct task_struct *tsk)
1497 {
1498 	/*
1499 	 * The state handling is done for consistency, but in the case of
1500 	 * exec() there is no way to prevent further damage as the PID stays
1501 	 * the same. But for the unlikely and arguably buggy case that a
1502 	 * futex is held on exec(), this provides at least as much state
1503 	 * consistency protection as is possible.
1504 	 */
1505 	futex_cleanup_begin(tsk);
1506 	futex_cleanup(tsk);
1507 	/*
1508 	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
1509 	 * exec a new binary.
1510 	 */
1511 	futex_cleanup_end(tsk, FUTEX_STATE_OK);
1512 }
1513 
1514 void futex_exit_release(struct task_struct *tsk)
1515 {
1516 	futex_cleanup_begin(tsk);
1517 	futex_cleanup(tsk);
1518 	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
1519 }
1520 
1521 static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
1522 				   struct futex_private_hash *fph)
1523 {
1524 #ifdef CONFIG_FUTEX_PRIVATE_HASH
1525 	fhb->priv = fph;
1526 #endif
1527 	atomic_set(&fhb->waiters, 0);
1528 	plist_head_init(&fhb->chain);
1529 	spin_lock_init(&fhb->lock);
1530 }
1531 
1532 #define FH_CUSTOM	0x01
1533 #define FH_IMMUTABLE	0x02
1534 
1535 #ifdef CONFIG_FUTEX_PRIVATE_HASH
1536 
1537 /*
1538  * futex-ref
1539  *
1540  * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
1541  * code because it just doesn't fit right.
1542  *
1543  * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
1544  * re-initializes the state automatically, such that the fph swizzle is also a
1545  * transition back to per-cpu.
1546  */
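/*
 * Rough state sequence for one hash replacement (assuming no further
 * replacement is requested in between):
 *
 *	FR_PERCPU --futex_ref_drop()--> RCU grace period(s) --> FR_ATOMIC
 *	FR_ATOMIC --last futex_ref_put() hits 0--> futex_ref_is_dead() == true
 *	__futex_pivot_hash() installs the new fph and resets it to FR_PERCPU.
 */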
1547 
1548 static void futex_ref_rcu(struct rcu_head *head);
1549 
1550 static void __futex_ref_atomic_begin(struct futex_private_hash *fph)
1551 {
1552 	struct mm_struct *mm = fph->mm;
1553 
1554 	/*
1555 	 * The counter we're about to switch to must have fully switched;
1556 	 * otherwise it would be impossible for it to have reported success
1557 	 * from futex_ref_is_dead().
1558 	 */
1559 	WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0);
1560 
1561 	/*
1562 	 * Set the atomic to the bias value such that futex_ref_{get,put}()
1563 	 * will never observe 0. Will be fixed up in __futex_ref_atomic_end()
1564 	 * when folding in the percpu count.
1565 	 */
1566 	atomic_long_set(&mm->futex_atomic, LONG_MAX);
1567 	smp_store_release(&fph->state, FR_ATOMIC);
1568 
1569 	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
1570 }
1571 
1572 static void __futex_ref_atomic_end(struct futex_private_hash *fph)
1573 {
1574 	struct mm_struct *mm = fph->mm;
1575 	unsigned int count = 0;
1576 	long ret;
1577 	int cpu;
1578 
1579 	/*
1580 	 * Per __futex_ref_atomic_begin() the state of the fph must be ATOMIC
1581 	 * and per this RCU callback, everybody must now observe this state and
1582 	 * use the atomic variable.
1583 	 */
1584 	WARN_ON_ONCE(fph->state != FR_ATOMIC);
1585 
1586 	/*
1587 	 * Therefore the per-cpu counter is now stable, sum and reset.
1588 	 */
1589 	for_each_possible_cpu(cpu) {
1590 		unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
1591 		count += *ptr;
1592 		*ptr = 0;
1593 	}
1594 
1595 	/*
1596 	 * Re-init for the next cycle.
1597 	 */
1598 	this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
1599 
1600 	/*
1601 	 * Add actual count, subtract bias and initial refcount.
1602 	 *
1603 	 * The moment this atomic operation happens, futex_ref_is_dead() can
1604 	 * become true.
1605 	 */
1606 	ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
1607 	if (!ret)
1608 		wake_up_var(mm);
1609 
1610 	WARN_ON_ONCE(ret < 0);
1611 	mmput_async(mm);
1612 }
1613 
1614 static void futex_ref_rcu(struct rcu_head *head)
1615 {
1616 	struct mm_struct *mm = container_of(head, struct mm_struct, futex_rcu);
1617 	struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash);
1618 
1619 	if (fph->state == FR_PERCPU) {
1620 		/*
1621 		 * Per this extra grace-period, everybody must now observe
1622 		 * fph as the current fph and no previously observed fph's
1623 		 * are in-flight.
1624 		 *
1625 		 * Notably, nobody will now rely on the atomic
1626 		 * futex_ref_is_dead() state anymore so we can begin the
1627 		 * migration of the per-cpu counter into the atomic.
1628 		 */
1629 		__futex_ref_atomic_begin(fph);
1630 		return;
1631 	}
1632 
1633 	__futex_ref_atomic_end(fph);
1634 }
1635 
1636 /*
1637  * Drop the initial refcount and transition to atomics.
1638  */
1639 static void futex_ref_drop(struct futex_private_hash *fph)
1640 {
1641 	struct mm_struct *mm = fph->mm;
1642 
1643 	/*
1644 	 * Can only transition the current fph;
1645 	 */
1646 	WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);
1647 	/*
1648 	 * We enqueue at least one RCU callback. Ensure the mm stays alive if
1649 	 * the task exits before the transition is completed.
1650 	 */
1651 	mmget(mm);
1652 
1653 	/*
1654 	 * In order to avoid the following scenario:
1655 	 *
1656 	 * futex_hash()			__futex_pivot_hash()
1657 	 *   guard(rcu);		  guard(mm->futex_hash_lock);
1658 	 *   fph = mm->futex_phash;
1659 	 *				  rcu_assign_pointer(&mm->futex_phash, new);
1660 	 *				futex_hash_allocate()
1661 	 *				  futex_ref_drop()
1662 	 *				    fph->state = FR_ATOMIC;
1663 	 *				    atomic_set(, BIAS);
1664 	 *
1665 	 *   futex_private_hash_get(fph); // OOPS
1666 	 *
1667 	 * Where an old fph (which is FR_ATOMIC and should fail on
1668 	 * inc_not_zero) will succeed because a new transition has started and
1669 	 * the atomic is biased away from 0.
1670 	 *
1671 	 * There must be at least one full grace-period between publishing a
1672 	 * new fph and trying to replace it.
1673 	 */
1674 	if (poll_state_synchronize_rcu(mm->futex_batches)) {
1675 		/*
1676 		 * There was a grace-period, we can begin now.
1677 		 */
1678 		__futex_ref_atomic_begin(fph);
1679 		return;
1680 	}
1681 
1682 	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
1683 }
1684 
1685 static bool futex_ref_get(struct futex_private_hash *fph)
1686 {
1687 	struct mm_struct *mm = fph->mm;
1688 
1689 	guard(rcu)();
1690 
1691 	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
1692 		this_cpu_inc(*mm->futex_ref);
1693 		return true;
1694 	}
1695 
1696 	return atomic_long_inc_not_zero(&mm->futex_atomic);
1697 }
1698 
1699 static bool futex_ref_put(struct futex_private_hash *fph)
1700 {
1701 	struct mm_struct *mm = fph->mm;
1702 
1703 	guard(rcu)();
1704 
1705 	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
1706 		this_cpu_dec(*mm->futex_ref);
1707 		return false;
1708 	}
1709 
1710 	return atomic_long_dec_and_test(&mm->futex_atomic);
1711 }
1712 
1713 static bool futex_ref_is_dead(struct futex_private_hash *fph)
1714 {
1715 	struct mm_struct *mm = fph->mm;
1716 
1717 	guard(rcu)();
1718 
1719 	if (smp_load_acquire(&fph->state) == FR_PERCPU)
1720 		return false;
1721 
1722 	return atomic_long_read(&mm->futex_atomic) == 0;
1723 }
1724 
1725 int futex_mm_init(struct mm_struct *mm)
1726 {
1727 	mutex_init(&mm->futex_hash_lock);
1728 	RCU_INIT_POINTER(mm->futex_phash, NULL);
1729 	mm->futex_phash_new = NULL;
1730 	/* futex-ref */
1731 	atomic_long_set(&mm->futex_atomic, 0);
1732 	mm->futex_batches = get_state_synchronize_rcu();
1733 	mm->futex_ref = alloc_percpu(unsigned int);
1734 	if (!mm->futex_ref)
1735 		return -ENOMEM;
1736 	this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
1737 	return 0;
1738 }
1739 
1740 void futex_hash_free(struct mm_struct *mm)
1741 {
1742 	struct futex_private_hash *fph;
1743 
1744 	free_percpu(mm->futex_ref);
1745 	kvfree(mm->futex_phash_new);
1746 	fph = rcu_dereference_raw(mm->futex_phash);
1747 	if (fph)
1748 		kvfree(fph);
1749 }
1750 
1751 static bool futex_pivot_pending(struct mm_struct *mm)
1752 {
1753 	struct futex_private_hash *fph;
1754 
1755 	guard(rcu)();
1756 
1757 	if (!mm->futex_phash_new)
1758 		return true;
1759 
1760 	fph = rcu_dereference(mm->futex_phash);
1761 	return futex_ref_is_dead(fph);
1762 }
1763 
1764 static bool futex_hash_less(struct futex_private_hash *a,
1765 			    struct futex_private_hash *b)
1766 {
1767 	/* user provided always wins */
1768 	if (!a->custom && b->custom)
1769 		return true;
1770 	if (a->custom && !b->custom)
1771 		return false;
1772 
1773 	/* zero-sized hash wins */
1774 	if (!b->hash_mask)
1775 		return true;
1776 	if (!a->hash_mask)
1777 		return false;
1778 
1779 	/* keep the biggest */
1780 	if (a->hash_mask < b->hash_mask)
1781 		return true;
1782 	if (a->hash_mask > b->hash_mask)
1783 		return false;
1784 
1785 	return false; /* equal */
1786 }
1787 
1788 static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
1789 {
1790 	struct mm_struct *mm = current->mm;
1791 	struct futex_private_hash *fph;
1792 	bool custom = flags & FH_CUSTOM;
1793 	int i;
1794 
1795 	if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots)))
1796 		return -EINVAL;
1797 
1798 	/*
1799 	 * Once we've disabled the global hash there is no way back.
1800 	 */
1801 	scoped_guard(rcu) {
1802 		fph = rcu_dereference(mm->futex_phash);
1803 		if (fph && (!fph->hash_mask || fph->immutable)) {
1804 			if (custom)
1805 				return -EBUSY;
1806 			return 0;
1807 		}
1808 	}
1809 
1810 	fph = kvzalloc(struct_size(fph, queues, hash_slots),
1811 		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1812 	if (!fph)
1813 		return -ENOMEM;
1814 
1815 	fph->hash_mask = hash_slots ? hash_slots - 1 : 0;
1816 	fph->custom = custom;
1817 	fph->immutable = !!(flags & FH_IMMUTABLE);
1818 	fph->mm = mm;
1819 
1820 	for (i = 0; i < hash_slots; i++)
1821 		futex_hash_bucket_init(&fph->queues[i], fph);
1822 
1823 	if (custom) {
1824 		/*
1825 		 * Only let prctl() wait / retry; don't unduly delay clone().
1826 		 */
1827 again:
1828 		wait_var_event(mm, futex_pivot_pending(mm));
1829 	}
1830 
1831 	scoped_guard(mutex, &mm->futex_hash_lock) {
1832 		struct futex_private_hash *free __free(kvfree) = NULL;
1833 		struct futex_private_hash *cur, *new;
1834 
1835 		cur = rcu_dereference_protected(mm->futex_phash,
1836 						lockdep_is_held(&mm->futex_hash_lock));
1837 		new = mm->futex_phash_new;
1838 		mm->futex_phash_new = NULL;
1839 
1840 		if (fph) {
1841 			if (cur && (!cur->hash_mask || cur->immutable)) {
1842 				/*
1843 				 * If two threads simultaneously request the global
1844 				 * hash then the first one performs the switch,
1845 				 * the second one returns here.
1846 				 */
1847 				free = fph;
1848 				mm->futex_phash_new = new;
1849 				return -EBUSY;
1850 			}
1851 			if (cur && !new) {
1852 				/*
1853 				 * If we have an existing hash, but do not yet have
1854 				 * allocated a replacement hash, drop the initial
1855 				 * reference on the existing hash.
1856 				 */
1857 				futex_ref_drop(cur);
1858 			}
1859 
1860 			if (new) {
1861 				/*
1862 				 * Two updates raced; throw out the lesser one.
1863 				 */
1864 				if (futex_hash_less(new, fph)) {
1865 					free = new;
1866 					new = fph;
1867 				} else {
1868 					free = fph;
1869 				}
1870 			} else {
1871 				new = fph;
1872 			}
1873 			fph = NULL;
1874 		}
1875 
1876 		if (new) {
1877 			/*
1878 			 * Will set mm->futex_phash_new on failure;
1879 			 * futex_private_hash_get() will try again.
1880 			 */
1881 			if (!__futex_pivot_hash(mm, new) && custom)
1882 				goto again;
1883 		}
1884 	}
1885 	return 0;
1886 }
1887 
1888 int futex_hash_allocate_default(void)
1889 {
1890 	unsigned int threads, buckets, current_buckets = 0;
1891 	struct futex_private_hash *fph;
1892 
1893 	if (!current->mm)
1894 		return 0;
1895 
1896 	scoped_guard(rcu) {
1897 		threads = min_t(unsigned int,
1898 				get_nr_threads(current),
1899 				num_online_cpus());
1900 
1901 		fph = rcu_dereference(current->mm->futex_phash);
1902 		if (fph) {
1903 			if (fph->custom)
1904 				return 0;
1905 
1906 			current_buckets = fph->hash_mask + 1;
1907 		}
1908 	}
1909 
1910 	/*
1911 	 * The default allocation will remain within
1912 	 *   16 <= threads * 4 <= global hash size
1913 	 */
1914 	buckets = roundup_pow_of_two(4 * threads);
1915 	buckets = clamp(buckets, 16, futex_hashmask + 1);
1916 
1917 	if (current_buckets >= buckets)
1918 		return 0;
1919 
1920 	return futex_hash_allocate(buckets, 0);
1921 }
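/*
 * Worked example (numbers are illustrative): a process with 6 threads on a
 * machine with 8 online CPUs requests roundup_pow_of_two(4 * 6) == 32
 * buckets, clamped to at least 16 and at most the global hash size. A later
 * call with more threads can only grow the private hash; it is never shrunk
 * because of the current_buckets check above.
 */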
1922 
1923 static int futex_hash_get_slots(void)
1924 {
1925 	struct futex_private_hash *fph;
1926 
1927 	guard(rcu)();
1928 	fph = rcu_dereference(current->mm->futex_phash);
1929 	if (fph && fph->hash_mask)
1930 		return fph->hash_mask + 1;
1931 	return 0;
1932 }
1933 
1934 static int futex_hash_get_immutable(void)
1935 {
1936 	struct futex_private_hash *fph;
1937 
1938 	guard(rcu)();
1939 	fph = rcu_dereference(current->mm->futex_phash);
1940 	if (fph && fph->immutable)
1941 		return 1;
1942 	if (fph && !fph->hash_mask)
1943 		return 1;
1944 	return 0;
1945 }
1946 
1947 #else
1948 
1949 static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
1950 {
1951 	return -EINVAL;
1952 }
1953 
1954 static int futex_hash_get_slots(void)
1955 {
1956 	return 0;
1957 }
1958 
1959 static int futex_hash_get_immutable(void)
1960 {
1961 	return 0;
1962 }
1963 #endif
1964 
1965 int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
1966 {
1967 	unsigned int flags = FH_CUSTOM;
1968 	int ret;
1969 
1970 	switch (arg2) {
1971 	case PR_FUTEX_HASH_SET_SLOTS:
1972 		if (arg4 & ~FH_FLAG_IMMUTABLE)
1973 			return -EINVAL;
1974 		if (arg4 & FH_FLAG_IMMUTABLE)
1975 			flags |= FH_IMMUTABLE;
1976 		ret = futex_hash_allocate(arg3, flags);
1977 		break;
1978 
1979 	case PR_FUTEX_HASH_GET_SLOTS:
1980 		ret = futex_hash_get_slots();
1981 		break;
1982 
1983 	case PR_FUTEX_HASH_GET_IMMUTABLE:
1984 		ret = futex_hash_get_immutable();
1985 		break;
1986 
1987 	default:
1988 		ret = -EINVAL;
1989 		break;
1990 	}
1991 	return ret;
1992 }
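/*
 * Userspace view (illustrative; assumes the PR_FUTEX_HASH option names from
 * <linux/prctl.h>): resizing the calling process' private hash to 128
 * buckets and making it immutable is
 *
 *	prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 128, FH_FLAG_IMMUTABLE);
 *
 * and the current number of slots can be queried with
 *
 *	prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0);
 */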
1993 
1994 static int __init futex_init(void)
1995 {
1996 	unsigned long hashsize, i;
1997 	unsigned int order, n;
1998 	unsigned long size;
1999 
2000 #ifdef CONFIG_BASE_SMALL
2001 	hashsize = 16;
2002 #else
2003 	hashsize = 256 * num_possible_cpus();
2004 	hashsize /= num_possible_nodes();
2005 	hashsize = max(4, hashsize);
2006 	hashsize = roundup_pow_of_two(hashsize);
2007 #endif
2008 	futex_hashshift = ilog2(hashsize);
2009 	size = sizeof(struct futex_hash_bucket) * hashsize;
2010 	order = get_order(size);
2011 
2012 	for_each_node(n) {
2013 		struct futex_hash_bucket *table;
2014 
2015 		if (order > MAX_PAGE_ORDER)
2016 			table = vmalloc_huge_node(size, GFP_KERNEL, n);
2017 		else
2018 			table = alloc_pages_exact_nid(n, size, GFP_KERNEL);
2019 
2020 		BUG_ON(!table);
2021 
2022 		for (i = 0; i < hashsize; i++)
2023 			futex_hash_bucket_init(&table[i], NULL);
2024 
2025 		futex_queues[n] = table;
2026 	}
2027 
2028 	futex_hashmask = hashsize - 1;
2029 	pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n",
2030 		hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024,
2031 		order > MAX_PAGE_ORDER ? "vmalloc" : "linear");
2032 	return 0;
2033 }
2034 core_initcall(futex_init);
2035