/* kernel/futex/core.c (revision aff2a7e23f23738ca3cd62e4ce5be2d62a3d52ad) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Fast Userspace Mutexes (which I call "Futexes!").
4  *  (C) Rusty Russell, IBM 2002
5  *
6  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8  *
9  *  Removed page pinning, fix privately mapped COW pages and other cleanups
10  *  (C) Copyright 2003, 2004 Jamie Lokier
11  *
12  *  Robust futex support started by Ingo Molnar
13  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15  *
16  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
17  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19  *
20  *  PRIVATE futexes by Eric Dumazet
21  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22  *
23  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24  *  Copyright (C) IBM Corporation, 2009
25  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
26  *
27  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28  *  enough at me, Linus for the original (flawed) idea, Matthew
29  *  Kirkwood for proof-of-concept implementation.
30  *
31  *  "The futexes are also cursed."
32  *  "But they come in a choice of three flavours!"
33  */
34 #include <linux/compat.h>
35 #include <linux/jhash.h>
36 #include <linux/pagemap.h>
37 #include <linux/debugfs.h>
38 #include <linux/plist.h>
39 #include <linux/gfp.h>
40 #include <linux/vmalloc.h>
41 #include <linux/memblock.h>
42 #include <linux/fault-inject.h>
43 #include <linux/slab.h>
44 #include <linux/prctl.h>
45 #include <linux/rcuref.h>
46 #include <linux/mempolicy.h>
47 #include <linux/mmap_lock.h>
48 
49 #include "futex.h"
50 #include "../locking/rtmutex_common.h"
51 
52 /*
53  * The base of the bucket array and its size are always used together
54  * (after initialization only in futex_hash()), so ensure that they
55  * reside in the same cacheline.
56  */
57 static struct {
58 	unsigned long            hashmask;
59 	unsigned int		 hashshift;
60 	struct futex_hash_bucket *queues[MAX_NUMNODES];
61 } __futex_data __read_mostly __aligned(2*sizeof(long));
62 
63 #define futex_hashmask	(__futex_data.hashmask)
64 #define futex_hashshift	(__futex_data.hashshift)
65 #define futex_queues	(__futex_data.queues)
66 
67 struct futex_private_hash {
68 	rcuref_t	users;
69 	unsigned int	hash_mask;
70 	struct rcu_head	rcu;
71 	void		*mm;
72 	bool		custom;
73 	bool		immutable;
74 	struct futex_hash_bucket queues[];
75 };
76 
77 /*
78  * Fault injections for futexes.
79  */
80 #ifdef CONFIG_FAIL_FUTEX
81 
82 static struct {
83 	struct fault_attr attr;
84 
85 	bool ignore_private;
86 } fail_futex = {
87 	.attr = FAULT_ATTR_INITIALIZER,
88 	.ignore_private = false,
89 };
90 
91 static int __init setup_fail_futex(char *str)
92 {
93 	return setup_fault_attr(&fail_futex.attr, str);
94 }
95 __setup("fail_futex=", setup_fail_futex);
96 
97 bool should_fail_futex(bool fshared)
98 {
99 	if (fail_futex.ignore_private && !fshared)
100 		return false;
101 
102 	return should_fail(&fail_futex.attr, 1);
103 }
104 
105 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
106 
107 static int __init fail_futex_debugfs(void)
108 {
109 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
110 	struct dentry *dir;
111 
112 	dir = fault_create_debugfs_attr("fail_futex", NULL,
113 					&fail_futex.attr);
114 	if (IS_ERR(dir))
115 		return PTR_ERR(dir);
116 
117 	debugfs_create_bool("ignore-private", mode, dir,
118 			    &fail_futex.ignore_private);
119 	return 0;
120 }
121 
122 late_initcall(fail_futex_debugfs);
123 
124 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
125 
126 #endif /* CONFIG_FAIL_FUTEX */
127 
128 static struct futex_hash_bucket *
129 __futex_hash(union futex_key *key, struct futex_private_hash *fph);
130 
131 #ifdef CONFIG_FUTEX_PRIVATE_HASH
132 static inline bool futex_key_is_private(union futex_key *key)
133 {
134 	/*
135 	 * Relies on get_futex_key() to set either bit for shared
136 	 * futexes -- see comment with union futex_key.
137 	 */
138 	return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED));
139 }
140 
141 bool futex_private_hash_get(struct futex_private_hash *fph)
142 {
143 	if (fph->immutable)
144 		return true;
145 	return rcuref_get(&fph->users);
146 }
147 
148 void futex_private_hash_put(struct futex_private_hash *fph)
149 {
150 	/* Ignore return value, last put is verified via rcuref_is_dead() */
151 	if (fph->immutable)
152 		return;
153 	if (rcuref_put(&fph->users))
154 		wake_up_var(fph->mm);
155 }
156 
157 /**
158  * futex_hash_get - Get an additional reference for the local hash.
159  * @hb:                    ptr to the private local hash.
160  *
161  * Obtain an additional reference for the already obtained hash bucket. The
162  * caller must already own a reference.
163  */
164 void futex_hash_get(struct futex_hash_bucket *hb)
165 {
166 	struct futex_private_hash *fph = hb->priv;
167 
168 	if (!fph)
169 		return;
170 	WARN_ON_ONCE(!futex_private_hash_get(fph));
171 }
172 
173 void futex_hash_put(struct futex_hash_bucket *hb)
174 {
175 	struct futex_private_hash *fph = hb->priv;
176 
177 	if (!fph)
178 		return;
179 	futex_private_hash_put(fph);
180 }
181 
182 static struct futex_hash_bucket *
183 __futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
184 {
185 	u32 hash;
186 
187 	if (!futex_key_is_private(key))
188 		return NULL;
189 
190 	if (!fph)
191 		fph = rcu_dereference(key->private.mm->futex_phash);
192 	if (!fph || !fph->hash_mask)
193 		return NULL;
194 
195 	hash = jhash2((void *)&key->private.address,
196 		      sizeof(key->private.address) / 4,
197 		      key->both.offset);
198 	return &fph->queues[hash & fph->hash_mask];
199 }
200 
201 static void futex_rehash_private(struct futex_private_hash *old,
202 				 struct futex_private_hash *new)
203 {
204 	struct futex_hash_bucket *hb_old, *hb_new;
205 	unsigned int slots = old->hash_mask + 1;
206 	unsigned int i;
207 
208 	for (i = 0; i < slots; i++) {
209 		struct futex_q *this, *tmp;
210 
211 		hb_old = &old->queues[i];
212 
213 		spin_lock(&hb_old->lock);
214 		plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) {
215 
216 			plist_del(&this->list, &hb_old->chain);
217 			futex_hb_waiters_dec(hb_old);
218 
219 			WARN_ON_ONCE(this->lock_ptr != &hb_old->lock);
220 
221 			hb_new = __futex_hash(&this->key, new);
222 			futex_hb_waiters_inc(hb_new);
223 			/*
224 			 * The new pointer isn't published yet but an already
225 			 * moved user can be unqueued due to timeout or signal.
226 			 */
227 			spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING);
228 			plist_add(&this->list, &hb_new->chain);
229 			this->lock_ptr = &hb_new->lock;
230 			spin_unlock(&hb_new->lock);
231 		}
232 		spin_unlock(&hb_old->lock);
233 	}
234 }
235 
236 static bool __futex_pivot_hash(struct mm_struct *mm,
237 			       struct futex_private_hash *new)
238 {
239 	struct futex_private_hash *fph;
240 
241 	WARN_ON_ONCE(mm->futex_phash_new);
242 
243 	fph = rcu_dereference_protected(mm->futex_phash,
244 					lockdep_is_held(&mm->futex_hash_lock));
245 	if (fph) {
246 		if (!rcuref_is_dead(&fph->users)) {
247 			mm->futex_phash_new = new;
248 			return false;
249 		}
250 
251 		futex_rehash_private(fph, new);
252 	}
253 	rcu_assign_pointer(mm->futex_phash, new);
254 	kvfree_rcu(fph, rcu);
255 	return true;
256 }
257 
258 static void futex_pivot_hash(struct mm_struct *mm)
259 {
260 	scoped_guard(mutex, &mm->futex_hash_lock) {
261 		struct futex_private_hash *fph;
262 
263 		fph = mm->futex_phash_new;
264 		if (fph) {
265 			mm->futex_phash_new = NULL;
266 			__futex_pivot_hash(mm, fph);
267 		}
268 	}
269 }
270 
271 struct futex_private_hash *futex_private_hash(void)
272 {
273 	struct mm_struct *mm = current->mm;
274 	/*
275 	 * Ideally we don't loop. If there is a replacement in progress
276 	 * then a new private hash is already prepared and a reference can't be
277  * obtained once the last user dropped its reference.
278 	 * In that case we block on mm_struct::futex_hash_lock and either have
279 	 * to perform the replacement or wait while someone else is doing the
280 	 * job. Eitherway, on the second iteration we acquire a reference on the
281 	 * new private hash or loop again because a new replacement has been
282 	 * requested.
283 	 */
284 again:
285 	scoped_guard(rcu) {
286 		struct futex_private_hash *fph;
287 
288 		fph = rcu_dereference(mm->futex_phash);
289 		if (!fph)
290 			return NULL;
291 
292 		if (fph->immutable)
293 			return fph;
294 		if (rcuref_get(&fph->users))
295 			return fph;
296 	}
297 	futex_pivot_hash(mm);
298 	goto again;
299 }
300 
301 struct futex_hash_bucket *futex_hash(union futex_key *key)
302 {
303 	struct futex_private_hash *fph;
304 	struct futex_hash_bucket *hb;
305 
306 again:
307 	scoped_guard(rcu) {
308 		hb = __futex_hash(key, NULL);
309 		fph = hb->priv;
310 
311 		if (!fph || futex_private_hash_get(fph))
312 			return hb;
313 	}
314 	futex_pivot_hash(key->private.mm);
315 	goto again;
316 }
317 
318 #else /* !CONFIG_FUTEX_PRIVATE_HASH */
319 
320 static struct futex_hash_bucket *
321 __futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
322 {
323 	return NULL;
324 }
325 
326 struct futex_hash_bucket *futex_hash(union futex_key *key)
327 {
328 	return __futex_hash(key, NULL);
329 }
330 
331 #endif /* CONFIG_FUTEX_PRIVATE_HASH */
332 
333 #ifdef CONFIG_FUTEX_MPOL
334 
335 static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr)
336 {
337 	struct vm_area_struct *vma = vma_lookup(mm, addr);
338 	struct mempolicy *mpol;
339 	int node = FUTEX_NO_NODE;
340 
341 	if (!vma)
342 		return FUTEX_NO_NODE;
343 
344 	mpol = vma_policy(vma);
345 	if (!mpol)
346 		return FUTEX_NO_NODE;
347 
348 	switch (mpol->mode) {
349 	case MPOL_PREFERRED:
350 		node = first_node(mpol->nodes);
351 		break;
352 	case MPOL_PREFERRED_MANY:
353 	case MPOL_BIND:
354 		if (mpol->home_node != NUMA_NO_NODE)
355 			node = mpol->home_node;
356 		break;
357 	default:
358 		break;
359 	}
360 
361 	return node;
362 }
363 
364 static int futex_key_to_node_opt(struct mm_struct *mm, unsigned long addr)
365 {
366 	int seq, node;
367 
368 	guard(rcu)();
369 
370 	if (!mmap_lock_speculate_try_begin(mm, &seq))
371 		return -EBUSY;
372 
373 	node = __futex_key_to_node(mm, addr);
374 
375 	if (mmap_lock_speculate_retry(mm, seq))
376 		return -EAGAIN;
377 
378 	return node;
379 }
380 
381 static int futex_mpol(struct mm_struct *mm, unsigned long addr)
382 {
383 	int node;
384 
385 	node = futex_key_to_node_opt(mm, addr);
386 	if (node >= FUTEX_NO_NODE)
387 		return node;
388 
389 	guard(mmap_read_lock)(mm);
390 	return __futex_key_to_node(mm, addr);
391 }
392 
393 #else /* !CONFIG_FUTEX_MPOL */
394 
395 static int futex_mpol(struct mm_struct *mm, unsigned long addr)
396 {
397 	return FUTEX_NO_NODE;
398 }
399 
400 #endif /* CONFIG_FUTEX_MPOL */
401 
402 /**
403  * __futex_hash - Return the hash bucket
404  * @key:	Pointer to the futex key for which the hash is calculated
405  * @fph:	Pointer to private hash if known
406  *
407  * We hash on the keys returned from get_futex_key (see below) and return the
408  * corresponding hash bucket.
409  * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the
410  * private hash) is returned if existing. Otherwise a hash bucket from the
411  * global hash is returned.
412  */
413 static struct futex_hash_bucket *
414 __futex_hash(union futex_key *key, struct futex_private_hash *fph)
415 {
416 	int node = key->both.node;
417 	u32 hash;
418 
419 	if (node == FUTEX_NO_NODE) {
420 		struct futex_hash_bucket *hb;
421 
422 		hb = __futex_hash_private(key, fph);
423 		if (hb)
424 			return hb;
425 	}
426 
427 	hash = jhash2((u32 *)key,
428 		      offsetof(typeof(*key), both.offset) / sizeof(u32),
429 		      key->both.offset);
430 
431 	if (node == FUTEX_NO_NODE) {
432 		/*
433 		 * In case of !FLAGS_NUMA, use some unused hash bits to pick a
434 		 * node -- this ensures regular futexes are interleaved across
435 		 * the nodes and avoids having to allocate multiple
436 		 * hash-tables.
437 		 *
438 		 * NOTE: this isn't perfectly uniform, but it is fast and
439 		 * handles sparse node masks.
440 		 */
441 		node = (hash >> futex_hashshift) % nr_node_ids;
442 		if (!node_possible(node)) {
443 			node = find_next_bit_wrap(node_possible_map.bits,
444 						  nr_node_ids, node);
445 		}
446 	}
447 
448 	return &futex_queues[node][hash & futex_hashmask];
449 }
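/*
 * Worked example for the !FLAGS_NUMA node selection above (illustrative
 * numbers, not from the sources): with 512 buckets per node
 * (futex_hashshift == 9, futex_hashmask == 0x1ff) on a two-node machine
 * (nr_node_ids == 2), a key hashing to 0x12345678 lands in
 *
 *	node   = (0x12345678 >> 9) % 2;		// 596523 % 2 == 1
 *	bucket = 0x12345678 & 0x1ff;		// == 0x78
 *
 * i.e. &futex_queues[1][0x78]. If node 1 were not set in
 * node_possible_map, find_next_bit_wrap() would advance to the next
 * possible node instead.
 */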
450 
451 /**
452  * futex_setup_timer - set up the sleeping hrtimer.
453  * @time:	ptr to the given timeout value
454  * @timeout:	the hrtimer_sleeper structure to be set up
455  * @flags:	futex flags
456  * @range_ns:	optional range in ns
457  *
458  * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
459  *	   value given
460  */
461 struct hrtimer_sleeper *
462 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
463 		  int flags, u64 range_ns)
464 {
465 	if (!time)
466 		return NULL;
467 
468 	hrtimer_setup_sleeper_on_stack(timeout,
469 				       (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC,
470 				       HRTIMER_MODE_ABS);
471 	/*
472 	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
473 	 * effectively the same as calling hrtimer_set_expires().
474 	 */
475 	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
476 
477 	return timeout;
478 }
479 
480 /*
481  * Generate a machine wide unique identifier for this inode.
482  *
483  * This relies on u64 not wrapping in the life-time of the machine; which with
484  * 1ns resolution means almost 585 years.
485  *
486  * This further relies on the fact that a well formed program will not unmap
487  * the file while it has a (shared) futex waiting on it. This mapping will have
488  * a file reference which pins the mount and inode.
489  *
490  * If for some reason an inode gets evicted and read back in again, it will get
491  * a new sequence number and will _NOT_ match, even though it is the exact same
492  * file.
493  *
494  * It is important that futex_match() will never have a false-positive, esp.
495  * for PI futexes that can mess up the state. The above argues that false-negatives
496  * are only possible for malformed programs.
497  */
498 static u64 get_inode_sequence_number(struct inode *inode)
499 {
500 	static atomic64_t i_seq;
501 	u64 old;
502 
503 	/* Does the inode already have a sequence number? */
504 	old = atomic64_read(&inode->i_sequence);
505 	if (likely(old))
506 		return old;
507 
508 	for (;;) {
509 		u64 new = atomic64_inc_return(&i_seq);
510 		if (WARN_ON_ONCE(!new))
511 			continue;
512 
513 		old = 0;
514 		if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
515 			return old;
516 		return new;
517 	}
518 }
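/*
 * Sanity check of the "almost 585 years" estimate in the comment above:
 * even if i_seq were incremented once per nanosecond,
 *
 *	2^64 ns ~= 1.8e19 ns ~= 1.8e10 s ~= 584.5 years
 *
 * so a u64 sequence counter cannot realistically wrap, let alone one that
 * is only bumped once per newly seen inode.
 */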
519 
520 /**
521  * get_futex_key() - Get parameters which are the keys for a futex
522  * @uaddr:	virtual address of the futex
523  * @flags:	FLAGS_*
524  * @key:	address where result is stored.
525  * @rw:		mapping needs to be read/write (values: FUTEX_READ,
526  *              FUTEX_WRITE)
527  *
528  * Return: a negative error code or 0
529  *
530  * The key words are stored in @key on success.
531  *
532  * For shared mappings (when @fshared), the key is:
533  *
534  *   ( inode->i_sequence, page offset within mapping, offset_within_page )
535  *
536  * [ also see get_inode_sequence_number() ]
537  *
538  * For private mappings (or when !@fshared), the key is:
539  *
540  *   ( current->mm, address, 0 )
541  *
542  * This allows (cross process, where applicable) identification of the futex
543  * without keeping the page pinned for the duration of the FUTEX_WAIT.
544  *
545  * lock_page() might sleep, the caller should not hold a spinlock.
546  */
547 int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
548 		  enum futex_access rw)
549 {
550 	unsigned long address = (unsigned long)uaddr;
551 	struct mm_struct *mm = current->mm;
552 	struct page *page;
553 	struct folio *folio;
554 	struct address_space *mapping;
555 	int node, err, size, ro = 0;
556 	bool node_updated = false;
557 	bool fshared;
558 
559 	fshared = flags & FLAGS_SHARED;
560 	size = futex_size(flags);
561 	if (flags & FLAGS_NUMA)
562 		size *= 2;
563 
564 	/*
565 	 * The futex address must be "naturally" aligned.
566 	 */
567 	key->both.offset = address % PAGE_SIZE;
568 	if (unlikely((address % size) != 0))
569 		return -EINVAL;
570 	address -= key->both.offset;
571 
572 	if (unlikely(!access_ok(uaddr, size)))
573 		return -EFAULT;
574 
575 	if (unlikely(should_fail_futex(fshared)))
576 		return -EFAULT;
577 
578 	node = FUTEX_NO_NODE;
579 
580 	if (flags & FLAGS_NUMA) {
581 		u32 __user *naddr = (void *)uaddr + size / 2;
582 
583 		if (futex_get_value(&node, naddr))
584 			return -EFAULT;
585 
586 		if ((node != FUTEX_NO_NODE) &&
587 		    ((unsigned int)node >= MAX_NUMNODES || !node_possible(node)))
588 			return -EINVAL;
589 	}
590 
591 	if (node == FUTEX_NO_NODE && (flags & FLAGS_MPOL)) {
592 		node = futex_mpol(mm, address);
593 		node_updated = true;
594 	}
595 
596 	if (flags & FLAGS_NUMA) {
597 		u32 __user *naddr = (void *)uaddr + size / 2;
598 
599 		if (node == FUTEX_NO_NODE) {
600 			node = numa_node_id();
601 			node_updated = true;
602 		}
603 		if (node_updated && futex_put_value(node, naddr))
604 			return -EFAULT;
605 	}
606 
607 	key->both.node = node;
608 
609 	/*
610 	 * PROCESS_PRIVATE futexes are fast.
611 	 * As the mm cannot disappear under us and the 'key' only needs
612  * virtual address, we don't even have to find the underlying vma.
613  * Note: We do have to check 'uaddr' is a valid user address,
614 	 *        but access_ok() should be faster than find_vma()
615 	 */
616 	if (!fshared) {
617 		/*
618 		 * On no-MMU, shared futexes are treated as private, therefore
619 		 * we must not include the current process in the key. Since
620 		 * there is only one address space, the address is a unique key
621 		 * on its own.
622 		 */
623 		if (IS_ENABLED(CONFIG_MMU))
624 			key->private.mm = mm;
625 		else
626 			key->private.mm = NULL;
627 
628 		key->private.address = address;
629 		return 0;
630 	}
631 
632 again:
633 	/* Ignore any VERIFY_READ mapping (futex common case) */
634 	if (unlikely(should_fail_futex(true)))
635 		return -EFAULT;
636 
637 	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
638 	/*
639 	 * If write access is not required (eg. FUTEX_WAIT), try
640 	 * and get read-only access.
641 	 */
642 	if (err == -EFAULT && rw == FUTEX_READ) {
643 		err = get_user_pages_fast(address, 1, 0, &page);
644 		ro = 1;
645 	}
646 	if (err < 0)
647 		return err;
648 	else
649 		err = 0;
650 
651 	/*
652 	 * The treatment of mapping from this point on is critical. The folio
653 	 * lock protects many things but in this context the folio lock
654 	 * stabilizes mapping, prevents inode freeing in the shared
655 	 * file-backed region case and guards against movement to swap cache.
656 	 *
657 	 * Strictly speaking the folio lock is not needed in all cases being
658  * considered here and the folio lock forces unnecessary serialization.
659  * From this point on, mapping will be re-verified if necessary and
660  * the folio lock will be acquired only if it is unavoidable.
661 	 *
662 	 * Mapping checks require the folio so it is looked up now. For
663 	 * anonymous pages, it does not matter if the folio is split
664 	 * in the future as the key is based on the address. For
665 	 * filesystem-backed pages, the precise page is required as the
666 	 * index of the page determines the key.
667 	 */
668 	folio = page_folio(page);
669 	mapping = READ_ONCE(folio->mapping);
670 
671 	/*
672 	 * If folio->mapping is NULL, then it cannot be an anonymous
673 	 * page; but it might be the ZERO_PAGE or in the gate area or
674 	 * in a special mapping (all cases which we are happy to fail);
675 	 * or it may have been a good file page when get_user_pages_fast
676 	 * found it, but truncated or holepunched or subjected to
677 	 * invalidate_complete_page2 before we got the folio lock (also
678 	 * cases which we are happy to fail).  And we hold a reference,
679 	 * so refcount care in invalidate_inode_page's remove_mapping
680 	 * prevents drop_caches from setting mapping to NULL beneath us.
681 	 *
682 	 * The case we do have to guard against is when memory pressure made
683 	 * shmem_writepage move it from filecache to swapcache beneath us:
684 	 * an unlikely race, but we do need to retry for folio->mapping.
685 	 */
686 	if (unlikely(!mapping)) {
687 		int shmem_swizzled;
688 
689 		/*
690 		 * Folio lock is required to identify which special case above
691 		 * applies. If this is really a shmem page then the folio lock
692 		 * will prevent unexpected transitions.
693 		 */
694 		folio_lock(folio);
695 		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
696 		folio_unlock(folio);
697 		folio_put(folio);
698 
699 		if (shmem_swizzled)
700 			goto again;
701 
702 		return -EFAULT;
703 	}
704 
705 	/*
706 	 * Private mappings are handled in a simple way.
707 	 *
708 	 * If the futex key is stored in anonymous memory, then the associated
709 	 * object is the mm which is implicitly pinned by the calling process.
710 	 *
711 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
712 	 * it's a read-only handle, it's expected that futexes attach to
713 	 * the object not the particular process.
714 	 */
715 	if (folio_test_anon(folio)) {
716 		/*
717 		 * A RO anonymous page will never change and thus doesn't make
718 		 * sense for futex operations.
719 		 */
720 		if (unlikely(should_fail_futex(true)) || ro) {
721 			err = -EFAULT;
722 			goto out;
723 		}
724 
725 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
726 		key->private.mm = mm;
727 		key->private.address = address;
728 
729 	} else {
730 		struct inode *inode;
731 
732 		/*
733 		 * The associated futex object in this case is the inode and
734 		 * the folio->mapping must be traversed. Ordinarily this should
735 		 * be stabilised under folio lock but it's not strictly
736 		 * necessary in this case as we just want to pin the inode, not
737 		 * update i_pages or anything like that.
738 		 *
739 		 * The RCU read lock is taken as the inode is finally freed
740 		 * under RCU. If the mapping still matches expectations then the
741 		 * mapping->host can be safely accessed as being a valid inode.
742 		 */
743 		rcu_read_lock();
744 
745 		if (READ_ONCE(folio->mapping) != mapping) {
746 			rcu_read_unlock();
747 			folio_put(folio);
748 
749 			goto again;
750 		}
751 
752 		inode = READ_ONCE(mapping->host);
753 		if (!inode) {
754 			rcu_read_unlock();
755 			folio_put(folio);
756 
757 			goto again;
758 		}
759 
760 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
761 		key->shared.i_seq = get_inode_sequence_number(inode);
762 		key->shared.pgoff = page_pgoff(folio, page);
763 		rcu_read_unlock();
764 	}
765 
766 out:
767 	folio_put(folio);
768 	return err;
769 }
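/*
 * Illustrative example of the resulting keys (addresses are made up): for
 * a PROCESS_PRIVATE futex word at user address 0x7f0000001004 the key is
 *
 *	key->private.mm      = current->mm
 *	key->private.address = 0x7f0000001000	(page-aligned address)
 *	key->both.offset     = 0x4		(offset within the page)
 *
 * while the same word in a MAP_SHARED file mapping yields
 *
 *	key->shared.i_seq    = get_inode_sequence_number(inode)
 *	key->shared.pgoff    = page_pgoff(folio, page)
 *	key->both.offset     = 0x4 | FUT_OFF_INODE
 *
 * so two processes mapping the same file at different virtual addresses
 * still resolve to the same key and thus the same hash bucket.
 */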
770 
771 /**
772  * fault_in_user_writeable() - Fault in user address and verify RW access
773  * @uaddr:	pointer to faulting user space address
774  *
775  * Slow path to fixup the fault we just took in the atomic write
776  * access to @uaddr.
777  *
778  * We have no generic implementation of a non-destructive write to the
779  * user address. We know that we faulted in the atomic pagefault
780  * disabled section so we can as well avoid the #PF overhead by
781  * calling get_user_pages() right away.
782  */
783 int fault_in_user_writeable(u32 __user *uaddr)
784 {
785 	struct mm_struct *mm = current->mm;
786 	int ret;
787 
788 	mmap_read_lock(mm);
789 	ret = fixup_user_fault(mm, (unsigned long)uaddr,
790 			       FAULT_FLAG_WRITE, NULL);
791 	mmap_read_unlock(mm);
792 
793 	return ret < 0 ? ret : 0;
794 }
795 
796 /**
797  * futex_top_waiter() - Return the highest priority waiter on a futex
798  * @hb:		the hash bucket the futex_q's reside in
799  * @key:	the futex key (to distinguish it from other futex futex_q's)
800  *
801  * Must be called with the hb lock held.
802  */
803 struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
804 {
805 	struct futex_q *this;
806 
807 	plist_for_each_entry(this, &hb->chain, list) {
808 		if (futex_match(&this->key, key))
809 			return this;
810 	}
811 	return NULL;
812 }
813 
814 /**
815  * wait_for_owner_exiting - Block until the owner has exited
816  * @ret: owner's current futex lock status
817  * @exiting:	Pointer to the exiting task
818  *
819  * Caller must hold a refcount on @exiting.
820  */
821 void wait_for_owner_exiting(int ret, struct task_struct *exiting)
822 {
823 	if (ret != -EBUSY) {
824 		WARN_ON_ONCE(exiting);
825 		return;
826 	}
827 
828 	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
829 		return;
830 
831 	mutex_lock(&exiting->futex_exit_mutex);
832 	/*
833 	 * No point in doing state checking here. If the waiter got here
834 	 * while the task was in exec()->exec_futex_release() then it can
835 	 * have any FUTEX_STATE_* value when the waiter has acquired the
836 	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
837 	 * already. Highly unlikely and not a problem. Just one more round
838 	 * through the futex maze.
839 	 */
840 	mutex_unlock(&exiting->futex_exit_mutex);
841 
842 	put_task_struct(exiting);
843 }
844 
845 /**
846  * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
847  * @q:	The futex_q to unqueue
848  *
849  * The q->lock_ptr must not be NULL and must be held by the caller.
850  */
851 void __futex_unqueue(struct futex_q *q)
852 {
853 	struct futex_hash_bucket *hb;
854 
855 	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
856 		return;
857 	lockdep_assert_held(q->lock_ptr);
858 
859 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
860 	plist_del(&q->list, &hb->chain);
861 	futex_hb_waiters_dec(hb);
862 }
863 
864 /* The key must be already stored in q->key. */
865 void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb)
866 	__acquires(&hb->lock)
867 {
868 	/*
869 	 * Increment the counter before taking the lock so that
870 	 * a potential waker won't miss a to-be-slept task that is
871 	 * waiting for the spinlock. This is safe as all futex_q_lock()
872 	 * users end up calling futex_queue(). Similarly, for housekeeping,
873 	 * decrement the counter at futex_q_unlock() when some error has
874 	 * occurred and we don't end up adding the task to the list.
875 	 */
876 	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
877 
878 	q->lock_ptr = &hb->lock;
879 
880 	spin_lock(&hb->lock);
881 }
882 
883 void futex_q_unlock(struct futex_hash_bucket *hb)
884 	__releases(&hb->lock)
885 {
886 	futex_hb_waiters_dec(hb);
887 	spin_unlock(&hb->lock);
888 }
889 
890 void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
891 		   struct task_struct *task)
892 {
893 	int prio;
894 
895 	/*
896 	 * The priority used to register this element is
897 	 * - either the real thread-priority for the real-time threads
898 	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
899 	 * - or MAX_RT_PRIO for non-RT threads.
900 	 * Thus, all RT-threads are woken first in priority order, and
901 	 * the others are woken last, in FIFO order.
902 	 */
903 	prio = min(current->normal_prio, MAX_RT_PRIO);
904 
905 	plist_node_init(&q->list, prio);
906 	plist_add(&q->list, &hb->chain);
907 	q->task = task;
908 }
909 
910 /**
911  * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
912  * @q:	The futex_q to unqueue
913  *
914  * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
915  * be paired with exactly one earlier call to futex_queue().
916  *
917  * Return:
918  *  - 1 - if the futex_q was still queued (and we removed it);
919  *  - 0 - if the futex_q was already removed by the waking thread
920  */
921 int futex_unqueue(struct futex_q *q)
922 {
923 	spinlock_t *lock_ptr;
924 	int ret = 0;
925 
926 	/* RCU so lock_ptr is not going away during locking. */
927 	guard(rcu)();
928 	/* In the common case we don't take the spinlock, which is nice. */
929 retry:
930 	/*
931 	 * q->lock_ptr can change between this read and the following spin_lock.
932 	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
933 	 * optimizing lock_ptr out of the logic below.
934 	 */
935 	lock_ptr = READ_ONCE(q->lock_ptr);
936 	if (lock_ptr != NULL) {
937 		spin_lock(lock_ptr);
938 		/*
939 		 * q->lock_ptr can change between reading it and
940 		 * spin_lock(), causing us to take the wrong lock.  This
941 		 * corrects the race condition.
942 		 *
943 		 * Reasoning goes like this: if we have the wrong lock,
944 		 * q->lock_ptr must have changed (maybe several times)
945 		 * between reading it and the spin_lock().  It can
946 		 * change again after the spin_lock() but only if it was
947 		 * already changed before the spin_lock().  It cannot,
948 		 * however, change back to the original value.  Therefore
949 		 * we can detect whether we acquired the correct lock.
950 		 */
951 		if (unlikely(lock_ptr != q->lock_ptr)) {
952 			spin_unlock(lock_ptr);
953 			goto retry;
954 		}
955 		__futex_unqueue(q);
956 
957 		BUG_ON(q->pi_state);
958 
959 		spin_unlock(lock_ptr);
960 		ret = 1;
961 	}
962 
963 	return ret;
964 }
965 
966 void futex_q_lockptr_lock(struct futex_q *q)
967 {
968 	spinlock_t *lock_ptr;
969 
970 	/*
971 	 * See futex_unqueue() why lock_ptr can change.
972 	 */
973 	guard(rcu)();
974 retry:
975 	lock_ptr = READ_ONCE(q->lock_ptr);
976 	spin_lock(lock_ptr);
977 
978 	if (unlikely(lock_ptr != q->lock_ptr)) {
979 		spin_unlock(lock_ptr);
980 		goto retry;
981 	}
982 }
983 
984 /*
985  * PI futexes can not be requeued and must remove themselves from the hash
986  * bucket. The hash bucket lock (i.e. lock_ptr) is held.
987  */
988 void futex_unqueue_pi(struct futex_q *q)
989 {
990 	/*
991 	 * If the lock was not acquired (due to timeout or signal) then the
992 	 * rt_waiter is removed before futex_q is. If this is observed by
993 	 * an unlocker after dropping the rtmutex wait lock and before
994 	 * acquiring the hash bucket lock, then the unlocker dequeues the
995 	 * futex_q from the hash bucket list to guarantee consistent state
996 	 * vs. userspace. Therefore the dequeue here must be conditional.
997 	 */
998 	if (!plist_node_empty(&q->list))
999 		__futex_unqueue(q);
1000 
1001 	BUG_ON(!q->pi_state);
1002 	put_pi_state(q->pi_state);
1003 	q->pi_state = NULL;
1004 }
1005 
1006 /* Constants for the pending_op argument of handle_futex_death */
1007 #define HANDLE_DEATH_PENDING	true
1008 #define HANDLE_DEATH_LIST	false
1009 
1010 /*
1011  * Process a futex-list entry, check whether it's owned by the
1012  * dying task, and do notification if so:
1013  */
1014 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
1015 			      bool pi, bool pending_op)
1016 {
1017 	u32 uval, nval, mval;
1018 	pid_t owner;
1019 	int err;
1020 
1021 	/* Futex address must be 32bit aligned */
1022 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
1023 		return -1;
1024 
1025 retry:
1026 	if (get_user(uval, uaddr))
1027 		return -1;
1028 
1029 	/*
1030 	 * Special case for regular (non PI) futexes. The unlock path in
1031 	 * user space has two race scenarios:
1032 	 *
1033 	 * 1. The unlock path releases the user space futex value and
1034 	 *    before it can execute the futex() syscall to wake up
1035 	 *    waiters it is killed.
1036 	 *
1037 	 * 2. A woken up waiter is killed before it can acquire the
1038 	 *    futex in user space.
1039 	 *
1040 	 * In the second case, the wake up notification could be generated
1041 	 * by the unlock path in user space after setting the futex value
1042 	 * to zero or by the kernel after setting the OWNER_DIED bit below.
1043 	 *
1044 	 * In both cases the TID validation below prevents a wakeup of
1045 	 * potential waiters which can cause these waiters to block
1046 	 * forever.
1047 	 *
1048 	 * In both cases the following conditions are met:
1049 	 *
1050 	 *	1) task->robust_list->list_op_pending != NULL
1051 	 *	   @pending_op == true
1052 	 *	2) The owner part of user space futex value == 0
1053 	 *	3) Regular futex: @pi == false
1054 	 *
1055 	 * If these conditions are met, it is safe to attempt waking up a
1056 	 * potential waiter without touching the user space futex value and
1057 	 * trying to set the OWNER_DIED bit. If the futex value is zero,
1058 	 * the rest of the user space mutex state is consistent, so a woken
1059 	 * waiter will just take over the uncontended futex. Setting the
1060 	 * OWNER_DIED bit would create inconsistent state and malfunction
1061 	 * of the user space owner died handling. Otherwise, the OWNER_DIED
1062 	 * bit is already set, and the woken waiter is expected to deal with
1063 	 * this.
1064 	 */
1065 	owner = uval & FUTEX_TID_MASK;
1066 
1067 	if (pending_op && !pi && !owner) {
1068 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
1069 			   FUTEX_BITSET_MATCH_ANY);
1070 		return 0;
1071 	}
1072 
1073 	if (owner != task_pid_vnr(curr))
1074 		return 0;
1075 
1076 	/*
1077 	 * Ok, this dying thread is truly holding a futex
1078 	 * of interest. Set the OWNER_DIED bit atomically
1079 	 * via cmpxchg, and if the value had FUTEX_WAITERS
1080 	 * set, wake up a waiter (if any). (We have to do a
1081 	 * futex_wake() even if OWNER_DIED is already set -
1082 	 * to handle the rare but possible case of recursive
1083 	 * thread-death.) The rest of the cleanup is done in
1084 	 * userspace.
1085 	 */
1086 	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1087 
1088 	/*
1089 	 * We are not holding a lock here, but we want to have
1090 	 * the pagefault_disable/enable() protection because
1091 	 * we want to handle the fault gracefully. If the
1092 	 * access fails we try to fault in the futex with R/W
1093 	 * verification via get_user_pages. get_user() above
1094 	 * does not guarantee R/W access. If that fails we
1095 	 * give up and leave the futex locked.
1096 	 */
1097 	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
1098 		switch (err) {
1099 		case -EFAULT:
1100 			if (fault_in_user_writeable(uaddr))
1101 				return -1;
1102 			goto retry;
1103 
1104 		case -EAGAIN:
1105 			cond_resched();
1106 			goto retry;
1107 
1108 		default:
1109 			WARN_ON_ONCE(1);
1110 			return err;
1111 		}
1112 	}
1113 
1114 	if (nval != uval)
1115 		goto retry;
1116 
1117 	/*
1118 	 * Wake robust non-PI futexes here. The wakeup of
1119 	 * PI futexes happens in exit_pi_state():
1120 	 */
1121 	if (!pi && (uval & FUTEX_WAITERS)) {
1122 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
1123 			   FUTEX_BITSET_MATCH_ANY);
1124 	}
1125 
1126 	return 0;
1127 }
1128 
1129 /*
1130  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1131  */
1132 static inline int fetch_robust_entry(struct robust_list __user **entry,
1133 				     struct robust_list __user * __user *head,
1134 				     unsigned int *pi)
1135 {
1136 	unsigned long uentry;
1137 
1138 	if (get_user(uentry, (unsigned long __user *)head))
1139 		return -EFAULT;
1140 
1141 	*entry = (void __user *)(uentry & ~1UL);
1142 	*pi = uentry & 1;
1143 
1144 	return 0;
1145 }
1146 
1147 /*
1148  * Walk curr->robust_list (very carefully, it's a userspace list!)
1149  * and mark any locks found there dead, and notify any waiters.
1150  *
1151  * We silently return on any sign of list-walking problem.
1152  */
1153 static void exit_robust_list(struct task_struct *curr)
1154 {
1155 	struct robust_list_head __user *head = curr->robust_list;
1156 	struct robust_list __user *entry, *next_entry, *pending;
1157 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1158 	unsigned int next_pi;
1159 	unsigned long futex_offset;
1160 	int rc;
1161 
1162 	/*
1163 	 * Fetch the list head (which was registered earlier, via
1164 	 * sys_set_robust_list()):
1165 	 */
1166 	if (fetch_robust_entry(&entry, &head->list.next, &pi))
1167 		return;
1168 	/*
1169 	 * Fetch the relative futex offset:
1170 	 */
1171 	if (get_user(futex_offset, &head->futex_offset))
1172 		return;
1173 	/*
1174 	 * Fetch any possibly pending lock-add first, and handle it
1175 	 * if it exists:
1176 	 */
1177 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1178 		return;
1179 
1180 	next_entry = NULL;	/* avoid warning with gcc */
1181 	while (entry != &head->list) {
1182 		/*
1183 		 * Fetch the next entry in the list before calling
1184 		 * handle_futex_death:
1185 		 */
1186 		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1187 		/*
1188 		 * A pending lock might already be on the list, so
1189 		 * don't process it twice:
1190 		 */
1191 		if (entry != pending) {
1192 			if (handle_futex_death((void __user *)entry + futex_offset,
1193 						curr, pi, HANDLE_DEATH_LIST))
1194 				return;
1195 		}
1196 		if (rc)
1197 			return;
1198 		entry = next_entry;
1199 		pi = next_pi;
1200 		/*
1201 		 * Avoid excessively long or circular lists:
1202 		 */
1203 		if (!--limit)
1204 			break;
1205 
1206 		cond_resched();
1207 	}
1208 
1209 	if (pending) {
1210 		handle_futex_death((void __user *)pending + futex_offset,
1211 				   curr, pip, HANDLE_DEATH_PENDING);
1212 	}
1213 }
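/*
 * Userspace counterpart, as a minimal sketch (not part of this file): a
 * thread registers its robust list head once; the kernel walks that list
 * here when the thread dies. 'struct my_robust_mutex' with its 'list' and
 * 'futex' members are hypothetical names used only for this illustration.
 *
 *	#include <linux/futex.h>
 *	#include <stddef.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static struct robust_list_head head;
 *
 *	static void register_robust_list(void)
 *	{
 *		head.list.next = &head.list;		// empty circular list
 *		// distance from the embedded list entry to the futex word:
 *		head.futex_offset = offsetof(struct my_robust_mutex, futex) -
 *				    offsetof(struct my_robust_mutex, list);
 *		head.list_op_pending = NULL;
 *		syscall(SYS_set_robust_list, &head, sizeof(head));
 *	}
 *
 * Each mutex the thread acquires is then linked into head.list, so that
 * handle_futex_death() can set FUTEX_OWNER_DIED and wake one waiter if
 * the owner dies without unlocking.
 */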
1214 
1215 #ifdef CONFIG_COMPAT
1216 static void __user *futex_uaddr(struct robust_list __user *entry,
1217 				compat_long_t futex_offset)
1218 {
1219 	compat_uptr_t base = ptr_to_compat(entry);
1220 	void __user *uaddr = compat_ptr(base + futex_offset);
1221 
1222 	return uaddr;
1223 }
1224 
1225 /*
1226  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1227  */
1228 static inline int
1229 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
1230 		   compat_uptr_t __user *head, unsigned int *pi)
1231 {
1232 	if (get_user(*uentry, head))
1233 		return -EFAULT;
1234 
1235 	*entry = compat_ptr((*uentry) & ~1);
1236 	*pi = (unsigned int)(*uentry) & 1;
1237 
1238 	return 0;
1239 }
1240 
1241 /*
1242  * Walk curr->robust_list (very carefully, it's a userspace list!)
1243  * and mark any locks found there dead, and notify any waiters.
1244  *
1245  * We silently return on any sign of list-walking problem.
1246  */
1247 static void compat_exit_robust_list(struct task_struct *curr)
1248 {
1249 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
1250 	struct robust_list __user *entry, *next_entry, *pending;
1251 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1252 	unsigned int next_pi;
1253 	compat_uptr_t uentry, next_uentry, upending;
1254 	compat_long_t futex_offset;
1255 	int rc;
1256 
1257 	/*
1258 	 * Fetch the list head (which was registered earlier, via
1259 	 * sys_set_robust_list()):
1260 	 */
1261 	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
1262 		return;
1263 	/*
1264 	 * Fetch the relative futex offset:
1265 	 */
1266 	if (get_user(futex_offset, &head->futex_offset))
1267 		return;
1268 	/*
1269 	 * Fetch any possibly pending lock-add first, and handle it
1270 	 * if it exists:
1271 	 */
1272 	if (compat_fetch_robust_entry(&upending, &pending,
1273 			       &head->list_op_pending, &pip))
1274 		return;
1275 
1276 	next_entry = NULL;	/* avoid warning with gcc */
1277 	while (entry != (struct robust_list __user *) &head->list) {
1278 		/*
1279 		 * Fetch the next entry in the list before calling
1280 		 * handle_futex_death:
1281 		 */
1282 		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
1283 			(compat_uptr_t __user *)&entry->next, &next_pi);
1284 		/*
1285 		 * A pending lock might already be on the list, so
1286  * don't process it twice:
1287 		 */
1288 		if (entry != pending) {
1289 			void __user *uaddr = futex_uaddr(entry, futex_offset);
1290 
1291 			if (handle_futex_death(uaddr, curr, pi,
1292 					       HANDLE_DEATH_LIST))
1293 				return;
1294 		}
1295 		if (rc)
1296 			return;
1297 		uentry = next_uentry;
1298 		entry = next_entry;
1299 		pi = next_pi;
1300 		/*
1301 		 * Avoid excessively long or circular lists:
1302 		 */
1303 		if (!--limit)
1304 			break;
1305 
1306 		cond_resched();
1307 	}
1308 	if (pending) {
1309 		void __user *uaddr = futex_uaddr(pending, futex_offset);
1310 
1311 		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
1312 	}
1313 }
1314 #endif
1315 
1316 #ifdef CONFIG_FUTEX_PI
1317 
1318 /*
1319  * This task is holding PI mutexes at exit time => bad.
1320  * Kernel cleans up PI-state, but userspace is likely hosed.
1321  * (Robust-futex cleanup is separate and might save the day for userspace.)
1322  */
1323 static void exit_pi_state_list(struct task_struct *curr)
1324 {
1325 	struct list_head *next, *head = &curr->pi_state_list;
1326 	struct futex_pi_state *pi_state;
1327 	union futex_key key = FUTEX_KEY_INIT;
1328 
1329 	/*
1330 	 * The mutex mm_struct::futex_hash_lock might be acquired.
1331 	 */
1332 	might_sleep();
1333 	/*
1334 	 * Ensure the hash remains stable (no resize) during the while loop
1335 	 * below. The hb pointer is acquired under the pi_lock so we can't block
1336 	 * on the mutex.
1337 	 */
1338 	WARN_ON(curr != current);
1339 	guard(private_hash)();
1340 	/*
1341 	 * We are a ZOMBIE and nobody can enqueue itself on
1342 	 * pi_state_list anymore, but we have to be careful
1343 	 * versus waiters unqueueing themselves:
1344 	 */
1345 	raw_spin_lock_irq(&curr->pi_lock);
1346 	while (!list_empty(head)) {
1347 		next = head->next;
1348 		pi_state = list_entry(next, struct futex_pi_state, list);
1349 		key = pi_state->key;
1350 		if (1) {
1351 			CLASS(hb, hb)(&key);
1352 
1353 			/*
1354 			 * We can race against put_pi_state() removing itself from the
1355 			 * list (a waiter going away). put_pi_state() will first
1356 			 * decrement the reference count and then modify the list, so
1357 			 * its possible to see the list entry but fail this reference
1358 			 * acquire.
1359 			 *
1360 			 * In that case; drop the locks to let put_pi_state() make
1361 			 * progress and retry the loop.
1362 			 */
1363 			if (!refcount_inc_not_zero(&pi_state->refcount)) {
1364 				raw_spin_unlock_irq(&curr->pi_lock);
1365 				cpu_relax();
1366 				raw_spin_lock_irq(&curr->pi_lock);
1367 				continue;
1368 			}
1369 			raw_spin_unlock_irq(&curr->pi_lock);
1370 
1371 			spin_lock(&hb->lock);
1372 			raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1373 			raw_spin_lock(&curr->pi_lock);
1374 			/*
1375 			 * We dropped the pi-lock, so re-check whether this
1376 			 * task still owns the PI-state:
1377 			 */
1378 			if (head->next != next) {
1379 				/* retain curr->pi_lock for the loop invariant */
1380 				raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1381 				spin_unlock(&hb->lock);
1382 				put_pi_state(pi_state);
1383 				continue;
1384 			}
1385 
1386 			WARN_ON(pi_state->owner != curr);
1387 			WARN_ON(list_empty(&pi_state->list));
1388 			list_del_init(&pi_state->list);
1389 			pi_state->owner = NULL;
1390 
1391 			raw_spin_unlock(&curr->pi_lock);
1392 			raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1393 			spin_unlock(&hb->lock);
1394 		}
1395 
1396 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
1397 		put_pi_state(pi_state);
1398 
1399 		raw_spin_lock_irq(&curr->pi_lock);
1400 	}
1401 	raw_spin_unlock_irq(&curr->pi_lock);
1402 }
1403 #else
1404 static inline void exit_pi_state_list(struct task_struct *curr) { }
1405 #endif
1406 
1407 static void futex_cleanup(struct task_struct *tsk)
1408 {
1409 	if (unlikely(tsk->robust_list)) {
1410 		exit_robust_list(tsk);
1411 		tsk->robust_list = NULL;
1412 	}
1413 
1414 #ifdef CONFIG_COMPAT
1415 	if (unlikely(tsk->compat_robust_list)) {
1416 		compat_exit_robust_list(tsk);
1417 		tsk->compat_robust_list = NULL;
1418 	}
1419 #endif
1420 
1421 	if (unlikely(!list_empty(&tsk->pi_state_list)))
1422 		exit_pi_state_list(tsk);
1423 }
1424 
1425 /**
1426  * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1427  * @tsk:	task to set the state on
1428  *
1429  * Set the futex exit state of the task lockless. The futex waiter code
1430  * observes that state when a task is exiting and loops until the task has
1431  * actually finished the futex cleanup. The worst case for this is that the
1432  * waiter runs through the wait loop until the state becomes visible.
1433  *
1434  * This is called from the recursive fault handling path in make_task_dead().
1435  *
1436  * This is best effort. Either the futex exit code has run already or
1437  * not. If the OWNER_DIED bit has been set on the futex then the waiter can
1438  * take it over. If not, the problem is pushed back to user space. If the
1439  * futex exit code did not run yet, then an already queued waiter might
1440  * block forever, but there is nothing which can be done about that.
1441  */
1442 void futex_exit_recursive(struct task_struct *tsk)
1443 {
1444 	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
1445 	if (tsk->futex_state == FUTEX_STATE_EXITING)
1446 		mutex_unlock(&tsk->futex_exit_mutex);
1447 	tsk->futex_state = FUTEX_STATE_DEAD;
1448 }
1449 
1450 static void futex_cleanup_begin(struct task_struct *tsk)
1451 {
1452 	/*
1453 	 * Prevent various race issues against a concurrent incoming waiter
1454 	 * including live locks by forcing the waiter to block on
1455 	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
1456 	 * attach_to_pi_owner().
1457 	 */
1458 	mutex_lock(&tsk->futex_exit_mutex);
1459 
1460 	/*
1461 	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
1462 	 *
1463 	 * This ensures that all subsequent checks of tsk->futex_state in
1464 	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
1465 	 * tsk->pi_lock held.
1466 	 *
1467 	 * It guarantees also that a pi_state which was queued right before
1468 	 * the state change under tsk->pi_lock by a concurrent waiter must
1469 	 * be observed in exit_pi_state_list().
1470 	 */
1471 	raw_spin_lock_irq(&tsk->pi_lock);
1472 	tsk->futex_state = FUTEX_STATE_EXITING;
1473 	raw_spin_unlock_irq(&tsk->pi_lock);
1474 }
1475 
1476 static void futex_cleanup_end(struct task_struct *tsk, int state)
1477 {
1478 	/*
1479 	 * Lockless store. The only side effect is that an observer might
1480 	 * take another loop until it becomes visible.
1481 	 */
1482 	tsk->futex_state = state;
1483 	/*
1484 	 * Drop the exit protection. This unblocks waiters which observed
1485 	 * FUTEX_STATE_EXITING to reevaluate the state.
1486 	 */
1487 	mutex_unlock(&tsk->futex_exit_mutex);
1488 }
1489 
1490 void futex_exec_release(struct task_struct *tsk)
1491 {
1492 	/*
1493 	 * The state handling is done for consistency, but in the case of
1494 	 * exec() there is no way to prevent further damage as the PID stays
1495 	 * the same. But for the unlikely and arguably buggy case that a
1496 	 * futex is held on exec(), this provides at least as much state
1497  * consistency protection as is possible.
1498 	 */
1499 	futex_cleanup_begin(tsk);
1500 	futex_cleanup(tsk);
1501 	/*
1502 	 * Reset the state to FUTEX_STATE_OK. The task is alive and about
1503  * to exec a new binary.
1504 	 */
1505 	futex_cleanup_end(tsk, FUTEX_STATE_OK);
1506 }
1507 
1508 void futex_exit_release(struct task_struct *tsk)
1509 {
1510 	futex_cleanup_begin(tsk);
1511 	futex_cleanup(tsk);
1512 	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
1513 }
1514 
1515 static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
1516 				   struct futex_private_hash *fph)
1517 {
1518 #ifdef CONFIG_FUTEX_PRIVATE_HASH
1519 	fhb->priv = fph;
1520 #endif
1521 	atomic_set(&fhb->waiters, 0);
1522 	plist_head_init(&fhb->chain);
1523 	spin_lock_init(&fhb->lock);
1524 }
1525 
1526 #define FH_CUSTOM	0x01
1527 #define FH_IMMUTABLE	0x02
1528 
1529 #ifdef CONFIG_FUTEX_PRIVATE_HASH
1530 void futex_hash_free(struct mm_struct *mm)
1531 {
1532 	struct futex_private_hash *fph;
1533 
1534 	kvfree(mm->futex_phash_new);
1535 	fph = rcu_dereference_raw(mm->futex_phash);
1536 	if (fph) {
1537 		WARN_ON_ONCE(rcuref_read(&fph->users) > 1);
1538 		kvfree(fph);
1539 	}
1540 }
1541 
1542 static bool futex_pivot_pending(struct mm_struct *mm)
1543 {
1544 	struct futex_private_hash *fph;
1545 
1546 	guard(rcu)();
1547 
1548 	if (!mm->futex_phash_new)
1549 		return true;
1550 
1551 	fph = rcu_dereference(mm->futex_phash);
1552 	return rcuref_is_dead(&fph->users);
1553 }
1554 
1555 static bool futex_hash_less(struct futex_private_hash *a,
1556 			    struct futex_private_hash *b)
1557 {
1558 	/* user provided always wins */
1559 	if (!a->custom && b->custom)
1560 		return true;
1561 	if (a->custom && !b->custom)
1562 		return false;
1563 
1564 	/* zero-sized hash wins */
1565 	if (!b->hash_mask)
1566 		return true;
1567 	if (!a->hash_mask)
1568 		return false;
1569 
1570 	/* keep the biggest */
1571 	if (a->hash_mask < b->hash_mask)
1572 		return true;
1573 	if (a->hash_mask > b->hash_mask)
1574 		return false;
1575 
1576 	return false; /* equal */
1577 }
1578 
1579 static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
1580 {
1581 	struct mm_struct *mm = current->mm;
1582 	struct futex_private_hash *fph;
1583 	bool custom = flags & FH_CUSTOM;
1584 	int i;
1585 
1586 	if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots)))
1587 		return -EINVAL;
1588 
1589 	/*
1590 	 * Once we've disabled the global hash there is no way back.
1591 	 */
1592 	scoped_guard(rcu) {
1593 		fph = rcu_dereference(mm->futex_phash);
1594 		if (fph && (!fph->hash_mask || fph->immutable)) {
1595 			if (custom)
1596 				return -EBUSY;
1597 			return 0;
1598 		}
1599 	}
1600 
1601 	fph = kvzalloc(struct_size(fph, queues, hash_slots), GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1602 	if (!fph)
1603 		return -ENOMEM;
1604 
1605 	rcuref_init(&fph->users, 1);
1606 	fph->hash_mask = hash_slots ? hash_slots - 1 : 0;
1607 	fph->custom = custom;
1608 	fph->immutable = !!(flags & FH_IMMUTABLE);
1609 	fph->mm = mm;
1610 
1611 	for (i = 0; i < hash_slots; i++)
1612 		futex_hash_bucket_init(&fph->queues[i], fph);
1613 
1614 	if (custom) {
1615 		/*
1616 		 * Only let prctl() wait / retry; don't unduly delay clone().
1617 		 */
1618 again:
1619 		wait_var_event(mm, futex_pivot_pending(mm));
1620 	}
1621 
1622 	scoped_guard(mutex, &mm->futex_hash_lock) {
1623 		struct futex_private_hash *free __free(kvfree) = NULL;
1624 		struct futex_private_hash *cur, *new;
1625 
1626 		cur = rcu_dereference_protected(mm->futex_phash,
1627 						lockdep_is_held(&mm->futex_hash_lock));
1628 		new = mm->futex_phash_new;
1629 		mm->futex_phash_new = NULL;
1630 
1631 		if (fph) {
1632 			if (cur && (!cur->hash_mask || cur->immutable)) {
1633 				/*
1634 				 * If two threads simultaneously request the global
1635 				 * hash then the first one performs the switch,
1636 				 * the second one returns here.
1637 				 */
1638 				free = fph;
1639 				mm->futex_phash_new = new;
1640 				return -EBUSY;
1641 			}
1642 			if (cur && !new) {
1643 				/*
1644 				 * If we have an existing hash, but do not yet have
1645 				 * allocated a replacement hash, drop the initial
1646 				 * reference on the existing hash.
1647 				 */
1648 				futex_private_hash_put(cur);
1649 			}
1650 
1651 			if (new) {
1652 				/*
1653 				 * Two updates raced; throw out the lesser one.
1654 				 */
1655 				if (futex_hash_less(new, fph)) {
1656 					free = new;
1657 					new = fph;
1658 				} else {
1659 					free = fph;
1660 				}
1661 			} else {
1662 				new = fph;
1663 			}
1664 			fph = NULL;
1665 		}
1666 
1667 		if (new) {
1668 			/*
1669 			 * Will set mm->futex_phash_new on failure;
1670 			 * futex_private_hash_get() will try again.
1671 			 */
1672 			if (!__futex_pivot_hash(mm, new) && custom)
1673 				goto again;
1674 		}
1675 	}
1676 	return 0;
1677 }
1678 
1679 int futex_hash_allocate_default(void)
1680 {
1681 	unsigned int threads, buckets, current_buckets = 0;
1682 	struct futex_private_hash *fph;
1683 
1684 	if (!current->mm)
1685 		return 0;
1686 
1687 	scoped_guard(rcu) {
1688 		threads = min_t(unsigned int,
1689 				get_nr_threads(current),
1690 				num_online_cpus());
1691 
1692 		fph = rcu_dereference(current->mm->futex_phash);
1693 		if (fph) {
1694 			if (fph->custom)
1695 				return 0;
1696 
1697 			current_buckets = fph->hash_mask + 1;
1698 		}
1699 	}
1700 
1701 	/*
1702 	 * The default allocation will remain within
1703 	 *   16 <= threads * 4 <= global hash size
1704 	 */
1705 	buckets = roundup_pow_of_two(4 * threads);
1706 	buckets = clamp(buckets, 16, futex_hashmask + 1);
1707 
1708 	if (current_buckets >= buckets)
1709 		return 0;
1710 
1711 	return futex_hash_allocate(buckets, 0);
1712 }
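/*
 * Worked example of the sizing above (illustrative numbers): a process
 * with 3 threads on a 16-CPU machine gets
 *
 *	threads = min(3, 16)                        = 3
 *	buckets = roundup_pow_of_two(4 * 3)         = 16
 *	buckets = clamp(16, 16, futex_hashmask + 1) = 16
 *
 * while a process with 100 threads on the same machine is capped at
 * min(100, 16) = 16 threads and therefore gets 64 buckets, assuming the
 * global hash is at least that large.
 */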
1713 
1714 static int futex_hash_get_slots(void)
1715 {
1716 	struct futex_private_hash *fph;
1717 
1718 	guard(rcu)();
1719 	fph = rcu_dereference(current->mm->futex_phash);
1720 	if (fph && fph->hash_mask)
1721 		return fph->hash_mask + 1;
1722 	return 0;
1723 }
1724 
1725 static int futex_hash_get_immutable(void)
1726 {
1727 	struct futex_private_hash *fph;
1728 
1729 	guard(rcu)();
1730 	fph = rcu_dereference(current->mm->futex_phash);
1731 	if (fph && fph->immutable)
1732 		return 1;
1733 	if (fph && !fph->hash_mask)
1734 		return 1;
1735 	return 0;
1736 }
1737 
1738 #else
1739 
1740 static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
1741 {
1742 	return -EINVAL;
1743 }
1744 
1745 static int futex_hash_get_slots(void)
1746 {
1747 	return 0;
1748 }
1749 
1750 static int futex_hash_get_immutable(void)
1751 {
1752 	return 0;
1753 }
1754 #endif
1755 
1756 int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
1757 {
1758 	unsigned int flags = FH_CUSTOM;
1759 	int ret;
1760 
1761 	switch (arg2) {
1762 	case PR_FUTEX_HASH_SET_SLOTS:
1763 		if (arg4 & ~FH_FLAG_IMMUTABLE)
1764 			return -EINVAL;
1765 		if (arg4 & FH_FLAG_IMMUTABLE)
1766 			flags |= FH_IMMUTABLE;
1767 		ret = futex_hash_allocate(arg3, flags);
1768 		break;
1769 
1770 	case PR_FUTEX_HASH_GET_SLOTS:
1771 		ret = futex_hash_get_slots();
1772 		break;
1773 
1774 	case PR_FUTEX_HASH_GET_IMMUTABLE:
1775 		ret = futex_hash_get_immutable();
1776 		break;
1777 
1778 	default:
1779 		ret = -EINVAL;
1780 		break;
1781 	}
1782 	return ret;
1783 }
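/*
 * Userspace reaches this function via prctl(2). A minimal sketch, assuming
 * the PR_FUTEX_HASH option and its sub-commands from <linux/prctl.h>
 * (error handling omitted):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// resize this process' private futex hash to 64 buckets
 *	prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 64, 0);
 *
 *	// query the current size; 0 means the global hash is in use
 *	int slots = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0);
 *
 * arg3 is the requested slot count (a power of two of at least 2, or 0 to
 * switch to the global hash) and arg4 carries the FH_FLAG_IMMUTABLE flag
 * checked above.
 */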
1784 
1785 static int __init futex_init(void)
1786 {
1787 	unsigned long hashsize, i;
1788 	unsigned int order, n;
1789 	unsigned long size;
1790 
1791 #ifdef CONFIG_BASE_SMALL
1792 	hashsize = 16;
1793 #else
1794 	hashsize = 256 * num_possible_cpus();
1795 	hashsize /= num_possible_nodes();
1796 	hashsize = max(4, hashsize);
1797 	hashsize = roundup_pow_of_two(hashsize);
1798 #endif
1799 	futex_hashshift = ilog2(hashsize);
1800 	size = sizeof(struct futex_hash_bucket) * hashsize;
1801 	order = get_order(size);
1802 
1803 	for_each_node(n) {
1804 		struct futex_hash_bucket *table;
1805 
1806 		if (order > MAX_PAGE_ORDER)
1807 			table = vmalloc_huge_node(size, GFP_KERNEL, n);
1808 		else
1809 			table = alloc_pages_exact_nid(n, size, GFP_KERNEL);
1810 
1811 		BUG_ON(!table);
1812 
1813 		for (i = 0; i < hashsize; i++)
1814 			futex_hash_bucket_init(&table[i], NULL);
1815 
1816 		futex_queues[n] = table;
1817 	}
1818 
1819 	futex_hashmask = hashsize - 1;
1820 	pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n",
1821 		hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024,
1822 		order > MAX_PAGE_ORDER ? "vmalloc" : "linear");
1823 	return 0;
1824 }
1825 core_initcall(futex_init);
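/*
 * Worked example of the boot-time sizing above (illustrative numbers): a
 * machine with 64 possible CPUs and 2 NUMA nodes gets
 *
 *	hashsize = roundup_pow_of_two(max(4, 256 * 64 / 2)) = 8192
 *
 * buckets per node, i.e. futex_hashshift == 13 and futex_hashmask == 8191,
 * with one table allocated on each node.
 */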
1826