xref: /linux/kernel/futex/core.c (revision fa76887bb72ae11347730271e6a04c147b7527e6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Fast Userspace Mutexes (which I call "Futexes!").
4  *  (C) Rusty Russell, IBM 2002
5  *
6  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8  *
9  *  Removed page pinning, fix privately mapped COW pages and other cleanups
10  *  (C) Copyright 2003, 2004 Jamie Lokier
11  *
12  *  Robust futex support started by Ingo Molnar
13  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15  *
16  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
17  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19  *
20  *  PRIVATE futexes by Eric Dumazet
21  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22  *
23  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24  *  Copyright (C) IBM Corporation, 2009
25  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
26  *
27  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28  *  enough at me, Linus for the original (flawed) idea, Matthew
29  *  Kirkwood for proof-of-concept implementation.
30  *
31  *  "The futexes are also cursed."
32  *  "But they come in a choice of three flavours!"
33  */
34 #include <linux/compat.h>
35 #include <linux/jhash.h>
36 #include <linux/pagemap.h>
37 #include <linux/debugfs.h>
38 #include <linux/plist.h>
39 #include <linux/memblock.h>
40 #include <linux/fault-inject.h>
41 #include <linux/slab.h>
42 
43 #include "futex.h"
44 #include "../locking/rtmutex_common.h"
45 
46 /*
47  * The base of the bucket array and its size are always used together
48  * (after initialization only in futex_hash()), so ensure that they
49  * reside in the same cacheline.
50  */
51 static struct {
52 	struct futex_hash_bucket *queues;
53 	unsigned long            hashsize;
54 } __futex_data __read_mostly __aligned(2*sizeof(long));
55 #define futex_queues   (__futex_data.queues)
56 #define futex_hashsize (__futex_data.hashsize)
57 
58 
59 /*
60  * Fault injections for futexes.
61  */
62 #ifdef CONFIG_FAIL_FUTEX
63 
64 static struct {
65 	struct fault_attr attr;
66 
67 	bool ignore_private;
68 } fail_futex = {
69 	.attr = FAULT_ATTR_INITIALIZER,
70 	.ignore_private = false,
71 };
72 
73 static int __init setup_fail_futex(char *str)
74 {
75 	return setup_fault_attr(&fail_futex.attr, str);
76 }
77 __setup("fail_futex=", setup_fail_futex);
78 
79 bool should_fail_futex(bool fshared)
80 {
81 	if (fail_futex.ignore_private && !fshared)
82 		return false;
83 
84 	return should_fail(&fail_futex.attr, 1);
85 }
86 
87 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
88 
89 static int __init fail_futex_debugfs(void)
90 {
91 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
92 	struct dentry *dir;
93 
94 	dir = fault_create_debugfs_attr("fail_futex", NULL,
95 					&fail_futex.attr);
96 	if (IS_ERR(dir))
97 		return PTR_ERR(dir);
98 
99 	debugfs_create_bool("ignore-private", mode, dir,
100 			    &fail_futex.ignore_private);
101 	return 0;
102 }
103 
104 late_initcall(fail_futex_debugfs);
105 
106 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
107 
108 #endif /* CONFIG_FAIL_FUTEX */
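/*
 * Illustrative usage sketch (not part of the original file): the fault
 * attributes use the generic fault-injection format
 * <interval>,<probability>,<space>,<times>, so futex faults can be
 * provoked at boot or narrowed at runtime, e.g.:
 *
 *	fail_futex=10,100,0,-1				(kernel command line)
 *	echo 1 > /sys/kernel/debug/fail_futex/ignore-private
 *
 * With ignore-private set, only shared futexes are subject to injection,
 * matching the fshared check in should_fail_futex() above.
 */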
109 
110 /**
111  * futex_hash - Return the hash bucket in the global hash
112  * @key:	Pointer to the futex key for which the hash is calculated
113  *
114  * We hash on the keys returned from get_futex_key (see below) and return the
115  * corresponding hash bucket in the global hash.
116  */
117 struct futex_hash_bucket *futex_hash(union futex_key *key)
118 {
119 	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
120 			  key->both.offset);
121 
122 	return &futex_queues[hash & (futex_hashsize - 1)];
123 }
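/*
 * Worked example (illustrative): futex_hashsize is always a power of two
 * (see futex_init() at the bottom of this file), so the mask above always
 * yields a valid bucket index. With futex_hashsize == 4096, a jhash2()
 * result of 0x9e3779b9 selects bucket 0x9e3779b9 & 0xfff == 0x9b9.
 */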
124 
125 
126 /**
127  * futex_setup_timer - set up the sleeping hrtimer.
128  * @time:	ptr to the given timeout value
129  * @timeout:	the hrtimer_sleeper structure to be set up
130  * @flags:	futex flags
131  * @range_ns:	optional range in ns
132  *
133  * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
134  *	   value given
135  */
136 struct hrtimer_sleeper *
137 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
138 		  int flags, u64 range_ns)
139 {
140 	if (!time)
141 		return NULL;
142 
143 	hrtimer_setup_sleeper_on_stack(timeout,
144 				       (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC,
145 				       HRTIMER_MODE_ABS);
146 	/*
147 	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
148 	 * effectively the same as calling hrtimer_set_expires().
149 	 */
150 	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
151 
152 	return timeout;
153 }
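/*
 * Typical pairing (illustrative sketch, cf. the futex_wait() path):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *	...					wait / schedule
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */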
154 
155 /*
156  * Generate a machine wide unique identifier for this inode.
157  *
158  * This relies on the u64 not wrapping in the lifetime of the machine, which
159  * with 1ns resolution means almost 585 years.
160  *
161  * This further relies on the fact that a well-formed program will not unmap
162  * the file while it has a (shared) futex waiting on it. This mapping will have
163  * a file reference which pins the mount and inode.
164  *
165  * If for some reason an inode gets evicted and read back in again, it will get
166  * a new sequence number and will _NOT_ match, even though it is the exact same
167  * file.
168  *
169  * It is important that futex_match() never has a false positive, especially
170  * for PI futexes, where a false positive can mess up the state. The above
171  * argues that false negatives are only possible for malformed programs.
172  */
173 static u64 get_inode_sequence_number(struct inode *inode)
174 {
175 	static atomic64_t i_seq;
176 	u64 old;
177 
178 	/* Does the inode already have a sequence number? */
179 	old = atomic64_read(&inode->i_sequence);
180 	if (likely(old))
181 		return old;
182 
183 	for (;;) {
184 		u64 new = atomic64_inc_return(&i_seq);
185 		if (WARN_ON_ONCE(!new))
186 			continue;
187 
188 		old = 0;
189 		if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
190 			return old;
191 		return new;
192 	}
193 }
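/*
 * Back-of-the-envelope check for the wrap argument above: even when the
 * counter is bumped once per nanosecond, 2^64 increments take
 * 2^64 / (10^9 * 31,557,600 s/yr) ~= 584.5 years to exhaust.
 */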
194 
195 /**
196  * get_futex_key() - Get parameters which are the keys for a futex
197  * @uaddr:	virtual address of the futex
198  * @flags:	FLAGS_*
199  * @key:	address where result is stored.
200  * @rw:		mapping needs to be read/write (values: FUTEX_READ,
201  *              FUTEX_WRITE)
202  *
203  * Return: a negative error code or 0
204  *
205  * The key words are stored in @key on success.
206  *
207  * For shared mappings (when @fshared), the key is:
208  *
209  *   ( inode->i_sequence, page->index, offset_within_page )
210  *
211  * [ also see get_inode_sequence_number() ]
212  *
213  * For private mappings (or when !@fshared), the key is:
214  *
215  *   ( current->mm, address, 0 )
216  *
217  * This allows (cross process, where applicable) identification of the futex
218  * without keeping the page pinned for the duration of the FUTEX_WAIT.
219  *
220  * folio_lock() might sleep, so the caller must not hold a spinlock.
221  */
222 int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
223 		  enum futex_access rw)
224 {
225 	unsigned long address = (unsigned long)uaddr;
226 	struct mm_struct *mm = current->mm;
227 	struct page *page;
228 	struct folio *folio;
229 	struct address_space *mapping;
230 	int err, ro = 0;
231 	bool fshared;
232 
233 	fshared = flags & FLAGS_SHARED;
234 
235 	/*
236 	 * The futex address must be "naturally" aligned.
237 	 */
238 	key->both.offset = address % PAGE_SIZE;
239 	if (unlikely((address % sizeof(u32)) != 0))
240 		return -EINVAL;
241 	address -= key->both.offset;
242 
243 	if (unlikely(!access_ok(uaddr, sizeof(u32))))
244 		return -EFAULT;
245 
246 	if (unlikely(should_fail_futex(fshared)))
247 		return -EFAULT;
248 
249 	/*
250 	 * PROCESS_PRIVATE futexes are fast.
251 	 * As the mm cannot disappear under us and the 'key' only needs the
252 	 * virtual address, we don't even have to find the underlying vma.
253 	 * Note: We do have to check that 'uaddr' is a valid user address,
254 	 *       but access_ok() should be faster than find_vma().
255 	 */
256 	if (!fshared) {
257 		/*
258 		 * On no-MMU, shared futexes are treated as private, therefore
259 		 * we must not include the current process in the key. Since
260 		 * there is only one address space, the address is a unique key
261 		 * on its own.
262 		 */
263 		if (IS_ENABLED(CONFIG_MMU))
264 			key->private.mm = mm;
265 		else
266 			key->private.mm = NULL;
267 
268 		key->private.address = address;
269 		return 0;
270 	}
271 
272 again:
273 	/* Ignore any VERIFY_READ mapping (futex common case) */
274 	if (unlikely(should_fail_futex(true)))
275 		return -EFAULT;
276 
277 	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
278 	/*
279 	 * If write access is not required (eg. FUTEX_WAIT), try
280 	 * and get read-only access.
281 	 */
282 	if (err == -EFAULT && rw == FUTEX_READ) {
283 		err = get_user_pages_fast(address, 1, 0, &page);
284 		ro = 1;
285 	}
286 	if (err < 0)
287 		return err;
288 	else
289 		err = 0;
290 
291 	/*
292 	 * The treatment of mapping from this point on is critical. The folio
293 	 * lock protects many things but in this context the folio lock
294 	 * stabilizes mapping, prevents inode freeing in the shared
295 	 * file-backed region case and guards against movement to swap cache.
296 	 *
297 	 * Strictly speaking the folio lock is not needed in all cases being
298 	 * considered here and the folio lock forces unnecessary serialization.
299 	 * From this point on, mapping will be re-verified if necessary and
300 	 * the folio lock will be acquired only if it is unavoidable.
301 	 *
302 	 * Mapping checks require the folio so it is looked up now. For
303 	 * anonymous pages, it does not matter if the folio is split
304 	 * in the future as the key is based on the address. For
305 	 * filesystem-backed pages, the precise page is required as the
306 	 * index of the page determines the key.
307 	 */
308 	folio = page_folio(page);
309 	mapping = READ_ONCE(folio->mapping);
310 
311 	/*
312 	 * If folio->mapping is NULL, then it cannot be an anonymous
313 	 * page; but it might be the ZERO_PAGE or in the gate area or
314 	 * in a special mapping (all cases which we are happy to fail);
315 	 * or it may have been a good file page when get_user_pages_fast
316 	 * found it, but truncated or holepunched or subjected to
317 	 * invalidate_complete_page2 before we got the folio lock (also
318 	 * cases which we are happy to fail).  And we hold a reference,
319 	 * so refcount care in invalidate_inode_page's remove_mapping
320 	 * prevents drop_caches from setting mapping to NULL beneath us.
321 	 *
322 	 * The case we do have to guard against is when memory pressure made
323 	 * shmem_writepage move it from filecache to swapcache beneath us:
324 	 * an unlikely race, but we do need to retry for folio->mapping.
325 	 */
326 	if (unlikely(!mapping)) {
327 		int shmem_swizzled;
328 
329 		/*
330 		 * Folio lock is required to identify which special case above
331 		 * applies. If this is really a shmem page then the folio lock
332 		 * will prevent unexpected transitions.
333 		 */
334 		folio_lock(folio);
335 		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
336 		folio_unlock(folio);
337 		folio_put(folio);
338 
339 		if (shmem_swizzled)
340 			goto again;
341 
342 		return -EFAULT;
343 	}
344 
345 	/*
346 	 * Private mappings are handled in a simple way.
347 	 *
348 	 * If the futex key is stored in anonymous memory, then the associated
349 	 * object is the mm which is implicitly pinned by the calling process.
350 	 *
351 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
352 	 * it's a read-only handle, it's expected that futexes attach to
353 	 * the object not the particular process.
354 	 */
355 	if (folio_test_anon(folio)) {
356 		/*
357 		 * A RO anonymous page will never change and thus doesn't make
358 		 * sense for futex operations.
359 		 */
360 		if (unlikely(should_fail_futex(true)) || ro) {
361 			err = -EFAULT;
362 			goto out;
363 		}
364 
365 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
366 		key->private.mm = mm;
367 		key->private.address = address;
368 
369 	} else {
370 		struct inode *inode;
371 
372 		/*
373 		 * The associated futex object in this case is the inode and
374 		 * the folio->mapping must be traversed. Ordinarily this should
375 		 * be stabilised under folio lock but it's not strictly
376 		 * necessary in this case as we just want to pin the inode, not
377 		 * update i_pages or anything like that.
378 		 *
379 		 * The RCU read lock is taken as the inode is finally freed
380 		 * under RCU. If the mapping still matches expectations then the
381 		 * mapping->host can be safely accessed as being a valid inode.
382 		 */
383 		rcu_read_lock();
384 
385 		if (READ_ONCE(folio->mapping) != mapping) {
386 			rcu_read_unlock();
387 			folio_put(folio);
388 
389 			goto again;
390 		}
391 
392 		inode = READ_ONCE(mapping->host);
393 		if (!inode) {
394 			rcu_read_unlock();
395 			folio_put(folio);
396 
397 			goto again;
398 		}
399 
400 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
401 		key->shared.i_seq = get_inode_sequence_number(inode);
402 		key->shared.pgoff = page_pgoff(folio, page);
403 		rcu_read_unlock();
404 	}
405 
406 out:
407 	folio_put(folio);
408 	return err;
409 }
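/*
 * Worked example (illustrative, hypothetical address): for a
 * PROCESS_PRIVATE futex at userspace address 0x7f0042001234 the fast
 * path above yields
 *
 *	key->private.mm      = current->mm;
 *	key->private.address = 0x7f0042001000;	(page-aligned, 4K pages)
 *	key->both.offset     = 0x234;
 *
 * so all threads of one process waiting on the same u32 compute identical
 * keys, while a shared mapping of the same page is keyed on
 * (i_seq, pgoff, offset) and therefore matches across processes.
 */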
410 
411 /**
412  * fault_in_user_writeable() - Fault in user address and verify RW access
413  * @uaddr:	pointer to faulting user space address
414  *
415  * Slow path to fixup the fault we just took in the atomic write
416  * access to @uaddr.
417  *
418  * We have no generic implementation of a non-destructive write to the
419  * user address. We know that we faulted in the atomic pagefault
420  * disabled section so we can as well avoid the #PF overhead by
421  * calling get_user_pages() right away.
422  */
423 int fault_in_user_writeable(u32 __user *uaddr)
424 {
425 	struct mm_struct *mm = current->mm;
426 	int ret;
427 
428 	mmap_read_lock(mm);
429 	ret = fixup_user_fault(mm, (unsigned long)uaddr,
430 			       FAULT_FLAG_WRITE, NULL);
431 	mmap_read_unlock(mm);
432 
433 	return ret < 0 ? ret : 0;
434 }
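/*
 * Typical caller pattern (illustrative sketch): the atomic futex ops run
 * with pagefaults disabled and retry after an explicit fixup, e.g.
 *
 *	if (futex_cmpxchg_value_locked(&curval, uaddr, uval, newval) == -EFAULT) {
 *		if (fault_in_user_writeable(uaddr))
 *			return -EFAULT;		unrecoverable
 *		goto retry;			fault fixed up, try again
 *	}
 *
 * handle_futex_death() below follows exactly this shape.
 */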
435 
436 /**
437  * futex_top_waiter() - Return the highest priority waiter on a futex
438  * @hb:		the hash bucket the futex_q's reside in
439  * @key:	the futex key (to distinguish it from other futex futex_q's)
440  *
441  * Must be called with the hb lock held.
442  */
443 struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
444 {
445 	struct futex_q *this;
446 
447 	plist_for_each_entry(this, &hb->chain, list) {
448 		if (futex_match(&this->key, key))
449 			return this;
450 	}
451 	return NULL;
452 }
453 
454 /**
455  * wait_for_owner_exiting - Block until the owner has exited
456  * @ret: owner's current futex lock status
457  * @exiting:	Pointer to the exiting task
458  *
459  * Caller must hold a refcount on @exiting.
460  */
461 void wait_for_owner_exiting(int ret, struct task_struct *exiting)
462 {
463 	if (ret != -EBUSY) {
464 		WARN_ON_ONCE(exiting);
465 		return;
466 	}
467 
468 	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
469 		return;
470 
471 	mutex_lock(&exiting->futex_exit_mutex);
472 	/*
473 	 * No point in doing state checking here. If the waiter got here
474 	 * while the task was in exec()->exec_futex_release() then it can
475 	 * have any FUTEX_STATE_* value when the waiter has acquired the
476 	 * mutex: OK if still running, EXITING or DEAD if it reached exit()
477 	 * already. Highly unlikely and not a problem. Just one more round
478 	 * through the futex maze.
479 	 */
480 	mutex_unlock(&exiting->futex_exit_mutex);
481 
482 	put_task_struct(exiting);
483 }
484 
485 /**
486  * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
487  * @q:	The futex_q to unqueue
488  *
489  * The q->lock_ptr must not be NULL and must be held by the caller.
490  */
491 void __futex_unqueue(struct futex_q *q)
492 {
493 	struct futex_hash_bucket *hb;
494 
495 	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
496 		return;
497 	lockdep_assert_held(q->lock_ptr);
498 
499 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
500 	plist_del(&q->list, &hb->chain);
501 	futex_hb_waiters_dec(hb);
502 }
503 
504 /* The key must be already stored in q->key. */
505 struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
506 	__acquires(&hb->lock)
507 {
508 	struct futex_hash_bucket *hb;
509 
510 	hb = futex_hash(&q->key);
511 
512 	/*
513 	 * Increment the counter before taking the lock so that
514 	 * a potential waker won't miss a to-be-slept task that is
515 	 * waiting for the spinlock. This is safe as all futex_q_lock()
516 	 * users end up calling futex_queue(). Similarly, for housekeeping,
517 	 * decrement the counter at futex_q_unlock() when some error has
518 	 * occurred and we don't end up adding the task to the list.
519 	 */
520 	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
521 
522 	q->lock_ptr = &hb->lock;
523 
524 	spin_lock(&hb->lock);
525 	return hb;
526 }
527 
528 void futex_q_unlock(struct futex_hash_bucket *hb)
529 	__releases(&hb->lock)
530 {
531 	spin_unlock(&hb->lock);
532 	futex_hb_waiters_dec(hb);
533 }
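/*
 * Pairing sketch (illustrative; futex_get_value_locked() and the
 * futex_queue() wrapper live in futex.h, and futex_queue() is assumed
 * here to take the same task argument as __futex_queue() below):
 *
 *	hb = futex_q_lock(q);			waiters++, hb->lock held
 *	ret = futex_get_value_locked(&uval, uaddr);
 *	if (ret || uval != expected_val)
 *		futex_q_unlock(hb);		error path: waiters--
 *	else
 *		futex_queue(q, hb, current);	enqueue; the wait path
 *						drops hb->lock later
 */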
534 
535 void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
536 		   struct task_struct *task)
537 {
538 	int prio;
539 
540 	/*
541 	 * The priority used to register this element is
542 	 * - either the real thread-priority for the real-time threads
543 	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
544 	 * - or MAX_RT_PRIO for non-RT threads.
545 	 * Thus, all RT-threads are woken first in priority order, and
546 	 * the others are woken last, in FIFO order.
547 	 */
548 	prio = min(current->normal_prio, MAX_RT_PRIO);
549 
550 	plist_node_init(&q->list, prio);
551 	plist_add(&q->list, &hb->chain);
552 	q->task = task;
553 }
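/*
 * Worked example (illustrative): a SCHED_FIFO task with rt_priority 10
 * has normal_prio 89 (MAX_RT_PRIO-1 - 10), keeps that value here and
 * sorts ahead of every SCHED_NORMAL task, all of which are clamped to
 * MAX_RT_PRIO (100) and therefore wake among themselves in FIFO order.
 */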
554 
555 /**
556  * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
557  * @q:	The futex_q to unqueue
558  *
559  * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
560  * be paired with exactly one earlier call to futex_queue().
561  *
562  * Return:
563  *  - 1 - if the futex_q was still queued (and we removed it);
564  *  - 0 - if the futex_q was already removed by the waking thread
565  */
566 int futex_unqueue(struct futex_q *q)
567 {
568 	spinlock_t *lock_ptr;
569 	int ret = 0;
570 
571 	/* In the common case we don't take the spinlock, which is nice. */
572 retry:
573 	/*
574 	 * q->lock_ptr can change between this read and the following spin_lock.
575 	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
576 	 * optimizing lock_ptr out of the logic below.
577 	 */
578 	lock_ptr = READ_ONCE(q->lock_ptr);
579 	if (lock_ptr != NULL) {
580 		spin_lock(lock_ptr);
581 		/*
582 		 * q->lock_ptr can change between reading it and
583 		 * spin_lock(), causing us to take the wrong lock.  This
584 		 * corrects the race condition.
585 		 *
586 		 * Reasoning goes like this: if we have the wrong lock,
587 		 * q->lock_ptr must have changed (maybe several times)
588 		 * between reading it and the spin_lock().  It can
589 		 * change again after the spin_lock() but only if it was
590 		 * already changed before the spin_lock().  It cannot,
591 		 * however, change back to the original value.  Therefore
592 		 * we can detect whether we acquired the correct lock.
593 		 */
594 		if (unlikely(lock_ptr != q->lock_ptr)) {
595 			spin_unlock(lock_ptr);
596 			goto retry;
597 		}
598 		__futex_unqueue(q);
599 
600 		BUG_ON(q->pi_state);
601 
602 		spin_unlock(lock_ptr);
603 		ret = 1;
604 	}
605 
606 	return ret;
607 }
608 
609 /*
610  * PI futexes can not be requeued and must remove themselves from the hash
611  * bucket. The hash bucket lock (i.e. lock_ptr) is held.
612  */
613 void futex_unqueue_pi(struct futex_q *q)
614 {
615 	/*
616 	 * If the lock was not acquired (due to timeout or signal) then the
617 	 * rt_waiter is removed before futex_q is. If this is observed by
618 	 * an unlocker after dropping the rtmutex wait lock and before
619 	 * acquiring the hash bucket lock, then the unlocker dequeues the
620 	 * futex_q from the hash bucket list to guarantee consistent state
621 	 * vs. userspace. Therefore the dequeue here must be conditional.
622 	 */
623 	if (!plist_node_empty(&q->list))
624 		__futex_unqueue(q);
625 
626 	BUG_ON(!q->pi_state);
627 	put_pi_state(q->pi_state);
628 	q->pi_state = NULL;
629 }
630 
631 /* Constants for the pending_op argument of handle_futex_death */
632 #define HANDLE_DEATH_PENDING	true
633 #define HANDLE_DEATH_LIST	false
634 
635 /*
636  * Process a futex-list entry, check whether it's owned by the
637  * dying task, and do notification if so:
638  */
639 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
640 			      bool pi, bool pending_op)
641 {
642 	u32 uval, nval, mval;
643 	pid_t owner;
644 	int err;
645 
646 	/* Futex address must be 32bit aligned */
647 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
648 		return -1;
649 
650 retry:
651 	if (get_user(uval, uaddr))
652 		return -1;
653 
654 	/*
655 	 * Special case for regular (non PI) futexes. The unlock path in
656 	 * user space has two race scenarios:
657 	 *
658 	 * 1. The unlock path releases the user space futex value and
659 	 *    before it can execute the futex() syscall to wake up
660 	 *    waiters it is killed.
661 	 *
662 	 * 2. A woken up waiter is killed before it can acquire the
663 	 *    futex in user space.
664 	 *
665 	 * In the second case, the wake up notification could be generated
666 	 * by the unlock path in user space after setting the futex value
667 	 * to zero or by the kernel after setting the OWNER_DIED bit below.
668 	 *
669 	 * In both cases the TID validation below prevents a wakeup of
670 	 * potential waiters which can cause these waiters to block
671 	 * forever.
672 	 *
673 	 * In both cases the following conditions are met:
674 	 *
675 	 *	1) task->robust_list->list_op_pending != NULL
676 	 *	   @pending_op == true
677 	 *	2) The owner part of user space futex value == 0
678 	 *	3) Regular futex: @pi == false
679 	 *
680 	 * If these conditions are met, it is safe to attempt waking up a
681 	 * potential waiter without touching the user space futex value and
682 	 * trying to set the OWNER_DIED bit. If the futex value is zero,
683 	 * the rest of the user space mutex state is consistent, so a woken
684 	 * waiter will just take over the uncontended futex. Setting the
685 	 * OWNER_DIED bit would create inconsistent state and malfunction
686 	 * of the user space owner died handling. Otherwise, the OWNER_DIED
687 	 * bit is already set, and the woken waiter is expected to deal with
688 	 * this.
689 	 */
690 	owner = uval & FUTEX_TID_MASK;
691 
692 	if (pending_op && !pi && !owner) {
693 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
694 			   FUTEX_BITSET_MATCH_ANY);
695 		return 0;
696 	}
697 
698 	if (owner != task_pid_vnr(curr))
699 		return 0;
700 
701 	/*
702 	 * Ok, this dying thread is truly holding a futex
703 	 * of interest. Set the OWNER_DIED bit atomically
704 	 * via cmpxchg, and if the value had FUTEX_WAITERS
705 	 * set, wake up a waiter (if any). (We have to do a
706 	 * futex_wake() even if OWNER_DIED is already set -
707 	 * to handle the rare but possible case of recursive
708 	 * thread-death.) The rest of the cleanup is done in
709 	 * userspace.
710 	 */
711 	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
712 
713 	/*
714 	 * We are not holding a lock here, but we want to have
715 	 * the pagefault_disable/enable() protection because
716 	 * we want to handle the fault gracefully. If the
717 	 * access fails we try to fault in the futex with R/W
718 	 * verification via get_user_pages. get_user() above
719 	 * does not guarantee R/W access. If that fails we
720 	 * give up and leave the futex locked.
721 	 */
722 	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
723 		switch (err) {
724 		case -EFAULT:
725 			if (fault_in_user_writeable(uaddr))
726 				return -1;
727 			goto retry;
728 
729 		case -EAGAIN:
730 			cond_resched();
731 			goto retry;
732 
733 		default:
734 			WARN_ON_ONCE(1);
735 			return err;
736 		}
737 	}
738 
739 	if (nval != uval)
740 		goto retry;
741 
742 	/*
743 	 * Wake robust non-PI futexes here. The wakeup of
744 	 * PI futexes happens in exit_pi_state():
745 	 */
746 	if (!pi && (uval & FUTEX_WAITERS)) {
747 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
748 			   FUTEX_BITSET_MATCH_ANY);
749 	}
750 
751 	return 0;
752 }
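/*
 * Worked example (illustrative, hypothetical TID): if the dead owner's
 * TID is 1234 and a waiter is queued, uval reads 0x800004d2
 * (FUTEX_WAITERS | 1234). The cmpxchg above stores mval == 0xc0000000
 * (FUTEX_WAITERS | FUTEX_OWNER_DIED); the woken waiter observes
 * OWNER_DIED, takes over the lock and performs the user space
 * consistency cleanup.
 */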
753 
754 /*
755  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
756  */
757 static inline int fetch_robust_entry(struct robust_list __user **entry,
758 				     struct robust_list __user * __user *head,
759 				     unsigned int *pi)
760 {
761 	unsigned long uentry;
762 
763 	if (get_user(uentry, (unsigned long __user *)head))
764 		return -EFAULT;
765 
766 	*entry = (void __user *)(uentry & ~1UL);
767 	*pi = uentry & 1;
768 
769 	return 0;
770 }
771 
772 /*
773  * Walk curr->robust_list (very carefully, it's a userspace list!)
774  * and mark any locks found there dead, and notify any waiters.
775  *
776  * We silently return on any sign of list-walking problem.
777  */
778 static void exit_robust_list(struct task_struct *curr)
779 {
780 	struct robust_list_head __user *head = curr->robust_list;
781 	struct robust_list __user *entry, *next_entry, *pending;
782 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
783 	unsigned int next_pi;
784 	unsigned long futex_offset;
785 	int rc;
786 
787 	/*
788 	 * Fetch the list head (which was registered earlier, via
789 	 * sys_set_robust_list()):
790 	 */
791 	if (fetch_robust_entry(&entry, &head->list.next, &pi))
792 		return;
793 	/*
794 	 * Fetch the relative futex offset:
795 	 */
796 	if (get_user(futex_offset, &head->futex_offset))
797 		return;
798 	/*
799 	 * Fetch any possibly pending lock-add first, and handle it
800 	 * if it exists:
801 	 */
802 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
803 		return;
804 
805 	next_entry = NULL;	/* avoid warning with gcc */
806 	while (entry != &head->list) {
807 		/*
808 		 * Fetch the next entry in the list before calling
809 		 * handle_futex_death:
810 		 */
811 		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
812 		/*
813 		 * A pending lock might already be on the list, so
814 		 * don't process it twice:
815 		 */
816 		if (entry != pending) {
817 			if (handle_futex_death((void __user *)entry + futex_offset,
818 						curr, pi, HANDLE_DEATH_LIST))
819 				return;
820 		}
821 		if (rc)
822 			return;
823 		entry = next_entry;
824 		pi = next_pi;
825 		/*
826 		 * Avoid excessively long or circular lists:
827 		 */
828 		if (!--limit)
829 			break;
830 
831 		cond_resched();
832 	}
833 
834 	if (pending) {
835 		handle_futex_death((void __user *)pending + futex_offset,
836 				   curr, pip, HANDLE_DEATH_PENDING);
837 	}
838 }
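/*
 * Userspace counterpart (illustrative sketch; struct my_mutex and its
 * futex_word member are hypothetical): a C library registers one list
 * head per thread and links each held robust mutex at a fixed offset:
 *
 *	struct robust_list_head head = {
 *		.list		 = { .next = &head.list },
 *		.futex_offset	 = offsetof(struct my_mutex, futex_word),
 *		.list_op_pending = NULL,
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * The walk above then derives each futex word as entry + futex_offset.
 */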
839 
840 #ifdef CONFIG_COMPAT
841 static void __user *futex_uaddr(struct robust_list __user *entry,
842 				compat_long_t futex_offset)
843 {
844 	compat_uptr_t base = ptr_to_compat(entry);
845 	void __user *uaddr = compat_ptr(base + futex_offset);
846 
847 	return uaddr;
848 }
849 
850 /*
851  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
852  */
853 static inline int
854 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
855 		   compat_uptr_t __user *head, unsigned int *pi)
856 {
857 	if (get_user(*uentry, head))
858 		return -EFAULT;
859 
860 	*entry = compat_ptr((*uentry) & ~1);
861 	*pi = (unsigned int)(*uentry) & 1;
862 
863 	return 0;
864 }
865 
866 /*
867  * Walk curr->robust_list (very carefully, it's a userspace list!)
868  * and mark any locks found there dead, and notify any waiters.
869  *
870  * We silently return on any sign of list-walking problem.
871  */
872 static void compat_exit_robust_list(struct task_struct *curr)
873 {
874 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
875 	struct robust_list __user *entry, *next_entry, *pending;
876 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
877 	unsigned int next_pi;
878 	compat_uptr_t uentry, next_uentry, upending;
879 	compat_long_t futex_offset;
880 	int rc;
881 
882 	/*
883 	 * Fetch the list head (which was registered earlier, via
884 	 * sys_set_robust_list()):
885 	 */
886 	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
887 		return;
888 	/*
889 	 * Fetch the relative futex offset:
890 	 */
891 	if (get_user(futex_offset, &head->futex_offset))
892 		return;
893 	/*
894 	 * Fetch any possibly pending lock-add first, and handle it
895 	 * if it exists:
896 	 */
897 	if (compat_fetch_robust_entry(&upending, &pending,
898 			       &head->list_op_pending, &pip))
899 		return;
900 
901 	next_entry = NULL;	/* avoid warning with gcc */
902 	while (entry != (struct robust_list __user *) &head->list) {
903 		/*
904 		 * Fetch the next entry in the list before calling
905 		 * handle_futex_death:
906 		 */
907 		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
908 			(compat_uptr_t __user *)&entry->next, &next_pi);
909 		/*
910 		 * A pending lock might already be on the list, so
911 		 * don't process it twice:
912 		 */
913 		if (entry != pending) {
914 			void __user *uaddr = futex_uaddr(entry, futex_offset);
915 
916 			if (handle_futex_death(uaddr, curr, pi,
917 					       HANDLE_DEATH_LIST))
918 				return;
919 		}
920 		if (rc)
921 			return;
922 		uentry = next_uentry;
923 		entry = next_entry;
924 		pi = next_pi;
925 		/*
926 		 * Avoid excessively long or circular lists:
927 		 */
928 		if (!--limit)
929 			break;
930 
931 		cond_resched();
932 	}
933 	if (pending) {
934 		void __user *uaddr = futex_uaddr(pending, futex_offset);
935 
936 		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
937 	}
938 }
939 #endif
940 
941 #ifdef CONFIG_FUTEX_PI
942 
943 /*
944  * This task is holding PI mutexes at exit time => bad.
945  * Kernel cleans up PI-state, but userspace is likely hosed.
946  * (Robust-futex cleanup is separate and might save the day for userspace.)
947  */
948 static void exit_pi_state_list(struct task_struct *curr)
949 {
950 	struct list_head *next, *head = &curr->pi_state_list;
951 	struct futex_pi_state *pi_state;
952 	struct futex_hash_bucket *hb;
953 	union futex_key key = FUTEX_KEY_INIT;
954 
955 	/*
956 	 * We are a ZOMBIE and nobody can enqueue itself on
957 	 * pi_state_list anymore, but we have to be careful
958 	 * versus waiters unqueueing themselves:
959 	 */
960 	raw_spin_lock_irq(&curr->pi_lock);
961 	while (!list_empty(head)) {
962 		next = head->next;
963 		pi_state = list_entry(next, struct futex_pi_state, list);
964 		key = pi_state->key;
965 		hb = futex_hash(&key);
966 
967 		/*
968 		 * We can race against put_pi_state() removing itself from the
969 		 * list (a waiter going away). put_pi_state() will first
970 		 * decrement the reference count and then modify the list, so
971 		 * it's possible to see the list entry but fail this reference
972 		 * acquire.
973 		 *
974 		 * In that case; drop the locks to let put_pi_state() make
975 		 * progress and retry the loop.
976 		 */
977 		if (!refcount_inc_not_zero(&pi_state->refcount)) {
978 			raw_spin_unlock_irq(&curr->pi_lock);
979 			cpu_relax();
980 			raw_spin_lock_irq(&curr->pi_lock);
981 			continue;
982 		}
983 		raw_spin_unlock_irq(&curr->pi_lock);
984 
985 		spin_lock(&hb->lock);
986 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
987 		raw_spin_lock(&curr->pi_lock);
988 		/*
989 		 * We dropped the pi-lock, so re-check whether this
990 		 * task still owns the PI-state:
991 		 */
992 		if (head->next != next) {
993 			/* retain curr->pi_lock for the loop invariant */
994 			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
995 			spin_unlock(&hb->lock);
996 			put_pi_state(pi_state);
997 			continue;
998 		}
999 
1000 		WARN_ON(pi_state->owner != curr);
1001 		WARN_ON(list_empty(&pi_state->list));
1002 		list_del_init(&pi_state->list);
1003 		pi_state->owner = NULL;
1004 
1005 		raw_spin_unlock(&curr->pi_lock);
1006 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1007 		spin_unlock(&hb->lock);
1008 
1009 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
1010 		put_pi_state(pi_state);
1011 
1012 		raw_spin_lock_irq(&curr->pi_lock);
1013 	}
1014 	raw_spin_unlock_irq(&curr->pi_lock);
1015 }
1016 #else
1017 static inline void exit_pi_state_list(struct task_struct *curr) { }
1018 #endif
1019 
1020 static void futex_cleanup(struct task_struct *tsk)
1021 {
1022 	if (unlikely(tsk->robust_list)) {
1023 		exit_robust_list(tsk);
1024 		tsk->robust_list = NULL;
1025 	}
1026 
1027 #ifdef CONFIG_COMPAT
1028 	if (unlikely(tsk->compat_robust_list)) {
1029 		compat_exit_robust_list(tsk);
1030 		tsk->compat_robust_list = NULL;
1031 	}
1032 #endif
1033 
1034 	if (unlikely(!list_empty(&tsk->pi_state_list)))
1035 		exit_pi_state_list(tsk);
1036 }
1037 
1038 /**
1039  * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1040  * @tsk:	task to set the state on
1041  *
1042  * Set the futex exit state of the task locklessly. The futex waiter code
1043  * observes that state when a task is exiting and loops until the task has
1044  * actually finished the futex cleanup. The worst case for this is that the
1045  * waiter runs through the wait loop until the state becomes visible.
1046  *
1047  * This is called from the recursive fault handling path in make_task_dead().
1048  *
1049  * This is best effort. Either the futex exit code has run already or
1050  * not. If the OWNER_DIED bit has been set on the futex then the waiter can
1051  * take it over. If not, the problem is pushed back to user space. If the
1052  * futex exit code did not run yet, then an already queued waiter might
1053  * block forever, but there is nothing which can be done about that.
1054  */
1055 void futex_exit_recursive(struct task_struct *tsk)
1056 {
1057 	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
1058 	if (tsk->futex_state == FUTEX_STATE_EXITING)
1059 		mutex_unlock(&tsk->futex_exit_mutex);
1060 	tsk->futex_state = FUTEX_STATE_DEAD;
1061 }
1062 
1063 static void futex_cleanup_begin(struct task_struct *tsk)
1064 {
1065 	/*
1066 	 * Prevent various race issues against a concurrent incoming waiter
1067 	 * including live locks by forcing the waiter to block on
1068 	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
1069 	 * attach_to_pi_owner().
1070 	 */
1071 	mutex_lock(&tsk->futex_exit_mutex);
1072 
1073 	/*
1074 	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
1075 	 *
1076 	 * This ensures that all subsequent checks of tsk->futex_state in
1077 	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
1078 	 * tsk->pi_lock held.
1079 	 *
1080 	 * It guarantees also that a pi_state which was queued right before
1081 	 * the state change under tsk->pi_lock by a concurrent waiter must
1082 	 * be observed in exit_pi_state_list().
1083 	 */
1084 	raw_spin_lock_irq(&tsk->pi_lock);
1085 	tsk->futex_state = FUTEX_STATE_EXITING;
1086 	raw_spin_unlock_irq(&tsk->pi_lock);
1087 }
1088 
1089 static void futex_cleanup_end(struct task_struct *tsk, int state)
1090 {
1091 	/*
1092 	 * Lockless store. The only side effect is that an observer might
1093 	 * take another loop until it becomes visible.
1094 	 */
1095 	tsk->futex_state = state;
1096 	/*
1097 	 * Drop the exit protection. This unblocks waiters which observed
1098 	 * FUTEX_STATE_EXITING to reevaluate the state.
1099 	 */
1100 	mutex_unlock(&tsk->futex_exit_mutex);
1101 }
1102 
1103 void futex_exec_release(struct task_struct *tsk)
1104 {
1105 	/*
1106 	 * The state handling is done for consistency, but in the case of
1107 	 * exec() there is no way to prevent further damage as the PID stays
1108 	 * the same. But for the unlikely and arguably buggy case that a
1109 	 * futex is held on exec(), this provides at least as much state
1110 	 * consistency protection as is possible.
1111 	 */
1112 	futex_cleanup_begin(tsk);
1113 	futex_cleanup(tsk);
1114 	/*
1115 	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
1116 	 * exec a new binary.
1117 	 */
1118 	futex_cleanup_end(tsk, FUTEX_STATE_OK);
1119 }
1120 
1121 void futex_exit_release(struct task_struct *tsk)
1122 {
1123 	futex_cleanup_begin(tsk);
1124 	futex_cleanup(tsk);
1125 	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
1126 }
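/*
 * Call-path sketch (illustrative): on the exit path this is invoked via
 * exit_mm_release() in kernel/fork.c, so the ordering is
 *
 *	futex_exit_release(tsk)
 *	  futex_cleanup_begin(tsk)	FUTEX_STATE_EXITING, mutex held
 *	  futex_cleanup(tsk)		robust lists + PI state
 *	  futex_cleanup_end(tsk, FUTEX_STATE_DEAD)
 */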
1127 
1128 static int __init futex_init(void)
1129 {
1130 	unsigned int futex_shift;
1131 	unsigned long i;
1132 
1133 #ifdef CONFIG_BASE_SMALL
1134 	futex_hashsize = 16;
1135 #else
1136 	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
1137 #endif
1138 
1139 	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
1140 					       futex_hashsize, 0, 0,
1141 					       &futex_shift, NULL,
1142 					       futex_hashsize, futex_hashsize);
1143 	futex_hashsize = 1UL << futex_shift;
1144 
1145 	for (i = 0; i < futex_hashsize; i++) {
1146 		atomic_set(&futex_queues[i].waiters, 0);
1147 		plist_head_init(&futex_queues[i].chain);
1148 		spin_lock_init(&futex_queues[i].lock);
1149 	}
1150 
1151 	return 0;
1152 }
1153 core_initcall(futex_init);
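/*
 * Worked sizing example (illustrative): with CONFIG_BASE_SMALL unset on
 * a 16-CPU machine, futex_init() requests
 * roundup_pow_of_two(256 * 16) = 4096 buckets; alloc_large_system_hash()
 * reports the actual shift back, and futex_hash() then masks with
 * futex_hashsize - 1 == 0xfff.
 */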
1154