xref: /linux/kernel/futex/requeue.c (revision b8e85e6f3a09fc56b0ff574887798962ef8a8f80)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/plist.h>
4 #include <linux/sched/signal.h>
5 
6 #include "futex.h"
7 #include "../locking/rtmutex_common.h"
8 
9 /*
10  * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an
11  * underlying rtmutex. The task which is about to be requeued could have
12  * just woken up (timeout, signal). After the wake up the task has to
13  * acquire hash bucket lock, which is held by the requeue code.  As a task
14  * can only be blocked on _ONE_ rtmutex at a time, the proxy lock blocking
15  * and the hash bucket lock blocking would collide and corrupt state.
16  *
17  * On !PREEMPT_RT this is not a problem and everything could be serialized
18  * on the hash bucket lock, but aside from the benefit of common code,
19  * this also avoids doing the requeue when the task is already on the
20  * way out and avoids taking the hash bucket lock of the original uaddr1
21  * once the requeue has been completed.
22  *
23  * The following state transitions are valid:
24  *
25  * On the waiter side:
26  *   Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IGNORE
27  *   Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_WAIT
28  *
29  * On the requeue side:
30  *   Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IN_PROGRESS
31  *   Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_DONE/LOCKED
32  *   Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_NONE (requeue failed)
33  *   Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_DONE/LOCKED
34  *   Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_IGNORE (requeue failed)
35  *
36  * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this
37  * signals that the waiter is already on the way out. It also means that
38  * the waiter is still on the 'wait' futex, i.e. uaddr1.
39  *
40  * The waiter side signals early wakeup to the requeue side by setting the
41  * state to either Q_REQUEUE_PI_IGNORE or Q_REQUEUE_PI_WAIT, depending on
42  * the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately
43  * proceed to take the hash bucket lock of uaddr1. If it set the state to
44  * WAIT, which means the wakeup interleaved with a requeue in progress, it
45  * has to wait for the requeue side to change the state, either to
46  * DONE/LOCKED or to IGNORE. DONE/LOCKED means the waiter q is now on the
47  * uaddr2 futex and either blocked (DONE) or has acquired it (LOCKED).
48  * IGNORE is set by the requeue side when the requeue attempt failed due
49  * to deadlock detection, so the waiter q is still on the uaddr1 futex.
50  */
51 enum {
52 	Q_REQUEUE_PI_NONE		=  0,
53 	Q_REQUEUE_PI_IGNORE,
54 	Q_REQUEUE_PI_IN_PROGRESS,
55 	Q_REQUEUE_PI_WAIT,
56 	Q_REQUEUE_PI_DONE,
57 	Q_REQUEUE_PI_LOCKED,
58 };
59 
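/*
 * For illustration, one possible interleaving of an early wakeup with a
 * requeue that is already in flight, expressed with the states above and
 * the helpers defined later in this file:
 *
 *   waiter (on uaddr1)                 requeue side (uaddr1 -> uaddr2)
 *   ------------------                 -------------------------------
 *   futex_wait_queue() sleeps
 *                                      futex_requeue_pi_prepare()
 *                                        NONE -> IN_PROGRESS
 *   <timeout or signal wakes the task>
 *   futex_requeue_pi_wakeup_sync()
 *     IN_PROGRESS -> WAIT
 *     waits for the state to change
 *                                      rt_mutex_start_proxy_lock()
 *                                      futex_requeue_pi_complete()
 *                                        WAIT -> DONE (or LOCKED)
 *     observes DONE/LOCKED and handles
 *     the wakeup against uaddr2
 *
 * Had the wakeup happened before futex_requeue_pi_prepare(), the waiter
 * side would have moved NONE -> IGNORE instead and the requeue side would
 * have skipped this waiter.
 */
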
60 const struct futex_q futex_q_init = {
61 	/* list gets initialized in futex_queue() */
62 	.wake		= futex_wake_mark,
63 	.key		= FUTEX_KEY_INIT,
64 	.bitset		= FUTEX_BITSET_MATCH_ANY,
65 	.requeue_state	= ATOMIC_INIT(Q_REQUEUE_PI_NONE),
66 };
67 
68 /**
69  * requeue_futex() - Requeue a futex_q from one hb to another
70  * @q:		the futex_q to requeue
71  * @hb1:	the source hash_bucket
72  * @hb2:	the target hash_bucket
73  * @key2:	the new key for the requeued futex_q
74  */
75 static inline
76 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
77 		   struct futex_hash_bucket *hb2, union futex_key *key2)
78 {
79 
80 	/*
81 	 * If key1 and key2 hash to the same bucket, no need to
82 	 * requeue.
83 	 */
84 	if (likely(&hb1->chain != &hb2->chain)) {
85 		plist_del(&q->list, &hb1->chain);
86 		futex_hb_waiters_dec(hb1);
87 		futex_hb_waiters_inc(hb2);
88 		plist_add(&q->list, &hb2->chain);
89 		q->lock_ptr = &hb2->lock;
90 	}
91 	q->key = *key2;
92 }
93 
94 static inline bool futex_requeue_pi_prepare(struct futex_q *q,
95 					    struct futex_pi_state *pi_state)
96 {
97 	int old, new;
98 
99 	/*
100 	 * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
101 	 * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
102 	 * ignore the waiter.
103 	 */
104 	old = atomic_read_acquire(&q->requeue_state);
105 	do {
106 		if (old == Q_REQUEUE_PI_IGNORE)
107 			return false;
108 
109 		/*
110 		 * futex_proxy_trylock_atomic() might have set it to
111 		 * IN_PROGRESS and an interleaved early wake to WAIT.
112 		 *
113 		 * An extra state for that trylock was considered, but
114 		 * that would just add more conditionals all over the
115 		 * place for dubious value.
116 		 */
117 		if (old != Q_REQUEUE_PI_NONE)
118 			break;
119 
120 		new = Q_REQUEUE_PI_IN_PROGRESS;
121 	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
122 
123 	q->pi_state = pi_state;
124 	return true;
125 }
126 
127 static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
128 {
129 	int old, new;
130 
131 	old = atomic_read_acquire(&q->requeue_state);
132 	do {
133 		if (old == Q_REQUEUE_PI_IGNORE)
134 			return;
135 
136 		if (locked >= 0) {
137 			/* Requeue succeeded. Set DONE or LOCKED */
138 			WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
139 				     old != Q_REQUEUE_PI_WAIT);
140 			new = Q_REQUEUE_PI_DONE + locked;
141 		} else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
142 			/* Deadlock, no early wakeup interleave */
143 			new = Q_REQUEUE_PI_NONE;
144 		} else {
145 			/* Deadlock, early wakeup interleave. */
146 			WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
147 			new = Q_REQUEUE_PI_IGNORE;
148 		}
149 	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
150 
151 #ifdef CONFIG_PREEMPT_RT
152 	/* If the waiter interleaved with the requeue let it know */
153 	if (unlikely(old == Q_REQUEUE_PI_WAIT))
154 		rcuwait_wake_up(&q->requeue_wait);
155 #endif
156 }
157 
158 static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
159 {
160 	int old, new;
161 
162 	old = atomic_read_acquire(&q->requeue_state);
163 	do {
164 		/* Is requeue done already? */
165 		if (old >= Q_REQUEUE_PI_DONE)
166 			return old;
167 
168 		/*
169 		 * If not done, then tell the requeue code to either ignore
170 		 * the waiter or to wake it up once the requeue is done.
171 		 */
172 		new = Q_REQUEUE_PI_WAIT;
173 		if (old == Q_REQUEUE_PI_NONE)
174 			new = Q_REQUEUE_PI_IGNORE;
175 	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
176 
177 	/* If the requeue was in progress, wait for it to complete */
178 	if (old == Q_REQUEUE_PI_IN_PROGRESS) {
179 #ifdef CONFIG_PREEMPT_RT
180 		rcuwait_wait_event(&q->requeue_wait,
181 				   atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
182 				   TASK_UNINTERRUPTIBLE);
183 #else
184 		(void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
185 #endif
186 	}
187 
188 	/*
189 	 * Requeue is now either prohibited or complete. Reread state
190 	 * because during the wait above it might have changed. Nothing
191 	 * will modify q->requeue_state after this point.
192 	 */
193 	return atomic_read(&q->requeue_state);
194 }
195 
196 /**
197  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
198  * @q:		the futex_q
199  * @key:	the key of the requeue target futex
200  * @hb:		the hash_bucket of the requeue target futex
201  *
202  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
203  * target futex if it is uncontended or via a lock steal.
204  *
205  * 1) Set @q::key to the requeue target futex key so the waiter can detect
206  *    the wakeup on the right futex.
207  *
208  * 2) Dequeue @q from the hash bucket.
209  *
210  * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock
211  *    acquisition.
212  *
213  * 4) Set @q::lock_ptr to the requeue target hb->lock for the case that
214  *    the waiter has to fix up the pi state.
215  *
216  * 5) Complete the requeue state so the waiter can make progress. After
217  *    this point the waiter task can return from the syscall immediately in
218  *    case that the pi state does not have to be fixed up.
219  *
220  * 6) Wake the waiter task.
221  *
222  * Must be called with both q->lock_ptr and hb->lock held.
223  */
224 static inline
225 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
226 			   struct futex_hash_bucket *hb)
227 {
228 	q->key = *key;
229 
230 	__futex_unqueue(q);
231 
232 	WARN_ON(!q->rt_waiter);
233 	q->rt_waiter = NULL;
234 
235 	q->lock_ptr = &hb->lock;
236 
237 	/* Signal locked state to the waiter */
238 	futex_requeue_pi_complete(q, 1);
239 	wake_up_state(q->task, TASK_NORMAL);
240 }
241 
242 /**
243  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
244  * @pifutex:		the user address of the to futex
245  * @hb1:		the from futex hash bucket, must be locked by the caller
246  * @hb2:		the to futex hash bucket, must be locked by the caller
247  * @key1:		the from futex key
248  * @key2:		the to futex key
249  * @ps:			address to store the pi_state pointer
250  * @exiting:		Pointer to store the task pointer of the owner task
251  *			which is in the middle of exiting
252  * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
253  *
254  * Try and get the lock on behalf of the top waiter if we can do it atomically.
255  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
256  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
257  * hb1 and hb2 must be held by the caller.
258  *
259  * @exiting is only set when the return value is -EBUSY. If so, this holds
260  * a refcount on the exiting task on return and the caller needs to drop it
261  * after waiting for the exit to complete.
262  *
263  * Return:
264  *  -  0 - failed to acquire the lock atomically;
265  *  - >0 - acquired the lock, return value is vpid of the top_waiter
266  *  - <0 - error
267  */
268 static int
269 futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
270 			   struct futex_hash_bucket *hb2, union futex_key *key1,
271 			   union futex_key *key2, struct futex_pi_state **ps,
272 			   struct task_struct **exiting, int set_waiters)
273 {
274 	struct futex_q *top_waiter;
275 	u32 curval;
276 	int ret;
277 
278 	if (futex_get_value_locked(&curval, pifutex))
279 		return -EFAULT;
280 
281 	if (unlikely(should_fail_futex(true)))
282 		return -EFAULT;
283 
284 	/*
285 	 * Find the top_waiter and determine if there are additional waiters.
286 	 * If the caller intends to requeue more than 1 waiter to pifutex,
287 	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
288 	 * as we have the means to handle the possible fault.  If not, don't set
289 	 * the bit unnecessarily as it will force the subsequent unlock to enter
290 	 * the kernel.
291 	 */
292 	top_waiter = futex_top_waiter(hb1, key1);
293 
294 	/* There are no waiters, nothing for us to do. */
295 	if (!top_waiter)
296 		return 0;
297 
298 	/*
299 	 * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
300 	 * and waiting on the 'waitqueue' futex which is always !PI.
301 	 */
302 	if (!top_waiter->rt_waiter || top_waiter->pi_state)
303 		return -EINVAL;
304 
305 	/* Ensure we requeue to the expected futex. */
306 	if (!futex_match(top_waiter->requeue_pi_key, key2))
307 		return -EINVAL;
308 
309 	/* Ensure that this does not race against an early wakeup */
310 	if (!futex_requeue_pi_prepare(top_waiter, NULL))
311 		return -EAGAIN;
312 
313 	/*
314 	 * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit
315 	 * in the contended case or if @set_waiters is true.
316 	 *
317 	 * In the contended case PI state is attached to the lock owner. If
318 	 * the user space lock can be acquired then PI state is attached to
319 	 * the new owner (@top_waiter->task) when @set_waiters is true.
320 	 */
321 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
322 				   exiting, set_waiters);
323 	if (ret == 1) {
324 		/*
325 		 * Lock was acquired in user space and PI state was
326 		 * attached to @top_waiter->task. That means state is fully
327 		 * consistent and the waiter can return to user space
328 		 * immediately after the wakeup.
329 		 */
330 		requeue_pi_wake_futex(top_waiter, key2, hb2);
331 	} else if (ret < 0) {
332 		/* Rewind top_waiter::requeue_state */
333 		futex_requeue_pi_complete(top_waiter, ret);
334 	} else {
335 		/*
336 		 * futex_lock_pi_atomic() did not acquire the user space
337 		 * futex, but managed to establish the proxy lock and pi
338 		 * state. top_waiter::requeue_state cannot be fixed up here
339 		 * because the waiter is not enqueued on the rtmutex
340 		 * yet. This is handled at the callsite depending on the
341 		 * result of rt_mutex_start_proxy_lock() which is
342 		 * guaranteed to be reached with this function returning 0.
343 		 */
344 	}
345 	return ret;
346 }
347 
348 /**
349  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
350  * @uaddr1:	source futex user address
351  * @flags1:	futex flags (FLAGS_SHARED, etc.)
352  * @uaddr2:	target futex user address
353  * @flags2:	futex flags (FLAGS_SHARED, etc.)
354  * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
355  * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
356  * @cmpval:	@uaddr1 expected value (or %NULL)
357  * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
358  *		pi futex (pi to pi requeue is not supported)
359  *
360  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
361  * uaddr2 atomically on behalf of the top waiter.
362  *
363  * Return:
364  *  - >=0 - on success, the number of tasks requeued or woken;
365  *  -  <0 - on error
366  */
367 int futex_requeue(u32 __user *uaddr1, unsigned int flags1,
368 		  u32 __user *uaddr2, unsigned int flags2,
369 		  int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi)
370 {
371 	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
372 	int task_count = 0, ret;
373 	struct futex_pi_state *pi_state = NULL;
374 	struct futex_hash_bucket *hb1, *hb2;
375 	struct futex_q *this, *next;
376 	DEFINE_WAKE_Q(wake_q);
377 
378 	if (nr_wake < 0 || nr_requeue < 0)
379 		return -EINVAL;
380 
381 	/*
382 	 * When PI is not supported, return -ENOSYS if requeue_pi is true;
383 	 * consequently the compiler knows requeue_pi is always false past
384 	 * this point which will optimize away all the conditional code
385 	 * further down.
386 	 */
387 	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
388 		return -ENOSYS;
389 
390 	if (requeue_pi) {
391 		/*
392 		 * Requeue PI only works on two distinct uaddrs. This
393 		 * check is only valid for private futexes. See below.
394 		 */
395 		if (uaddr1 == uaddr2)
396 			return -EINVAL;
397 
398 		/*
399 		 * futex_requeue() allows the caller to define the number
400 		 * of waiters to wake up via the @nr_wake argument. With
401 		 * REQUEUE_PI, waking up more than one waiter creates more
402 		 * problems than it solves. Waking up a waiter only makes
403 		 * sense if the PI futex @uaddr2 is uncontended, as this
404 		 * allows the requeue code to acquire the futex @uaddr2
405 		 * before waking the waiter. The waiter can then return to
406 		 * user space without further action. A secondary wakeup
407 		 * would just make the futex_wait_requeue_pi() handling
408 		 * more complex, because that code would have to look up
409 		 * pi_state and do more or less all the handling which the
410 		 * requeue code has to do for the to-be-requeued waiters.
411 		 * So restrict the number of waiters to wake to one, and
412 		 * only wake it up when the PI futex is uncontended.
413 		 * Otherwise requeue it and let the unlock of the PI futex
414 		 * handle the wakeup.
415 		 *
416 		 * All REQUEUE_PI users, e.g. pthread_cond_signal() and
417 		 * pthread_cond_broadcast(), must use nr_wake=1.
418 		 */
419 		if (nr_wake != 1)
420 			return -EINVAL;
421 
422 		/*
423 		 * requeue_pi requires a pi_state, try to allocate it now
424 		 * without any locks in case it fails.
425 		 */
426 		if (refill_pi_state_cache())
427 			return -ENOMEM;
428 	}
429 
430 retry:
431 	ret = get_futex_key(uaddr1, flags1, &key1, FUTEX_READ);
432 	if (unlikely(ret != 0))
433 		return ret;
434 	ret = get_futex_key(uaddr2, flags2, &key2,
435 			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
436 	if (unlikely(ret != 0))
437 		return ret;
438 
439 	/*
440 	 * The check above which compares uaddrs is not sufficient for
441 	 * shared futexes. We need to compare the keys:
442 	 */
443 	if (requeue_pi && futex_match(&key1, &key2))
444 		return -EINVAL;
445 
446 	hb1 = futex_hash(&key1);
447 	hb2 = futex_hash(&key2);
448 
449 retry_private:
450 	futex_hb_waiters_inc(hb2);
451 	double_lock_hb(hb1, hb2);
452 
453 	if (likely(cmpval != NULL)) {
454 		u32 curval;
455 
456 		ret = futex_get_value_locked(&curval, uaddr1);
457 
458 		if (unlikely(ret)) {
459 			double_unlock_hb(hb1, hb2);
460 			futex_hb_waiters_dec(hb2);
461 
462 			ret = get_user(curval, uaddr1);
463 			if (ret)
464 				return ret;
465 
466 			if (!(flags1 & FLAGS_SHARED))
467 				goto retry_private;
468 
469 			goto retry;
470 		}
471 		if (curval != *cmpval) {
472 			ret = -EAGAIN;
473 			goto out_unlock;
474 		}
475 	}
476 
477 	if (requeue_pi) {
478 		struct task_struct *exiting = NULL;
479 
480 		/*
481 		 * Attempt to acquire uaddr2 and wake the top waiter. If we
482 		 * intend to requeue waiters, force setting the FUTEX_WAITERS
483 		 * bit.  We force this here where we are able to easily handle
484 		 * faults rather than in the requeue loop below.
485 		 *
486 		 * Updates top_waiter::requeue_state if a top waiter exists.
487 		 */
488 		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
489 						 &key2, &pi_state,
490 						 &exiting, nr_requeue);
491 
492 		/*
493 		 * At this point the top_waiter has either taken uaddr2 or
494 		 * established and an initial refcount taken on it. In case
495 		 * of an error there's nothing.
496 		 * error there's nothing.
497 		 *
498 		 * The top waiter's requeue_state is up to date:
499 		 *
500 		 *  - If the lock was acquired atomically (ret == 1), then
501 		 *    the state is Q_REQUEUE_PI_LOCKED.
502 		 *
503 		 *    The top waiter has been dequeued and woken up and can
504 		 *    return to user space immediately. The kernel/user
505 		 *    space state is consistent. In case that there must be
506 		 *    space state is consistent. In case more waiters must
507 		 *    be requeued, the WAITERS bit in the user space futex
508 		 *    is set so the top waiter task has to go into the
509 		 *    syscall slowpath to unlock the futex. This
510 		 *    completed and the hash bucket locks have been
511 		 *    dropped.
512 		 *
513 		 *  - If the trylock failed with an error (ret < 0) then
514 		 *    the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
515 		 *    happened", or Q_REQUEUE_PI_IGNORE when there was an
516 		 *    interleaved early wakeup.
517 		 *
518 		 *  - If the trylock did not succeed (ret == 0) then the
519 		 *    state is either Q_REQUEUE_PI_IN_PROGRESS or
520 		 *    Q_REQUEUE_PI_WAIT if an early wakeup interleaved.
521 		 *    This will be cleaned up in the loop below, which
522 		 *    cannot fail because futex_proxy_trylock_atomic() did
523 		 *    the same sanity checks for requeue_pi as the loop
524 		 *    below does.
525 		 */
526 		switch (ret) {
527 		case 0:
528 			/* We hold a reference on the pi state. */
529 			break;
530 
531 		case 1:
532 			/*
533 			 * futex_proxy_trylock_atomic() acquired the user space
534 			 * futex. Adjust task_count.
535 			 */
536 			task_count++;
537 			ret = 0;
538 			break;
539 
540 		/*
541 		 * If the above failed, then pi_state is NULL and
542 		 * waiter::requeue_state is correct.
543 		 */
544 		case -EFAULT:
545 			double_unlock_hb(hb1, hb2);
546 			futex_hb_waiters_dec(hb2);
547 			ret = fault_in_user_writeable(uaddr2);
548 			if (!ret)
549 				goto retry;
550 			return ret;
551 		case -EBUSY:
552 		case -EAGAIN:
553 			/*
554 			 * Two reasons for this:
555 			 * - EBUSY: Owner is exiting and we just wait for the
556 			 *   exit to complete.
557 			 * - EAGAIN: The user space value changed.
558 			 */
559 			double_unlock_hb(hb1, hb2);
560 			futex_hb_waiters_dec(hb2);
561 			/*
562 			 * Handle the case where the owner is in the middle of
563 			 * exiting. Wait for the exit to complete, otherwise
564 			 * this task might loop forever, aka. live lock.
565 			 */
566 			wait_for_owner_exiting(ret, exiting);
567 			cond_resched();
568 			goto retry;
569 		default:
570 			goto out_unlock;
571 		}
572 	}
573 
574 	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
575 		if (task_count - nr_wake >= nr_requeue)
576 			break;
577 
578 		if (!futex_match(&this->key, &key1))
579 			continue;
580 
581 		/*
582 		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
583 		 * be paired with each other and no other futex ops.
584 		 *
585 		 * We should never be requeueing a futex_q with a pi_state,
586 		 * which is awaiting a futex_unlock_pi().
587 		 */
588 		if ((requeue_pi && !this->rt_waiter) ||
589 		    (!requeue_pi && this->rt_waiter) ||
590 		    this->pi_state) {
591 			ret = -EINVAL;
592 			break;
593 		}
594 
595 		/* Plain futexes just wake or requeue and are done */
596 		if (!requeue_pi) {
597 			if (++task_count <= nr_wake)
598 				this->wake(&wake_q, this);
599 			else
600 				requeue_futex(this, hb1, hb2, &key2);
601 			continue;
602 		}
603 
604 		/* Ensure we requeue to the expected futex for requeue_pi. */
605 		if (!futex_match(this->requeue_pi_key, &key2)) {
606 			ret = -EINVAL;
607 			break;
608 		}
609 
610 		/*
611 		 * Requeue nr_requeue waiters and possibly one more in the case
612 		 * of requeue_pi if we couldn't acquire the lock atomically.
613 		 *
614 		 * Prepare the waiter to take the rt_mutex. Take a refcount
615 		 * on the pi_state and store the pointer in the futex_q
616 		 * object of the waiter.
617 		 */
618 		get_pi_state(pi_state);
619 
620 		/* Don't requeue when the waiter is already on the way out. */
621 		if (!futex_requeue_pi_prepare(this, pi_state)) {
622 			/*
623 			 * Early woken waiter signaled that it is on the
624 			 * way out. Drop the pi_state reference and try the
625 			 * next waiter. @this->pi_state is still NULL.
626 			 */
627 			put_pi_state(pi_state);
628 			continue;
629 		}
630 
631 		ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
632 						this->rt_waiter,
633 						this->task);
634 
635 		if (ret == 1) {
636 			/*
637 			 * We got the lock. We neither drop the refcount
638 			 * on pi_state nor clear this->pi_state because the
639 			 * waiter needs the pi_state for cleaning up the
640 			 * user space value. It will drop the refcount
641 			 * after doing so. this::requeue_state is updated
642 			 * in the wakeup as well.
643 			 */
644 			requeue_pi_wake_futex(this, &key2, hb2);
645 			task_count++;
646 		} else if (!ret) {
647 			/* Waiter is queued, move it to hb2 */
648 			requeue_futex(this, hb1, hb2, &key2);
649 			futex_requeue_pi_complete(this, 0);
650 			task_count++;
651 		} else {
652 			/*
653 			 * rt_mutex_start_proxy_lock() detected a potential
654 			 * deadlock when we tried to queue that waiter.
655 			 * Drop the pi_state reference which we took above
656 			 * and remove the pointer to the state from the
657 			 * waiter's futex_q object.
658 			 */
659 			this->pi_state = NULL;
660 			put_pi_state(pi_state);
661 			futex_requeue_pi_complete(this, ret);
662 			/*
663 			 * We stop queueing more waiters and let user space
664 			 * deal with the mess.
665 			 */
666 			break;
667 		}
668 	}
669 
670 	/*
671 	 * We took an extra initial reference to the pi_state in
672 	 * futex_proxy_trylock_atomic(). We need to drop it here again.
673 	 */
674 	put_pi_state(pi_state);
675 
676 out_unlock:
677 	double_unlock_hb(hb1, hb2);
678 	wake_up_q(&wake_q);
679 	futex_hb_waiters_dec(hb2);
680 	return ret ? ret : task_count;
681 }
682 
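/*
 * For illustration only (not part of this file): a minimal user-space
 * sketch of the requeue side described above, assuming the usual futex(2)
 * argument convention; the helper name is made up. With
 * FUTEX_CMP_REQUEUE_PI, nr_wake is passed in the val slot and must be 1,
 * nr_requeue travels in the timeout slot, and val3 carries the expected
 * value of the source futex (@cmpval in futex_requeue()).
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *	#include <limits.h>
 *
 *	// Illustrative sketch: wake at most one waiter on the non-PI futex
 *	// 'cond' and requeue the rest onto the PI futex 'mutex', in the
 *	// style of pthread_cond_broadcast() implementations.
 *	static long cond_requeue_pi(uint32_t *cond, uint32_t cond_val,
 *				    uint32_t *mutex)
 *	{
 *		return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI,
 *			       1, INT_MAX, mutex, cond_val);
 *	}
 */
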
683 /**
684  * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex
685  * @hb:		the hash_bucket futex_q was originally enqueued on
686  * @q:		the futex_q woken while waiting to be requeued
687  * @timeout:	the timeout associated with the wait (NULL if none)
688  *
689  * Determine the cause for the early wakeup.
690  *
691  * Return:
692  *  -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR
693  */
694 static inline
695 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
696 				   struct futex_q *q,
697 				   struct hrtimer_sleeper *timeout)
698 {
699 	int ret;
700 
701 	/*
702 	 * With the hb lock held, we avoid races while we process the wakeup.
703 	 * We only need to hold hb (and not hb2) to ensure atomicity as the
704 	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
705 	 * It can't be requeued from uaddr2 to something else since we don't
706 	 * support a PI aware source futex for requeue.
707 	 */
708 	WARN_ON_ONCE(&hb->lock != q->lock_ptr);
709 
710 	/*
711 	 * We were woken prior to requeue by a timeout or a signal.
712 	 * Unqueue the futex_q and determine which it was.
713 	 */
714 	plist_del(&q->list, &hb->chain);
715 	futex_hb_waiters_dec(hb);
716 
717 	/* Handle spurious wakeups gracefully */
718 	ret = -EWOULDBLOCK;
719 	if (timeout && !timeout->task)
720 		ret = -ETIMEDOUT;
721 	else if (signal_pending(current))
722 		ret = -ERESTARTNOINTR;
723 	return ret;
724 }
725 
726 /**
727  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
728  * @uaddr:	the futex we initially wait on (non-pi)
729  * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); they must be
730  *		the same type, no requeueing from private to shared, etc.
731  * @val:	the expected value of uaddr
732  * @abs_time:	absolute timeout
733  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
734  * @uaddr2:	the pi futex we will take prior to returning to user-space
735  *
736  * The caller will wait on uaddr and will be requeued by futex_requeue() to
737  * uaddr2 which must be PI aware and distinct from uaddr.  Normal wakeup will wake
738  * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
739  * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
740  * without one, the pi logic would not know which task to boost/deboost, if
741  * there was a need to.
742  *
743  * We call schedule in futex_wait_queue() when we enqueue and return there
744  * via the following:
745  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
746  * 2) wakeup on uaddr2 after a requeue
747  * 3) signal
748  * 4) timeout
749  *
750  * If 3, cleanup and return -ERESTARTNOINTR.
751  *
752  * If 2, we may then block on trying to take the rt_mutex and return via:
753  * 5) successful lock
754  * 6) signal
755  * 7) timeout
756  * 8) other lock acquisition failure
757  *
758  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
759  *
760  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
761  *
762  * Return:
763  *  -  0 - On success;
764  *  - <0 - On error
765  */
766 int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
767 			  u32 val, ktime_t *abs_time, u32 bitset,
768 			  u32 __user *uaddr2)
769 {
770 	struct hrtimer_sleeper timeout, *to;
771 	struct rt_mutex_waiter rt_waiter;
772 	struct futex_hash_bucket *hb;
773 	union futex_key key2 = FUTEX_KEY_INIT;
774 	struct futex_q q = futex_q_init;
775 	struct rt_mutex_base *pi_mutex;
776 	int res, ret;
777 
778 	if (!IS_ENABLED(CONFIG_FUTEX_PI))
779 		return -ENOSYS;
780 
781 	if (uaddr == uaddr2)
782 		return -EINVAL;
783 
784 	if (!bitset)
785 		return -EINVAL;
786 
787 	to = futex_setup_timer(abs_time, &timeout, flags,
788 			       current->timer_slack_ns);
789 
790 	/*
791 	 * The waiter is allocated on our stack, manipulated by the requeue
792 	 * code while we sleep on uaddr.
793 	 */
794 	rt_mutex_init_waiter(&rt_waiter);
795 
796 	ret = get_futex_key(uaddr2, flags, &key2, FUTEX_WRITE);
797 	if (unlikely(ret != 0))
798 		goto out;
799 
800 	q.bitset = bitset;
801 	q.rt_waiter = &rt_waiter;
802 	q.requeue_pi_key = &key2;
803 
804 	/*
805 	 * Prepare to wait on uaddr. On success, it holds hb->lock and q
806 	 * is initialized.
807 	 */
808 	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
809 	if (ret)
810 		goto out;
811 
812 	/*
813 	 * The check above which compares uaddrs is not sufficient for
814 	 * shared futexes. We need to compare the keys:
815 	 */
816 	if (futex_match(&q.key, &key2)) {
817 		futex_q_unlock(hb);
818 		ret = -EINVAL;
819 		goto out;
820 	}
821 
822 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
823 	futex_wait_queue(hb, &q, to);
824 
825 	switch (futex_requeue_pi_wakeup_sync(&q)) {
826 	case Q_REQUEUE_PI_IGNORE:
827 		/* The waiter is still on uaddr1 */
828 		spin_lock(&hb->lock);
829 		ret = handle_early_requeue_pi_wakeup(hb, &q, to);
830 		spin_unlock(&hb->lock);
831 		break;
832 
833 	case Q_REQUEUE_PI_LOCKED:
834 		/* The requeue acquired the lock */
835 		if (q.pi_state && (q.pi_state->owner != current)) {
836 			spin_lock(q.lock_ptr);
837 			ret = fixup_pi_owner(uaddr2, &q, true);
838 			/*
839 			 * Drop the reference to the pi state which the
840 			 * requeue_pi() code acquired for us.
841 			 */
842 			put_pi_state(q.pi_state);
843 			spin_unlock(q.lock_ptr);
844 			/*
845 			 * Adjust the return value. It's either -EFAULT or
846 			 * success (1) but the caller expects 0 for success.
847 			 */
848 			ret = ret < 0 ? ret : 0;
849 		}
850 		break;
851 
852 	case Q_REQUEUE_PI_DONE:
853 		/* Requeue completed. Current is 'pi_blocked_on' the rtmutex */
854 		pi_mutex = &q.pi_state->pi_mutex;
855 		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
856 
857 		/*
858 		 * See futex_unlock_pi()'s cleanup: comment.
859 		 */
860 		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
861 			ret = 0;
862 
863 		spin_lock(q.lock_ptr);
864 		debug_rt_mutex_free_waiter(&rt_waiter);
865 		/*
866 		 * Fixup the pi_state owner and possibly acquire the lock if we
867 		 * haven't already.
868 		 */
869 		res = fixup_pi_owner(uaddr2, &q, !ret);
870 		/*
871 		 * If fixup_pi_owner() returned an error, propagate that.  If it
872 		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
873 		 */
874 		if (res)
875 			ret = (res < 0) ? res : 0;
876 
877 		futex_unqueue_pi(&q);
878 		spin_unlock(q.lock_ptr);
879 
880 		if (ret == -EINTR) {
881 			/*
882 			 * We've already been requeued, but cannot restart
883 			 * by calling futex_lock_pi() directly. We could
884 			 * restart this syscall, but it would detect that
885 			 * the user space "val" changed and return
886 			 * -EWOULDBLOCK.  Save the overhead of the restart
887 			 * and return -EWOULDBLOCK directly.
888 			 */
889 			ret = -EWOULDBLOCK;
890 		}
891 		break;
892 	default:
893 		BUG();
894 	}
895 
896 out:
897 	if (to) {
898 		hrtimer_cancel(&to->timer);
899 		destroy_hrtimer_on_stack(&to->timer);
900 	}
901 	return ret;
902 }
903 
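/*
 * For illustration only (not part of this file): the matching user-space
 * waiter side, again assuming the futex(2) argument convention and a
 * made-up helper name. With FUTEX_WAIT_REQUEUE_PI the bitset argument is
 * effectively fixed to FUTEX_BITSET_MATCH_ANY by the syscall entry code
 * before futex_wait_requeue_pi() above is called.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	// Illustrative sketch: wait on the non-PI futex 'cond' until a
 *	// FUTEX_CMP_REQUEUE_PI on the other side wakes or requeues us onto
 *	// the PI futex 'mutex'. A return of 0 means the kernel has already
 *	// acquired 'mutex' on this task's behalf.
 *	static long cond_wait_requeue_pi(uint32_t *cond, uint32_t cond_val,
 *					 uint32_t *mutex)
 *	{
 *		return syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI,
 *			       cond_val, NULL, mutex, 0);
 *	}
 */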