// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/plist.h>
#include <linux/sched/signal.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an
 * underlying rtmutex. The task which is about to be requeued could have
 * just woken up (timeout, signal). After the wake up the task has to
 * acquire the hash bucket lock, which is held by the requeue code. As a
 * task can only be blocked on _ONE_ rtmutex at a time, the proxy lock
 * blocking and the hash bucket lock blocking would collide and corrupt
 * state.
 *
 * On !PREEMPT_RT this is not a problem and everything could be serialized
 * on the hash bucket lock, but aside from the benefit of common code,
 * this allows us to avoid doing the requeue when the task is already on
 * the way out, and to avoid taking the hash bucket lock of the original
 * uaddr1 when the requeue has been completed.
 *
 * The following state transitions are valid:
 *
 * On the waiter side:
 *	Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IGNORE
 *	Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_WAIT
 *
 * On the requeue side:
 *	Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IN_PROGRESS
 *	Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_DONE/LOCKED
 *	Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_NONE (requeue failed)
 *	Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_DONE/LOCKED
 *	Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_IGNORE (requeue failed)
 *
 * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this
 * signals that the waiter is already on the way out. It also means that
 * the waiter is still on the 'wait' futex, i.e. uaddr1.
 *
 * The waiter side signals early wakeup to the requeue side either by
 * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT, depending
 * on the current state. In the Q_REQUEUE_PI_IGNORE case it can immediately
 * proceed to take the hash bucket lock of uaddr1. If it set the state to
 * WAIT, which means the wakeup interleaved with a requeue in progress, it
 * has to wait for the requeue side to change the state, either to
 * DONE/LOCKED or to IGNORE. DONE/LOCKED means the waiter q is now on the
 * uaddr2 futex and is either blocked (DONE) or has acquired it (LOCKED).
 * IGNORE is set by the requeue side when the requeue attempt failed via
 * deadlock detection, and therefore the waiter q is still on the uaddr1
 * futex.
 */
enum {
	Q_REQUEUE_PI_NONE		= 0,
	Q_REQUEUE_PI_IGNORE,
	Q_REQUEUE_PI_IN_PROGRESS,
	Q_REQUEUE_PI_WAIT,
	Q_REQUEUE_PI_DONE,
	Q_REQUEUE_PI_LOCKED,
};
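
/*
 * How the two requeue-PI opcodes are expected to pair up in user space
 * (an illustrative sketch only, e.g. a condvar built on a PI mutex;
 * &cond->seq and &mutex->lock are hypothetical names, raw syscall(2)
 * usage, wrappers and error handling omitted):
 *
 *	// Waiter: wait on the non-PI futex &cond->seq, to be requeued
 *	// onto the PI futex &mutex->lock by the signaler:
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, seq,
 *		timeout, &mutex->lock, 0);
 *
 *	// Signaler: wake at most one waiter (nr_wake must be 1) and
 *	// requeue the rest onto the PI futex:
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)(unsigned long)INT_MAX, &mutex->lock, seq);
 */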

const struct futex_q futex_q_init = {
	/* list gets initialized in futex_queue() */
	.wake		= futex_wake_mark,
	.key		= FUTEX_KEY_INIT,
	.bitset		= FUTEX_BITSET_MATCH_ANY,
	.requeue_state	= ATOMIC_INIT(Q_REQUEUE_PI_NONE),
};

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q: the futex_q to requeue
 * @hb1: the source hash_bucket
 * @hb2: the target hash_bucket
 * @key2: the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{
	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		futex_hb_waiters_dec(hb1);
		futex_hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
		/*
		 * hb1 and hb2 belong to the same futex_hash_bucket_private
		 * because if we managed to get a reference on hb1 then it
		 * can't be replaced. Therefore we avoid put(hb1)+get(hb2)
		 * here.
		 */
	}
	q->key = *key2;
}

static inline bool futex_requeue_pi_prepare(struct futex_q *q,
					    struct futex_pi_state *pi_state)
{
	int old, new;

	/*
	 * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
	 * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
	 * ignore the waiter.
	 */
	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return false;

		/*
		 * futex_proxy_trylock_atomic() might have set it to
		 * IN_PROGRESS and an interleaved early wake to WAIT.
		 *
		 * It was considered to have an extra state for that
		 * trylock, but that would just add more conditionals
		 * all over the place for a dubious value.
		 */
		if (old != Q_REQUEUE_PI_NONE)
			break;

		new = Q_REQUEUE_PI_IN_PROGRESS;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	q->pi_state = pi_state;
	return true;
}
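
/*
 * The update loop above (and in the requeue_state helpers below) is the
 * usual atomic_try_cmpxchg() retry idiom. A minimal model of it, with
 * transition_valid() and next_state() as hypothetical stand-ins for the
 * per-helper checks:
 *
 *	old = atomic_read_acquire(&state);
 *	do {
 *		if (!transition_valid(old))
 *			return;
 *		new = next_state(old);
 *	} while (!atomic_try_cmpxchg(&state, &old, new));
 *
 * On failure atomic_try_cmpxchg() updates 'old' to the current value, so
 * each retry re-evaluates the checks against the state observed at the
 * failed compare-exchange.
 */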

static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return;

		if (locked >= 0) {
			/*
			 * Requeue succeeded. Set DONE or LOCKED; this
			 * relies on Q_REQUEUE_PI_LOCKED being
			 * Q_REQUEUE_PI_DONE + 1 in the enum above.
			 */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
				     old != Q_REQUEUE_PI_WAIT);
			new = Q_REQUEUE_PI_DONE + locked;
		} else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
			/* Deadlock, no early wakeup interleave */
			new = Q_REQUEUE_PI_NONE;
		} else {
			/* Deadlock, early wakeup interleave. */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
			new = Q_REQUEUE_PI_IGNORE;
		}
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

#ifdef CONFIG_PREEMPT_RT
	/* If the waiter interleaved with the requeue let it know */
	if (unlikely(old == Q_REQUEUE_PI_WAIT))
		rcuwait_wake_up(&q->requeue_wait);
#endif
}

static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		/* Is requeue done already? */
		if (old >= Q_REQUEUE_PI_DONE)
			return old;

		/*
		 * If not done, then tell the requeue code to either ignore
		 * the waiter or to wake it up once the requeue is done.
		 */
		new = Q_REQUEUE_PI_WAIT;
		if (old == Q_REQUEUE_PI_NONE)
			new = Q_REQUEUE_PI_IGNORE;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	/* If the requeue was in progress, wait for it to complete */
	if (old == Q_REQUEUE_PI_IN_PROGRESS) {
#ifdef CONFIG_PREEMPT_RT
		rcuwait_wait_event(&q->requeue_wait,
				   atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
				   TASK_UNINTERRUPTIBLE);
#else
		(void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
#endif
	}

	/*
	 * Requeue is now either prohibited or complete. Reread state
	 * because during the wait above it might have changed. Nothing
	 * will modify q->requeue_state after this point.
	 */
	return atomic_read(&q->requeue_state);
}
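
/*
 * The value returned by futex_requeue_pi_wakeup_sync() tells the waiter
 * how to proceed; futex_wait_requeue_pi() below dispatches on it:
 *
 *	Q_REQUEUE_PI_IGNORE	- still queued on uaddr1, handle the
 *				  early wakeup (timeout/signal/spurious)
 *	Q_REQUEUE_PI_LOCKED	- the requeue acquired uaddr2 for us,
 *				  only the pi_state may need fixing up
 *	Q_REQUEUE_PI_DONE	- requeued and blocked on the rtmutex,
 *				  wait for it via the proxy lock
 */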

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q: the futex_q
 * @key: the key of the requeue target futex
 * @hb: the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.
 *
 * 1) Set @q::key to the requeue target futex key so the waiter can detect
 *    the wakeup on the right futex.
 *
 * 2) Dequeue @q from the hash bucket.
 *
 * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock
 *    acquisition.
 *
 * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that
 *    the waiter has to fixup the pi state.
 *
 * 5) Complete the requeue state so the waiter can make progress. After
 *    this point the waiter task can return from the syscall immediately in
 *    case that the pi state does not have to be fixed up.
 *
 * 6) Wake the waiter task.
 *
 * Must be called with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	struct task_struct *task;

	q->key = *key;
	__futex_unqueue(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;
	/*
	 * Acquire a reference for the waiter to ensure valid
	 * futex_q::lock_ptr.
	 */
	futex_hash_get(hb);
	q->drop_hb_ref = true;
	q->lock_ptr = &hb->lock;
	task = READ_ONCE(q->task);

	/* Signal locked state to the waiter */
	futex_requeue_pi_complete(q, 1);
	wake_up_state(task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex: the user address of the to futex
 * @hb1: the from futex hash bucket, must be locked by the caller
 * @hb2: the to futex hash bucket, must be locked by the caller
 * @key1: the from futex key
 * @key2: the to futex key
 * @ps: address to store the pi_state pointer
 * @exiting: Pointer to store the task pointer of the owner task
 *           which is in the middle of exiting
 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try to get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 *
 * Return:
 *  - 0  - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter;
 *  - <0 - error
 */
static int
futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
			   struct futex_hash_bucket *hb2, union futex_key *key1,
			   union futex_key *key2, struct futex_pi_state **ps,
			   struct task_struct **exiting, int set_waiters)
{
	struct futex_q *top_waiter;
	u32 curval;
	int ret;

	if (futex_get_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/*
	 * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
	 * and waiting on the 'waitqueue' futex which is always !PI.
	 */
	if (!top_waiter->rt_waiter || top_waiter->pi_state)
		return -EINVAL;

	/* Ensure we requeue to the expected futex. */
	if (!futex_match(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/* Ensure that this does not race against an early wakeup */
	if (!futex_requeue_pi_prepare(top_waiter, NULL)) {
		plist_del(&top_waiter->list, &hb1->chain);
		futex_hb_waiters_dec(hb1);
		return -EAGAIN;
	}

	/*
	 * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit
	 * in the contended case or if @set_waiters is true.
	 *
	 * In the contended case PI state is attached to the lock owner. If
	 * the user space lock can be acquired then PI state is attached to
	 * the new owner (@top_waiter->task) when @set_waiters is true.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   exiting, set_waiters);
	if (ret == 1) {
		/*
		 * Lock was acquired in user space and PI state was
		 * attached to @top_waiter->task. That means state is fully
		 * consistent and the waiter can return to user space
		 * immediately after the wakeup.
		 */
		requeue_pi_wake_futex(top_waiter, key2, hb2);
	} else if (ret < 0) {
		/* Rewind top_waiter::requeue_state */
		futex_requeue_pi_complete(top_waiter, ret);
	} else {
		/*
		 * futex_lock_pi_atomic() did not acquire the user space
		 * futex, but managed to establish the proxy lock and pi
		 * state. top_waiter::requeue_state cannot be fixed up here
		 * because the waiter is not enqueued on the rtmutex
		 * yet. This is handled at the callsite depending on the
		 * result of rt_mutex_start_proxy_lock() which is
		 * guaranteed to be reached with this function returning 0.
		 */
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1: source futex user address
 * @flags1: futex flags (FLAGS_SHARED, etc.)
 * @uaddr2: target futex user address
 * @flags2: futex flags (FLAGS_SHARED, etc.)
 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 * @cmpval: @uaddr1 expected value (or %NULL)
 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
 *              pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  - <0  - on error
 */
int futex_requeue(u32 __user *uaddr1, unsigned int flags1,
		  u32 __user *uaddr2, unsigned int flags2,
		  int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * If PI is not supported: return -ENOSYS if requeue_pi is true.
	 * The compiler then knows that requeue_pi is always false past
	 * this point, which optimizes away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * futex_requeue() allows the caller to define the number
		 * of waiters to wake up via the @nr_wake argument. With
		 * REQUEUE_PI, waking up more than one waiter creates more
		 * problems than it solves. Waking up a waiter only makes
		 * sense if the PI futex @uaddr2 is uncontended, as this
		 * allows the requeue code to acquire the futex @uaddr2
		 * before waking the waiter. The waiter can then return to
		 * user space without further action. A secondary wakeup
		 * would just make the futex_wait_requeue_pi() handling
		 * more complex, because that code would have to look up
		 * pi_state and do more or less all the handling which the
		 * requeue code has to do for the waiters to be requeued.
		 * So restrict the number of waiters to wake to one, and
		 * only wake it up when the PI futex is uncontended.
		 * Otherwise requeue it and let the unlock of the PI futex
		 * handle the wakeup.
		 *
		 * All REQUEUE_PI users, e.g. pthread_cond_signal() and
		 * pthread_cond_broadcast(), must use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
	}

retry:
	ret = get_futex_key(uaddr1, flags1, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;
	ret = get_futex_key(uaddr2, flags2, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && futex_match(&key1, &key2))
		return -EINVAL;

retry_private:
	if (1) {
		CLASS(hb, hb1)(&key1);
		CLASS(hb, hb2)(&key2);

		futex_hb_waiters_inc(hb2);
		double_lock_hb(hb1, hb2);

		if (likely(cmpval != NULL)) {
			u32 curval;

			ret = futex_get_value_locked(&curval, uaddr1);

			if (unlikely(ret)) {
				futex_hb_waiters_dec(hb2);
				double_unlock_hb(hb1, hb2);

				ret = get_user(curval, uaddr1);
				if (ret)
					return ret;

				if (!(flags1 & FLAGS_SHARED))
					goto retry_private;

				goto retry;
			}
			if (curval != *cmpval) {
				ret = -EAGAIN;
				goto out_unlock;
			}
		}

		if (requeue_pi) {
			struct task_struct *exiting = NULL;

			/*
			 * Attempt to acquire uaddr2 and wake the top waiter. If we
			 * intend to requeue waiters, force setting the FUTEX_WAITERS
			 * bit. We force this here where we are able to easily handle
			 * faults rather than in the requeue loop below.
			 *
			 * Updates top_waiter::requeue_state if a top waiter exists.
			 */
			ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
							 &key2, &pi_state,
							 &exiting, nr_requeue);

			/*
			 * At this point the top_waiter has either taken uaddr2 or
			 * is waiting on it. In both cases pi_state has been
			 * established and an initial refcount on it has been taken.
			 * In case of an error there's nothing.
			 *
			 * The top waiter's requeue_state is up to date:
			 *
			 * - If the lock was acquired atomically (ret == 1), then
			 *   the state is Q_REQUEUE_PI_LOCKED.
			 *
			 *   The top waiter has been dequeued and woken up and can
			 *   return to user space immediately. The kernel/user
			 *   space state is consistent. In case that there must be
			 *   more waiters requeued the WAITERS bit in the user
			 *   space futex is set so the top waiter task has to go
			 *   into the syscall slowpath to unlock the futex. This
			 *   will block until this requeue operation has been
			 *   completed and the hash bucket locks have been
			 *   dropped.
			 *
			 * - If the trylock failed with an error (ret < 0) then
			 *   the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
			 *   happened", or Q_REQUEUE_PI_IGNORE when there was an
			 *   interleaved early wakeup.
			 *
			 * - If the trylock did not succeed (ret == 0) then the
			 *   state is either Q_REQUEUE_PI_IN_PROGRESS or
			 *   Q_REQUEUE_PI_WAIT if an early wakeup interleaved.
			 *   This will be cleaned up in the loop below, which
			 *   cannot fail because futex_proxy_trylock_atomic() did
			 *   the same sanity checks for requeue_pi as the loop
			 *   below does.
			 */
			switch (ret) {
			case 0:
				/* We hold a reference on the pi state. */
				break;

			case 1:
				/*
				 * futex_proxy_trylock_atomic() acquired the user space
				 * futex. Adjust task_count.
				 */
				task_count++;
				ret = 0;
				break;

			/*
			 * If the above failed, then pi_state is NULL and
			 * waiter::requeue_state is correct.
			 */
			case -EFAULT:
				futex_hb_waiters_dec(hb2);
				double_unlock_hb(hb1, hb2);
				ret = fault_in_user_writeable(uaddr2);
				if (!ret)
					goto retry;
				return ret;
			case -EBUSY:
			case -EAGAIN:
				/*
				 * Two reasons for this:
				 * - EBUSY: Owner is exiting and we just wait for the
				 *   exit to complete.
				 * - EAGAIN: The user space value changed.
				 */
				futex_hb_waiters_dec(hb2);
				double_unlock_hb(hb1, hb2);
				/*
				 * Handle the case where the owner is in the middle of
				 * exiting. Wait for the exit to complete otherwise
				 * this task might loop forever, a.k.a. live lock.
				 */
				wait_for_owner_exiting(ret, exiting);
				cond_resched();
				goto retry;
			default:
				goto out_unlock;
			}
		}

		plist_for_each_entry_safe(this, next, &hb1->chain, list) {
			if (task_count - nr_wake >= nr_requeue)
				break;

			if (!futex_match(&this->key, &key1))
				continue;

			/*
			 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
			 * be paired with each other and no other futex ops.
			 *
			 * We should never be requeueing a futex_q with a pi_state,
			 * which is awaiting a futex_unlock_pi().
			 */
			if ((requeue_pi && !this->rt_waiter) ||
			    (!requeue_pi && this->rt_waiter) ||
			    this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Plain futexes just wake or requeue and are done */
			if (!requeue_pi) {
				if (++task_count <= nr_wake)
					this->wake(&wake_q, this);
				else
					requeue_futex(this, hb1, hb2, &key2);
				continue;
			}

			/* Ensure we requeue to the expected futex for requeue_pi. */
			if (!futex_match(this->requeue_pi_key, &key2)) {
				ret = -EINVAL;
				break;
			}

			/*
			 * Requeue nr_requeue waiters and possibly one more in the case
			 * of requeue_pi if we couldn't acquire the lock atomically.
			 *
			 * Prepare the waiter to take the rt_mutex. Take a refcount
			 * on the pi_state and store the pointer in the futex_q
			 * object of the waiter.
			 */
			get_pi_state(pi_state);

			/* Don't requeue when the waiter is already on the way out. */
			if (!futex_requeue_pi_prepare(this, pi_state)) {
				/*
				 * Early woken waiter signaled that it is on the
				 * way out. Drop the pi_state reference and try the
				 * next waiter. @this->pi_state is still NULL.
				 */
				put_pi_state(pi_state);
				continue;
			}

			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);

			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the refcount
				 * on pi_state nor clear this->pi_state because the
				 * waiter needs the pi_state for cleaning up the
				 * user space value. It will drop the refcount
				 * after doing so. this::requeue_state is updated
				 * in the wakeup as well.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				task_count++;
			} else if (!ret) {
				/* Waiter is queued, move it to hb2 */
				requeue_futex(this, hb1, hb2, &key2);
				futex_requeue_pi_complete(this, 0);
				task_count++;
			} else {
				/*
				 * rt_mutex_start_proxy_lock() detected a potential
				 * deadlock when we tried to queue that waiter.
				 * Drop the pi_state reference which we took above
				 * and remove the pointer to the state from the
				 * waiter's futex_q object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				futex_requeue_pi_complete(this, ret);
				/*
				 * We stop queueing more waiters and let user space
				 * deal with the mess.
				 */
				break;
			}
		}

		/*
		 * We took an extra initial reference to the pi_state in
		 * futex_proxy_trylock_atomic(). We need to drop it here again.
		 */
		put_pi_state(pi_state);

out_unlock:
		futex_hb_waiters_dec(hb2);
		double_unlock_hb(hb1, hb2);
	}
	wake_up_q(&wake_q);
	return ret ? ret : task_count;
}
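
/*
 * For reference, a sketch of how the futex syscall dispatcher is expected
 * to map the requeue opcodes onto futex_requeue() (the actual call sites
 * live elsewhere, in do_futex(); treat this as illustrative):
 *
 *	FUTEX_REQUEUE:		futex_requeue(uaddr, flags, uaddr2, flags, val, val2, NULL, 0);
 *	FUTEX_CMP_REQUEUE:	futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 0);
 *	FUTEX_CMP_REQUEUE_PI:	futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 1);
 */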

/**
 * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex
 * @hb: the hash_bucket the futex_q was originally enqueued on
 * @q: the futex_q woken while waiting to be requeued
 * @timeout: the timeout associated with the wait (NULL if none)
 *
 * Determine the cause for the early wakeup.
 *
 * Return:
 *  -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q,
				   struct hrtimer_sleeper *timeout)
{
	int ret;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	WARN_ON_ONCE(&hb->lock != q->lock_ptr);

	/*
	 * We were woken prior to requeue by a timeout or a signal.
	 * Conditionally unqueue the futex_q and determine which it was.
	 */
	if (!plist_node_empty(&q->list)) {
		plist_del(&q->list, &hb->chain);
		futex_hb_waiters_dec(hb);
	}

	/* Handle spurious wakeups gracefully */
	ret = -EWOULDBLOCK;
	if (timeout && !timeout->task)
		ret = -ETIMEDOUT;
	else if (signal_pending(current))
		ret = -ERESTARTNOINTR;
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr: the futex we initially wait on (non-pi)
 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *         the same type, no requeueing from private to shared, etc.
 * @val: the expected value of uaddr
 * @abs_time: absolute timeout
 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2: the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and distinct from uaddr. Normal wakeup will
 * wake on uaddr2 and complete the acquisition of the rt_mutex prior to
 * returning to userspace. This ensures the rt_mutex maintains an owner when
 * it has waiters; without one, the pi logic would not know which task to
 * boost/deboost, if there was a need to.
 *
 * We call schedule in futex_wait_queue() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  - 0  - On success;
 *  - <0 - On error
 */
int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
			  u32 val, ktime_t *abs_time, u32 bitset,
			  u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to;
	struct rt_mutex_waiter rt_waiter;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	struct rt_mutex_base *pi_mutex;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, it holds hb->lock and q
	 * is initialized.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &key2, current);
	if (ret)
		goto out;

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_do_wait(&q, to);

	switch (futex_requeue_pi_wakeup_sync(&q)) {
	case Q_REQUEUE_PI_IGNORE:
	{
		CLASS(hb, hb)(&q.key);

		/* The waiter is still on uaddr1 */
		spin_lock(&hb->lock);
		ret = handle_early_requeue_pi_wakeup(hb, &q, to);
		spin_unlock(&hb->lock);
	}
		break;

	case Q_REQUEUE_PI_LOCKED:
		/* The requeue acquired the lock */
		if (q.pi_state && (q.pi_state->owner != current)) {
			futex_q_lockptr_lock(&q);
			ret = fixup_pi_owner(uaddr2, &q, true);
			/*
			 * Drop the reference to the pi state which the
			 * requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
			/*
			 * Adjust the return value. It's either -EFAULT or
			 * success (1) but the caller expects 0 for success.
			 */
			ret = ret < 0 ? ret : 0;
		}
		break;

	case Q_REQUEUE_PI_DONE:
		/* Requeue completed. Current is 'pi_blocked_on' the rtmutex */
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		/*
		 * See futex_unlock_pi()'s cleanup: comment.
		 */
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		futex_q_lockptr_lock(&q);
		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_pi_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_pi_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		futex_unqueue_pi(&q);
		spin_unlock(q.lock_ptr);

		if (ret == -EINTR) {
			/*
			 * We've already been requeued, but cannot restart
			 * by calling futex_lock_pi() directly. We could
			 * restart this syscall, but it would detect that
			 * the user space "val" changed and return
			 * -EWOULDBLOCK. Save the overhead of the restart
			 * and return -EWOULDBLOCK directly.
			 */
			ret = -EWOULDBLOCK;
		}
		break;
	default:
		BUG();
	}

	if (q.drop_hb_ref) {
		CLASS(hb, hb)(&q.key);

		/* Additional reference from requeue_pi_wake_futex() */
		futex_hash_put(hb);
	}

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}