// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/plist.h>
#include <linux/sched/signal.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an
 * underlying rtmutex. The task which is about to be requeued could have
 * just woken up (timeout, signal). After the wakeup the task has to
 * acquire the hash bucket lock, which is held by the requeue code. As a
 * task can only be blocked on _ONE_ rtmutex at a time, the proxy lock
 * blocking and the hash bucket lock blocking would collide and corrupt
 * state.
 *
 * On !PREEMPT_RT this is not a problem and everything could be serialized
 * on the hash bucket lock, but aside from the benefit of common code,
 * this allows us to avoid doing the requeue when the task is already on
 * the way out and to avoid taking the hash bucket lock of the original
 * uaddr1 when the requeue has been completed.
 *
 * The following state transitions are valid:
 *
 * On the waiter side:
 *	Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IGNORE
 *	Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_WAIT
 *
 * On the requeue side:
 *	Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IN_PROGRESS
 *	Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_DONE/LOCKED
 *	Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_NONE (requeue failed)
 *	Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_DONE/LOCKED
 *	Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_IGNORE (requeue failed)
 *
 * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this
 * signals that the waiter is already on the way out. It also means that
 * the waiter is still on the 'wait' futex, i.e. uaddr1.
 *
 * The waiter side signals early wakeup to the requeue side either through
 * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT, depending
 * on the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately
 * proceed to take the hash bucket lock of uaddr1. If it set state to WAIT,
 * which means the wakeup interleaved with a requeue in progress, it has
 * to wait for the requeue side to change the state, either to DONE/LOCKED
 * or to IGNORE. DONE/LOCKED means the waiter q is now on the uaddr2 futex
 * and either blocked (DONE) or has acquired it (LOCKED). IGNORE is set by
 * the requeue side when the requeue attempt failed via deadlock detection
 * and therefore the waiter q is still on the uaddr1 futex.
 */
enum {
	Q_REQUEUE_PI_NONE		=  0,
	Q_REQUEUE_PI_IGNORE,
	Q_REQUEUE_PI_IN_PROGRESS,
	Q_REQUEUE_PI_WAIT,
	Q_REQUEUE_PI_DONE,
	Q_REQUEUE_PI_LOCKED,
};

const struct futex_q futex_q_init = {
	/* list gets initialized in futex_queue() */
	.wake		= futex_wake_mark,
	.key		= FUTEX_KEY_INIT,
	.bitset		= FUTEX_BITSET_MATCH_ANY,
	.requeue_state	= ATOMIC_INIT(Q_REQUEUE_PI_NONE),
};
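/*
 * For illustration, one possible interleaving of these transitions when an
 * early wakeup (timeout, signal) races with a requeue that is already in
 * flight. Time flows downward:
 *
 *	waiter				requeue side
 *	------				------------
 *					NONE -> IN_PROGRESS
 *	wakes early, observes
 *	IN_PROGRESS, moves
 *	IN_PROGRESS -> WAIT and
 *	blocks in
 *	futex_requeue_pi_wakeup_sync()
 *					WAIT -> DONE/LOCKED (requeued), or
 *					WAIT -> IGNORE (requeue failed),
 *					then wakes the waiter
 *	rereads the final state
 *	and acts on it
 *
 * If instead the waiter wins the race before the requeue side has started,
 * it moves NONE -> IGNORE and the requeue side skips it entirely.
 */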
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		futex_hb_waiters_dec(hb1);
		futex_hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
		/*
		 * hb1 and hb2 belong to the same futex_hash_bucket_private
		 * because if we managed to get a reference on hb1 then it
		 * can't be replaced. Therefore we avoid put(hb1)+get(hb2)
		 * here.
		 */
	}
	q->key = *key2;
}

static inline bool futex_requeue_pi_prepare(struct futex_q *q,
					    struct futex_pi_state *pi_state)
{
	int old, new;

	/*
	 * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
	 * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
	 * ignore the waiter.
	 */
	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return false;

		/*
		 * futex_proxy_trylock_atomic() might have set it to
		 * IN_PROGRESS and an interleaved early wake to WAIT.
		 *
		 * It was considered to have an extra state for that
		 * trylock, but that would just add more conditionals
		 * all over the place for a dubious value.
		 */
		if (old != Q_REQUEUE_PI_NONE)
			break;

		new = Q_REQUEUE_PI_IN_PROGRESS;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	q->pi_state = pi_state;
	return true;
}

static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return;

		if (locked >= 0) {
			/*
			 * Requeue succeeded. Set DONE or LOCKED: LOCKED
			 * directly follows DONE in the enum, so DONE +
			 * @locked yields DONE for locked == 0 and LOCKED
			 * for locked == 1.
			 */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
				     old != Q_REQUEUE_PI_WAIT);
			new = Q_REQUEUE_PI_DONE + locked;
		} else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
			/* Deadlock, no early wakeup interleave */
			new = Q_REQUEUE_PI_NONE;
		} else {
			/* Deadlock, early wakeup interleave. */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
			new = Q_REQUEUE_PI_IGNORE;
		}
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

#ifdef CONFIG_PREEMPT_RT
	/* If the waiter interleaved with the requeue let it know */
	if (unlikely(old == Q_REQUEUE_PI_WAIT))
		rcuwait_wake_up(&q->requeue_wait);
#endif
}

static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		/* Is requeue done already? */
		if (old >= Q_REQUEUE_PI_DONE)
			return old;

		/*
		 * If not done, then tell the requeue code to either ignore
		 * the waiter or to wake it up once the requeue is done.
		 */
		new = Q_REQUEUE_PI_WAIT;
		if (old == Q_REQUEUE_PI_NONE)
			new = Q_REQUEUE_PI_IGNORE;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	/* If the requeue was in progress, wait for it to complete */
	if (old == Q_REQUEUE_PI_IN_PROGRESS) {
#ifdef CONFIG_PREEMPT_RT
		rcuwait_wait_event(&q->requeue_wait,
				   atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
				   TASK_UNINTERRUPTIBLE);
#else
		(void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
#endif
	}

	/*
	 * Requeue is now either prohibited or complete. Reread state
	 * because during the wait above it might have changed. Nothing
	 * will modify q->requeue_state after this point.
	 */
	return atomic_read(&q->requeue_state);
}
/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.
 *
 * 1) Set @q::key to the requeue target futex key so the waiter can detect
 *    the wakeup on the right futex.
 *
 * 2) Dequeue @q from the hash bucket.
 *
 * 3) Set @q::rt_waiter to NULL so the woken-up task can detect atomic lock
 *    acquisition.
 *
 * 4) Set @q::lock_ptr to the requeue target hb->lock for the case that
 *    the waiter has to fix up the pi state.
 *
 * 5) Complete the requeue state so the waiter can make progress. After
 *    this point the waiter task can return from the syscall immediately in
 *    case that the pi state does not have to be fixed up.
 *
 * 6) Wake the waiter task.
 *
 * Must be called with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	q->key = *key;

	__futex_unqueue(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;
	/*
	 * Acquire a reference for the waiter to ensure valid
	 * futex_q::lock_ptr.
	 */
	futex_hash_get(hb);
	q->drop_hb_ref = true;
	q->lock_ptr = &hb->lock;

	/* Signal locked state to the waiter */
	futex_requeue_pi_complete(q, 1);
	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the 'to' futex
 * @hb1:		the 'from' futex hash bucket, must be locked by the caller
 * @hb2:		the 'to' futex hash bucket, must be locked by the caller
 * @key1:		the 'from' futex key
 * @key2:		the 'to' futex key
 * @ps:			address to store the pi_state pointer
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try to get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 *
 * Return:
 *  - 0  - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is the vpid of the top_waiter;
 *  - <0 - error
 */
static int
futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
			   struct futex_hash_bucket *hb2, union futex_key *key1,
			   union futex_key *key2, struct futex_pi_state **ps,
			   struct task_struct **exiting, int set_waiters)
{
	struct futex_q *top_waiter;
	u32 curval;
	int ret;

	if (futex_get_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have the means to handle the possible fault. If not, don't
	 * set the bit unnecessarily as it will force the subsequent unlock
	 * to enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/*
	 * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
	 * and waiting on the 'waitqueue' futex which is always !PI.
	 */
	if (!top_waiter->rt_waiter || top_waiter->pi_state)
		return -EINVAL;

	/* Ensure we requeue to the expected futex. */
	if (!futex_match(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/* Ensure that this does not race against an early wakeup */
	if (!futex_requeue_pi_prepare(top_waiter, NULL))
		return -EAGAIN;

	/*
	 * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit
	 * in the contended case or if @set_waiters is true.
	 *
	 * In the contended case PI state is attached to the lock owner. If
	 * the user space lock can be acquired then PI state is attached to
	 * the new owner (@top_waiter->task) when @set_waiters is true.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   exiting, set_waiters);
	if (ret == 1) {
		/*
		 * Lock was acquired in user space and PI state was
		 * attached to @top_waiter->task. That means state is fully
		 * consistent and the waiter can return to user space
		 * immediately after the wakeup.
		 */
		requeue_pi_wake_futex(top_waiter, key2, hb2);
	} else if (ret < 0) {
		/* Rewind top_waiter::requeue_state */
		futex_requeue_pi_complete(top_waiter, ret);
	} else {
		/*
		 * futex_lock_pi_atomic() did not acquire the user space
		 * futex, but managed to establish the proxy lock and pi
		 * state. top_waiter::requeue_state cannot be fixed up here
		 * because the waiter is not enqueued on the rtmutex
		 * yet. This is handled at the callsite depending on the
		 * result of rt_mutex_start_proxy_lock() which is
		 * guaranteed to be reached with this function returning 0.
		 */
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags1:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @flags2:	futex flags (FLAGS_SHARED, etc.)
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  - <0  - on error
 */
int futex_requeue(u32 __user *uaddr1, unsigned int flags1,
		  u32 __user *uaddr2, unsigned int flags2,
		  int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI is not supported: return -ENOSYS if requeue_pi is true.
	 * Consequently the compiler knows requeue_pi is always false past
	 * this point, which optimizes away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * futex_requeue() allows the caller to define the number
		 * of waiters to wake up via the @nr_wake argument. With
		 * REQUEUE_PI, waking up more than one waiter creates more
		 * problems than it solves. Waking up a waiter only makes
		 * sense if the PI futex @uaddr2 is uncontended, as this
		 * allows the requeue code to acquire the futex @uaddr2
		 * before waking the waiter. The waiter can then return to
		 * user space without further action. A secondary wakeup
		 * would just make the futex_wait_requeue_pi() handling
		 * more complex, because that code would have to look up
		 * pi_state and do more or less all the handling which the
		 * requeue code has to do for the waiters which are about
		 * to be requeued. So restrict the number of waiters to
		 * wake to one, and only wake it up when the PI futex is
		 * uncontended. Otherwise requeue it and let the unlock of
		 * the PI futex handle the wakeup.
		 *
		 * All REQUEUE_PI users, e.g. pthread_cond_signal() and
		 * pthread_cond_broadcast(), must use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state; try to allocate it now
		 * without any locks, in case the allocation fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
	}

retry:
	ret = get_futex_key(uaddr1, flags1, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;
	ret = get_futex_key(uaddr2, flags2, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && futex_match(&key1, &key2))
		return -EINVAL;

retry_private:
	if (1) {
		CLASS(hb, hb1)(&key1);
		CLASS(hb, hb2)(&key2);

		futex_hb_waiters_inc(hb2);
		double_lock_hb(hb1, hb2);

		if (likely(cmpval != NULL)) {
			u32 curval;

			ret = futex_get_value_locked(&curval, uaddr1);

			if (unlikely(ret)) {
				futex_hb_waiters_dec(hb2);
				double_unlock_hb(hb1, hb2);

				ret = get_user(curval, uaddr1);
				if (ret)
					return ret;

				if (!(flags1 & FLAGS_SHARED))
					goto retry_private;

				goto retry;
			}
			if (curval != *cmpval) {
				ret = -EAGAIN;
				goto out_unlock;
			}
		}

		if (requeue_pi) {
			struct task_struct *exiting = NULL;

			/*
			 * Attempt to acquire uaddr2 and wake the top waiter.
			 * If we intend to requeue waiters, force setting the
			 * FUTEX_WAITERS bit. We force this here where we are
			 * able to easily handle faults rather than in the
			 * requeue loop below.
			 *
			 * Updates top_waiter::requeue_state if a top waiter
			 * exists.
			 */
			ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
							 &key2, &pi_state,
							 &exiting, nr_requeue);

			/*
			 * At this point the top_waiter has either taken uaddr2
			 * or is waiting on it. In both cases pi_state has been
			 * established, with an initial refcount on it. In case
			 * of an error there's nothing.
			 *
			 * The top waiter's requeue_state is up to date:
			 *
			 * - If the lock was acquired atomically (ret == 1), then
			 *   the state is Q_REQUEUE_PI_LOCKED.
			 *
			 *   The top waiter has been dequeued and woken up and can
			 *   return to user space immediately. The kernel/user
			 *   space state is consistent. If more waiters must be
			 *   requeued, the WAITERS bit in the user space futex is
			 *   set so the top waiter task has to go into the syscall
			 *   slowpath to unlock the futex. That unlock will block
			 *   until this requeue operation has been completed and
			 *   the hash bucket locks have been dropped.
			 *
			 * - If the trylock failed with an error (ret < 0) then
			 *   the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
			 *   happened", or Q_REQUEUE_PI_IGNORE when there was an
			 *   interleaved early wakeup.
			 *
			 * - If the trylock did not succeed (ret == 0) then the
			 *   state is either Q_REQUEUE_PI_IN_PROGRESS or
			 *   Q_REQUEUE_PI_WAIT if an early wakeup interleaved.
			 *   This will be cleaned up in the loop below, which
			 *   cannot fail because futex_proxy_trylock_atomic() did
			 *   the same sanity checks for requeue_pi as the loop
			 *   below does.
			 */
			switch (ret) {
			case 0:
				/* We hold a reference on the pi state. */
				break;

			case 1:
				/*
				 * futex_proxy_trylock_atomic() acquired the
				 * user space futex. Adjust task_count.
				 */
				task_count++;
				ret = 0;
				break;

			/*
			 * If the above failed, then pi_state is NULL and
			 * waiter::requeue_state is correct.
			 */
			case -EFAULT:
				futex_hb_waiters_dec(hb2);
				double_unlock_hb(hb1, hb2);
				ret = fault_in_user_writeable(uaddr2);
				if (!ret)
					goto retry;
				return ret;
			case -EBUSY:
			case -EAGAIN:
				/*
				 * Two reasons for this:
				 * - EBUSY: Owner is exiting and we just wait for the
				 *   exit to complete.
				 * - EAGAIN: The user space value changed.
				 */
				futex_hb_waiters_dec(hb2);
				double_unlock_hb(hb1, hb2);
				/*
				 * Handle the case where the owner is in the middle of
				 * exiting. Wait for the exit to complete otherwise
				 * this task might loop forever, aka live lock.
				 */
				wait_for_owner_exiting(ret, exiting);
				cond_resched();
				goto retry;
			default:
				goto out_unlock;
			}
		}

		plist_for_each_entry_safe(this, next, &hb1->chain, list) {
			if (task_count - nr_wake >= nr_requeue)
				break;

			if (!futex_match(&this->key, &key1))
				continue;

			/*
			 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should
			 * always be paired with each other and no other futex ops.
			 *
			 * We should never be requeueing a futex_q with a pi_state,
			 * which is awaiting a futex_unlock_pi().
			 */
			if ((requeue_pi && !this->rt_waiter) ||
			    (!requeue_pi && this->rt_waiter) ||
			    this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Plain futexes just wake or requeue and are done */
			if (!requeue_pi) {
				if (++task_count <= nr_wake)
					this->wake(&wake_q, this);
				else
					requeue_futex(this, hb1, hb2, &key2);
				continue;
			}

			/* Ensure we requeue to the expected futex for requeue_pi. */
			if (!futex_match(this->requeue_pi_key, &key2)) {
				ret = -EINVAL;
				break;
			}

			/*
			 * Requeue nr_requeue waiters and possibly one more in the
			 * case of requeue_pi if we couldn't acquire the lock
			 * atomically.
			 *
			 * Prepare the waiter to take the rt_mutex. Take a refcount
			 * on the pi_state and store the pointer in the futex_q
			 * object of the waiter.
			 */
			get_pi_state(pi_state);

			/* Don't requeue when the waiter is already on the way out. */
			if (!futex_requeue_pi_prepare(this, pi_state)) {
				/*
				 * Early woken waiter signaled that it is on the
				 * way out. Drop the pi_state reference and try the
				 * next waiter. @this->pi_state is still NULL.
				 */
				put_pi_state(pi_state);
				continue;
			}

			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);

			if (ret == 1) {
				/*
				 * We got the lock. We neither drop the refcount
				 * on pi_state nor clear this->pi_state because the
				 * waiter needs the pi_state for cleaning up the
				 * user space value. It will drop the refcount
				 * after doing so. this::requeue_state is updated
				 * in the wakeup as well.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				task_count++;
			} else if (!ret) {
				/* Waiter is queued, move it to hb2 */
				requeue_futex(this, hb1, hb2, &key2);
				futex_requeue_pi_complete(this, 0);
				task_count++;
			} else {
				/*
				 * rt_mutex_start_proxy_lock() detected a potential
				 * deadlock when we tried to queue that waiter.
				 * Drop the pi_state reference which we took above
				 * and remove the pointer to the state from the
				 * waiter's futex_q object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				futex_requeue_pi_complete(this, ret);
				/*
				 * We stop queueing more waiters and let user space
				 * deal with the mess.
				 */
				break;
			}
		}

		/*
		 * We took an extra initial reference to the pi_state in
		 * futex_proxy_trylock_atomic(). We need to drop it here again.
		 */
		put_pi_state(pi_state);

out_unlock:
		futex_hb_waiters_dec(hb2);
		double_unlock_hb(hb1, hb2);
	}
	wake_up_q(&wake_q);
	return ret ? ret : task_count;
}

/**
 * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex
 * @hb:		the hash_bucket the futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Determine the cause for the early wakeup.
 *
 * Return:
 *  -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q,
				   struct hrtimer_sleeper *timeout)
{
	int ret;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI-aware source futex for requeue.
	 */
	WARN_ON_ONCE(&hb->lock != q->lock_ptr);

	/*
	 * We were woken prior to requeue by a timeout or a signal.
	 * Unqueue the futex_q and determine which it was.
	 */
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);

	/* Handle spurious wakeups gracefully */
	ret = -EWOULDBLOCK;
	if (timeout && !timeout->task)
		ret = -ETIMEDOUT;
	else if (signal_pending(current))
		ret = -ERESTARTNOINTR;
	return ret;
}
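/*
 * For orientation, a sketch of how the two halves pair up from user space.
 * This is an illustration only: the names are made up, error handling is
 * omitted, and the raw syscall(2) form is shown because glibc provides no
 * futex() wrapper. Argument layout as per futex(2).
 *
 *	// Waiter: wait on &cond, to be moved onto the PI futex &mutex.
 *	// val3 is ignored here; the kernel forces FUTEX_BITSET_MATCH_ANY.
 *	syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG,
 *		seen_cond_val, &abs_timeout, &mutex, 0);
 *
 *	// Requeuer: wake at most one waiter if &mutex is uncontended and
 *	// requeue the others from &cond to &mutex. nr_requeue travels in
 *	// the timeout argument slot; val3 carries the expected value of
 *	// cond (cmpval above).
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI | FUTEX_PRIVATE_FLAG,
 *		1, (void *)(unsigned long)INT_MAX, &mutex, expected_cond_val);
 *
 * Both calls must name the same PI futex (&mutex), which is what the
 * requeue_pi_key match enforces, and nr_wake must be 1 as explained in
 * futex_requeue().
 */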
/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI-aware and distinct from uaddr. Normal wakeup will
 * wake on uaddr2 and complete the acquisition of the rt_mutex prior to
 * returning to userspace. This ensures the rt_mutex maintains an owner when
 * it has waiters; without one, the pi logic would not know which task to
 * boost/deboost, if there were a need to.
 *
 * We call schedule in futex_wait_queue() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  - 0  - On success;
 *  - <0 - On error
 */
int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
			  u32 val, ktime_t *abs_time, u32 bitset,
			  u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to;
	struct rt_mutex_waiter rt_waiter;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	struct rt_mutex_base *pi_mutex;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, it holds hb->lock and q
	 * is initialized.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &key2, current);
	if (ret)
		goto out;

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_do_wait(&q, to);

	switch (futex_requeue_pi_wakeup_sync(&q)) {
	case Q_REQUEUE_PI_IGNORE:
	{
		CLASS(hb, hb)(&q.key);
		/* The waiter is still on uaddr1 */
		spin_lock(&hb->lock);
		ret = handle_early_requeue_pi_wakeup(hb, &q, to);
		spin_unlock(&hb->lock);
	}
		break;

	case Q_REQUEUE_PI_LOCKED:
		/* The requeue acquired the lock */
		if (q.pi_state && (q.pi_state->owner != current)) {
			futex_q_lockptr_lock(&q);
			ret = fixup_pi_owner(uaddr2, &q, true);
			/*
			 * Drop the reference to the pi state which the
			 * requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
			/*
			 * Adjust the return value. It's either -EFAULT or
			 * success (1) but the caller expects 0 for success.
			 */
			ret = ret < 0 ? ret : 0;
		}
		break;

	case Q_REQUEUE_PI_DONE:
		/* Requeue completed. Current is 'pi_blocked_on' the rtmutex */
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		/*
		 * See futex_unlock_pi()'s cleanup: comment.
		 */
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		futex_q_lockptr_lock(&q);
		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_pi_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_pi_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		futex_unqueue_pi(&q);
		spin_unlock(q.lock_ptr);

		if (ret == -EINTR) {
			/*
			 * We've already been requeued, but cannot restart
			 * by calling futex_lock_pi() directly. We could
			 * restart this syscall, but it would detect that
			 * the user space "val" changed and return
			 * -EWOULDBLOCK. Save the overhead of the restart
			 * and return -EWOULDBLOCK directly.
			 */
			ret = -EWOULDBLOCK;
		}
		break;
	default:
		BUG();
	}
	if (q.drop_hb_ref) {
		CLASS(hb, hb)(&q.key);
		/* Additional reference from requeue_pi_wake_futex() */
		futex_hash_put(hb);
	}

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}