// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/sched/signal.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an
 * underlying rtmutex. The task which is about to be requeued could have
 * just woken up (timeout, signal). After the wake up the task has to
 * acquire the hash bucket lock, which is held by the requeue code. As a
 * task can only be blocked on _ONE_ rtmutex at a time, the proxy lock
 * blocking and the hash bucket lock blocking would collide and corrupt
 * state.
 *
 * On !PREEMPT_RT this is not a problem and everything could be serialized
 * on the hash bucket lock, but aside from having the benefit of common
 * code, this allows us to avoid doing the requeue when the task is
 * already on the way out, and to avoid taking the hash bucket lock of the
 * original uaddr1 when the requeue has been completed.
 *
 * The following state transitions are valid:
 *
 * On the waiter side:
 *   Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IGNORE
 *   Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_WAIT
 *
 * On the requeue side:
 *   Q_REQUEUE_PI_NONE		-> Q_REQUEUE_PI_IN_PROGRESS
 *   Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_DONE/LOCKED
 *   Q_REQUEUE_PI_IN_PROGRESS	-> Q_REQUEUE_PI_NONE (requeue failed)
 *   Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_DONE/LOCKED
 *   Q_REQUEUE_PI_WAIT		-> Q_REQUEUE_PI_IGNORE (requeue failed)
 *
 * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this
 * signals that the waiter is already on the way out. It also means that
 * the waiter is still on the 'wait' futex, i.e. uaddr1.
 *
 * The waiter side signals early wakeup to the requeue side either by
 * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT, depending
 * on the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately
 * proceed to take the hash bucket lock of uaddr1. If it set state to WAIT,
 * which means the wakeup interleaved with a requeue in progress, it has
 * to wait for the requeue side to change the state, either to DONE/LOCKED
 * or to IGNORE. DONE/LOCKED means the waiter q is now on the uaddr2 futex
 * and either blocked (DONE) or has acquired it (LOCKED). IGNORE is set by
 * the requeue side when the requeue attempt failed via deadlock detection
 * and therefore the waiter q is still on the uaddr1 futex.
 */
enum {
	Q_REQUEUE_PI_NONE = 0,
	Q_REQUEUE_PI_IGNORE,
	Q_REQUEUE_PI_IN_PROGRESS,
	Q_REQUEUE_PI_WAIT,
	Q_REQUEUE_PI_DONE,
	Q_REQUEUE_PI_LOCKED,
};

const struct futex_q futex_q_init = {
	/* list gets initialized in futex_queue() */
	.wake		= futex_wake_mark,
	.key		= FUTEX_KEY_INIT,
	.bitset		= FUTEX_BITSET_MATCH_ANY,
	.requeue_state	= ATOMIC_INIT(Q_REQUEUE_PI_NONE),
};
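
/*
 * Editorial illustration (a sketch, not kernel code): a condvar-style
 * user pairs the two requeue-PI operations roughly as below. The
 * cond/mutex/seq names are made up for this example; see futex(2) for
 * the argument conventions (nr_requeue travels in the timeout slot of
 * the raw syscall):
 *
 *	// Waiter: block on the non-PI futex &cond->seq; a later requeue
 *	// moves it to the PI futex &mutex->lock:
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, seq,
 *		NULL, &mutex->lock, 0);
 *
 *	// Broadcaster: wake at most one waiter and requeue the rest
 *	// onto the PI futex:
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)(unsigned long)INT_MAX, &mutex->lock, seq);
 */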

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		futex_hb_waiters_dec(hb1);
		futex_hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	q->key = *key2;
}

static inline bool futex_requeue_pi_prepare(struct futex_q *q,
					    struct futex_pi_state *pi_state)
{
	int old, new;

	/*
	 * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
	 * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
	 * ignore the waiter.
	 */
	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return false;

		/*
		 * futex_proxy_trylock_atomic() might have set it to
		 * IN_PROGRESS and an interleaved early wake to WAIT.
		 *
		 * It was considered to have an extra state for that
		 * trylock, but that would just add more conditionals
		 * all over the place for a dubious value.
		 */
		if (old != Q_REQUEUE_PI_NONE)
			break;

		new = Q_REQUEUE_PI_IN_PROGRESS;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	q->pi_state = pi_state;
	return true;
}

static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return;

		if (locked >= 0) {
			/* Requeue succeeded. Set DONE or LOCKED */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
				     old != Q_REQUEUE_PI_WAIT);
			/*
			 * Relies on Q_REQUEUE_PI_LOCKED directly following
			 * Q_REQUEUE_PI_DONE in the enum, so DONE + 1
			 * encodes LOCKED.
			 */
			new = Q_REQUEUE_PI_DONE + locked;
		} else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
			/* Deadlock, no early wakeup interleave */
			new = Q_REQUEUE_PI_NONE;
		} else {
			/* Deadlock, early wakeup interleave. */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
			new = Q_REQUEUE_PI_IGNORE;
		}
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

#ifdef CONFIG_PREEMPT_RT
	/* If the waiter interleaved with the requeue let it know */
	if (unlikely(old == Q_REQUEUE_PI_WAIT))
		rcuwait_wake_up(&q->requeue_wait);
#endif
}
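
/*
 * Editorial note on the loop idiom in the helpers above and below (a
 * sketch; transition_valid() and next_state() are placeholders for the
 * open coded checks): atomic_try_cmpxchg() updates @old with the current
 * value on failure, so the loop never needs an explicit re-read:
 *
 *	old = atomic_read_acquire(&q->requeue_state);
 *	do {
 *		if (!transition_valid(old))
 *			break;
 *		new = next_state(old);
 *	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
 */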

static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		/* Is requeue done already? */
		if (old >= Q_REQUEUE_PI_DONE)
			return old;

		/*
		 * If not done, then tell the requeue code to either ignore
		 * the waiter or to wake it up once the requeue is done.
		 */
		new = Q_REQUEUE_PI_WAIT;
		if (old == Q_REQUEUE_PI_NONE)
			new = Q_REQUEUE_PI_IGNORE;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	/* If the requeue was in progress, wait for it to complete */
	if (old == Q_REQUEUE_PI_IN_PROGRESS) {
#ifdef CONFIG_PREEMPT_RT
		rcuwait_wait_event(&q->requeue_wait,
				   atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
				   TASK_UNINTERRUPTIBLE);
#else
		(void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
#endif
	}

	/*
	 * Requeue is now either prohibited or complete. Reread state
	 * because during the wait above it might have changed. Nothing
	 * will modify q->requeue_state after this point.
	 */
	return atomic_read(&q->requeue_state);
}
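
/*
 * Editorial illustration: combining the three helpers, the interleaved
 * early-wakeup case plays out as below (a sketch derived from the state
 * diagram at the top of this file):
 *
 *	Waiter					Requeue side
 *	------					------------
 *						prepare():  NONE -> IN_PROGRESS
 *	wakeup_sync(): IN_PROGRESS -> WAIT
 *	rcuwait_wait_event() on PREEMPT_RT,
 *	spin-wait otherwise
 *						complete(): WAIT -> DONE/LOCKED
 *						rcuwait_wake_up()
 *	reread state, act on DONE/LOCKED
 */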

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.
 *
 * 1) Set @q::key to the requeue target futex key so the waiter can detect
 *    the wakeup on the right futex.
 *
 * 2) Dequeue @q from the hash bucket.
 *
 * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock
 *    acquisition.
 *
 * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that
 *    the waiter has to fixup the pi state.
 *
 * 5) Complete the requeue state so the waiter can make progress. After
 *    this point the waiter task can return from the syscall immediately in
 *    case that the pi state does not have to be fixed up.
 *
 * 6) Wake the waiter task.
 *
 * Must be called with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	q->key = *key;

	__futex_unqueue(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	/* Signal locked state to the waiter */
	futex_requeue_pi_complete(q, 1);
	wake_up_state(q->task, TASK_NORMAL);
}
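
/*
 * Waiter-side counterpart (editorial sketch): after the wakeup in step 6
 * the task in futex_wait_requeue_pi() observes
 *
 *	futex_requeue_pi_wakeup_sync(&q) == Q_REQUEUE_PI_LOCKED
 *
 * and therefore skips the rt_mutex proxy-lock wait entirely; all that can
 * remain is the pi_state owner fixup mentioned in step 4.
 */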

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 *
 * Return:
 *  -  0 - failed to acquire the lock atomically;
 *  -  1 - acquired the lock on behalf of the top waiter;
 *  - <0 - error
 */
static int
futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
			   struct futex_hash_bucket *hb2, union futex_key *key1,
			   union futex_key *key2, struct futex_pi_state **ps,
			   struct task_struct **exiting, int set_waiters)
{
	struct futex_q *top_waiter;
	u32 curval;
	int ret;

	if (futex_get_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/*
	 * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
	 * and waiting on the 'waitqueue' futex which is always !PI.
	 */
	if (!top_waiter->rt_waiter || top_waiter->pi_state)
		return -EINVAL;

	/* Ensure we requeue to the expected futex. */
	if (!futex_match(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/* Ensure that this does not race against an early wakeup */
	if (!futex_requeue_pi_prepare(top_waiter, NULL))
		return -EAGAIN;

	/*
	 * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit
	 * in the contended case or if @set_waiters is true.
	 *
	 * In the contended case PI state is attached to the lock owner. If
	 * the user space lock can be acquired then PI state is attached to
	 * the new owner (@top_waiter->task) when @set_waiters is true.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   exiting, set_waiters);
	if (ret == 1) {
		/*
		 * Lock was acquired in user space and PI state was
		 * attached to @top_waiter->task. That means state is fully
		 * consistent and the waiter can return to user space
		 * immediately after the wakeup.
		 */
		requeue_pi_wake_futex(top_waiter, key2, hb2);
	} else if (ret < 0) {
		/* Rewind top_waiter::requeue_state */
		futex_requeue_pi_complete(top_waiter, ret);
	} else {
		/*
		 * futex_lock_pi_atomic() did not acquire the user space
		 * futex, but managed to establish the proxy lock and pi
		 * state. top_waiter::requeue_state cannot be fixed up here
		 * because the waiter is not enqueued on the rtmutex
		 * yet. This is handled at the callsite depending on the
		 * result of rt_mutex_start_proxy_lock() which is
		 * guaranteed to be reached with this function returning 0.
		 */
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags1:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @flags2:	futex flags (FLAGS_SHARED, etc.)
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
 */
int futex_requeue(u32 __user *uaddr1, unsigned int flags1,
		  u32 __user *uaddr2, unsigned int flags2,
		  int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI is not supported: return -ENOSYS if requeue_pi is true.
	 * Consequently the compiler knows requeue_pi is always false past
	 * this point, which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * futex_requeue() allows the caller to define the number
		 * of waiters to wake up via the @nr_wake argument. With
		 * REQUEUE_PI, waking up more than one waiter creates more
		 * problems than it solves. Waking up a waiter only makes
		 * sense if the PI futex @uaddr2 is uncontended, as this
		 * allows the requeue code to acquire the futex
		 * @uaddr2 before waking the waiter. The waiter can then
		 * return to user space without further action. A secondary
		 * wakeup would just make the futex_wait_requeue_pi()
		 * handling more complex, because that code would have to
		 * look up pi_state and do more or less all the handling
		 * which the requeue code has to do for the to be requeued
		 * waiters. So restrict the number of waiters to wake to
		 * one, and only wake it up when the PI futex is
		 * uncontended. Otherwise requeue it and let the unlock of
		 * the PI futex handle the wakeup.
		 *
		 * All REQUEUE_PI users, e.g. pthread_cond_signal() and
		 * pthread_cond_broadcast(), must use nr_wake=1. See the
		 * illustrative mapping below.
		 */
		if (nr_wake != 1)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
	}
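
	/*
	 * Editorial illustration of the canonical nr_wake/nr_requeue
	 * combinations mentioned above (a sketch, cf. futex(2)):
	 *
	 *	pthread_cond_signal():    nr_wake = 1, nr_requeue = 0
	 *	pthread_cond_broadcast(): nr_wake = 1, nr_requeue = INT_MAX
	 */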

retry:
	ret = get_futex_key(uaddr1, flags1, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;
	ret = get_futex_key(uaddr2, flags2, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && futex_match(&key1, &key2))
		return -EINVAL;

	hb1 = futex_hash(&key1);
	hb2 = futex_hash(&key2);

retry_private:
	futex_hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = futex_get_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			futex_hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				return ret;

			if (!(flags1 & FLAGS_SHARED))
				goto retry_private;

			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi) {
		struct task_struct *exiting = NULL;

		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit. We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 *
		 * Updates topwaiter::requeue_state if a top waiter exists.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state,
						 &exiting, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or
		 * is waiting on it. In both cases pi_state has been
		 * established, with an initial refcount on it. In case of
		 * an error there's nothing.
		 *
		 * The top waiter's requeue_state is up to date:
		 *
		 * - If the lock was acquired atomically (ret == 1), then
		 *   the state is Q_REQUEUE_PI_LOCKED.
		 *
		 *   The top waiter has been dequeued and woken up and can
		 *   return to user space immediately. The kernel/user
		 *   space state is consistent. In case that there must be
		 *   more waiters requeued the WAITERS bit in the user
		 *   space futex is set so the top waiter task has to go
		 *   into the syscall slowpath to unlock the futex. This
		 *   will block until this requeue operation has been
		 *   completed and the hash bucket locks have been
		 *   dropped.
		 *
		 * - If the trylock failed with an error (ret < 0) then
		 *   the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
		 *   happened", or Q_REQUEUE_PI_IGNORE when there was an
		 *   interleaved early wakeup.
		 *
		 * - If the trylock did not succeed (ret == 0) then the
		 *   state is either Q_REQUEUE_PI_IN_PROGRESS or
		 *   Q_REQUEUE_PI_WAIT if an early wakeup interleaved.
		 *   This will be cleaned up in the loop below, which
		 *   cannot fail because futex_proxy_trylock_atomic() did
		 *   the same sanity checks for requeue_pi as the loop
		 *   below does.
		 */
		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

		case 1:
			/*
			 * futex_proxy_trylock_atomic() acquired the user space
			 * futex. Adjust task_count.
			 */
			task_count++;
			ret = 0;
			break;

		/*
		 * If the above failed, then pi_state is NULL and
		 * waiter::requeue_state is correct.
		 */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			futex_hb_waiters_dec(hb2);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			return ret;
		case -EBUSY:
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - EBUSY: Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - EAGAIN: The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			futex_hb_waiters_dec(hb2);
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete, otherwise
			 * this task might loop forever, i.e. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}
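
	/*
	 * Editorial note on the fault handling above (a sketch of the
	 * standard futex pattern): futex_get_value_locked() runs with page
	 * faults disabled because the hash bucket locks are held. On a
	 * fault the locks are dropped, get_user() faults the page in from
	 * a sleepable context and the whole operation is retried:
	 *
	 *	lock hash buckets
	 *	if (futex_get_value_locked(&curval, uaddr1)) {
	 *		unlock hash buckets
	 *		if (get_user(curval, uaddr1))
	 *			return error
	 *		goto retry
	 *	}
	 */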

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!futex_match(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/* Plain futexes just wake or requeue and are done */
		if (!requeue_pi) {
			if (++task_count <= nr_wake)
				this->wake(&wake_q, this);
			else
				requeue_futex(this, hb1, hb2, &key2);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (!futex_match(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 *
		 * Prepare the waiter to take the rt_mutex. Take a refcount
		 * on the pi_state and store the pointer in the futex_q
		 * object of the waiter.
		 */
		get_pi_state(pi_state);

		/* Don't requeue when the waiter is already on the way out. */
		if (!futex_requeue_pi_prepare(this, pi_state)) {
			/*
			 * Early woken waiter signaled that it is on the
			 * way out. Drop the pi_state reference and try the
			 * next waiter. @this->pi_state is still NULL.
			 */
			put_pi_state(pi_state);
			continue;
		}

		ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
						this->rt_waiter,
						this->task);

		if (ret == 1) {
			/*
			 * We got the lock. We do neither drop the refcount
			 * on pi_state nor clear this->pi_state because the
			 * waiter needs the pi_state for cleaning up the
			 * user space value. It will drop the refcount
			 * after doing so. this::requeue_state is updated
			 * in the wakeup as well.
			 */
			requeue_pi_wake_futex(this, &key2, hb2);
			task_count++;
		} else if (!ret) {
			/* Waiter is queued, move it to hb2 */
			requeue_futex(this, hb1, hb2, &key2);
			futex_requeue_pi_complete(this, 0);
			task_count++;
		} else {
			/*
			 * rt_mutex_start_proxy_lock() detected a potential
			 * deadlock when we tried to queue that waiter.
			 * Drop the pi_state reference which we took above
			 * and remove the pointer to the state from the
			 * waiter's futex_q object.
			 */
			this->pi_state = NULL;
			put_pi_state(pi_state);
			futex_requeue_pi_complete(this, ret);
			/*
			 * We stop queueing more waiters and let user space
			 * deal with the mess.
			 */
			break;
		}
	}

	/*
	 * We took an extra initial reference to the pi_state in
	 * futex_proxy_trylock_atomic(). We need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	futex_hb_waiters_dec(hb2);
	return ret ? ret : task_count;
}

/**
 * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex
 * @hb:		the hash_bucket the futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Determine the cause for the early wakeup.
 *
 * Return:
 *  -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q,
				   struct hrtimer_sleeper *timeout)
{
	int ret;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	WARN_ON_ONCE(&hb->lock != q->lock_ptr);

	/*
	 * We were woken prior to requeue by a timeout or a signal.
	 * Unqueue the futex_q and determine which it was.
	 */
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);

	/* Handle spurious wakeups gracefully */
	ret = -EWOULDBLOCK;
	if (timeout && !timeout->task)
		ret = -ETIMEDOUT;
	else if (signal_pending(current))
		ret = -ERESTARTNOINTR;
	return ret;
}
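
/*
 * Editorial note: for the syscall caller the three return values above map
 * roughly to (illustrative, see futex(2)):
 *
 *	-EWOULDBLOCK	spurious wakeup; userspace typically re-issues
 *			FUTEX_WAIT_REQUEUE_PI with unchanged arguments
 *	-ETIMEDOUT	the timeout expired before the requeue happened
 *	-ERESTARTNOINTR	a signal arrived; the syscall is restarted
 *			transparently once the handler has returned
 */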

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and distinct from uaddr. Normal wakeup will
 * wake on uaddr2 and complete the acquisition of the rt_mutex prior to
 * returning to userspace. This ensures the rt_mutex maintains an owner when
 * it has waiters; without one, the pi logic would not know which task to
 * boost/deboost, if there was a need to.
 *
 * We call schedule in futex_wait_queue() when we enqueue and return there
 * via the following--
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  -  0 - On success;
 *  - <0 - On error
 */
int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
			  u32 val, ktime_t *abs_time, u32 bitset,
			  u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	struct rt_mutex_base *pi_mutex;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, it holds hb->lock and q
	 * is initialized.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (futex_match(&q.key, &key2)) {
		futex_q_unlock(hb);
		ret = -EINVAL;
		goto out;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue(hb, &q, to);

	switch (futex_requeue_pi_wakeup_sync(&q)) {
	case Q_REQUEUE_PI_IGNORE:
		/* The waiter is still on uaddr1 */
		spin_lock(&hb->lock);
		ret = handle_early_requeue_pi_wakeup(hb, &q, to);
		spin_unlock(&hb->lock);
		break;

	case Q_REQUEUE_PI_LOCKED:
		/* The requeue acquired the lock */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_owner(uaddr2, &q, true);
			/*
			 * Drop the reference to the pi state which the
			 * requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
			/*
			 * Adjust the return value. It's either -EFAULT or
			 * success (1) but the caller expects 0 for success.
			 */
			ret = ret < 0 ? ret : 0;
		}
		break;

	case Q_REQUEUE_PI_DONE:
		/* Requeue completed. Current is 'pi_blocked_on' the rtmutex */
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		/*
		 * See futex_unlock_pi()'s cleanup: comment.
		 */
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		spin_lock(q.lock_ptr);
		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_pi_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_pi_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		futex_unqueue_pi(&q);
		spin_unlock(q.lock_ptr);

		if (ret == -EINTR) {
			/*
			 * We've already been requeued, but cannot restart
			 * by calling futex_lock_pi() directly. We could
			 * restart this syscall, but it would detect that
			 * the user space "val" changed and return
			 * -EWOULDBLOCK. Save the overhead of the restart
			 * and return -EWOULDBLOCK directly.
			 */
			ret = -EWOULDBLOCK;
		}
		break;
	default:
		BUG();
	}

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}