/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
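
/*
 * Illustrative sketch, not part of this file's logic: a typical caller
 * pairs mutex_lock() with mutex_unlock() around the shared data it
 * protects. The structure and field names below are hypothetical.
 *
 *	struct foo_dev {
 *		struct mutex	lock;
 *		int		refcount;
 *	};
 *
 *	static void foo_get(struct foo_dev *dev)
 *	{
 *		mutex_lock(&dev->lock);
 *		dev->refcount++;
 *		mutex_unlock(&dev->lock);
 *	}
 */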

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add
	 * themselves to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg(&lock->count, 1, 0) == 1);
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take a
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (mutex_try_to_acquire(lock)) {
			lock_acquired(&lock->dep_map, ip);

			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have preempted the owner
		 * between it acquiring the lock and setting the owner field.
		 * If we're an RT task, this can live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely;
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely;
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
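
/*
 * Illustrative sketch, not part of this file's logic: the usual caller
 * pattern for the wait/wound API (see Documentation/locking/ww-mutex-design.txt).
 * The names a, b and my_class are hypothetical; my_class would be set up
 * with DEFINE_WW_CLASS() and a, b are two ww_mutexes of that class.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(a, &ctx);
 *	if (!ret && ww_mutex_lock(b, &ctx) == -EDEADLK) {
 *		(we were wounded: drop what we hold, wait for b, retry)
 *		ww_mutex_unlock(a);
 *		ww_mutex_lock_slow(b, &ctx);
 *		ret = ww_mutex_lock(a, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	(... use both objects, then unlock a and b ...)
 *	ww_acquire_fini(&ctx);
 */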

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;

	/*
	 * As a performance measure, release the lock before doing other
	 * wakeup related duties to follow. This allows other tasks to acquire
	 * the lock sooner, while still handling cleanups in past unlock calls.
	 * This can be done as we do not enforce strict equivalence between the
	 * mutex counter and wait_list.
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
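
/*
 * Illustrative sketch, not part of this file's logic: callers that may be
 * interrupted by a signal must check the return value and back out; a
 * common convention in syscall paths is to return -ERESTARTSYS. The
 * device structure below is hypothetical.
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	(... access data protected by dev->lock ...)
 *	mutex_unlock(&dev->lock);
 */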

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
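
/*
 * Illustrative sketch, not part of this file's logic: because
 * mutex_trylock() follows the spin_trylock() convention, a non-zero
 * return means the lock is now held. The lock name is hypothetical.
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		(... lock acquired, do the work ...)
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		(... contended, defer or try again later ...)
 *	}
 */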

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
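
/*
 * Illustrative sketch, not part of this file's logic: a typical use of
 * atomic_dec_and_mutex_lock() is tearing down an object when its last
 * reference goes away, with the mutex protecting the teardown path.
 * The object, list and lock names below are hypothetical.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */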