// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#include <linux/hung_task.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

static void __mutex_init_generic(struct mutex *lock)
{
	atomic_long_set(&lock->owner, 0);
	scoped_guard (raw_spinlock_init, &lock->wait_lock) {
		lock->first_waiter = NULL;
	}
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif
	debug_mutex_init(lock);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/* Do not use the return value as a pointer directly. */
unsigned long mutex_get_owner(struct mutex *lock)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	return (unsigned long)__owner_task(owner);
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}
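
/*
 * Illustrative sketch (not from the original file): the owner word packs the
 * owning task_struct pointer together with the low MUTEX_FLAGS bits, so a raw
 * value read from lock->owner must always be split with the helpers above
 * before use, roughly:
 *
 *	unsigned long owner = atomic_long_read(&lock->owner);
 *	struct task_struct *task = __owner_task(owner);   (pointer, flags masked off)
 *	unsigned long flags = __owner_flags(owner);       (WAITERS/HANDOFF/PICKUP bits)
 */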

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are confined to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */
void mutex_init_generic(struct mutex *lock)
{
	__mutex_init_generic(lock);
}
EXPORT_SYMBOL(mutex_init_generic);

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
	__cond_acquires(true, lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	MUTEX_WARN_ON(lock->magic != lock);

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
	__cond_releases(true, lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

void mutex_init_lockdep(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	__mutex_init_generic(lock);

	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(mutex_init_lockdep);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct mutex_waiter *first)
	__must_hold(&lock->wait_lock)
{
	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
	debug_mutex_add_waiter(lock, waiter, current);

	if (!first)
		first = lock->first_waiter;

	if (first) {
		list_add_tail(&waiter->list, &first->list);
	} else {
		INIT_LIST_HEAD(&waiter->list);
		lock->first_waiter = waiter;
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
	}
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
	__must_hold(&lock->wait_lock)
{
	if (list_empty(&waiter->list)) {
		__mutex_clear_flag(lock, MUTEX_FLAGS);
		lock->first_waiter = NULL;
	} else {
		if (lock->first_waiter == waiter) {
			lock->first_waiter = list_first_entry(&waiter->list,
							      struct mutex_waiter, list);
		}
		list_del(&waiter->list);
	}

	debug_mutex_remove_waiter(lock, waiter, current);
	hung_task_clear_blocker();
}
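
/*
 * Illustrative sketch (assumption, not part of the original file): both
 * helpers above are only ever called with wait_lock held, so a sleeping
 * acquisition roughly follows this shape:
 *
 *	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 *	__mutex_add_waiter(lock, &waiter, NULL);	(FIFO: append at the tail)
 *	... sleep until woken or handed the lock ...
 *	__mutex_remove_waiter(lock, &waiter);
 *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 */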

/*
 * Give up ownership to a specific task; when @task == NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() on the other end provides the matching ACQUIRE semantics
 * for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock)
	__acquires(lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
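
/*
 * Illustrative usage sketch (not part of the original file): the lock/unlock
 * pairing described by the kernel-doc above, for a hypothetical
 * 'example_lock' protecting 'example_count':
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_inc(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;
 *		mutex_unlock(&example_lock);
 *	}
 *
 * The same task that called mutex_lock() must call mutex_unlock(), and since
 * mutex_lock() may sleep it must not be used in atomic or interrupt context.
 */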

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && data_race(lock->first_waiter != waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. We have already
		 * disabled preemption, which is equivalent to an RCU
		 * read-side critical section in the optimistic spinning code,
		 * so the task_struct structure won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We have already disabled preemption, which is equivalent to an RCU
	 * read-side critical section in the optimistic spinning code, so the
	 * task_struct structure won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
	__releases(lock);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
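
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * lifetime rule in the kernel-doc above forbids patterns where a waiter
 * frees the object embedding the mutex as soon as it gets the lock:
 *
 *	Task A				Task B
 *	mutex_lock(&obj->lock);
 *					mutex_lock(&obj->lock);   (blocks)
 *	mutex_unlock(&obj->lock);
 *					(acquires the lock)
 *					mutex_unlock(&obj->lock);
 *					kfree(obj);
 *
 * Task A may still be touching obj->lock inside mutex_unlock() when Task B
 * frees the object; use a refcount or RCU to manage the object's lifetime
 * instead.
 */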

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
	__no_context_analysis
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
	__cond_acquires(0, lock)
{
	DEFINE_WAKE_Q(wake_q);
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	unsigned long flags;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, NULL);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
		if (ret)
			goto err_early_kill;
	}

	__set_task_blocked_on(current, lock);
	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);

		schedule_preempt_disabled();

		first = lock->first_waiter == &waiter;

		/*
		 * As we have likely been woken up by the task that cleared
		 * our blocked_on state, re-set it to the lock we are trying
		 * to acquire.
		 */
		set_task_blocked_on(current, lock);
		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			/*
			 * mutex_optimistic_spin() can call schedule(), so
			 * clear blocked_on so we don't become unselectable
			 * to run.
			 */
			clear_task_blocked_on(current, lock);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			set_task_blocked_on(current, lock);
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
acquired:
	__clear_task_blocked_on(current, lock);
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die && lock->first_waiter != &waiter)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
	preempt_enable();
	return 0;

err:
	__clear_task_blocked_on(current, lock);
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	WARN_ON(__get_task_blocked_on(current));
	trace_contention_end(lock, ret);
	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
	__cond_acquires(0, lock)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
	__cond_acquires(0, lock)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}
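
/*
 * Illustrative usage sketch (assumption, not part of the original file): a
 * simplified wait/wound acquire pattern for two hypothetical ww_mutexes of
 * the same ww_class, with -EDEADLK backoff as described in
 * Documentation/locking/ww-mutex-design.rst:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ww_mutex_lock(&a->lock, &ctx);
 *	}
 *	... use a and b ...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 *
 * Error handling and further retries are elided; all locks taken with &ctx
 * must be released before ww_acquire_fini().
 */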

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx
 * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	__acquire(lock);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
	__acquire(lock);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
		     struct lockdep_map *nest)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_killable);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	__acquire(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
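
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * _nested()/_nest_lock() variants above only differ from the plain calls in
 * their lockdep annotation. A typical use is taking two mutexes of the same
 * lock class in a well-defined order, e.g. for two hypothetical objects:
 *
 *	static void lock_two(struct example *a, struct example *b)
 *	{
 *		mutex_lock(&a->lock);
 *		mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 *
 * Without the nested annotation lockdep would report a (false positive)
 * deadlock, because both locks belong to the same lock class.
 */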

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	__cond_releases(nonzero, lock)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
	__releases(lock)
{
	struct task_struct *next = NULL;
	struct mutex_waiter *waiter;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;
	unsigned long flags;

	mutex_release(&lock->dep_map, ip);
	__release(lock);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	waiter = lock->first_waiter;
	if (waiter) {
		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		__clear_task_blocked_on(next, lock);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
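
/*
 * Illustrative usage sketch (assumption, not part of the original file):
 * callers of the interruptible/killable variants above must handle the
 * -EINTR return and must not touch the protected data in that case, e.g.
 * for a hypothetical 'example_lock':
 *
 *	if (mutex_lock_interruptible(&example_lock))
 *		return -ERESTARTSYS;
 *	... critical section ...
 *	mutex_unlock(&example_lock);
 *
 * Returning -ERESTARTSYS lets the interrupted syscall be restarted once the
 * signal has been handled.
 */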

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
	__acquires(lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	__acquire(lock);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
	__cond_acquires(0, lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
	__cond_acquires(0, lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	__cond_acquires(0, lock)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
	__cond_acquires(0, lock)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	MUTEX_WARN_ON(lock->magic != lock);
	return __mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock);
#else
int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);
	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(_mutex_trylock_nest_lock);
#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return: 1 and hold @lock if the decrement reached 0, 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
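
/*
 * Illustrative usage sketch (assumption, not part of the original file):
 * atomic_dec_and_mutex_lock() is typically used to tear a shared resource
 * down exactly once, when the last user drops its reference. 'res',
 * 'res->users', 'res_lock' and example_teardown() are hypothetical:
 *
 *	if (atomic_dec_and_mutex_lock(&res->users, &res_lock)) {
 *		example_teardown(res);		(last user, res_lock held)
 *		mutex_unlock(&res_lock);
 *	}
 *
 * The mutex is only taken when the counter may actually reach zero, and it
 * is held across the final teardown.
 */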