// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF and preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure the owner->on_cpu dereference is emitted _after_
		 * checking that lock->owner still matches owner. We already
		 * disabled preemption, which is equivalent to the RCU
		 * read-side critical section in the optimistic spinning
		 * code, so the task_struct won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
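		 * (owner_on_cpu() below is assumed to fold both checks
		 * together: the owner->on_cpu load and a vcpu_is_preempted()
		 * check for the owner's CPU.)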
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to the RCU
	 * read-side critical section in the optimistic spinning code, so
	 * the task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using the MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take an
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	DEFINE_WAKE_Q(wake_q);
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	unsigned long flags;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		/* Make sure we do wakeups before calling schedule() */
		wake_up_q(&wake_q);
		wake_q_init(&wake_q);

		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	wake_up_q(&wake_q);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock(), no deadlock handling is performed. However, if
 * @ww_ctx is specified, -EALREADY handling may happen in calls to
 * ww_mutex_trylock().
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock().
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;
	unsigned long flags;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
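	 * (Keeping the owner field set prevents an optimistic spinner from
	 * stealing the lock; __mutex_handoff() below passes it directly to
	 * the top waiter instead.)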
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	preempt_disable();
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
	preempt_enable();
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
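 *
 * A minimal usage sketch (illustrative only; my_io_lock is a hypothetical
 * mutex guarding an I/O submission path, not part of this file):
 *
 *	mutex_lock_io(&my_io_lock);
 *	...		(time spent sleeping here is accounted as I/O wait)
 *	mutex_unlock(&my_io_lock);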
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
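 *
 * A minimal usage sketch (illustrative only; my_lock and do_work() are
 * hypothetical, not part of this file):
 *
 *	if (mutex_trylock(&my_lock)) {
 *		do_work();
 *		mutex_unlock(&my_lock);
 *	} else {
 *		...	(lock contended: back off or retry later)
 *	}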
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true, holding the lock, if we decrement @cnt to 0; return false
 * otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
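
/*
 * Illustrative sketch of the atomic_dec_and_mutex_lock() pattern above (not
 * part of this file; my_obj, my_obj->refcnt, my_obj_lock and free_my_obj()
 * are hypothetical):
 *
 *	if (atomic_dec_and_mutex_lock(&my_obj->refcnt, &my_obj_lock)) {
 *		free_my_obj(my_obj);		(refcount reached zero, lock held)
 *		mutex_unlock(&my_obj_lock);
 *	}
 */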