Lines Matching +full:config +full:- +full:cond
1 // SPDX-License-Identifier: GPL-2.0-only
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
19 * Also see Documentation/locking/mutex-design.rst.
41 # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond) argument
43 # define MUTEX_WARN_ON(cond) argument
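The two MUTEX_WARN_ON() hits above are the debug and non-debug halves of one conditional definition; the surrounding #ifdef, which this line listing omits, looks roughly like the following reconstruction:

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond)	DEBUG_LOCKS_WARN_ON(cond)	/* CONFIG_DEBUG_MUTEXES=y: report bogus state */
#else
# define MUTEX_WARN_ON(cond)					/* debug off: compiles to nothing */
#endif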
49 atomic_long_set(&lock->owner, 0); in __mutex_init()
50 raw_spin_lock_init(&lock->wait_lock); in __mutex_init()
51 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
53 osq_lock_init(&lock->osq); in __mutex_init()
79 unsigned long owner = atomic_long_read(&lock->owner); in mutex_get_owner()
91 owner = atomic_long_read(&lock->owner); in __mutex_trylock_common()
113 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) { in __mutex_trylock_common()
155 MUTEX_WARN_ON(lock->magic != lock); in __mutex_trylock_fast()
157 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) in __mutex_trylock_fast()
167 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL); in __mutex_unlock_fast()
173 atomic_long_or(flag, &lock->owner); in __mutex_set_flag()
178 atomic_long_andnot(flag, &lock->owner); in __mutex_clear_flag()
183 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; in __mutex_waiter_is_first()
197 list_add_tail(&waiter->list, list); in __mutex_add_waiter()
205 list_del(&waiter->list); in __mutex_remove_waiter()
206 if (likely(list_empty(&lock->wait_list))) in __mutex_remove_waiter()
221 unsigned long owner = atomic_long_read(&lock->owner); in __mutex_handoff()
234 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new)) in __mutex_handoff()
244 * branch is predicted by the CPU as default-untaken.
249 * mutex_lock - acquire the mutex
260 * (or statically defined) before it can be locked. memset()-ing
263 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
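As the mutex_lock() kerneldoc fragment above notes, a mutex has to be initialized (statically or with mutex_init(), never by memset()) before it can be locked. A minimal usage sketch; my_dev and its fields are illustrative names, not part of mutex.c:

#include <linux/mutex.h>

static DEFINE_MUTEX(global_lock);	/* statically defined mutex */

struct my_dev {
	struct mutex lock;		/* mutex embedded in a dynamically allocated object */
	int count;
};

static void my_dev_setup(struct my_dev *dev)
{
	mutex_init(&dev->lock);		/* runtime init; memset()-ing the mutex is not allowed */
}

static void my_dev_bump(struct my_dev *dev)
{
	mutex_lock(&dev->lock);		/* may sleep; not usable from interrupt context */
	dev->count++;			/* critical section */
	mutex_unlock(&dev->lock);
}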
300 * If ww->ctx is set the contents are undefined, only in ww_mutex_spin_on_owner()
310 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) in ww_mutex_spin_on_owner()
320 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) in ww_mutex_spin_on_owner()
349 * Ensure we emit the owner->on_cpu, dereference _after_ in mutex_spin_on_owner()
350 * checking lock->owner still matches owner. And we already in mutex_spin_on_owner()
351 * disabled preemption which is equal to the RCU read-side in mutex_spin_on_owner()
391 * We already disabled preemption which is equal to the RCU read-side in mutex_can_spin_on_owner()
400 * If lock->owner is not set, the mutex has been released. Return true in mutex_can_spin_on_owner()
424 * queue. The waiter-spinner will spin on the lock directly and concurrently
436 * in case spinning isn't possible. As a waiter-spinner in mutex_optimistic_spin()
448 if (!osq_lock(&lock->osq)) in mutex_optimistic_spin()
469 * everything in this loop to be re-loaded. We don't need in mutex_optimistic_spin()
477 osq_unlock(&lock->osq); in mutex_optimistic_spin()
484 osq_unlock(&lock->osq); in mutex_optimistic_spin()
489 * reschedule now, before we try-lock the mutex. This avoids getting in mutex_optimistic_spin()
515 * mutex_unlock - release the mutex
524 * returned - mutex_unlock() can NOT directly be used to release an object such
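The fragment above is from the mutex_unlock() kerneldoc: the mutex must stay alive until mutex_unlock() returns, so the unlock cannot, by itself, be the event that lets another task free the object embedding it. A small sketch of the pattern that comment warns against, reusing the hypothetical my_dev and assuming (for illustration) that some other task frees the device once it observes count == 0 under the lock:

/*
 * NOT safe under that convention: once count == 0 is visible, a concurrent
 * task could take dev->lock, drop it and free dev while this task is still
 * executing inside mutex_unlock(&dev->lock).
 */
static void my_dev_mark_gone(struct my_dev *dev)
{
	mutex_lock(&dev->lock);
	dev->count = 0;
	mutex_unlock(&dev->lock);	/* may still touch dev->lock internals here */
}

The usual fix keeps the lifetime rule outside the embedded mutex, for instance a refcount plus an external lock as in the atomic_dec_and_mutex_lock() sketch near the end of this listing.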
541 * ww_mutex_unlock - release the w/w mutex
554 mutex_unlock(&lock->base); in ww_mutex_unlock()
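ww_mutex_unlock() above is normally paired with an acquire context. A condensed two-lock sketch of the acquire/backoff pattern described in Documentation/locking/ww-mutex-design.rst; obj, my_ww_class and lock_pair() are illustrative names:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

struct obj {
	struct ww_mutex lock;
	/* payload */
};

static int lock_pair(struct obj *o1, struct obj *o2, struct ww_acquire_ctx *ctx)
{
	int ret;

	ww_acquire_init(ctx, &my_ww_class);

	ret = ww_mutex_lock(&o1->lock, ctx);
	if (ret)
		goto out_fini;

	ret = ww_mutex_lock(&o2->lock, ctx);
	while (ret == -EDEADLK) {
		/* Back off: drop the lock we hold, sleep-wait for the
		 * contended one, then retry the other in the new order. */
		ww_mutex_unlock(&o1->lock);
		ww_mutex_lock_slow(&o2->lock, ctx);
		swap(o1, o2);
		ret = ww_mutex_lock(&o2->lock, ctx);
	}
	if (ret) {
		ww_mutex_unlock(&o1->lock);
		goto out_fini;
	}

	ww_acquire_done(ctx);	/* no more locks will be taken, -EDEADLK can no longer happen */
	return 0;

out_fini:
	ww_acquire_fini(ctx);
	return ret;
}

When the caller is done it releases both locks with ww_mutex_unlock() and only then calls ww_acquire_fini(ctx).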
577 MUTEX_WARN_ON(lock->magic != lock); in __mutex_lock_common()
581 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) in __mutex_lock_common()
582 return -EALREADY; in __mutex_lock_common()
589 if (ww_ctx->acquired == 0) in __mutex_lock_common()
590 ww_ctx->wounded = 0; in __mutex_lock_common()
593 nest_lock = &ww_ctx->dep_map; in __mutex_lock_common()
598 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); in __mutex_lock_common()
604 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
612 raw_spin_lock_irqsave(&lock->wait_lock, flags); in __mutex_lock_common()
628 lock_contended(&lock->dep_map, ip); in __mutex_lock_common()
632 __mutex_add_waiter(lock, &waiter, &lock->wait_list); in __mutex_lock_common()
661 * against mutex_unlock() and wake-ups do not go missing. in __mutex_lock_common()
664 ret = -EINTR; in __mutex_lock_common()
674 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in __mutex_lock_common()
682 * that has cleared our blocked_on state, re-set in __mutex_lock_common()
709 raw_spin_lock_irqsave(&lock->wait_lock, flags); in __mutex_lock_common()
711 raw_spin_lock_irqsave(&lock->wait_lock, flags); in __mutex_lock_common()
718 * Wound-Wait; we stole the lock (!first_waiter), check the in __mutex_lock_common()
721 if (!ww_ctx->is_wait_die && in __mutex_lock_common()
731 /* got the lock - cleanup and rejoice! */ in __mutex_lock_common()
732 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
738 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in __mutex_lock_common()
749 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in __mutex_lock_common()
751 mutex_release(&lock->dep_map, ip); in __mutex_lock_common()
771 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
779 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
786 return mutex_trylock(&ww->base); in ww_mutex_trylock()
788 MUTEX_WARN_ON(ww->base.magic != &ww->base); in ww_mutex_trylock()
795 if (ww_ctx->acquired == 0) in ww_mutex_trylock()
796 ww_ctx->wounded = 0; in ww_mutex_trylock()
798 if (__mutex_trylock(&ww->base)) { in ww_mutex_trylock()
800 mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_); in ww_mutex_trylock()
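As the ww_mutex_trylock() kerneldoc fragment above says, the acquire context is optional: with ctx == NULL it behaves like mutex_trylock() on the base mutex, returning 1 on success and 0 on contention. A short sketch; obj and obj_do_quick_update() are illustrative names:

	/* opportunistic path, no acquire context */
	if (ww_mutex_trylock(&obj->lock, NULL)) {
		obj_do_quick_update(obj);	/* hypothetical helper */
		ww_mutex_unlock(&obj->lock);
	}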
859 if (ctx->deadlock_inject_countdown-- == 0) { in ww_mutex_deadlock_injection()
860 tmp = ctx->deadlock_inject_interval; in ww_mutex_deadlock_injection()
866 ctx->deadlock_inject_interval = tmp; in ww_mutex_deadlock_injection()
867 ctx->deadlock_inject_countdown = tmp; in ww_mutex_deadlock_injection()
868 ctx->contending_lock = lock; in ww_mutex_deadlock_injection()
872 return -EDEADLK; in ww_mutex_deadlock_injection()
885 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, in ww_mutex_lock()
887 if (!ret && ctx && ctx->acquired > 1) in ww_mutex_lock()
900 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, in ww_mutex_lock_interruptible()
903 if (!ret && ctx && ctx->acquired > 1) in ww_mutex_lock_interruptible()
922 mutex_release(&lock->dep_map, ip); in __mutex_unlock_slowpath()
931 owner = atomic_long_read(&lock->owner); in __mutex_unlock_slowpath()
939 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) { in __mutex_unlock_slowpath()
947 raw_spin_lock_irqsave(&lock->wait_lock, flags); in __mutex_unlock_slowpath()
949 if (!list_empty(&lock->wait_list)) { in __mutex_unlock_slowpath()
950 /* get the first entry from the wait-list: */ in __mutex_unlock_slowpath()
952 list_first_entry(&lock->wait_list, in __mutex_unlock_slowpath()
955 next = waiter->task; in __mutex_unlock_slowpath()
965 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in __mutex_unlock_slowpath()
970 * Here come the less common (and hence less performance-critical) APIs:
980 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
988 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1004 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1012 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1027 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
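The three kerneldoc fragments above cover the sleeping variants whose return value must be checked: mutex_lock_interruptible() gives up on any signal, mutex_lock_killable() only on fatal signals, and mutex_lock_io() additionally marks the task as waiting for I/O. A small sketch reusing the hypothetical my_dev from earlier:

static int my_dev_bump_interruptible(struct my_dev *dev)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->lock);	/* 0, or -EINTR if a signal arrived */
	if (ret)
		return ret;				/* propagate without touching dev->count */

	dev->count++;
	mutex_unlock(&dev->lock);
	return 0;
}

mutex_lock_killable() is used the same way when only a fatal signal should abort the wait.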
1067 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, in __ww_mutex_lock_slowpath()
1075 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, in __ww_mutex_lock_interruptible_slowpath()
1083 * mutex_trylock - try to acquire the mutex, without waiting
1098 MUTEX_WARN_ON(lock->magic != lock); in mutex_trylock()
1107 MUTEX_WARN_ON(lock->magic != lock); in _mutex_trylock_nest_lock()
1110 mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_); in _mutex_trylock_nest_lock()
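mutex_trylock() above returns 1 when the lock was taken and 0 on contention, without ever sleeping, so callers need a fallback path. A small sketch, again reusing the hypothetical my_dev (the retry_work member is likewise illustrative):

	if (mutex_trylock(&dev->lock)) {
		dev->count++;				/* got the lock without blocking */
		mutex_unlock(&dev->lock);
	} else {
		/* contended: defer instead of sleeping here */
		schedule_delayed_work(&dev->retry_work, HZ);
	}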
1123 if (__mutex_trylock_fast(&lock->base)) { in ww_mutex_lock()
1138 if (__mutex_trylock_fast(&lock->base)) { in ww_mutex_lock_interruptible()
1155 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1164 if (atomic_add_unless(cnt, -1, 1)) in atomic_dec_and_mutex_lock()
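atomic_dec_and_mutex_lock() above returns true, with the mutex held, only when the decrement reaches zero, which gives the classic last-reference teardown pattern where lifetime is guarded by an external lock rather than a mutex inside the object. A sketch with illustrative names:

static DEFINE_MUTEX(my_obj_list_lock);

struct my_obj {
	atomic_t refcnt;
	struct list_head node;
	/* payload */
};

static void my_obj_put(struct my_obj *obj)
{
	/* Fast path: just drop a reference while more than one remains */
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &my_obj_list_lock))
		return;

	/* Last reference: we now hold my_obj_list_lock */
	list_del(&obj->node);
	mutex_unlock(&my_obj_list_lock);
	kfree(obj);
}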