Lines matching "mutex" in kernel/locking/mutex.c

* kernel/locking/mutex.c
* Also see Documentation/locking/mutex-design.rst.
#include <linux/mutex.h>
#include "mutex.h"
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
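In-tree callers normally reach __mutex_init() through the mutex_init() macro, which supplies the per-callsite lock_class_key for lockdep, or through the static DEFINE_MUTEX() initializer. A minimal usage sketch (the my_device structure is invented for illustration):

#include <linux/mutex.h>

/* Static initialization: the mutex is usable immediately. */
static DEFINE_MUTEX(my_static_lock);

struct my_device {
	struct mutex io_lock;	/* protects state below */
	int state;
};

static void my_device_setup(struct my_device *dev)
{
	/* Dynamic initialization: expands to __mutex_init() plus a lockdep key. */
	mutex_init(&dev->io_lock);
	dev->state = 0;
}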
bool mutex_is_locked(struct mutex *lock)
unsigned long mutex_get_owner(struct mutex *lock)
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
static inline bool __mutex_trylock(struct mutex *lock)
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct list_head *list)
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
* We split the mutex lock/unlock logic into separate fastpath and
* slowpath functions, to reduce the register pressure on the fastpath.
static void __sched __mutex_lock_slowpath(struct mutex *lock);
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
* Lock the mutex exclusively for this task. If the mutex is not
* available right now, it will sleep until it can get it.
*
* The mutex must later on be released by the same task that
* acquired it. Recursive locking is not allowed. The task
* may not exit without first unlocking the mutex. Also, kernel
* memory where the mutex resides must not be freed with
* the mutex still locked. The mutex must first be initialized
* (or statically defined) before it can be locked. memset()-ing
* the mutex to 0 is not allowed.
void __sched mutex_lock(struct mutex *lock)
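Usage follows the classic critical-section pattern; a minimal sketch (counter_lock and counter are invented names):

#include <linux/mutex.h>

static DEFINE_MUTEX(counter_lock);
static unsigned long counter;	/* protected by counter_lock */

static void counter_inc(void)
{
	mutex_lock(&counter_lock);	/* may sleep; not usable in interrupt context */
	counter++;
	mutex_unlock(&counter_lock);	/* released by the same task that locked it */
}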
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
* Initial check for entering the mutex spinning loop
static inline int mutex_can_spin_on_owner(struct mutex *lock)
* If lock->owner is not set, the mutex has been released. Return true
* in that case.
* The mutex spinners are queued up using MCS lock so that only one
* spinner can compete for the mutex. However, if mutex spinning isn't
* going to happen, there is no point in going through the lock/unlock
* overhead.
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
* In order to avoid a stampede of mutex spinners trying to
* acquire the mutex all at once, the spinners need to take a
* MCS (queued) lock first before spinning on the owner field.
/* Try to acquire the mutex... */
* If we fell out of the spin path because of need_resched(),
* reschedule now, before we try-lock the mutex. This avoids getting
* scheduled out right after we obtained the mutex.
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
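The comments above describe the policy; as a stand-alone illustration of just the spin-versus-sleep decision (no MCS/OSQ queueing, no handoff, and with invented helpers owner_is_running() and sleep_until_woken()), a toy version might look like:

#include <stdatomic.h>

struct task;

/* Hypothetical helpers standing in for scheduler state and the slowpath. */
int owner_is_running(struct task *owner);
void sleep_until_woken(void);

struct toy_mutex {
	_Atomic(struct task *) owner;	/* NULL when unlocked */
};

void toy_lock(struct toy_mutex *m, struct task *me)
{
	for (;;) {
		struct task *owner = NULL;

		/* Fast path: grab the lock if it is free (NULL -> me). */
		if (atomic_compare_exchange_strong(&m->owner, &owner, me))
			return;

		/*
		 * The owner is running on a CPU: it will likely release the
		 * lock soon, so busy-waiting is cheaper than sleeping.
		 */
		if (owner && owner_is_running(owner))
			continue;

		/* Owner is blocked: sleeping beats burning CPU time. */
		sleep_until_woken();
	}
}

This is an illustration of the heuristic only, not the kernel's implementation.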
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
* Unlock a mutex that has been locked by this task previously.
*
* This function must not be used in interrupt context. Unlocking
* of a not locked mutex is not allowed.
*
* The caller must ensure that the mutex stays alive until this function has
* returned - mutex_unlock() can NOT directly be used to release an object
* such that another concurrent task can free it.
void __sched mutex_unlock(struct mutex *lock)
* ww_mutex_unlock - release the w/w mutex
* @lock: the mutex to be released
*
* Unlock a mutex that has been locked by this task previously with any of the
* ww_mutex_lock* functions (with or without an acquire context). It is
* forbidden to release the locks after releasing the acquire context.
*
* This function must not be used in interrupt context. Unlocking
* of an unlocked mutex is not allowed.
* Lock a mutex (possibly interruptible), slowpath:
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip)
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, struct ww_acquire_ctx *ww_ctx)
* ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
* @ww: mutex to lock
* @ww_ctx: optional w/w acquire context
*
* Trylocks a mutex with the optional acquire context; no deadlock detection is
* possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
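For context, the surrounding ww_mutex API is normally used with an acquire context and -EDEADLK backoff, as in this sketch (struct obj, my_ww_class and update_pair() are invented; see Documentation/locking/ww-mutex-design.rst for the canonical pattern):

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

struct obj {
	struct ww_mutex lock;
	int data;
};

/* Lock two objects given in arbitrary order, with wound/wait backoff. */
static int update_pair(struct obj *first, struct obj *second)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);

	ret = ww_mutex_lock(&first->lock, &ctx);
	if (ret)
		goto out_fini;

	while ((ret = ww_mutex_lock(&second->lock, &ctx)) == -EDEADLK) {
		/*
		 * Backed off by an older context: drop what we hold, then
		 * sleep-wait on the contended lock and retry the other one.
		 */
		struct obj *tmp = first;

		ww_mutex_unlock(&first->lock);
		ww_mutex_lock_slow(&second->lock, &ctx);
		first = second;
		second = tmp;
	}
	if (ret)
		goto out_unlock_first;

	ww_acquire_done(&ctx);
	first->data = second->data;	/* the critical section */

	ww_mutex_unlock(&second->lock);
out_unlock_first:
	ww_mutex_unlock(&first->lock);
out_fini:
	ww_acquire_fini(&ctx);	/* only after all locks are released */
	return ret;
}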
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
_mutex_lock_killable(struct mutex *lock, unsigned int subclass, struct lockdep_map *nest)
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
__mutex_lock_killable_slowpath(struct mutex *lock);
__mutex_lock_interruptible_slowpath(struct mutex *lock);
* mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
* @lock: The mutex to be acquired.
*
* Lock the mutex like mutex_lock(). If a signal is delivered while the
* process is sleeping, this function will return without acquiring the
* mutex.
int __sched mutex_lock_interruptible(struct mutex *lock)
* mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
* @lock: The mutex to be acquired.
*
* Lock the mutex like mutex_lock(). If a signal which will be fatal to
* the current process is delivered while the process is sleeping, this
* function will return without acquiring the mutex.
int __sched mutex_lock_killable(struct mutex *lock)
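Both variants return 0 on success and -EINTR when the sleep is interrupted, so the idiomatic caller propagates the error; a minimal sketch (cfg_lock and cfg_value are invented):

#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_lock);
static int cfg_value;	/* protected by cfg_lock */

static int write_config(int val)
{
	int ret;

	ret = mutex_lock_interruptible(&cfg_lock);	/* or mutex_lock_killable() */
	if (ret)
		return ret;	/* -EINTR: a signal arrived while we slept */

	cfg_value = val;
	mutex_unlock(&cfg_lock);
	return 0;
}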
* mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
* @lock: The mutex to be acquired.
*
* Lock the mutex like mutex_lock(). While the task is waiting for this
* mutex, it will be accounted as being in the IO wait state by the
* scheduler.
void __sched mutex_lock_io(struct mutex *lock)
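A brief sketch of where this fits, assuming a lock that serializes access to a slow device (disk_lock is an invented name):

#include <linux/mutex.h>

static DEFINE_MUTEX(disk_lock);	/* serializes access to a slow device */

static void submit_to_disk(void)
{
	/* Waiting here counts as iowait, like blocking on the I/O itself. */
	mutex_lock_io(&disk_lock);
	/* ... talk to the hypothetical device ... */
	mutex_unlock(&disk_lock);
}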
__mutex_lock_slowpath(struct mutex *lock)
__mutex_lock_killable_slowpath(struct mutex *lock)
__mutex_lock_interruptible_slowpath(struct mutex *lock)
* mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* This function must not be used in interrupt context. The
* mutex must be released by the same task that acquired it.
int __sched mutex_trylock(struct mutex *lock)
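Because the return convention follows spin_trylock() (1 on success, 0 on contention), the usual pattern skips optional work on contention; a sketch with an invented cache_lock:

#include <linux/mutex.h>

static DEFINE_MUTEX(cache_lock);

static void try_shrink_cache(void)
{
	/* Returns 1 on success, 0 on contention -- opposite of down_trylock(). */
	if (!mutex_trylock(&cache_lock))
		return;	/* someone else holds it; skip the optional work */

	/* ... shrink the hypothetical cache ... */
	mutex_unlock(&cache_lock);
}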
int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
* atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
* @cnt: the atomic which we are to dec
* @lock: the mutex to return holding if we dec to 0
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
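This helper suits refcounted-teardown patterns: only the caller that drops the count to zero proceeds, already holding the mutex. A sketch (struct session and session_put() are invented):

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct session {
	atomic_t refcnt;
	struct mutex lock;	/* serializes teardown against lookup */
};

static void session_put(struct session *s)
{
	/* Only the caller that decrements the count to zero gets the mutex. */
	if (!atomic_dec_and_mutex_lock(&s->refcnt, &s->lock))
		return;

	/* ... remove the session from lookup structures ... */
	mutex_unlock(&s->lock);
	kfree(s);
}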