Searched full:lock (Results 1 – 25 of 3758) sorted by relevance

/linux/kernel/locking/
mutex.c
35 #include <trace/events/lock.h>
46 static void __mutex_init_generic(struct mutex *lock) in __mutex_init_generic() argument
48 atomic_long_set(&lock->owner, 0); in __mutex_init_generic()
49 scoped_guard (raw_spinlock_init, &lock->wait_lock) { in __mutex_init_generic()
50 lock->first_waiter = NULL; in __mutex_init_generic()
53 osq_lock_init(&lock->osq); in __mutex_init_generic()
55 debug_mutex_init(lock); in __mutex_init_generic()
63 bool mutex_is_locked(struct mutex *lock) in mutex_is_locked() argument
65 return __mutex_owner(lock) != NULL; in mutex_is_locked()
75 unsigned long mutex_get_owner(struct mutex *lock) in mutex_get_owner() argument
[all …]
rtmutex.c
27 #include <trace/events/lock.h>
37 struct rt_mutex *lock, in __ww_mutex_add_waiter() argument
44 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, in __ww_mutex_check_waiters() argument
50 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, in ww_mutex_lock_acquired() argument
55 static inline int __ww_mutex_check_kill(struct rt_mutex *lock, in __ww_mutex_check_kill() argument
69 * lock->owner state tracking:
71 * lock->owner holds the task_struct pointer of the owner. Bit 0
72 * is used to keep track of the "lock has waiters" state.
75 * NULL 0 lock is free (fast acquire possible)
76 * NULL 1 lock is free and has waiters and the top waiter
[all …]
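
The rtmutex comment above describes packing the owner's task pointer and a "lock has waiters" flag into a single word, using bit 0 of the pointer. Below is a minimal userspace sketch of that pointer-plus-flag encoding; the type and helper names (struct task, owner_encode, ...) are illustrative stand-ins, not the kernel's helpers.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct task_struct. */
struct task { int pid; };

#define HAS_WAITERS 0x1UL   /* bit 0 of the owner word */

/* Pack an owner pointer and the "has waiters" flag into one word. */
static uintptr_t owner_encode(struct task *owner, int has_waiters)
{
    return (uintptr_t)owner | (has_waiters ? HAS_WAITERS : 0);
}

/* Strip bit 0 to recover the owner pointer. */
static struct task *owner_task(uintptr_t word)
{
    return (struct task *)(word & ~HAS_WAITERS);
}

static int owner_has_waiters(uintptr_t word)
{
    return word & HAS_WAITERS;
}

int main(void)
{
    struct task t = { .pid = 42 };

    uintptr_t free_lock = owner_encode(NULL, 0);     /* NULL, 0: lock is free        */
    uintptr_t held_contended = owner_encode(&t, 1);  /* taskp, 1: owned, has waiters */

    assert(owner_task(free_lock) == NULL);
    assert(owner_task(held_contended) == &t);
    assert(owner_has_waiters(held_contended));
    printf("owner pid=%d, waiters=%d\n",
           owner_task(held_contended)->pid,
           owner_has_waiters(held_contended));
    return 0;
}
```

Because task structures are always aligned to more than two bytes, bit 0 of a valid pointer is guaranteed to be zero and is therefore free to carry the flag.
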
ww_mutex.h
23 __ww_waiter_first(struct mutex *lock) in __ww_waiter_first() argument
24 __must_hold(&lock->wait_lock) in __ww_waiter_first()
26 return lock->first_waiter; in __ww_waiter_first()
35 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_next() argument
36 __must_hold(&lock->wait_lock) in __ww_waiter_next()
43 if (lock->first_waiter == w) in __ww_waiter_next()
55 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_prev() argument
56 __must_hold(&lock->wait_lock) in __ww_waiter_prev()
62 if (lock->first_waiter == w) in __ww_waiter_prev()
69 __ww_waiter_last(struct mutex *lock) in __ww_waiter_last() argument
[all …]
qspinlock_paravirt.h
30 * not running. The one lock stealing attempt allowed at slowpath entry
57 * Hybrid PV queued/unfair lock
60 * it will be called once when a lock waiter enter the PV slowpath before
64 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
65 * When that bit becomes visible to the incoming waiters, no lock stealing
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
69 * and hence disabling lock stealing.
71 * When the pending bit isn't set, the lock waiters will stay in the unfair
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
73 * case, the lock waiters will enter the queued mode slowpath trying to
[all …]
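
The qspinlock_paravirt.h comment above describes the hybrid policy: waiters may take the lock unfairly ("steal" it) as long as the pending bit is clear, and the waiter at the head of the wait queue sets that bit to shut stealing off and guarantee its own forward progress. Below is a loose C11-atomics sketch of just that gating idea, with illustrative names and without the MCS queue itself.

```c
#include <stdatomic.h>
#include <stdbool.h>

#define LOCKED  0x01u
#define PENDING 0x02u   /* set by the queue head to shut off lock stealing */

typedef struct { _Atomic unsigned int val; } hybrid_lock_t;

/* Unfair path: spin on the lock while PENDING is clear, stealing it
 * whenever it looks free.  Once PENDING is observed, give up and queue. */
static bool spin_or_steal(hybrid_lock_t *l)
{
    unsigned int v = atomic_load_explicit(&l->val, memory_order_relaxed);

    while (!(v & PENDING)) {
        if (!(v & LOCKED) &&
            atomic_compare_exchange_weak_explicit(&l->val, &v, v | LOCKED,
                                                  memory_order_acquire,
                                                  memory_order_relaxed))
            return true;                /* stole the lock in unfair mode */
        v = atomic_load_explicit(&l->val, memory_order_relaxed);
    }
    return false;                       /* pending set: join the wait queue */
}

/* Queue-head path: announce PENDING so newcomers stop stealing, then
 * wait for the current holder to release and claim the lock ourselves. */
static void queue_head_acquire(hybrid_lock_t *l)
{
    atomic_fetch_or_explicit(&l->val, PENDING, memory_order_relaxed);
    while (atomic_load_explicit(&l->val, memory_order_relaxed) & LOCKED)
        ;                               /* wait for the holder to drop LOCKED */
    atomic_exchange_explicit(&l->val, LOCKED, memory_order_acquire);
}

static void hybrid_unlock(hybrid_lock_t *l)
{
    atomic_fetch_and_explicit(&l->val, ~LOCKED, memory_order_release);
}
```

The real implementation is more nuanced (only one steal attempt is allowed at slowpath entry, and unfair spinning is used only while the MCS queue is empty), but the pending bit plays the same starvation-avoidance role as in this sketch.
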
qrwlock.c
15 #include <trace/events/lock.h>
18 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
19 * @lock: Pointer to queued rwlock structure
21 void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock) in queued_read_lock_slowpath() argument
24 * Readers come here when they cannot get the lock without waiting in queued_read_lock_slowpath()
28 * Readers in interrupt context will get the lock immediately in queued_read_lock_slowpath()
29 * if the writer is just waiting (not holding the lock yet), in queued_read_lock_slowpath()
30 * so spin with ACQUIRE semantics until the lock is available in queued_read_lock_slowpath()
33 atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); in queued_read_lock_slowpath()
36 atomic_sub(_QR_BIAS, &lock->cnts); in queued_read_lock_slowpath()
[all …]
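
The qrwlock.c excerpt above spells out the reader slowpath policy: a reader arriving in interrupt context keeps its already-added reader count and simply spins, with acquire semantics, until no writer actually holds the lock (a merely waiting writer does not block it), while a process-context reader instead drops its bias and joins the queue. Below is a loose C11 sketch of the interrupt-context path only; the _QW_LOCKED value is the one shown in the qrwlock.h result further down, while the type and function names are illustrative.

```c
#include <stdatomic.h>

#define _QW_LOCKED 0x0ffu               /* low byte: a writer holds the lock */

typedef struct { _Atomic unsigned int cnts; } qrwlock_sketch_t;

/*
 * Interrupt-context reader slowpath as described in the comment above:
 * our reader count is already in `cnts`, and a writer that is merely
 * *waiting* does not block us, so spin only until the writer-held byte
 * clears (this mirrors atomic_cond_read_acquire() in the real code).
 */
static void reader_wait_in_interrupt(qrwlock_sketch_t *lock)
{
    while (atomic_load_explicit(&lock->cnts, memory_order_acquire) & _QW_LOCKED)
        ;                               /* spin until no writer holds it */
}
```
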
qspinlock.c
25 #include <trace/events/lock.h>
36 * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
42 * This queued spinlock implementation is based on the MCS lock, however to
46 * In particular; where the traditional MCS lock consists of a tail pointer
55 * number. With one byte for the lock value and 3 bytes for the tail, only a
56 * 32-bit word is now needed. Even though we only need 1 bit for the lock,
60 * We also change the first spinner to spin on the lock bit instead of its
61 * node; whereby avoiding the need to carry a node from lock to unlock, and
62 * preserving existing lock AP
149 clear_pending(struct qspinlock * lock) clear_pending() argument
162 clear_pending_set_locked(struct qspinlock * lock) clear_pending_set_locked() argument
177 xchg_tail(struct qspinlock * lock,u32 tail) xchg_tail() argument
195 clear_pending(struct qspinlock * lock) clear_pending() argument
206 clear_pending_set_locked(struct qspinlock * lock) clear_pending_set_locked() argument
221 xchg_tail(struct qspinlock * lock,u32 tail) xchg_tail() argument
247 queued_fetch_set_pending_acquire(struct qspinlock * lock) queued_fetch_set_pending_acquire() argument
259 set_locked(struct qspinlock * lock) set_locked() argument
273 __pv_kick_node(struct qspinlock * lock,struct mcs_spinlock * node) __pv_kick_node() argument
275 __pv_wait_head_or_lock(struct qspinlock * lock,struct mcs_spinlock * node) __pv_wait_head_or_lock() argument
313 queued_spin_lock_slowpath(struct qspinlock * lock,u32 val) queued_spin_lock_slowpath() argument
[all …]
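
The qspinlock.c comment above explains how the whole lock fits in a 32-bit word: a lock byte, a pending bit, and a tail that encodes (CPU number + 1) together with a 2-bit per-CPU queue-node index. The sketch below shows one way to pack and unpack such a word; the exact bit positions in the kernel depend on NR_CPUS, so treat these as illustrative.

```c
#include <assert.h>
#include <stdint.h>

/*
 * Illustrative 32-bit queued-spinlock word, loosely following the layout
 * the comment above describes: a lock byte, a pending bit, and a tail
 * encoding (CPU number + 1) plus a 2-bit per-CPU node index.
 */
#define LOCKED_MASK    0x000000ffu   /* bits 0-7  */
#define PENDING_BIT    0x00000100u   /* bit 8     */
#define TAIL_IDX_SHIFT 16
#define TAIL_IDX_MASK  0x00030000u   /* bits 16-17: node index 0-3 */
#define TAIL_CPU_SHIFT 18

static uint32_t encode_tail(unsigned int cpu, unsigned int idx)
{
    /* cpu + 1, so that tail == 0 means "no tail". */
    return ((cpu + 1) << TAIL_CPU_SHIFT) | (idx << TAIL_IDX_SHIFT);
}

static unsigned int tail_cpu(uint32_t val)
{
    return (val >> TAIL_CPU_SHIFT) - 1;
}

static unsigned int tail_idx(uint32_t val)
{
    return (val & TAIL_IDX_MASK) >> TAIL_IDX_SHIFT;
}

int main(void)
{
    uint32_t val = 1 /* locked byte */ | PENDING_BIT | encode_tail(5, 2);

    assert((val & LOCKED_MASK) == 1);
    assert(val & PENDING_BIT);
    assert(tail_cpu(val) == 5 && tail_idx(val) == 2);
    return 0;
}
```
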
/linux/include/linux/
local_lock_internal.h
81 #define __local_lock_init(lock) \ argument
85 debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
86 lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
89 local_lock_debug_init(lock); \
92 #define __local_trylock_init(lock) \ argument
94 __local_lock_init((local_lock_t *)lock); \
97 #define __spinlock_nested_bh_init(lock) \ argument
101 debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
102 lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
105 local_lock_debug_init(lock); \
[all …]
/linux/fs/ocfs2/dlm/
dlmast.c
35 struct dlm_lock *lock);
36 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
39 * lock level will obsolete a pending bast.
40 * For example, if dlm_thread queued a bast for an EX lock that
42 * lock owner downconverted to NL, the bast is now obsolete.
44 * This is needed because the lock and convert paths can queue
47 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in dlm_should_cancel_bast() argument
50 assert_spin_locked(&lock->spinlock); in dlm_should_cancel_bast()
52 if (lock->ml.highest_blocked == LKM_IVMODE) in dlm_should_cancel_bast()
54 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); in dlm_should_cancel_bast()
[all …]
dlmlock.c
5 * underlying calls for lock creation
45 struct dlm_lock *lock, int flags);
49 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
66 /* Tell us whether we can grant a new lock request.
71 * returns: 1 if the lock can be granted, 0 otherwise.
74 struct dlm_lock *lock) in dlm_can_grant_new_lock() argument
79 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
84 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
87 lock->ml.type)) in dlm_can_grant_new_lock()
94 /* performs lock creation at the lockres master site
[all …]
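
The dlmlock.c excerpt above states the grant rule: a new lock request can be granted only if its mode is compatible with every lock already sitting on the granted and converting queues. The sketch below illustrates that rule with the six classic DLM lock modes; the compatibility table follows the conventional VMS/DLM matrix, and the names (can_grant, compat) are mine, not ocfs2's.

```c
#include <stdbool.h>
#include <stdio.h>

/* Classic DLM lock modes, from least to most restrictive. */
enum dlm_mode { NL, CR, CW, PR, PW, EX, NR_MODES };

/* compat[a][b]: may a lock in mode `a` coexist with one in mode `b`? */
static const bool compat[NR_MODES][NR_MODES] = {
    /*        NL     CR     CW     PR     PW     EX   */
    /* NL */ {true,  true,  true,  true,  true,  true },
    /* CR */ {true,  true,  true,  true,  true,  false},
    /* CW */ {true,  true,  true,  false, false, false},
    /* PR */ {true,  true,  false, true,  false, false},
    /* PW */ {true,  true,  false, false, false, false},
    /* EX */ {true,  false, false, false, false, false},
};

/* A new request is grantable only if compatible with every existing lock. */
static bool can_grant(enum dlm_mode requested,
                      const enum dlm_mode *existing, int n)
{
    for (int i = 0; i < n; i++)
        if (!compat[existing[i]][requested])
            return false;
    return true;
}

int main(void)
{
    enum dlm_mode granted[] = { PR, PR };   /* two readers hold the resource */

    printf("PR grantable: %d\n", can_grant(PR, granted, 2));  /* 1 */
    printf("EX grantable: %d\n", can_grant(EX, granted, 2));  /* 0 */
    return 0;
}
```
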
dlmconvert.c
5 * underlying calls for lock conversion
38 * only one that holds a lock on exit (res->spinlock).
43 struct dlm_lock *lock, int flags,
48 struct dlm_lock *lock, int flags, int type);
61 struct dlm_lock *lock, int flags, int type) in dlmconvert_master() argument
72 status = __dlmconvert_master(dlm, res, lock, flags, type, in dlmconvert_master()
83 dlm_queue_ast(dlm, lock); in dlmconvert_master()
93 /* performs lock conversion at the lockres master site
96 * taken: takes and drops lock->spinlock
99 * call_ast: whether ast should be called for this lock
[all …]
dlmunlock.c
43 struct dlm_lock *lock,
48 struct dlm_lock *lock,
54 struct dlm_lock *lock,
67 * So to unlock a converting lock, you must first cancel the
76 * taken: res->spinlock and lock->spinlock taken and dropped
79 * all callers should have taken an extra ref on lock coming in
83 struct dlm_lock *lock, in dlmunlock_common() argument
103 /* We want to be sure that we're not freeing a lock in dlmunlock_common()
105 in_use = !list_empty(&lock->ast_list); in dlmunlock_common()
125 spin_lock(&lock->spinlock); in dlmunlock_common()
[all …]
/linux/drivers/md/persistent-data/
dm-block-manager.c
32 * trace is also emitted for the previous lock acquisition.
45 spinlock_t lock; member
61 static unsigned int __find_holder(struct block_lock *lock, in __find_holder() argument
67 if (lock->holders[i] == task) in __find_holder()
74 /* call this *after* you increment lock->count */
75 static void __add_holder(struct block_lock *lock, struct task_struct *task) in __add_holder() argument
77 unsigned int h = __find_holder(lock, NULL); in __add_holder()
83 lock->holders[h] = task; in __add_holder()
86 t = lock->traces + h; in __add_holder()
91 /* call this *before* you decrement lock->count */
[all …]
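
The dm-block-manager.c excerpt above shows per-lock holder bookkeeping: __find_holder() scans a small fixed array for a given task (passing NULL finds a free slot) and __add_holder() records the new holder there. A condensed sketch of that pattern, with an illustrative MAX_HOLDERS and without the stack-trace capture:

```c
#include <assert.h>
#include <stddef.h>

#define MAX_HOLDERS 4          /* illustrative size; the real array is small too */

struct task;                    /* opaque stand-in for struct task_struct */

struct block_lock {
    int count;                          /* >0 readers, <0 writer (sketch)  */
    struct task *holders[MAX_HOLDERS];  /* who currently holds the lock    */
};

/* Return the slot holding `task`; pass NULL to find a free slot. */
static unsigned int find_holder(struct block_lock *lock, struct task *task)
{
    unsigned int i;

    for (i = 0; i < MAX_HOLDERS; i++)
        if (lock->holders[i] == task)
            break;

    assert(i < MAX_HOLDERS);    /* the real code complains if no slot matches */
    return i;
}

/* Call after incrementing lock->count: record the new holder. */
static void add_holder(struct block_lock *lock, struct task *task)
{
    lock->holders[find_holder(lock, NULL)] = task;
}

/* Call before decrementing lock->count: forget a holder. */
static void del_holder(struct block_lock *lock, struct task *task)
{
    lock->holders[find_holder(lock, task)] = NULL;
}
```
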
/linux/rust/kernel/sync/
lock.rs
3 //! Generic kernel lock and guard.
5 //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
22 /// The "backend" of a lock.
24 /// It is the actual implementation of the lock, without the need to repeat patterns used in all
29 /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
30 /// is owned, that is, between calls to [`lock`] and [`unlock`].
32 /// lock operation.
34 /// [`lock`]: Backend::lock
38 /// The state required by the lock
65 unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState; lock() method
106 pub struct Lock<T: ?Sized, B: Backend> { global() struct
122 dataLock global() argument
123 unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {} global() implementation
127 unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {} global() implementation
129 impl<T, B: Backend> Lock<T, B> { global() implementation
148 impl<B: Backend> Lock<(), B> { global() implementation
170 impl<T: ?Sized, B: Backend> Lock<T, B> { global() implementation
172 pub fn lock(&self) -> Guard<'_, T, B> { lock() method
199 pub(crate) lock: &'a Lock<T, B>, global() field
[all …]
/linux/include/asm-generic/
qrwlock.h
3 * Queue read/write lock
28 #define _QW_LOCKED 0x0ff /* A writer holds the lock */
36 extern void queued_read_lock_slowpath(struct qrwlock *lock);
37 extern void queued_write_lock_slowpath(struct qrwlock *lock);
40 * queued_read_trylock - try to acquire read lock of a queued rwlock
41 * @lock : Pointer to queued rwlock structure
42 * Return: 1 if lock acquired, 0 if failed
44 static inline int queued_read_trylock(struct qrwlock *lock) in queued_read_trylock() argument
48 cnts = atomic_read(&lock->cnts); in queued_read_trylock()
50 cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); in queued_read_trylock()
[all …]
/linux/Documentation/locking/
lockdep-design.rst
8 Lock-class
15 tens of thousands of) instantiations. For example a lock in the inode
17 lock class.
19 The validator tracks the 'usage state' of lock-classes, and it tracks
20 the dependencies between different lock-classes. Lock usage indicates
21 how a lock is used with regard to its IRQ contexts, while lock
22 dependency can be understood as lock order, where L1 -> L2 suggests that
26 continuing effort to prove lock usages and dependencies are correct or
29 A lock-class's behavior is constructed by its instances collectively:
30 when the first instance of a lock-class is used after bootup the class
[all …]
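
The lockdep-design.rst excerpt above draws the key distinction between a lock-class and its instances: thousands of inodes each embed their own lock, yet they all share one class. The kernel-style sketch below illustrates why (it only builds in-tree; struct my_object and its helpers are made up, while spin_lock_init(), kzalloc() and friends are the ordinary kernel APIs): spin_lock_init() expands to a per-call-site static lockdep key, so every object initialised through the same helper lands in the same class.

```c
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Every instance of this struct embeds its own spinlock... */
struct my_object {
    spinlock_t lock;
    int value;
};

/*
 * ...but because all instances are initialised from this one
 * spin_lock_init() call site (one static lockdep key), lockdep treats
 * the potentially thousands of per-object locks as a single lock-class.
 */
static struct my_object *my_object_alloc(void)
{
    struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    if (!obj)
        return NULL;
    spin_lock_init(&obj->lock);    /* instance registered under one class */
    return obj;
}

static void my_object_bump(struct my_object *obj)
{
    spin_lock(&obj->lock);
    obj->value++;
    spin_unlock(&obj->lock);
}
```
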
robust-futex-ABI.rst
56 pointer to a single linked list of 'lock entries', one per lock,
58 to itself, 'head'. The last 'lock entry' points back to the 'head'.
61 address of the associated 'lock entry', plus or minus, of what will
62 be called the 'lock word', from that 'lock entry'. The 'lock word'
63 is always a 32 bit word, unlike the other words above. The 'lock
65 of the thread holding the lock in the bottom 30 bits. See further
69 the address of the 'lock entry', during list insertion and removal,
73 Each 'lock entry' on the single linked list starting at 'head' consists
74 of just a single word, pointing to the next 'lock entry', or back to
75 'head' if there are no more entries. In addition, nearby to each 'lock
[all …]
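
The robust-futex ABI text above describes the per-thread list layout: a 'head' published to the kernel, lock entries that are single pointers chained from it, and a 32-bit lock word found at a fixed offset from each entry, holding the owner TID in its low 30 bits. The userspace sketch below mirrors the UAPI structures from <linux/futex.h> and walks one entry to its lock word; struct my_robust_mutex and the field names inside it are illustrative.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of the robust-futex list structures (the canonical
 * definitions live in the <linux/futex.h> UAPI header). */
struct robust_list {
    struct robust_list *next;           /* next lock entry, or back to head */
};

struct robust_list_head {
    struct robust_list list;            /* the 'head' itself                */
    long futex_offset;                  /* lock entry -> lock word offset   */
    struct robust_list *list_op_pending;
};

#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u
#define FUTEX_TID_MASK   0x3fffffffu    /* owner TID lives in the low 30 bits */

/* One application lock: its list entry plus the adjacent lock word. */
struct my_robust_mutex {
    struct robust_list entry;
    uint32_t lock_word;
};

int main(void)
{
    struct my_robust_mutex m = { .lock_word = 1234 /* owner's TID */ };
    struct robust_list_head head = {
        .list = { .next = &m.entry },
        .futex_offset = offsetof(struct my_robust_mutex, lock_word)
                        - offsetof(struct my_robust_mutex, entry),
        .list_op_pending = NULL,
    };
    m.entry.next = &head.list;          /* last entry points back to head */

    uint32_t *word = (uint32_t *)((char *)head.list.next + head.futex_offset);
    printf("owner tid=%u waiters=%d\n",
           *word & FUTEX_TID_MASK, !!(*word & FUTEX_WAITERS));
    return 0;
}
```

A thread hands &head to the kernel with the set_robust_list() system call; on thread exit the kernel walks the list and marks each still-held lock word with FUTEX_OWNER_DIED.
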
/linux/arch/powerpc/include/asm/
simple_spinlock.h
6 * Simple spin lock operations.
35 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) in arch_spin_value_unlocked() argument
37 return lock.slock == 0; in arch_spin_value_unlocked()
40 static inline int arch_spin_is_locked(arch_spinlock_t *lock) in arch_spin_is_locked() argument
42 return !arch_spin_value_unlocked(READ_ONCE(*lock)); in arch_spin_is_locked()
46 * This returns the old value in the lock, so we succeeded
47 * in getting the lock if the return value is 0.
49 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) in __arch_spin_trylock() argument
64 : "r" (token), "r" (&lock->slock), [eh] "n" (eh) in __arch_spin_trylock()
70 static inline int arch_spin_trylock(arch_spinlock_t *lock) in arch_spin_trylock() argument
[all …]
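
The simple_spinlock.h comment above documents the calling convention: the trylock helper hands back the old lock value, so a return of 0 means the lock was free and has just been taken. A portable C11 sketch of that same convention (the real helper uses larx/stwcx. assembly and a per-CPU token; the token value and names here are illustrative):

```c
#include <stdatomic.h>
#include <stdio.h>

typedef struct { _Atomic unsigned long slock; } simple_spinlock_t;

/* Return the *previous* lock value: 0 means we got the lock. */
static unsigned long simple_spin_trylock(simple_spinlock_t *lock)
{
    unsigned long old = 0;
    const unsigned long token = 1;      /* stand-in for the per-CPU token */

    /* Store the token only if the lock word was 0 (i.e. unlocked). */
    atomic_compare_exchange_strong_explicit(&lock->slock, &old, token,
                                            memory_order_acquire,
                                            memory_order_relaxed);
    return old;
}

static void simple_spin_unlock(simple_spinlock_t *lock)
{
    atomic_store_explicit(&lock->slock, 0, memory_order_release);
}

int main(void)
{
    simple_spinlock_t lock = { 0 };

    if (simple_spin_trylock(&lock) == 0)
        printf("first trylock: acquired\n");
    if (simple_spin_trylock(&lock) != 0)
        printf("second trylock: already held\n");
    simple_spin_unlock(&lock);
    return 0;
}
```
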
/linux/tools/testing/selftests/bpf/progs/
linked_list.c
12 struct bpf_spin_lock lock; member
28 int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map) in list_push_pop() argument
37 bpf_spin_lock(lock); in list_push_pop()
39 bpf_spin_unlock(lock); in list_push_pop()
46 bpf_spin_lock(lock); in list_push_pop()
48 bpf_spin_unlock(lock); in list_push_pop()
56 bpf_spin_lock(lock); in list_push_pop()
59 bpf_spin_unlock(lock); in list_push_pop()
62 bpf_spin_lock(lock); in list_push_pop()
64 bpf_spin_unlock(lock); in list_push_pop()
[all …]
/linux/tools/perf/tests/shell/
lock_contention.sh
2 # kernel lock contention analysis test
36 if ! perf list tracepoint | grep -q lock:contention_begin; then
37 echo "[Skip] No lock contention tracepoints"
44 echo "[Skip] Low number of CPUs (`nproc`), lock event cannot be triggered certainly"
52 echo "Testing perf lock record and perf lock contention"
53 perf lock record -o ${perfdata} -- perf bench sched messaging -p > /dev/null 2>&1
55 perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
65 echo "Testing perf lock contention --use-bpf"
67 if ! perf lock con -b true > /dev/null 2>&1 ; then
72 # the perf lock contention output goes to the stderr
[all …]
/linux/arch/alpha/include/asm/
spinlock.h
11 * Simple spin lock operations. There are two variants, one clears IRQ's
17 #define arch_spin_is_locked(x) ((x)->lock != 0)
19 static inline int arch_spin_value_unlocked(arch_spinlock_t lock) in arch_spin_value_unlocked() argument
21 return lock.lock == 0; in arch_spin_value_unlocked()
24 static inline void arch_spin_unlock(arch_spinlock_t * lock) in arch_spin_unlock() argument
27 lock->lock = 0; in arch_spin_unlock()
30 static inline void arch_spin_lock(arch_spinlock_t * lock) in arch_spin_lock() argument
46 : "=&r" (tmp), "=m" (lock->lock) in arch_spin_lock()
47 : "m"(lock->lock) : "memory"); in arch_spin_lock()
50 static inline int arch_spin_trylock(arch_spinlock_t *lock) in arch_spin_trylock() argument
[all …]
/linux/drivers/acpi/acpica/
utlock.c
4 * Module Name: utlock - Reader/Writer lock interfaces
21 * PARAMETERS: lock - Pointer to a valid RW lock
25 * DESCRIPTION: Reader/writer lock creation and deletion interfaces.
28 acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock) in acpi_ut_create_rw_lock() argument
32 lock->num_readers = 0; in acpi_ut_create_rw_lock()
33 status = acpi_os_create_mutex(&lock->reader_mutex); in acpi_ut_create_rw_lock()
38 status = acpi_os_create_mutex(&lock->writer_mutex); in acpi_ut_create_rw_lock()
42 void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock) in acpi_ut_delete_rw_lock() argument
45 acpi_os_delete_mutex(lock->reader_mutex); in acpi_ut_delete_rw_lock()
46 acpi_os_delete_mutex(lock->writer_mutex); in acpi_ut_delete_rw_lock()
[all …]
/linux/arch/hexagon/include/asm/
spinlock.h
22 * - load the lock value
24 * - if the lock value is still negative, go back and try again.
26 * - successful store new lock value if positive -> lock acquired
28 static inline void arch_read_lock(arch_rwlock_t *lock) in arch_read_lock() argument
37 : "r" (&lock->lock) in arch_read_lock()
43 static inline void arch_read_unlock(arch_rwlock_t *lock) in arch_read_unlock() argument
51 : "r" (&lock->lock) in arch_read_unlock()
58 static inline int arch_read_trylock(arch_rwlock_t *lock) in arch_read_trylock() argument
69 : "r" (&lock->lock) in arch_read_trylock()
75 /* Stuffs a -1 in the lock value? */
[all …]
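
The hexagon spinlock.h comment above lists the read-lock recipe step by step: load the lock value, retry while it is negative (a writer holds it), otherwise store the incremented value to take a reader reference. A portable C11 sketch of those steps, using a CAS loop in place of Hexagon's LL/SC sequence (the type and function names are illustrative):

```c
#include <stdatomic.h>
#include <stdbool.h>

/*
 * The lock word counts readers; a negative value means a writer holds it.
 */
typedef struct { _Atomic int lock; } rw_lock_t;

static void read_lock(rw_lock_t *rw)
{
    int v = atomic_load_explicit(&rw->lock, memory_order_relaxed);

    for (;;) {
        if (v < 0) {            /* writer present: reload and retry        */
            v = atomic_load_explicit(&rw->lock, memory_order_relaxed);
            continue;
        }
        /* value is non-negative: try to publish v + 1                     */
        if (atomic_compare_exchange_weak_explicit(&rw->lock, &v, v + 1,
                                                  memory_order_acquire,
                                                  memory_order_relaxed))
            return;             /* lock acquired                           */
        /* CAS failure left the fresh value in v; loop and try again       */
    }
}

static void read_unlock(rw_lock_t *rw)
{
    atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}

static bool read_trylock(rw_lock_t *rw)
{
    int v = atomic_load_explicit(&rw->lock, memory_order_relaxed);

    return v >= 0 &&
           atomic_compare_exchange_strong_explicit(&rw->lock, &v, v + 1,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}
```
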
/linux/tools/perf/Documentation/
perf-lock.txt
1 perf-lock(1)
6 perf-lock - Analyze lock events
11 'perf lock' {record|report|script|info|contention}
15 You can analyze various lock behaviours
16 and statistics with this 'perf lock' command.
18 'perf lock record <command>' records lock events
21 results of lock events.
23 'perf lock report' reports statistical data.
25 'perf lock script' shows raw lock events.
27 'perf lock info' shows metadata like threads or addresses
[all …]
/linux/arch/mips/include/asm/octeon/
cvmx-spinlock.h
61 * @lock: Lock to initialize
63 static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock) in cvmx_spinlock_init() argument
65 lock->value = CVMX_SPINLOCK_UNLOCKED_VAL; in cvmx_spinlock_init()
71 * @lock: Lock to check
74 static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock) in cvmx_spinlock_locked() argument
76 return lock->value != CVMX_SPINLOCK_UNLOCKED_VAL; in cvmx_spinlock_locked()
80 * Releases lock
82 * @lock: pointer to lock structure
84 static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock) in cvmx_spinlock_unlock() argument
87 lock->value = 0; in cvmx_spinlock_unlock()
[all …]
/linux/include/trace/events/
lock.h
3 #define TRACE_SYSTEM lock
11 /* flags for lock:contention_begin */
26 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
30 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
34 __string(name, lock->name)
41 __entry->lockdep_addr = lock;
50 DECLARE_EVENT_CLASS(lock,
52 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
54 TP_ARGS(lock, ip),
57 __string( name, lock
[all …]
