
Searched full:lock (Results 1 – 25 of 4728) sorted by relevance


/linux/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
21 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
24 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
25 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
26 __acquires(lock);
27 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
[all …]
spinlock_api_up.h
19 #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) argument
24 * flags straight, to suppress compiler warnings of unused lock
27 #define ___LOCK(lock) \ argument
28 do { __acquire(lock); (void)(lock); } while (0)
30 #define __LOCK(lock) \ argument
31 do { preempt_disable(); ___LOCK(lock); } while (0)
33 #define __LOCK_BH(lock) \ argument
34 do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
36 #define __LOCK_IRQ(lock) \ argument
37 do { local_irq_disable(); __LOCK(lock); } while (0)
[all …]
spinlock.h
72 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
101 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
104 # define raw_spin_lock_init(lock) \ argument
108 __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
112 # define raw_spin_lock_init(lock) \ argument
113 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
116 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock) argument
119 raw_spin_is_contended(lock) global() argument
121 raw_spin_is_contended(lock) global() argument
184 do_raw_spin_lock(raw_spinlock_t * lock) do_raw_spin_lock() argument
191 do_raw_spin_trylock(raw_spinlock_t * lock) do_raw_spin_trylock() argument
201 do_raw_spin_unlock(raw_spinlock_t * lock) do_raw_spin_unlock() argument
215 raw_spin_trylock(lock) global() argument
217 raw_spin_lock(lock) global() argument
220 raw_spin_lock_nested(lock,subclass) global() argument
223 raw_spin_lock_nest_lock(lock,nest_lock) global() argument
234 raw_spin_lock_nested(lock,subclass) global() argument
236 raw_spin_lock_nest_lock(lock,nest_lock) global() argument
241 raw_spin_lock_irqsave(lock,flags) global() argument
248 raw_spin_lock_irqsave_nested(lock,flags,subclass) global() argument
254 raw_spin_lock_irqsave_nested(lock,flags,subclass) global() argument
263 raw_spin_lock_irqsave(lock,flags) global() argument
269 raw_spin_lock_irqsave_nested(lock,flags,subclass) global() argument
274 raw_spin_lock_irq(lock) global() argument
275 raw_spin_lock_bh(lock) global() argument
276 raw_spin_unlock(lock) global() argument
277 raw_spin_unlock_irq(lock) global() argument
279 raw_spin_unlock_irqrestore(lock,flags) global() argument
284 raw_spin_unlock_bh(lock) global() argument
286 raw_spin_trylock_bh(lock) global() argument
289 raw_spin_trylock_irq(lock) global() argument
296 raw_spin_trylock_irqsave(lock,flags) global() argument
324 spinlock_check(spinlock_t * lock) spinlock_check() argument
331 spin_lock_init(lock) global() argument
349 spin_lock(spinlock_t * lock) spin_lock() argument
354 spin_lock_bh(spinlock_t * lock) spin_lock_bh() argument
359 spin_trylock(spinlock_t * lock) spin_trylock() argument
364 spin_lock_nested(lock,subclass) global() argument
369 spin_lock_nest_lock(lock,nest_lock) global() argument
374 spin_lock_irq(spinlock_t * lock) spin_lock_irq() argument
379 spin_lock_irqsave(lock,flags) global() argument
384 spin_lock_irqsave_nested(lock,flags,subclass) global() argument
389 spin_unlock(spinlock_t * lock) spin_unlock() argument
394 spin_unlock_bh(spinlock_t * lock) spin_unlock_bh() argument
399 spin_unlock_irq(spinlock_t * lock) spin_unlock_irq() argument
404 spin_unlock_irqrestore(spinlock_t * lock,unsigned long flags) spin_unlock_irqrestore() argument
409 spin_trylock_bh(spinlock_t * lock) spin_trylock_bh() argument
414 spin_trylock_irq(spinlock_t * lock) spin_trylock_irq() argument
419 spin_trylock_irqsave(lock,flags) global() argument
442 spin_is_locked(spinlock_t * lock) spin_is_locked() argument
447 spin_is_contended(spinlock_t * lock) spin_is_contended() argument
452 assert_spin_locked(lock) global() argument
463 spin_needbreak(spinlock_t * lock) spin_needbreak() argument
479 rwlock_needbreak(rwlock_t * lock) rwlock_needbreak() argument
501 atomic_dec_and_lock(atomic,lock) global() argument
506 atomic_dec_and_lock_irqsave(atomic,lock,flags) global() argument
510 atomic_dec_and_raw_lock(atomic,lock) global() argument
515 atomic_dec_and_raw_lock_irqsave(atomic,lock,flags) global() argument
[all …]
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \ argument
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \ argument
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 extern int do_raw_read_trylock(rwlock_t *lock);
34 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
35 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
36 extern int do_raw_write_trylock(rwlock_t *lock);
[all …]
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
spinlock_up.h
29 static inline void arch_spin_lock(arch_spinlock_t *lock) in arch_spin_lock() argument
31 lock->slock = 0; in arch_spin_lock()
35 static inline int arch_spin_trylock(arch_spinlock_t *lock) in arch_spin_trylock() argument
37 char oldval = lock->slock; in arch_spin_trylock()
39 lock->slock = 0; in arch_spin_trylock()
45 static inline void arch_spin_unlock(arch_spinlock_t *lock) in arch_spin_unlock() argument
48 lock->slock = 1; in arch_spin_unlock()
54 #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) argument
55 #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) argument
56 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) argument
[all …]
/linux/drivers/md/dm-vdo/
dedupe.c
14 * deduplicate against a single block instead of being serialized through a PBN read lock. Only one
22 * A hash_lock acts like a state machine perhaps more than as a lock. Other than the starting and
25 * containing the lock. An asynchronous operation is almost always performed upon entering a state,
28 * In all states except DEDUPING, there is a single data_vio, called the lock agent, performing the
29 * asynchronous operations on behalf of the lock. The agent will change during the lifetime of the
30 * lock if the lock is shared by more than one data_vio. data_vios waiting to deduplicate are kept
31 * on a wait queue. Viewed a different way, the agent holds the lock exclusively until the lock
32 * enters the DEDUPING state, at which point it becomes a shared lock that all the waiters (and any
33 * new data_vios that arrive) use to share a PBN lock. In state DEDUPING, there is no agent. When
34 * the last data_vio in the lock calls back in DEDUPING, it becomes the agent and the lock becomes
[all …]
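
The dedupe.c comment above describes a "lock agent" pattern: the first data_vio to reach a hash_lock performs the asynchronous setup on behalf of everyone else, and once the lock enters DEDUPING every waiter shares it. Below is a minimal userspace sketch of that hand-off, assuming pthreads; the names (hash_lock_demo, enter_lock, worker) are illustrative and not the VDO API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct hash_lock_demo {
        pthread_mutex_t mutex;
        pthread_cond_t cond;
        bool has_agent;     /* one thread currently acts on behalf of the lock */
        bool deduping;      /* shared phase: every holder may proceed */
};

static void enter_lock(struct hash_lock_demo *hl, int id)
{
        pthread_mutex_lock(&hl->mutex);
        if (!hl->deduping && !hl->has_agent) {
                /* First arrival becomes the agent and does the setup work. */
                hl->has_agent = true;
                pthread_mutex_unlock(&hl->mutex);

                printf("thread %d: agent, doing the asynchronous setup\n", id);

                pthread_mutex_lock(&hl->mutex);
                hl->has_agent = false;
                hl->deduping = true;    /* lock becomes shared, wake the waiters */
                pthread_cond_broadcast(&hl->cond);
        } else {
                while (!hl->deduping)   /* waiters sleep until the shared phase */
                        pthread_cond_wait(&hl->cond, &hl->mutex);
        }
        pthread_mutex_unlock(&hl->mutex);
        printf("thread %d: proceeding in the shared phase\n", id);
}

static struct hash_lock_demo demo = {
        .mutex = PTHREAD_MUTEX_INITIALIZER,
        .cond  = PTHREAD_COND_INITIALIZER,
};

static void *worker(void *arg)
{
        enter_lock(&demo, (int)(long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[3];
        long i;

        for (i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, worker, (void *)i);
        for (i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        return 0;
}

The agent path drops the mutex while it works, so later arrivals see has_agent set and queue on the condition variable instead of repeating the setup.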
/linux/fs/ocfs2/dlm/
dlmast.c
35 struct dlm_lock *lock);
36 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
39 * lock level will obsolete a pending bast.
40 * For example, if dlm_thread queued a bast for an EX lock that
42 * lock owner downconverted to NL, the bast is now obsolete.
44 * This is needed because the lock and convert paths can queue
47 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in dlm_should_cancel_bast() argument
50 assert_spin_locked(&lock->spinlock); in dlm_should_cancel_bast()
52 if (lock->ml.highest_blocked == LKM_IVMODE) in dlm_should_cancel_bast()
54 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); in dlm_should_cancel_bast()
[all …]
dlmlock.c
5 * underlying calls for lock creation
45 struct dlm_lock *lock, int flags);
49 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
66 /* Tell us whether we can grant a new lock request.
71 * returns: 1 if the lock can be granted, 0 otherwise.
74 struct dlm_lock *lock) in dlm_can_grant_new_lock() argument
79 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
84 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
87 lock->ml.type)) in dlm_can_grant_new_lock()
94 /* performs lock creation at the lockres master site
[all …]
dlmconvert.c
5 * underlying calls for lock conversion
38 * only one that holds a lock on exit (res->spinlock).
43 struct dlm_lock *lock, int flags,
48 struct dlm_lock *lock, int flags, int type);
61 struct dlm_lock *lock, int flags, int type) in dlmconvert_master() argument
72 status = __dlmconvert_master(dlm, res, lock, flags, type, in dlmconvert_master()
83 dlm_queue_ast(dlm, lock); in dlmconvert_master()
93 /* performs lock conversion at the lockres master site
96 * taken: takes and drops lock->spinlock
99 * call_ast: whether ast should be called for this lock
[all …]
/linux/drivers/md/persistent-data/
dm-block-manager.c
32 * trace is also emitted for the previous lock acquisition.
45 spinlock_t lock; member
61 static unsigned int __find_holder(struct block_lock *lock, in __find_holder() argument
67 if (lock->holders[i] == task) in __find_holder()
74 /* call this *after* you increment lock->count */
75 static void __add_holder(struct block_lock *lock, struct task_struct *task) in __add_holder() argument
77 unsigned int h = __find_holder(lock, NULL); in __add_holder()
83 lock->holders[h] = task; in __add_holder()
86 t = lock->traces + h; in __add_holder()
91 /* call this *before* you decrement lock->count */
[all …]
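
The dm-block-manager.c hits above show a debugging aid: each block_lock records which tasks currently hold it, so a lockup report can name the holders (and, per the comment, their traces). A hedged userspace sketch of the same bookkeeping follows, with pthread_t standing in for struct task_struct; block_lock_demo, find_holder, add_holder and MAX_HOLDERS are invented names.

#include <pthread.h>

#define MAX_HOLDERS 4

struct block_lock_demo {
        int count;                       /* <0: one writer, >0: number of readers */
        int used[MAX_HOLDERS];           /* slot occupied? */
        pthread_t holders[MAX_HOLDERS];  /* which threads hold the lock right now */
};

/* Pass a thread to find its slot, or NULL to find a free slot. */
static unsigned int find_holder(struct block_lock_demo *bl, const pthread_t *t)
{
        unsigned int i;

        for (i = 0; i < MAX_HOLDERS; i++) {
                if (t ? (bl->used[i] && pthread_equal(bl->holders[i], *t))
                      : !bl->used[i])
                        return i;
        }
        return 0;       /* the kernel code warns here instead of reusing slot 0 */
}

/* Call this *after* incrementing bl->count, mirroring the comment above. */
static void add_holder(struct block_lock_demo *bl, pthread_t t)
{
        unsigned int h = find_holder(bl, NULL);

        bl->holders[h] = t;
        bl->used[h] = 1;
}

/* Call this *before* decrementing bl->count. */
static void del_holder(struct block_lock_demo *bl, pthread_t t)
{
        unsigned int h = find_holder(bl, &t);

        bl->used[h] = 0;
}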
/linux/kernel/locking/
ww_mutex.h
9 __ww_waiter_first(struct mutex *lock) in __ww_waiter_first() argument
13 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_first()
14 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_first()
21 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_next() argument
24 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_next()
31 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_prev() argument
34 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_prev()
41 __ww_waiter_last(struct mutex *lock) in __ww_waiter_last() argument
45 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_last()
46 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_last()
[all …]
qspinlock_paravirt.h
30 * not running. The one lock stealing attempt allowed at slowpath entry
57 * Hybrid PV queued/unfair lock
60 * it will be called once when a lock waiter enter the PV slowpath before
64 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
65 * When that bit becomes visible to the incoming waiters, no lock stealing
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
69 * and hence disabling lock stealing.
71 * When the pending bit isn't set, the lock waiters will stay in the unfair
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
73 * case, the lock waiters will enter the queued mode slowpath trying to
[all …]
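
The qspinlock_paravirt.h comment describes a hybrid rule: a waiter entering the PV slowpath gets one lock-stealing attempt, and once the waiter at the head of the queue sets the pending bit, stealing stops and newcomers must join the MCS wait queue, which bounds starvation. The sketch below only illustrates that rule with C11 atomics; the bit layout and the names LOCKED, PENDING and try_steal are assumptions, not the kernel's qspinlock encoding.

#include <stdatomic.h>
#include <stdbool.h>

#define LOCKED  0x01u
#define PENDING 0x02u   /* set by the queue head once it is ready to spin */

static bool try_steal(atomic_uint *val)
{
        unsigned int v = atomic_load(val);

        /* Stealing is allowed only while no queue head has set PENDING. */
        while (!(v & (LOCKED | PENDING))) {
                if (atomic_compare_exchange_weak(val, &v, v | LOCKED))
                        return true;    /* stole the lock, skip the queue */
                /* on failure v is reloaded; re-check whether stealing is still allowed */
        }
        return false;                   /* must join the MCS-style wait queue */
}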
qrwlock.c
15 #include <trace/events/lock.h>
18 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
19 * @lock: Pointer to queued rwlock structure
21 void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock) in queued_read_lock_slowpath() argument
24 * Readers come here when they cannot get the lock without waiting in queued_read_lock_slowpath()
28 * Readers in interrupt context will get the lock immediately in queued_read_lock_slowpath()
29 * if the writer is just waiting (not holding the lock yet), in queued_read_lock_slowpath()
30 * so spin with ACQUIRE semantics until the lock is available in queued_read_lock_slowpath()
33 atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); in queued_read_lock_slowpath()
36 atomic_sub(_QR_BIAS, &lock->cnts); in queued_read_lock_slowpath()
[all …]
/linux/rust/kernel/sync/
lock.rs
3 //! Generic kernel lock and guard.
5 //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
22 /// The "backend" of a lock.
24 /// It is the actual implementation of the lock, without the need to repeat patterns used in all
29 /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
30 /// is owned, that is, between calls to [`lock`] and [`unlock`].
32 /// lock operation.
34 /// [`lock`]: Backend::lock
38 /// The state required by the lock.
41 /// The state required to be kept between [`lock`] and [`unlock`].
[all …]
/linux/include/asm-generic/
qrwlock.h
3 * Queue read/write lock
28 #define _QW_LOCKED 0x0ff /* A writer holds the lock */
36 extern void queued_read_lock_slowpath(struct qrwlock *lock);
37 extern void queued_write_lock_slowpath(struct qrwlock *lock);
40 * queued_read_trylock - try to acquire read lock of a queued rwlock
41 * @lock : Pointer to queued rwlock structure
42 * Return: 1 if lock acquired, 0 if failed
44 static inline int queued_read_trylock(struct qrwlock *lock) in queued_read_trylock() argument
48 cnts = atomic_read(&lock->cnts); in queued_read_trylock()
50 cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); in queued_read_trylock()
[all …]
/linux/Documentation/locking/
lockdep-design.rst
8 Lock-class
15 tens of thousands of) instantiations. For example a lock in the inode
17 lock class.
19 The validator tracks the 'usage state' of lock-classes, and it tracks
20 the dependencies between different lock-classes. Lock usage indicates
21 how a lock is used with regard to its IRQ contexts, while lock
22 dependency can be understood as lock order, where L1 -> L2 suggests that
26 continuing effort to prove lock usages and dependencies are correct or
29 A lock-class's behavior is constructed by its instances collectively:
30 when the first instance of a lock-class is used after bootup the class
[all …]
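
lockdep-design.rst distinguishes a lock class (one per initialization site) from its thousands of runtime instances, and the spinlock.h hit earlier shows the underlying trick: a static lock_class_key declared inside the init macro. Below is a userspace sketch of the same idea, assuming pthreads; demo_class_key, demo_lock_init and inode_demo are illustrative names.

#include <pthread.h>

/* One "class" per initialization site; many lock instances map onto it. */
struct demo_class_key {
        const char *file;
        int line;
};

struct demo_lock {
        pthread_mutex_t mutex;
        const struct demo_class_key *class;  /* shared by all locks from one site */
};

/* A static key inside the macro body yields one key per expansion site,
 * the same trick as the static lock_class_key in the spinlock.h result. */
#define demo_lock_init(lk) do {                                 \
        static const struct demo_class_key __key = {            \
                .file = __FILE__, .line = __LINE__,             \
        };                                                      \
        (lk)->class = &__key;                                   \
        pthread_mutex_init(&(lk)->mutex, NULL);                 \
} while (0)

struct inode_demo {
        struct demo_lock i_lock;
};

static void init_inode_demo(struct inode_demo *inode)
{
        /* Every inode_demo initialized here shares one class key. */
        demo_lock_init(&inode->i_lock);
}

All locks initialized through init_inode_demo share the single key from that expansion site, which is what lets the validator reason about "the inode lock" as one class rather than per instance.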
robust-futex-ABI.rst
56 pointer to a single linked list of 'lock entries', one per lock,
58 to itself, 'head'. The last 'lock entry' points back to the 'head'.
61 address of the associated 'lock entry', plus or minus, of what will
62 be called the 'lock word', from that 'lock entry'. The 'lock word'
63 is always a 32 bit word, unlike the other words above. The 'lock
65 of the thread holding the lock in the bottom 30 bits. See further
69 the address of the 'lock entry', during list insertion and removal,
73 Each 'lock entry' on the single linked list starting at 'head' consists
74 of just a single word, pointing to the next 'lock entry', or back to
75 'head' if there are no more entries. In addition, nearby to each 'lock
[all …]
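
The robust-futex ABI text describes a per-thread singly linked list of one-word lock entries, a fixed signed offset from each entry to its 32-bit lock word, and the holder's TID in the word's bottom 30 bits. A hedged C sketch of that layout follows; the authoritative definitions are struct robust_list and struct robust_list_head in the kernel's UAPI futex header, and the names used here are illustrative.

#include <stdint.h>

/* One word per lock entry: the next entry, or back to the list head. */
struct lock_entry {
        struct lock_entry *next;
};

/* Per-thread list head registered with the kernel (cf. set_robust_list(2)). */
struct robust_head_demo {
        struct lock_entry list;        /* points to the first lock entry */
        long lock_word_offset;         /* entry address + offset = lock word */
        struct lock_entry *op_pending; /* entry being acquired/released right now */
};

#define TID_MASK 0x3fffffffu           /* bottom 30 bits: TID of the lock holder */

/* The 32-bit lock word lives at a fixed signed offset from each entry. */
static inline uint32_t *lock_word_of(const struct robust_head_demo *head,
                                     struct lock_entry *entry)
{
        return (uint32_t *)((char *)entry + head->lock_word_offset);
}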
/linux/arch/powerpc/include/asm/
simple_spinlock.h
6 * Simple spin lock operations.
35 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) in arch_spin_value_unlocked() argument
37 return lock.slock == 0; in arch_spin_value_unlocked()
40 static inline int arch_spin_is_locked(arch_spinlock_t *lock) in arch_spin_is_locked() argument
42 return !arch_spin_value_unlocked(READ_ONCE(*lock)); in arch_spin_is_locked()
46 * This returns the old value in the lock, so we succeeded
47 * in getting the lock if the return value is 0.
49 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) in __arch_spin_trylock() argument
64 : "r" (token), "r" (&lock->slock), [eh] "n" (eh) in __arch_spin_trylock()
70 static inline int arch_spin_trylock(arch_spinlock_t *lock) in arch_spin_trylock() argument
[all …]
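
The powerpc comment states the trylock convention: the primitive returns the old value of the lock word, so 0 means the caller acquired the lock. A small C11-atomics sketch of that convention, with trylock_demo and token as illustrative names; the real code uses an ll/sc sequence in inline assembly rather than this portable compare-and-swap.

#include <stdatomic.h>

/*
 * Returns the old value of the lock word: 0 means the caller now owns the
 * lock, non-zero means someone else already held it.
 */
static inline unsigned long trylock_demo(atomic_ulong *slock, unsigned long token)
{
        unsigned long old = 0;

        if (atomic_compare_exchange_strong(slock, &old, token))
                return 0;       /* was unlocked, we stored our token */
        return old;             /* already held; old is the holder's token */
}

A caller then treats the result like the kernel code does: if (trylock_demo(&lock, my_token) == 0) the lock was acquired.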
/linux/tools/testing/selftests/bpf/progs/
linked_list.c
12 struct bpf_spin_lock lock; member
28 int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map) in list_push_pop() argument
37 bpf_spin_lock(lock); in list_push_pop()
39 bpf_spin_unlock(lock); in list_push_pop()
46 bpf_spin_lock(lock); in list_push_pop()
48 bpf_spin_unlock(lock); in list_push_pop()
56 bpf_spin_lock(lock); in list_push_pop()
59 bpf_spin_unlock(lock); in list_push_pop()
62 bpf_spin_lock(lock); in list_push_pop()
64 bpf_spin_unlock(lock); in list_push_pop()
[all …]
/linux/tools/perf/tests/shell/
lock_contention.sh
2 # kernel lock contention analysis test
36 if ! perf list tracepoint | grep -q lock:contention_begin; then
37 echo "[Skip] No lock contention tracepoints"
44 echo "[Skip] Low number of CPUs (`nproc`), lock event cannot be triggered certainly"
52 echo "Testing perf lock record and perf lock contention"
53 perf lock record -o ${perfdata} -- perf bench sched messaging -p > /dev/null 2>&1
55 perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
65 echo "Testing perf lock contention --use-bpf"
67 if ! perf lock con -b true > /dev/null 2>&1 ; then
72 # the perf lock contention output goes to the stderr
[all …]
/linux/arch/alpha/include/asm/
spinlock.h
11 * Simple spin lock operations. There are two variants, one clears IRQ's
17 #define arch_spin_is_locked(x) ((x)->lock != 0)
19 static inline int arch_spin_value_unlocked(arch_spinlock_t lock) in arch_spin_value_unlocked() argument
21 return lock.lock == 0; in arch_spin_value_unlocked()
24 static inline void arch_spin_unlock(arch_spinlock_t * lock) in arch_spin_unlock() argument
27 lock->lock = 0; in arch_spin_unlock()
30 static inline void arch_spin_lock(arch_spinlock_t * lock) in arch_spin_lock() argument
46 : "=&r" (tmp), "=m" (lock->lock) in arch_spin_lock()
47 : "m"(lock->lock) : "memory"); in arch_spin_lock()
50 static inline int arch_spin_trylock(arch_spinlock_t *lock) in arch_spin_trylock() argument
[all …]
/linux/fs/btrfs/
locking.c
17 * Lockdep class keys for extent_buffer->lock's in this root. For a given
27 * Lock-nesting across peer nodes is always done with the immediate parent
93 lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]); in btrfs_set_buffer_lockdep_class()
124 * - try-lock semantics for readers and writers
131 * btrfs_tree_read_lock_nested - lock extent buffer for read
135 * This takes the read lock on the extent buffer, using the specified nesting
145 down_read_nested(&eb->lock, nest); in btrfs_tree_read_lock_nested()
150 * Try-lock for read.
156 if (down_read_trylock(&eb->lock)) { in btrfs_try_tree_read_lock()
164 * Release read lock.
[all …]
/linux/drivers/acpi/acpica/
utlock.c
4 * Module Name: utlock - Reader/Writer lock interfaces
21 * PARAMETERS: lock - Pointer to a valid RW lock
25 * DESCRIPTION: Reader/writer lock creation and deletion interfaces.
28 acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock) in acpi_ut_create_rw_lock() argument
32 lock->num_readers = 0; in acpi_ut_create_rw_lock()
33 status = acpi_os_create_mutex(&lock->reader_mutex); in acpi_ut_create_rw_lock()
38 status = acpi_os_create_mutex(&lock->writer_mutex); in acpi_ut_create_rw_lock()
42 void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock) in acpi_ut_delete_rw_lock() argument
45 acpi_os_delete_mutex(lock->reader_mutex); in acpi_ut_delete_rw_lock()
46 acpi_os_delete_mutex(lock->writer_mutex); in acpi_ut_delete_rw_lock()
[all …]
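
utlock.c builds a reader/writer lock out of two mutexes plus a reader count: the first reader takes the writer mutex and the last reader releases it, while writers take the writer mutex directly. A pthreads sketch of that scheme, with rw_demo and the acquire/release helpers as illustrative names.

#include <pthread.h>

struct rw_demo {
        pthread_mutex_t reader_mutex;   /* protects num_readers */
        pthread_mutex_t writer_mutex;   /* held for the whole read or write side */
        int num_readers;
};

static void acquire_read(struct rw_demo *l)
{
        pthread_mutex_lock(&l->reader_mutex);
        if (++l->num_readers == 1)
                pthread_mutex_lock(&l->writer_mutex);   /* first reader blocks writers */
        pthread_mutex_unlock(&l->reader_mutex);
}

static void release_read(struct rw_demo *l)
{
        pthread_mutex_lock(&l->reader_mutex);
        if (--l->num_readers == 0)
                pthread_mutex_unlock(&l->writer_mutex); /* last reader lets writers in */
        pthread_mutex_unlock(&l->reader_mutex);
}

static void acquire_write(struct rw_demo *l)
{
        pthread_mutex_lock(&l->writer_mutex);
}

static void release_write(struct rw_demo *l)
{
        pthread_mutex_unlock(&l->writer_mutex);
}

Writers serialize against each other and against the first reader on writer_mutex; readers only contend briefly on reader_mutex while updating the count.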
/linux/fs/lockd/
svclock.c
46 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
86 * Insert a blocked lock into the global list
143 * Find a block for a given lock
146 nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) in nlmsvc_lookup_block() argument
152 file, lock->fl.c.flc_pid, in nlmsvc_lookup_block()
153 (long long)lock->fl.fl_start, in nlmsvc_lookup_block()
154 (long long)lock->fl.fl_end, in nlmsvc_lookup_block()
155 lock->fl.c.flc_type); in nlmsvc_lookup_block()
158 fl = &block->b_call->a_args.lock.fl; in nlmsvc_lookup_block()
164 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { in nlmsvc_lookup_block()
[all …]
