
Searched full:lock (Results 1 – 25 of 4341) sorted by relevance


/linux/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
21 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
24 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
25 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
26 __acquires(lock);
27 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
[all …]
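These _raw_* declarations are the out-of-line SMP back ends behind the public read_lock()/write_lock() API. A minimal usage sketch of that public API (the counter names are hypothetical, not from this file):

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(counter_rwlock);	/* hypothetical lock */
	static int counter_value;		/* hypothetical shared data */

	/* Many readers may hold the lock concurrently. */
	static int counter_read(void)
	{
		int v;

		read_lock(&counter_rwlock);
		v = counter_value;
		read_unlock(&counter_rwlock);
		return v;
	}

	/* Writers are exclusive; the _irqsave variant also masks local IRQs. */
	static void counter_set(int v)
	{
		unsigned long flags;

		write_lock_irqsave(&counter_rwlock, flags);
		counter_value = v;
		write_unlock_irqrestore(&counter_rwlock, flags);
	}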
spinlock_api_up.h
19 #define assert_raw_spin_locked(lock)	do { (void)(lock); } while (0)
24  * flags straight, to suppress compiler warnings of unused lock
27 #define ___LOCK(lock) \
28 	do { __acquire(lock); (void)(lock); } while (0)
30 #define __LOCK(lock) \
31 	do { preempt_disable(); ___LOCK(lock); } while (0)
33 #define __LOCK_BH(lock) \
34 	do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
36 #define __LOCK_IRQ(lock) \
37 	do { local_irq_disable(); __LOCK(lock); } while (0)
[all …]
spinlock.h
72 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
101 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
104 # define raw_spin_lock_init(lock) \
108 	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
112 # define raw_spin_lock_init(lock) \
113 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
116 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
119 raw_spin_is_contended(lock)
121 raw_spin_is_contended(lock)
184 do_raw_spin_lock(raw_spinlock_t *lock)
191 do_raw_spin_trylock(raw_spinlock_t *lock)
201 do_raw_spin_unlock(raw_spinlock_t *lock)
215 raw_spin_trylock(lock)
217 raw_spin_lock(lock)
220 raw_spin_lock_nested(lock, subclass)
223 raw_spin_lock_nest_lock(lock, nest_lock)
234 raw_spin_lock_nested(lock, subclass)
236 raw_spin_lock_nest_lock(lock, nest_lock)
241 raw_spin_lock_irqsave(lock, flags)
248 raw_spin_lock_irqsave_nested(lock, flags, subclass)
254 raw_spin_lock_irqsave_nested(lock, flags, subclass)
263 raw_spin_lock_irqsave(lock, flags)
269 raw_spin_lock_irqsave_nested(lock, flags, subclass)
274 raw_spin_lock_irq(lock)
275 raw_spin_lock_bh(lock)
276 raw_spin_unlock(lock)
277 raw_spin_unlock_irq(lock)
279 raw_spin_unlock_irqrestore(lock, flags)
284 raw_spin_unlock_bh(lock)
286 raw_spin_trylock_bh(lock)
289 raw_spin_trylock_irq(lock)
296 raw_spin_trylock_irqsave(lock, flags)
324 spinlock_check(spinlock_t *lock)
331 spin_lock_init(lock)
349 spin_lock(spinlock_t *lock)
354 spin_lock_bh(spinlock_t *lock)
359 spin_trylock(spinlock_t *lock)
364 spin_lock_nested(lock, subclass)
369 spin_lock_nest_lock(lock, nest_lock)
374 spin_lock_irq(spinlock_t *lock)
379 spin_lock_irqsave(lock, flags)
384 spin_lock_irqsave_nested(lock, flags, subclass)
389 spin_unlock(spinlock_t *lock)
394 spin_unlock_bh(spinlock_t *lock)
399 spin_unlock_irq(spinlock_t *lock)
404 spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
409 spin_trylock_bh(spinlock_t *lock)
414 spin_trylock_irq(spinlock_t *lock)
419 spin_trylock_irqsave(lock, flags)
442 spin_is_locked(spinlock_t *lock)
447 spin_is_contended(spinlock_t *lock)
452 assert_spin_locked(lock)
463 spin_needbreak(spinlock_t *lock)
479 rwlock_needbreak(rwlock_t *lock)
501 atomic_dec_and_lock(atomic, lock)
506 atomic_dec_and_lock_irqsave(atomic, lock, flags)
510 atomic_dec_and_raw_lock(atomic, lock)
515 atomic_dec_and_raw_lock_irqsave(atomic, lock, flags)
[all …]
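A minimal sketch of how the spin_lock() variants indexed above are chosen in practice (the dev_* names are hypothetical): plain spin_lock() when the data is only touched from process context, _irqsave when it is shared with hardirq handlers, _bh when it is shared with softirqs only.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(dev_lock);	/* hypothetical lock */
	static unsigned int dev_events;		/* hypothetical shared state */

	/* Process context only. */
	static void event_add(void)
	{
		spin_lock(&dev_lock);
		dev_events++;
		spin_unlock(&dev_lock);
	}

	/* Data also touched by an interrupt handler: save and disable IRQs. */
	static void event_add_any_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev_lock, flags);
		dev_events++;
		spin_unlock_irqrestore(&dev_lock, flags);
	}

	/* Data shared with softirqs only: _bh is sufficient and cheaper. */
	static void event_add_vs_softirq(void)
	{
		spin_lock_bh(&dev_lock);
		dev_events++;
		spin_unlock_bh(&dev_lock);
	}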
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \
24 	__rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \
28 	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 extern int do_raw_read_trylock(rwlock_t *lock);
34 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
35 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
36 extern int do_raw_write_trylock(rwlock_t *lock);
[all …]
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
local_lock.h
8  * local_lock_init - Runtime initialize a lock instance
10 #define local_lock_init(lock)		__local_lock_init(lock)
13 * local_lock - Acquire a per CPU local lock
14 * @lock: The lock variable
16 #define local_lock(lock)		__local_lock(lock)
19 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
20 * @lock: The lock variable
22 #define local_lock_irq(lock)		__local_lock_irq(lock)
25 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
27 * @lock: The lock variable
[all …]
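A minimal usage sketch of this per-CPU locking API, assuming a hypothetical per-CPU stats structure (on PREEMPT_RT a local_lock_t is a real lock; elsewhere it reduces to preemption/IRQ disabling plus lockdep coverage):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	/* Hypothetical per-CPU structure; the lock covers this CPU's copy. */
	struct cpu_stats {
		local_lock_t	lock;
		u64		packets;
	};

	static DEFINE_PER_CPU(struct cpu_stats, cpu_stats) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void stats_inc(void)
	{
		local_lock(&cpu_stats.lock);	/* acquire this CPU's instance */
		this_cpu_inc(cpu_stats.packets);
		local_unlock(&cpu_stats.lock);
	}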
local_lock_internal.h
73 #define __local_lock_init(lock) \
77 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
78 	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
81 	local_lock_debug_init(lock); \
84 #define __local_trylock_init(lock)	__local_lock_init(lock.llock)
86 #define __spinlock_nested_bh_init(lock) \
90 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
91 	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
94 	local_lock_debug_init(lock); \
97 #define __local_lock_acquire(lock) \
[all …]
spinlock_up.h
29 static inline void arch_spin_lock(arch_spinlock_t *lock)
31 	lock->slock = 0;		in arch_spin_lock()
35 static inline int arch_spin_trylock(arch_spinlock_t *lock)
37 	char oldval = lock->slock;	in arch_spin_trylock()
39 	lock->slock = 0;		in arch_spin_trylock()
45 static inline void arch_spin_unlock(arch_spinlock_t *lock)
48 	lock->slock = 1;		in arch_spin_unlock()
54 #define arch_read_lock(lock)	do { barrier(); (void)(lock); } while (0)
55 #define arch_write_lock(lock)	do { barrier(); (void)(lock); } while (0)
56 #define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
[all …]
/linux/kernel/locking/
spinlock_debug.c
17 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
22 * Make sure we are not reinitializing a held lock: in __raw_spin_lock_init()
24 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); in __raw_spin_lock_init()
25 lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner); in __raw_spin_lock_init()
27 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in __raw_spin_lock_init()
28 lock->magic = SPINLOCK_MAGIC; in __raw_spin_lock_init()
29 lock->owner = SPINLOCK_OWNER_INIT; in __raw_spin_lock_init()
30 lock->owner_cpu = -1; in __raw_spin_lock_init()
36 void __rwlock_init(rwlock_t *lock, const char *name, in __rwlock_init() argument
41 * Make sure we are not reinitializing a held lock: in __rwlock_init()
[all …]
rtmutex_api.c
17  * Debug aware fast / slowpath lock,trylock,unlock
22 static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
30 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);	in __rt_mutex_lock_common()
31 	ret = __rt_mutex_lock(&lock->rtmutex, state);	in __rt_mutex_lock_common()
33 	mutex_release(&lock->dep_map, _RET_IP_);	in __rt_mutex_lock_common()
45  * rt_mutex_lock_nested - lock a rt_mutex
47  * @lock: the rt_mutex to be locked
50 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
52 	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);	in rt_mutex_lock_nested()
56 void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
69 rt_mutex_lock(struct rt_mutex *lock)
85 rt_mutex_lock_interruptible(struct rt_mutex *lock)
100 rt_mutex_lock_killable(struct rt_mutex *lock)
118 rt_mutex_trylock(struct rt_mutex *lock)
138 rt_mutex_unlock(struct rt_mutex *lock)
148 rt_mutex_futex_trylock(struct rt_mutex_base *lock)
153 __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
165 __rt_mutex_futex_unlock(struct rt_mutex_base *lock, struct rt_wake_q_head *wqh)
188 rt_mutex_futex_unlock(struct rt_mutex_base *lock)
213 __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
236 rt_mutex_init_proxy_locked(struct rt_mutex_base *lock, struct task_struct *proxy_owner)
267 rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
293 __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, struct wake_q_head *wake_q)
341 rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task)
377 rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock, struct hrtimer_sleeper *to, struct rt_mutex_waiter *waiter)
417 rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
508 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip)
527 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
533 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
540 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
547 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
554 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
568 mutex_lock(struct mutex *lock)
574 mutex_lock_interruptible(struct mutex *lock)
580 mutex_lock_killable(struct mutex *lock)
586 mutex_lock_io(struct mutex *lock)
596 mutex_trylock(struct mutex *lock)
611 mutex_unlock(struct mutex *lock)
[all …]
mutex.c
35 #include <trace/events/lock.h>
47 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
49 	atomic_long_set(&lock->owner, 0);	in __mutex_init()
50 	raw_spin_lock_init(&lock->wait_lock);	in __mutex_init()
51 	INIT_LIST_HEAD(&lock->wait_list);	in __mutex_init()
53 	osq_lock_init(&lock->osq);		in __mutex_init()
56 	debug_mutex_init(lock, name, key);	in __mutex_init()
65 bool mutex_is_locked(struct mutex *lock)
67 	return __mutex_owner(lock) != NULL;	in mutex_is_locked()
77 unsigned long mutex_get_owner(struct mutex *lock)
87 __mutex_trylock_common(struct mutex *lock, bool handoff)
126 __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
134 __mutex_trylock(struct mutex *lock)
150 __mutex_trylock_fast(struct mutex *lock)
163 __mutex_unlock_fast(struct mutex *lock)
171 __mutex_set_flag(struct mutex *lock, unsigned long flag)
176 __mutex_clear_flag(struct mutex *lock, unsigned long flag)
181 __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
191 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct list_head *list)
205 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
223 __mutex_handoff(struct mutex *lock, struct task_struct *task)
273 mutex_lock(struct mutex *lock)
290 __mutex_trylock_or_owner(struct mutex *lock)
296 ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
344 mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
384 mutex_can_spin_on_owner(struct mutex *lock)
433 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
509 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
534 mutex_unlock(struct mutex *lock)
555 ww_mutex_unlock(struct ww_mutex *lock)
566 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
744 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip)
751 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, unsigned long ip, struct ww_acquire_ctx *ww_ctx)
797 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
805 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
812 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
819 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
826 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
840 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
866 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
881 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
901 __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
976 mutex_lock_interruptible(struct mutex *lock)
1000 mutex_lock_killable(struct mutex *lock)
1021 mutex_lock_io(struct mutex *lock)
1032 __mutex_lock_slowpath(struct mutex *lock)
1038 __mutex_lock_killable_slowpath(struct mutex *lock)
1044 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1050 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1057 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1080 mutex_trylock(struct mutex *lock)
1096 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1111 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1138 atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
[all …]
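For context, a minimal sketch of the public mutex API whose implementation is indexed above (the cfg_* names are hypothetical). A mutex may sleep, so it is valid only in process context:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(cfg_mutex);	/* hypothetical lock */
	static int cfg_value;		/* hypothetical shared state */

	static void cfg_set(int v)
	{
		mutex_lock(&cfg_mutex);		/* uninterruptible sleep */
		cfg_value = v;
		mutex_unlock(&cfg_mutex);
	}

	static int cfg_set_interruptible(int v)
	{
		int ret;

		/* Returns -EINTR instead of sleeping through a signal. */
		ret = mutex_lock_interruptible(&cfg_mutex);
		if (ret)
			return ret;
		cfg_value = v;
		mutex_unlock(&cfg_mutex);
		return 0;
	}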
ww_mutex.h
9 __ww_waiter_first(struct mutex *lock)
13 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_first()
14 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_first()
21 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_next() argument
24 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_next()
31 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_prev() argument
34 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_prev()
41 __ww_waiter_last(struct mutex *lock) in __ww_waiter_last() argument
45 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_last()
46 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_last()
[all …]
qspinlock_paravirt.h
30  * not running. The one lock stealing attempt allowed at slowpath entry
57 * Hybrid PV queued/unfair lock
60 * it will be called once when a lock waiter enter the PV slowpath before
64 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
65 * When that bit becomes visible to the incoming waiters, no lock stealing
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
69 * and hence disabling lock stealing.
71 * When the pending bit isn't set, the lock waiters will stay in the unfair
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
73 * case, the lock waiters will enter the queued mode slowpath trying to
[all …]
/linux/fs/bcachefs/
six.c
14 #include <trace/events/lock.h>
27 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
38 /* Value we add to the lock in order to take the lock: */
41 /* If the lock has this value (used as a mask), taking the lock fails: */
44 /* Mask that indicates lock is held for this type: */
47 /* Waitlist we wakeup when releasing the lock: */
72 static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
74 	if ((atomic_read(&lock->state) & mask) != mask)	in six_set_bitmask()
75 		atomic_or(mask, &lock->state);		in six_set_bitmask()
78 static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
[all …]
six.h
14  * write lock without deadlocking, so an operation that updates multiple nodes
23 * six_lock_read(&foo->lock);
24 * six_unlock_read(&foo->lock);
26 * An intent lock must be held before taking a write lock:
27 * six_lock_intent(&foo->lock);
28 * six_lock_write(&foo->lock);
29 * six_unlock_write(&foo->lock);
30 * six_unlock_intent(&foo->lock);
40 * There are also interfaces that take the lock type as an enum:
42 * six_lock_type(&foo->lock, SIX_LOCK_read);
[all …]
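Putting the usage lines quoted in this header comment together into one sketch (struct foo and its payload are hypothetical; the calls are taken verbatim from the quoted comment):

	#include "six.h"	/* bcachefs-local header */

	struct foo {
		struct six_lock	lock;
		int		data;	/* hypothetical payload */
	};

	static int foo_read(struct foo *foo)
	{
		int v;

		six_lock_read(&foo->lock);	/* shared mode */
		v = foo->data;
		six_unlock_read(&foo->lock);
		return v;
	}

	static void foo_update(struct foo *foo, int v)
	{
		/* An intent lock must be held before taking a write lock. */
		six_lock_intent(&foo->lock);
		six_lock_write(&foo->lock);
		foo->data = v;
		six_unlock_write(&foo->lock);
		six_unlock_intent(&foo->lock);
	}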
/linux/drivers/md/dm-vdo/
dedupe.c
14  * deduplicate against a single block instead of being serialized through a PBN read lock. Only one
22 * A hash_lock acts like a state machine perhaps more than as a lock. Other than the starting and
25 * containing the lock. An asynchronous operation is almost always performed upon entering a state,
28 * In all states except DEDUPING, there is a single data_vio, called the lock agent, performing the
29 * asynchronous operations on behalf of the lock. The agent will change during the lifetime of the
30 * lock if the lock is shared by more than one data_vio. data_vios waiting to deduplicate are kept
31 * on a wait queue. Viewed a different way, the agent holds the lock exclusively until the lock
32 * enters the DEDUPING state, at which point it becomes a shared lock that all the waiters (and any
33 * new data_vios that arrive) use to share a PBN lock. In state DEDUPING, there is no agent. When
34 * the last data_vio in the lock calls back in DEDUPING, it becomes the agent and the lock becomes
[all …]
/linux/fs/ocfs2/dlm/
dlmast.c
35 	struct dlm_lock *lock);
36 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
39 * lock level will obsolete a pending bast.
40 * For example, if dlm_thread queued a bast for an EX lock that
42 * lock owner downconverted to NL, the bast is now obsolete.
44 * This is needed because the lock and convert paths can queue
47 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
50 assert_spin_locked(&lock->spinlock); in dlm_should_cancel_bast()
52 if (lock->ml.highest_blocked == LKM_IVMODE) in dlm_should_cancel_bast()
54 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); in dlm_should_cancel_bast()
[all …]
dlmlock.c
5  * underlying calls for lock creation
45 struct dlm_lock *lock, int flags);
49 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
66 /* Tell us whether we can grant a new lock request.
71 * returns: 1 if the lock can be granted, 0 otherwise.
74 	struct dlm_lock *lock)	in dlm_can_grant_new_lock()
79 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
84 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
87 lock->ml.type)) in dlm_can_grant_new_lock()
94 /* performs lock creation at the lockres master site
[all …]
dlmconvert.c
5  * underlying calls for lock conversion
38 * only one that holds a lock on exit (res->spinlock).
43 struct dlm_lock *lock, int flags,
48 struct dlm_lock *lock, int flags, int type);
61 	struct dlm_lock *lock, int flags, int type)	in dlmconvert_master()
72 status = __dlmconvert_master(dlm, res, lock, flags, type, in dlmconvert_master()
83 dlm_queue_ast(dlm, lock); in dlmconvert_master()
93 /* performs lock conversion at the lockres master site
96 * taken: takes and drops lock->spinlock
99 * call_ast: whether ast should be called for this lock
[all …]
/linux/drivers/md/persistent-data/
dm-block-manager.c
32  * trace is also emitted for the previous lock acquisition.
45 	spinlock_t lock;
61 static unsigned int __find_holder(struct block_lock *lock,
67 	if (lock->holders[i] == task)	in __find_holder()
74 /* call this *after* you increment lock->count */
75 static void __add_holder(struct block_lock *lock, struct task_struct *task)
77 unsigned int h = __find_holder(lock, NULL); in __add_holder()
83 lock->holders[h] = task; in __add_holder()
86 t = lock->traces + h; in __add_holder()
91 /* call this *before* you decrement lock->count */
[all …]
/linux/include/asm-generic/
qrwlock.h
3  * Queue read/write lock
28 #define _QW_LOCKED 0x0ff /* A writer holds the lock */
36 extern void queued_read_lock_slowpath(struct qrwlock *lock);
37 extern void queued_write_lock_slowpath(struct qrwlock *lock);
40 * queued_read_trylock - try to acquire read lock of a queued rwlock
41 * @lock : Pointer to queued rwlock structure
42 * Return: 1 if lock acquired, 0 if failed
44 static inline int queued_read_trylock(struct qrwlock *lock)
48 cnts = atomic_read(&lock->cnts); in queued_read_trylock()
50 cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); in queued_read_trylock()
[all …]
/linux/Documentation/locking/
lockdep-design.rst
8 Lock-class
15 tens of thousands of) instantiations. For example a lock in the inode
17 lock class.
19 The validator tracks the 'usage state' of lock-classes, and it tracks
20 the dependencies between different lock-classes. Lock usage indicates
21 how a lock is used with regard to its IRQ contexts, while lock
22 dependency can be understood as lock order, where L1 -> L2 suggests that
26 continuing effort to prove lock usages and dependencies are correct or
29 A lock-class's behavior is constructed by its instances collectively:
30 when the first instance of a lock-class is used after bootup the class
[all …]
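A sketch of the instance-vs-class distinction described above (the inode-style structure is hypothetical): every lock initialized at the same spin_lock_init() call site shares the static lock_class_key that the macro declares there, so lockdep folds all those instances into a single class.

	#include <linux/spinlock.h>

	struct my_inode {		/* hypothetical structure */
		spinlock_t	i_lock;
	};

	static void my_inode_init(struct my_inode *inode)
	{
		/*
		 * With lockdep enabled, spin_lock_init() declares a static
		 * lock_class_key at this call site; the thousands of
		 * my_inode instances initialized here therefore form ONE
		 * lock-class, and usage state is tracked per class rather
		 * than per instance.
		 */
		spin_lock_init(&inode->i_lock);
	}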
robust-futex-ABI.rst
56 pointer to a single linked list of 'lock entries', one per lock,
58 to itself, 'head'. The last 'lock entry' points back to the 'head'.
61 address of the associated 'lock entry', plus or minus, of what will
62 be called the 'lock word', from that 'lock entry'. The 'lock word'
63 is always a 32 bit word, unlike the other words above. The 'lock
65 of the thread holding the lock in the bottom 30 bits. See further
69 the address of the 'lock entry', during list insertion and removal,
73 Each 'lock entry' on the single linked list starting at 'head' consists
74 of just a single word, pointing to the next 'lock entry', or back to
75 'head' if there are no more entries. In addition, nearby to each 'lock
[all …]
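The 32-bit 'lock word' layout described above can be decoded with the constants exported in the uapi <linux/futex.h>; the helper below is a hypothetical illustration, not part of the document.

	#include <stdint.h>
	#include <linux/futex.h>	/* FUTEX_WAITERS, FUTEX_OWNER_DIED, FUTEX_TID_MASK */

	/* Hypothetical decoder for one robust-futex lock word. */
	static void decode_lock_word(uint32_t word)
	{
		uint32_t owner_tid = word & FUTEX_TID_MASK;	/* bottom 30 bits: TID of holder */
		int has_waiters = !!(word & FUTEX_WAITERS);	/* top bit: kernel must wake waiters */
		int owner_died = !!(word & FUTEX_OWNER_DIED);	/* set when the holder exits */

		/*
		 * A word of 0 means unlocked: userspace acquires the lock with
		 * a single atomic compare-and-swap of 0 -> its own TID.
		 */
		(void)owner_tid; (void)has_waiters; (void)owner_died;
	}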
/linux/arch/powerpc/include/asm/
simple_spinlock.h
6  * Simple spin lock operations.
35 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
37 	return lock.slock == 0;	in arch_spin_value_unlocked()
40 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
42 	return !arch_spin_value_unlocked(READ_ONCE(*lock));	in arch_spin_is_locked()
46  * This returns the old value in the lock, so we succeeded
47  * in getting the lock if the return value is 0.
49 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
64 	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)	in __arch_spin_trylock()
70 static inline int arch_spin_trylock(arch_spinlock_t *lock)
[all …]
/linux/tools/testing/selftests/bpf/progs/
linked_list.c
12 	struct bpf_spin_lock lock;
28 int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
37 bpf_spin_lock(lock); in list_push_pop()
39 bpf_spin_unlock(lock); in list_push_pop()
46 bpf_spin_lock(lock); in list_push_pop()
48 bpf_spin_unlock(lock); in list_push_pop()
56 bpf_spin_lock(lock); in list_push_pop()
59 bpf_spin_unlock(lock); in list_push_pop()
62 bpf_spin_lock(lock); in list_push_pop()
64 bpf_spin_unlock(lock); in list_push_pop()
[all …]
