/linux/include/linux/

rwlock_api_smp.h:

```c
void __lockfunc _raw_read_lock(rwlock_t *lock)	__acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock)	__acquires(lock);
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)	__acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)	__acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)	__acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)	__acquires(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)	__acquires(lock);
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
						__acquires(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
```
[all …]

spinlock_api_up.h:

```c
#define assert_raw_spin_locked(lock)	do { (void)(lock); } while (0)

#define ___LOCK(lock) \
	do { __acquire(lock); (void)(lock); } while (0)

#define __LOCK(lock) \
	do { preempt_disable(); ___LOCK(lock); } while (0)

#define __LOCK_BH(lock) \
	do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)

#define __LOCK_IRQ(lock) \
	do { local_irq_disable(); __LOCK(lock); } while (0)

#define __LOCK_IRQSAVE(lock, flags) \
```
[all …]

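On a uniprocessor kernel there is no other CPU to exclude, so the macros above reduce "locking" to disabling preemption (and, for the `_IRQ`/`_BH` variants, interrupts or bottom halves). A hedged user-space sketch of that idea; the `toy_` names are invented for illustration, and the counter stands in for the kernel's per-CPU preempt count:

```c
#include <assert.h>

/* Toy model of the UP fast path: "locking" is just a preempt count.
 * The lock word is only touched to keep the compiler honest, exactly
 * like the (void)(lock) cast in ___LOCK() above.
 */
static int toy_preempt_count;

#define toy_lock(lock)   do { toy_preempt_count++; (void)(lock); } while (0)
#define toy_unlock(lock) do { (void)(lock); toy_preempt_count--; } while (0)

int main(void)
{
	int lock = 0;

	toy_lock(&lock);
	assert(toy_preempt_count == 1);	/* critical section: no preemption */
	toy_unlock(&lock);
	assert(toy_preempt_count == 0);
	return 0;
}
```
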
spinlock.h:

```c
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				 /* … */);

# define raw_spin_lock_init(lock)				\
	/* … */							\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
	/* … */

# define raw_spin_lock_init(lock)			\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
```

Additional matches in this file (definitions collapsed by the indexer): raw_spin_is_contended, do_raw_spin_lock, do_raw_spin_trylock, do_raw_spin_unlock, raw_spin_trylock, raw_spin_lock, raw_spin_lock_nested, raw_spin_lock_nest_lock, raw_spin_lock_irqsave, raw_spin_lock_irqsave_nested, raw_spin_lock_irq, raw_spin_lock_bh, raw_spin_unlock, raw_spin_unlock_irq, raw_spin_unlock_irqrestore, raw_spin_unlock_bh, raw_spin_trylock_bh, raw_spin_trylock_irq, raw_spin_trylock_irqsave, spinlock_check, spin_lock_init, spin_lock, spin_lock_bh, spin_trylock, spin_lock_nested, spin_lock_nest_lock, spin_lock_irq, spin_lock_irqsave, spin_lock_irqsave_nested, spin_unlock, spin_unlock_bh, spin_unlock_irq, spin_unlock_irqrestore, spin_trylock_bh, spin_trylock_irq, spin_trylock_irqsave, spin_is_locked, spin_is_contended, assert_spin_locked, spin_needbreak, rwlock_needbreak, atomic_dec_and_lock, atomic_dec_and_lock_irqsave, atomic_dec_and_raw_lock, atomic_dec_and_raw_lock_irqsave.
[all …]

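A minimal sketch of the canonical use of this API (kernel context assumed; `struct counter` is illustrative): initialize with spin_lock_init(), and use the `_irqsave` variant when the data is also touched from interrupt context.

```c
#include <linux/spinlock.h>

struct counter {
	spinlock_t lock;
	unsigned long value;
};

static void counter_init(struct counter *c)
{
	spin_lock_init(&c->lock);
	c->value = 0;
}

/* Safe from any context: saves and restores the caller's IRQ state. */
static void counter_add(struct counter *c, unsigned long n)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->value += n;
	spin_unlock_irqrestore(&c->lock, flags);
}
```
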
rwlock.h:

```c
extern void __rwlock_init(rwlock_t *lock, const char *name,
			  /* … */);
# define rwlock_init(lock)				\
	/* … */						\
	__rwlock_init((lock), #lock, &__key);		\
	/* … */

# define rwlock_init(lock) \
	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)

extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
extern int do_raw_write_trylock(rwlock_t *lock);
```
[all …]

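For context, the canonical use of the rwlock_t API declared above: many readers may hold the lock concurrently, while writers are exclusive. A minimal kernel-context sketch (`table_lock` and the array are illustrative):

```c
#include <linux/spinlock.h>	/* canonical include; rwlock.h is not used directly */

static DEFINE_RWLOCK(table_lock);
static int table[16];

static int table_read(int i)
{
	int v;

	read_lock(&table_lock);		/* shared: other readers may hold it too */
	v = table[i];
	read_unlock(&table_lock);
	return v;
}

static void table_write(int i, int v)
{
	write_lock(&table_lock);	/* exclusive: waits out all readers */
	table[i] = v;
	write_unlock(&table_lock);
}
```
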
spinlock_api_smp.h:

```c
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
						__acquires(lock);
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
						__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
						__acquires(lock);
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
						__acquires(lock);
```
[all …]

spinlock_up.h:

```c
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	/* … */
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	/* … */
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* … */
	lock->slock = 1;
}

#define arch_read_lock(lock)	do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)	do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
```
[all …]

seqlock.h:

```c
	__SEQ_LOCK(____s->lock = (_lock));				\

#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)

	__SEQ_LOCK(lockbase##_lock(s->lock));				\
	__SEQ_LOCK(lockbase##_unlock(s->lock));				\
	/* … */
	__SEQ_LOCK(lockdep_assert_held(s->lock));			\

/* in SEQCOUNT_LOCKNAME(): */
	__SEQ_LOCK(.lock = (assoc_lock))				\

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
```
[all …]

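The macros indexed above generate the `seqcount_LOCKNAME_t` glue for associating a lock with a sequence counter. A minimal sketch of the underlying read/retry protocol using the plain seqlock_t API (kernel context assumed; `clock_seq` and `clock_ns` are illustrative):

```c
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(clock_seq);
static u64 clock_ns;

static void clock_update(u64 now)
{
	write_seqlock(&clock_seq);	/* takes the spinlock, seq becomes odd */
	clock_ns = now;
	write_sequnlock(&clock_seq);	/* seq becomes even again */
}

static u64 clock_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqbegin(&clock_seq);
		v = clock_ns;
	} while (read_seqretry(&clock_seq, seq));	/* retry if a writer raced */

	return v;
}
```
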
/linux/kernel/locking/

ww_mutex.h:

```c
__ww_waiter_first(struct mutex *lock)
{
	/* … */
	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		/* … */
}

__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
	/* … */
	if (list_entry_is_head(w, &lock->wait_list, list))
		/* … */
}

__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
	/* … */
	if (list_entry_is_head(w, &lock->wait_list, list))
		/* … */
}

__ww_waiter_last(struct mutex *lock)
{
	/* … */
	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		/* … */
}
```
[all …]

qspinlock_paravirt.h:

```c
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/* … */
		int val = atomic_read(&lock->val);
		/* … */
		    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
		/* … */
}

static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
	/* … */
	return !READ_ONCE(lock->locked) &&
	       try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);
}

static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}
```
[all …]

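trylock_clear_pending() works because the locked and pending bytes are adjacent, so one cmpxchg on the 16-bit `locked_pending` half-word can transition pending→locked atomically. A hedged user-space analogue of that single-halfword cmpxchg with C11 atomics; the `toy_` layout mimics, but is not, the kernel's `struct qspinlock`, and the sketch shows the simpler unlocked→locked case:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative layout only: low byte = locked, next byte = pending. */
struct toy_qspinlock {
	_Atomic uint16_t locked_pending;
};

#define TOY_LOCKED_VAL 1u

/* Succeeds only when both the locked and pending bytes are zero; the
 * kernel variant cmpxchgs pending -> locked in the same single shot.
 */
static bool toy_trylock(struct toy_qspinlock *lock)
{
	uint16_t old = 0;

	return atomic_compare_exchange_strong_explicit(
		&lock->locked_pending, &old, TOY_LOCKED_VAL,
		memory_order_acquire, memory_order_relaxed);
}
```
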
qspinlock.c:

```c
#include <trace/events/lock.h>

/*
 * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
 * …
 * This queued spinlock implementation is based on the MCS lock, however to
 * …
 * In particular; where the traditional MCS lock consists of a tail pointer
 * …
 * number. With one byte for the lock value and 3 bytes for the tail, only a
 * 32-bit word is now needed. Even though we only need 1 bit for the lock,
 * …
 * We also change the first spinner to spin on the lock bit instead of its
 * node; whereby avoiding the need to carry a node from lock to unlock, and
 * preserving existing lock API.
 */
```

Additional matches in this file (definitions collapsed by the indexer): clear_pending, clear_pending_set_locked, xchg_tail, queued_fetch_set_pending_acquire, set_locked, __pv_kick_node, __pv_wait_head_or_lock, queued_spin_lock_slowpath.
[all …]

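The comment above describes the classic MCS queue lock that qspinlock compresses into a single 32-bit word. A self-contained user-space sketch of the textbook MCS algorithm with C11 atomics; all `mcs_*` names are illustrative, not kernel API:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_bool locked;		/* true while this waiter must spin */
};

struct mcs_lock {
	struct mcs_node *_Atomic tail;	/* the whole lock is just a tail pointer */
};

static void mcs_lock(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, true, memory_order_relaxed);

	/* Enqueue ourselves; the previous tail (if any) hands over later. */
	prev = atomic_exchange_explicit(&lock->tail, node, memory_order_acq_rel);
	if (prev) {
		atomic_store_explicit(&prev->next, node, memory_order_release);
		while (atomic_load_explicit(&node->locked, memory_order_acquire))
			;	/* local spinning: no cache-line bouncing */
	}
}

static void mcs_unlock(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No visible successor: try to swing the tail back to empty. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong_explicit(&lock->tail,
				&expected, NULL,
				memory_order_release, memory_order_relaxed))
			return;
		/* A successor is mid-enqueue; wait for its next-link. */
		while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
			;
	}
	atomic_store_explicit(&next->locked, false, memory_order_release);
}
```

The kernel's twist, per the comment: the tail pointer is encoded as a CPU number in the lock word itself, and the head waiter spins on the lock bit so no node needs to survive from lock to unlock.
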
/linux/arch/alpha/include/asm/

spinlock.h:

```c
#define arch_spin_is_locked(x)	((x)->lock != 0)

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	/* … */
	lock->lock = 0;
}

static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	/* … */
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return !test_and_set_bit(0, &lock->lock);
}
```
[all …]

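The Alpha trylock is a plain test-and-set on bit 0 of the lock word, with the ll/sc assembly in arch_spin_lock() spinning until that bit clears. A self-contained C11 sketch of the same scheme (the `tas_` names are illustrative; initialize `held` with ATOMIC_FLAG_INIT):

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_flag held;	/* set while the lock is owned */
} tas_lock_t;

static bool tas_trylock(tas_lock_t *lock)
{
	/* Old value false => we took it, like !test_and_set_bit(0, ...). */
	return !atomic_flag_test_and_set_explicit(&lock->held,
						  memory_order_acquire);
}

static void tas_lock(tas_lock_t *lock)
{
	while (!tas_trylock(lock))
		;	/* spin until the holder clears the flag */
}

static void tas_unlock(tas_lock_t *lock)
{
	atomic_flag_clear_explicit(&lock->held, memory_order_release);
}
```
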
/linux/drivers/md/persistent-data/

dm-block-manager.c:

```c
	spinlock_t lock;	/* member of struct block_lock */
	/* … */

static unsigned int __find_holder(struct block_lock *lock,
				  /* … */)
{
	/* … */
		if (lock->holders[i] == task)
	/* … */
}

static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned int h = __find_holder(lock, NULL);
	/* … */
	lock->holders[h] = task;
	/* … */
	t = lock->traces + h;
	/* … */
}

static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned int h = __find_holder(lock, task);
	/* … */
	lock->holders[h] = NULL;
}
```
[all …]

/linux/fs/ocfs2/dlm/

dlmast.c:

```c
			   struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	/* … */
	assert_spin_locked(&lock->spinlock);

	if (lock->ml.highest_blocked == LKM_IVMODE)
		/* … */
	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

	if (lock->bast_pending &&
	    list_empty(&lock->bast_list))
		/* … */

	if (lock->ml.type == LKM_EXMODE)
		/* … */
	else if (lock->ml.type == LKM_NLMODE)
		/* … */
}
```
[all …]

dlmlock.c:

```c
			  struct dlm_lock *lock, int flags);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);

/* in dlm_can_grant_new_lock(): */
			  struct dlm_lock *lock)
	/* … */
	if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
	/* … */
	if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
	/* … */
				  lock->ml.type))

/* in dlmlock_master(): */
			  struct dlm_lock *lock, int flags)
	mlog(0, "type=%d\n", lock->ml.type);
	/* … */
	    lock->ml.node != dlm->node_num) {
	/* … */
	if (dlm_can_grant_new_lock(res, lock)) {
	/* … */
```
[all …]

dlmconvert.c:

```c
			     struct dlm_lock *lock, int flags,
			     /* … */
			     struct dlm_lock *lock, int flags, int type);

/* in dlmconvert_master(): */
			   struct dlm_lock *lock, int flags, int type)
	/* … */
	status = __dlmconvert_master(dlm, res, lock, flags, type,
	/* … */
	dlm_queue_ast(dlm, lock);
	/* … */

/* in __dlmconvert_master(): */
			     struct dlm_lock *lock, int flags,
	/* … */
	     lock->ml.type, lock->ml.convert_type, type);

	spin_lock(&lock->spinlock);
	/* … */
	if (lock->ml.convert_type != LKM_IVMODE) {
	/* … */
	if (!dlm_lock_on_list(&res->granted, lock)) {
	/* … */
```
[all …]

dlmunlock.c:

```c
			   struct dlm_lock *lock,
			   /* … */
			   struct dlm_lock *lock,
			   /* … */
			   struct dlm_lock *lock,

/* in dlmunlock_common(): */
			   struct dlm_lock *lock,
	/* … */
	in_use = !list_empty(&lock->ast_list);
	/* … */
	spin_lock(&lock->spinlock);
	/* … */
	status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
	/* … */
	status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
	/* … */
	lock->cancel_pending = 1;
	/* … */
	lock->unlock_pending = 1;
	/* … */
```
[all …]

/linux/arch/hexagon/include/asm/

spinlock.h:

```c
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	/* … */
	: "r" (&lock->lock)
	/* … */
}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	/* … */
	: "r" (&lock->lock)
	/* … */
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	/* … */
	: "r" (&lock->lock)
	/* … */
}

static inline void arch_write_lock(arch_rwlock_t *lock)
{
	/* … */
	: "r" (&lock->lock)
	/* … */
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	/* … */
	: "r" (&lock->lock)
	/* … */
}
```
[all …]

/linux/arch/powerpc/include/asm/

simple_spinlock.h:

```c
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	/* … */
	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)
	/* … */
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
```
[all …]

/linux/drivers/acpi/acpica/

utlock.c:

```c
acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock)
{
	/* … */
	lock->num_readers = 0;
	status = acpi_os_create_mutex(&lock->reader_mutex);
	/* … */
	status = acpi_os_create_mutex(&lock->writer_mutex);
	/* … */
}

void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
{
	acpi_os_delete_mutex(lock->reader_mutex);
	acpi_os_delete_mutex(lock->writer_mutex);

	lock->num_readers = 0;
	lock->reader_mutex = NULL;
	lock->writer_mutex = NULL;
}
```
[all …]

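This is the classic "readers-writer lock built from two primitives" pattern: the first reader closes the writer gate, the last reader opens it, and writers simply take the gate. A hedged user-space sketch of the same idea; the `toy_` names are illustrative, not ACPICA API, and a semaphore is used for the gate because, unlike a pthread mutex, it may be released by a different thread than the one that acquired it:

```c
#include <pthread.h>
#include <semaphore.h>

struct toy_rw_lock {
	pthread_mutex_t reader_mutex;	/* serializes num_readers updates */
	sem_t writer_gate;		/* 0 while readers or a writer are active */
	int num_readers;
};

static void toy_rw_init(struct toy_rw_lock *lock)
{
	pthread_mutex_init(&lock->reader_mutex, NULL);
	sem_init(&lock->writer_gate, 0, 1);
	lock->num_readers = 0;
}

static void toy_read_lock(struct toy_rw_lock *lock)
{
	pthread_mutex_lock(&lock->reader_mutex);
	if (++lock->num_readers == 1)
		sem_wait(&lock->writer_gate);	/* first reader blocks writers */
	pthread_mutex_unlock(&lock->reader_mutex);
}

static void toy_read_unlock(struct toy_rw_lock *lock)
{
	pthread_mutex_lock(&lock->reader_mutex);
	if (--lock->num_readers == 0)
		sem_post(&lock->writer_gate);	/* last reader readmits writers */
	pthread_mutex_unlock(&lock->reader_mutex);
}

static void toy_write_lock(struct toy_rw_lock *lock)   { sem_wait(&lock->writer_gate); }
static void toy_write_unlock(struct toy_rw_lock *lock) { sem_post(&lock->writer_gate); }
```
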
/linux/tools/testing/selftests/bpf/progs/

linked_list.c:

```c
	struct bpf_spin_lock lock;	/* member */
	/* … */

int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
	/* … */
	bpf_spin_lock(lock);
	/* … */
	bpf_spin_unlock(lock);
	/* … */
	bpf_spin_lock(lock);
	/* … */
	bpf_spin_unlock(lock);
	/* … */
	bpf_spin_lock(lock);
	/* … */
	bpf_spin_unlock(lock);
	/* … */
	bpf_spin_lock(lock);
	/* … */
	bpf_spin_unlock(lock);
	/* … */
}
```
[all …]

/linux/drivers/md/dm-vdo/

dedupe.c:

```c
	spinlock_t lock;	/* member */
	/* … */

static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock)
{
	memset(lock, 0, sizeof(*lock));
	INIT_LIST_HEAD(&lock->pool_node);
	INIT_LIST_HEAD(&lock->duplicate_vios);
	vdo_waitq_init(&lock->waiters);
	list_add_tail(&lock->pool_node, &zone->lock_pool);
}

static inline u64 hash_lock_key(struct hash_lock *lock)
{
	return get_unaligned_le64(&lock->hash.name);
}

static inline struct data_vio *dequeue_lock_waiter(struct hash_lock *lock)
```
[all …]

physical-zone.c:

```c
static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type type)
{
	return (lock->implementation == &LOCK_IMPLEMENTATIONS[type]);
}

bool vdo_is_pbn_read_lock(const struct pbn_lock *lock)
{
	return has_lock_type(lock, VIO_READ_LOCK);
}

static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type type)
{
	lock->implementation = &LOCK_IMPLEMENTATIONS[type];
}

void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
{
	VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
			    /* … */
	VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
			    /* … */
	VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
			    /* … */
}
```
[all …]

/linux/include/trace/events/

lock.h:

```c
#define TRACE_SYSTEM lock

/* flags for lock:contention_begin */

	TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
		 /* … */

	TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),

		__string(name, lock->name)
	/* … */
		__entry->lockdep_addr = lock;
	/* … */

DECLARE_EVENT_CLASS(lock,

	TP_PROTO(struct lockdep_map *lock, unsigned long ip),

	TP_ARGS(lock, ip),

		__string( name, lock
```
[all …]

/linux/arch/arm/include/asm/

spinlock.h:

```c
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	/* … */
	prefetchw(&lock->slock);
	/* … */
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	/* … */
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
	/* … */
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	/* … */
	prefetchw(&lock->slock);
	/* … */
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	/* … */
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* … */
	lock->tickets.owner++;
	/* … */
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
```
[all …]

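The ARM code implements a ticket lock: lock() atomically takes the next ticket and spins until the owner field reaches it; unlock() bumps owner, so FIFO fairness falls out for free. A self-contained C11 sketch of the same scheme (the `ticket_` names are illustrative, not the kernel's arch_spinlock_t):

```c
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t next;	/* next ticket to hand out */
	_Atomic uint16_t owner;	/* ticket currently being served */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	uint16_t ticket = atomic_fetch_add_explicit(&lock->next, 1,
						    memory_order_relaxed);

	while (atomic_load_explicit(&lock->owner, memory_order_acquire) != ticket)
		;	/* the ARM code sleeps in wfe here instead of busy-spinning */
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* Only the holder writes owner, so load + store (no RMW) suffices. */
	uint16_t owner = atomic_load_explicit(&lock->owner, memory_order_relaxed);

	atomic_store_explicit(&lock->owner, owner + 1, memory_order_release);
}
```
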
/linux/tools/perf/tests/shell/

lock_contention.sh:

```sh
if ! perf list tracepoint | grep -q lock:contention_begin; then
	# …

perf lock record -o ${perfdata} -- perf bench sched messaging -p > /dev/null 2>&1
perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
# …
if ! perf lock con -b true > /dev/null 2>&1 ; then
	# …
perf lock con -a -b -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
# …
perf lock record -o- -- perf bench sched messaging -p 2> ${errout} | \
	perf lock contention -i- -E 1 -q 2> ${result}
# …
perf lock contention -i ${perfdata} -t -E 1 -q 2> ${result}
# …
if ! perf lock con -b true > /dev/null 2>&1 ; then
	# …
perf lock con -a -b -t -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
```
[all …]