Searched refs:_Q_LOCKED_VAL (Results 1 – 12 of 12) sorted by relevance
/linux/kernel/locking/
qspinlock_paravirt.h
     92  try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {   [in pv_hybrid_queued_unfair_trylock()]
    125  try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);   [in trylock_clear_pending()]
    144  new = (old & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;   [in trylock_clear_pending()]
    458  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);   [in pv_wait_head_or_lock()]
    481  return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);   [in pv_wait_head_or_lock()]
    545  u8 locked = _Q_LOCKED_VAL;   [in __pv_queued_spin_unlock()]
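The trylock_clear_pending() hits at lines 125 and 144 show the pending-to-locked hand-off: once the lock byte is free, the pending waiter clears _Q_PENDING_MASK and sets _Q_LOCKED_VAL in a single acquire cmpxchg. Below is a minimal user-space C11 sketch of that transition only; the struct and masks are simplified stand-ins assuming the common sub-16K-CPU layout (8-bit locked byte at bit 0, 8-bit pending byte at bit 8), not the kernel's actual types.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL	(1U << 0)	/* lock byte, bits 0-7 */
#define _Q_LOCKED_MASK	0xffU
#define _Q_PENDING_MASK	(0xffU << 8)	/* pending byte, bits 8-15 (assumed layout) */

/* User-space stand-in for the kernel's 32-bit qspinlock word. */
struct qspinlock { _Atomic uint32_t val; };

/*
 * While nobody owns the lock byte, try to clear our pending bit and set
 * _Q_LOCKED_VAL in one acquire cmpxchg; give up once another CPU takes
 * the lock byte first.
 */
static bool trylock_clear_pending(struct qspinlock *lock)
{
	uint32_t old = atomic_load_explicit(&lock->val, memory_order_relaxed);

	while (!(old & _Q_LOCKED_MASK)) {
		uint32_t new = (old & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;

		if (atomic_compare_exchange_weak_explicit(&lock->val, &old, new,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;
}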
qspinlock.c
    343  * In the PV case we might already have _Q_LOCKED_VAL set, because   [in queued_spin_lock_slowpath()]
    353  if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))   [in queued_spin_lock_slowpath()]
/linux/tools/testing/selftests/bpf/progs/
bpf_arena_spin_lock.h
    107  #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)   [macro definition]
    193  WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);   [in clear_pending_set_locked()]
    204  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);   [in set_locked()]
    240  return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));   [in arena_spin_trylock()]
    429  if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))   [in arena_spin_lock_slowpath()]
    501  if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))   [in arena_spin_lock()]
/linux/include/asm-generic/
qspinlock.h
     97  return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));   [in queued_spin_trylock()]
    111  if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))   [in queued_spin_lock()]
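Lines 97 and 111 are the uncontended fast path: the lock is taken only by moving val from 0 to _Q_LOCKED_VAL with acquire semantics, and any non-zero value sends the caller to the slowpath. A rough user-space analogue using C11 atomics follows; the busy-wait fallback stands in for queued_spin_lock_slowpath() and is not the kernel's queueing algorithm.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL	1U

struct qspinlock { _Atomic uint32_t val; };

/* Succeed only if the word was 0, i.e. no owner, no pending bit, no tail. */
static bool queued_spin_trylock(struct qspinlock *lock)
{
	uint32_t val = atomic_load_explicit(&lock->val, memory_order_relaxed);

	if (val)
		return false;

	return atomic_compare_exchange_strong_explicit(&lock->val, &val,
						       _Q_LOCKED_VAL,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void queued_spin_lock(struct qspinlock *lock)
{
	uint32_t val = 0;

	/* Fast path: 0 -> _Q_LOCKED_VAL. */
	if (atomic_compare_exchange_strong_explicit(&lock->val, &val,
						    _Q_LOCKED_VAL,
						    memory_order_acquire,
						    memory_order_relaxed))
		return;

	/* Contended: the kernel queues on an MCS node here; we just retry. */
	while (!queued_spin_trylock(lock))
		;
}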
qspinlock_types.h
     92  #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)   [macro definition]
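The definition at line 92 makes _Q_LOCKED_VAL equal to 1 because the locked byte sits at offset 0 of the 32-bit lock word. The sketch below shows how the generic layout packs locked, pending and tail, assuming the common configuration for fewer than 16K CPUs (8-bit locked byte, 8-bit pending byte, tail in the top 16 bits); the offsets are reconstructed for illustration, not copied from the header.

#include <stdint.h>
#include <stdio.h>

#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)	/* == 1 */

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)	/* == 0x100 */

#define _Q_TAIL_OFFSET		(_Q_PENDING_OFFSET + 8)		/* assumes 8-bit pending */

int main(void)
{
	/* A word with both the locked byte and the pending byte set. */
	uint32_t val = _Q_LOCKED_VAL | _Q_PENDING_VAL;

	printf("locked=%u pending=%u tail=%u\n",
	       val & 0xff, (val >> _Q_PENDING_OFFSET) & 0xff,
	       val >> _Q_TAIL_OFFSET);
	return 0;
}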
/linux/arch/powerpc/lib/
qspinlock.c
    147  "i" (_Q_LOCKED_VAL),   [in trylock_clean_tail()]
    219  BUG_ON(!(old & _Q_LOCKED_VAL));   [in try_set_sleepy()]
    291  BUG_ON(!(val & _Q_LOCKED_VAL));   [in __yield_to_locked_owner()]
    395  if (val & _Q_LOCKED_VAL) {   [in yield_to_prev()]
    479  if (unlikely(!(val & _Q_LOCKED_VAL))) {   [in try_to_steal_lock()]
    614  if (!(val & _Q_LOCKED_VAL))   [in queued_spin_lock_mcs_queue()]
    662  if (unlikely(old & _Q_LOCKED_VAL)) {   [in queued_spin_lock_mcs_queue()]
/linux/arch/loongarch/include/asm/
qspinlock.h
     29  if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {   [in virt_spin_lock()]
/linux/arch/x86/include/asm/
qspinlock_paravirt.h
     43  "mov $" __stringify(_Q_LOCKED_VAL) ",%eax\n\t" \
qspinlock.h
    104  if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {   [in virt_spin_lock()]
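The loongarch and x86 virt_spin_lock() hits use the same pattern: when running under a hypervisor the queueing is bypassed and the lock degrades to a test-and-set loop on _Q_LOCKED_VAL. A hedged user-space sketch of that loop, with cpu_relax() and the static-key check that gates the real function left out:

#include <stdatomic.h>
#include <stdint.h>

#define _Q_LOCKED_VAL	1U

struct qspinlock { _Atomic uint32_t val; };

static void virt_spin_lock(struct qspinlock *lock)
{
	uint32_t val;

	do {
		/* Spin until the whole lock word drains to 0 ... */
		while (atomic_load_explicit(&lock->val, memory_order_relaxed))
			;
		val = 0;
		/* ... then try to swing it straight to _Q_LOCKED_VAL. */
	} while (!atomic_compare_exchange_strong_explicit(&lock->val, &val,
							  _Q_LOCKED_VAL,
							  memory_order_acquire,
							  memory_order_relaxed));
}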
/linux/arch/powerpc/include/asm/
qspinlock_types.h
     42  #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)   [macro definition]
qspinlock.h
     89  return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);   [in queued_spin_encode_locked_val()]
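The powerpc variant at line 89 shows why the architecture carries its own lock-word definitions: the word records the owner's CPU id alongside the locked bit, so a spinning vCPU can yield to the holder. A short sketch of the encoding; the _Q_OWNER_CPU_OFFSET value used here is an illustrative assumption, not the real powerpc constant.

#include <stdint.h>

#define _Q_LOCKED_VAL		1U
#define _Q_OWNER_CPU_OFFSET	1	/* assumed field position, see note above */

/* Pack the locked bit together with the acquiring CPU's id. */
uint32_t queued_spin_encode_locked_val(unsigned int cpu)
{
	return _Q_LOCKED_VAL | ((uint32_t)cpu << _Q_OWNER_CPU_OFFSET);
}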
/linux/kernel/bpf/
rqspinlock.c
    631  if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))   [in resilient_queued_spin_lock_slowpath()]