xref: /linux/arch/x86/include/asm/qspinlock.h (revision c0e297dc61f8d4453e07afbea1fa8d0e67cd4a34)
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

#define queued_spin_unlock queued_spin_unlock
/**
 * native_queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release((u8 *)lock, 0);
}
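
/*
 * The locked byte is the least-significant byte of lock->val
 * (_Q_LOCKED_OFFSET is 0), so the cast above addresses exactly that
 * byte on little-endian x86. Since x86 stores already have release
 * semantics, smp_store_release() reduces to a compiler barrier plus a
 * plain store; the unlock fast path should compile down to roughly a
 * single byte store (a sketch only, exact codegen may differ):
 *
 *	movb $0, (%rdi)		# clear the locked byte
 */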

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

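/*
 * pv_queued_spin_lock_slowpath() and pv_queued_spin_unlock() below are
 * paravirt hooks (see asm/paravirt.h). They default to the native
 * implementations on bare metal; a hypervisor guest (e.g. KVM or Xen)
 * with paravirt spinlock support switches them to the __pv_* variants
 * above, after calling __pv_init_lock_hash() to set up the hash table
 * that maps locks to the vCPU to kick on unlock. The unlock hook is
 * reached through the callee-saved thunk to keep the common case cheap.
 */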
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
#endif

#define virt_queued_spin_lock virt_queued_spin_lock

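/*
 * virt_queued_spin_lock() is called at the start of the generic
 * slowpath (kernel/locking/qspinlock.c). A fair MCS queue behaves
 * badly when the vCPU at the head of the queue is preempted, so when
 * running on a hypervisor without paravirt spinlock support we degrade
 * to a simple test-and-set spin on the whole lock word. On bare metal
 * (no X86_FEATURE_HYPERVISOR) this returns false and the normal queued
 * slowpath runs.
 */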
static inline bool virt_queued_spin_lock(struct qspinlock *lock)
{
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
		cpu_relax();

	return true;
}
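
/*
 * The generic header below supplies the rest of the lock API
 * (queued_spin_lock(), queued_spin_trylock(), ...) on top of the arch
 * hooks defined above. Callers never use these directly; a sketch of
 * typical use through the regular spinlock wrappers, assuming
 * qspinlocks are the configured spinlock implementation (my_lock is a
 * hypothetical example):
 *
 *	DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	// fast path: cmpxchg(0 -> _Q_LOCKED_VAL)
 *	// ... critical section ...
 *	spin_unlock(&my_lock);	// byte store via queued_spin_unlock()
 */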
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */