/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_QSPINLOCK_H
#define _ASM_LOONGARCH_QSPINLOCK_H

#include <asm/kvm_para.h>
#include <linux/jump_label.h>

#ifdef CONFIG_PARAVIRT
DECLARE_STATIC_KEY_FALSE(virt_preempt_key);
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
DECLARE_PER_CPU(struct kvm_steal_time, steal_time);

#define virt_spin_lock virt_spin_lock

static inline bool virt_spin_lock(struct qspinlock *lock)
{
	int val;

	if (!static_branch_unlikely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

__retry:
	val = atomic_read(&lock->val);

	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
		cpu_relax();
		goto __retry;
	}

	return true;
}
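
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * the generic queued-spinlock slowpath probes this hook before joining
 * the MCS queue, roughly along the lines of
 *
 *	void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 *	{
 *		if (virt_spin_lock(lock))
 *			return;
 *		...
 *	}
 *
 * so once virt_spin_lock_key is enabled in a guest, contended locks spin
 * on the lock word (unfair but preemption-tolerant) instead of queueing.
 */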

/*
 * A macro is preferable to an inline function here. With a macro, the
 * expression passed as cpu is evaluated only when it is actually used,
 * i.e. only when virt_preempt_key is enabled. With an inline function,
 * the caller would evaluate it unconditionally, which may cause cache
 * line thrashing across NUMA nodes.
 */
#define vcpu_is_preempted(cpu)							\
({										\
	bool __val;								\
										\
	if (!static_branch_unlikely(&virt_preempt_key))				\
		__val = false;							\
	else {									\
		struct kvm_steal_time *src;					\
		src = &per_cpu(steal_time, cpu);				\
		__val = !!(READ_ONCE(src->preempted) & KVM_VCPU_PREEMPTED);	\
	}									\
	__val;									\
})
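
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * optimistic owner-spinning code such as the mutex/rwsem spin loops can
 * bail out when the lock holder's vCPU has been preempted by the host,
 * roughly:
 *
 *	for (;;) {
 *		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *			break;
 *		cpu_relax();
 *	}
 *
 * On bare metal the static key keeps this a constant-false check; inside
 * a guest it reads the preempted flag from the holder CPU's steal_time
 * area shared with the hypervisor.
 */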

#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif // _ASM_LOONGARCH_QSPINLOCK_H