xref: /linux/arch/loongarch/include/asm/qspinlock.h (revision 64dd3b6a79f0907d36de481b0f15fab323a53e5a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_QSPINLOCK_H
#define _ASM_LOONGARCH_QSPINLOCK_H

#include <linux/jump_label.h>

#ifdef CONFIG_PARAVIRT

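/*
 * Default-false key: virt_spin_lock() below stays a no-op on bare
 * metal and only engages once boot-time paravirt setup code enables
 * the key, i.e. when running as a guest whose hypervisor lacks PV
 * spinlock support (see the comment in virt_spin_lock()).
 */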
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);

#define virt_spin_lock virt_spin_lock

static inline bool virt_spin_lock(struct qspinlock *lock)
{
	int val;

	if (!static_branch_unlikely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

__retry:
	val = atomic_read(&lock->val);

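	/*
	 * Test-and-set: take the lock only when the word reads free (0)
	 * and the cmpxchg from 0 to _Q_LOCKED_VAL succeeds; otherwise
	 * relax the CPU and retry.
	 */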
	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
		cpu_relax();
		goto __retry;
	}

	return true;
}
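
/*
 * A minimal sketch (not part of this header) of how the key above is
 * expected to get flipped: boot-time guest setup code would enable it
 * when PV spinlocks are unavailable. hypervisor_lacks_pv_spinlocks()
 * is a hypothetical stand-in for the real feature detection.
 *
 *	static void __init guest_spinlock_init(void)
 *	{
 *		if (hypervisor_lacks_pv_spinlocks())	// hypothetical check
 *			static_branch_enable(&virt_spin_lock_key);
 *	}
 */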

#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>
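
/*
 * Consumer side, for reference: with virt_spin_lock defined above, the
 * generic slowpath (queued_spin_lock_slowpath() in
 * kernel/locking/qspinlock.c) is expected to call it before any MCS
 * queueing and return early when it succeeds, so a guest spins on the
 * simple TAS lock instead of queueing on the fair lock.
 */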

#endif // _ASM_LOONGARCH_QSPINLOCK_H