xref: /linux/arch/loongarch/include/asm/qspinlock.h (revision 64dd3b6a79f0907d36de481b0f15fab323a53e5a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_QSPINLOCK_H
#define _ASM_LOONGARCH_QSPINLOCK_H

#include <linux/jump_label.h>

#ifdef CONFIG_PARAVIRT

DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);

#define virt_spin_lock virt_spin_lock

static inline bool virt_spin_lock(struct qspinlock *lock)
{
	int val;

	if (!static_branch_unlikely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

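	/*
	 * Simple Test-and-Set acquisition: spin until the lock word reads 0,
	 * then try to install _Q_LOCKED_VAL with a cmpxchg; retry on failure.
	 */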
__retry:
	val = atomic_read(&lock->val);

	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
		cpu_relax();
		goto __retry;
	}

	return true;
}
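
/*
 * Illustrative sketch (an assumption for context, not code from this header):
 * when an architecture defines virt_spin_lock(), the generic queued-spinlock
 * slow path consults it before queueing, roughly along these lines (the real
 * call site lives in kernel/locking/qspinlock.c):
 *
 *	void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 *	{
 *		if (virt_spin_lock(lock))
 *			return;		(taken via the Test-and-Set fallback)
 *		...			(otherwise fall through to queueing)
 *	}
 */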

#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif // _ASM_LOONGARCH_QSPINLOCK_H