xref: /linux/arch/x86/kernel/paravirt-spinlocks.c (revision b0b449e6fec4cd182bd4384f7eb9002596079f68)
// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/static_call.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

/*
 * Enabled when running on a hypervisor, so virt_spin_lock() can bypass the
 * fair queued lock (see the sketch below).
 */
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

#ifdef CONFIG_SMP
void __init native_pv_lock_init(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&virt_spin_lock_key);
}
#endif /* SMP */
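
/*
 * Context sketch (not part of this file): the static key above is consumed
 * by virt_spin_lock() in <asm/qspinlock.h>.  On a hypervisor it replaces the
 * fair queued lock with a simple test-and-set spin, because a fair lock
 * degrades badly when the lock holder's vCPU gets preempted.  Roughly:
 *
 *	static inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		int val;
 *
 *		if (!static_branch_likely(&virt_spin_lock_key))
 *			return false;
 *
 *	__retry:
 *		val = atomic_read(&lock->val);
 *		if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
 *			cpu_relax();
 *			goto __retry;
 *		}
 *		return true;
 *	}
 */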

#ifdef CONFIG_PARAVIRT_SPINLOCKS
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
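
/*
 * Note (informational): PV_CALLEE_SAVE_REGS_THUNK() above emits an asm stub
 * named __raw_callee_save___native_queued_spin_unlock that preserves the
 * caller-clobbered registers around a call to __native_queued_spin_unlock,
 * so paravirt call sites can assume an almost empty clobber list.
 * PV_CALLEE_SAVE() in the ops table below wraps that same stub, and
 * pv_is_native_spin_unlock() compares against its address.
 */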

/* True as long as no hypervisor has replaced the unlock op. */
bool pv_is_native_spin_unlock(void)
{
	return pv_ops_lock.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

/* On bare metal a CPU is never preempted by a hypervisor. */
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

/* True as long as no hypervisor has replaced the vcpu_is_preempted op. */
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_ops_lock.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

/*
 * Record, as synthetic CPU feature bits, whether the unlock and
 * vcpu_is_preempted ops were replaced by a hypervisor; the bits steer the
 * alternatives patching of the corresponding paravirt call sites.
 */
void __init paravirt_set_cap(void)
{
	if (!pv_is_native_spin_unlock())
		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);

	if (!pv_is_native_vcpu_is_preempted())
		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
}
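
/*
 * Sketch of how the two feature bits are meant to be consumed (illustrative;
 * the exact macro names and operands live in <asm/paravirt.h> and may
 * differ):
 *
 *	return PVOP_ALT_CALLEE1(bool, vcpu_is_preempted, cpu,
 *				"xor %%eax, %%eax",
 *				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
 *
 * i.e. when the bit is clear (native case) the paravirt call is patched out
 * and replaced by the short native instruction sequence.
 */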

/* Native defaults; a hypervisor guest may override these at boot. */
struct pv_lock_ops pv_ops_lock = {
	.queued_spin_lock_slowpath	= native_queued_spin_lock_slowpath,
	.queued_spin_unlock		= PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait				= paravirt_nop,
	.kick				= paravirt_nop,
	.vcpu_is_preempted		= PV_CALLEE_SAVE(__native_vcpu_is_preempted),
};
EXPORT_SYMBOL(pv_ops_lock);
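
/*
 * Usage sketch (not part of this file): a guest kernel overrides these ops
 * early in boot, before alternatives are applied.  A KVM-style setup would
 * look roughly like this (names illustrative, see arch/x86/kernel/kvm.c):
 *
 *	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 *	pv_ops_lock.wait = kvm_wait;
 *	pv_ops_lock.kick = kvm_kick_cpu;
 *	pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(kvm_vcpu_is_preempted);
 *
 * Once that has happened, the pv_is_native_*() checks above return false and
 * paravirt_set_cap() forces X86_FEATURE_PVUNLOCK / X86_FEATURE_VCPUPREEMPT.
 */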
#endif /* CONFIG_PARAVIRT_SPINLOCKS */