/* arch/x86/include/asm/kvm_para.h */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <uapi/asm/kvm_para.h>

/* kvmclock: register and initialize the paravirtual clock source. */
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);

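/*
 * kvm_check_and_clear_guest_paused() reports whether the host paused
 * this guest (the kvmclock PVCLOCK_GUEST_STOPPED flag) and clears the
 * flag, letting callers such as the lockup watchdogs suppress spurious
 * warnings after the VM is resumed.
 */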
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

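/*
 * With CONFIG_DEBUG_RODATA the kernel text is mapped read-only, so the
 * hypercall instruction cannot be rewritten when it traps.  In that
 * configuration the alternatives mechanism instead patches vmcall to
 * vmmcall at boot, while the text is still writable, whenever
 * X86_FEATURE_VMMCALL (i.e. an AMD processor) is set.
 */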
#ifdef CONFIG_DEBUG_RODATA
/* 0f 01 c1 is vmcall; 0f 01 d9 is vmmcall. */
#define KVM_HYPERCALL \
        ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
#else
/* On AMD processors, the Intel vmcall instruction generates a trap that
 * the hypervisor catches and uses to rewrite the call site with the
 * appropriate instruction (vmmcall).
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
#endif

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction.  The hypervisor may replace it with something else, but only
 * these two instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax, and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}
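
/*
 * Illustrative sketch, not part of the original header: issuing a
 * hypercall through the wrappers above.  This mirrors how the guest
 * kicks a halted vCPU; KVM_HC_KICK_CPU comes from uapi/linux/kvm_para.h
 * and x86_cpu_to_apicid from asm/smp.h, both assumed visible to the
 * caller:
 *
 *	static void example_kick_cpu(int cpu)
 *	{
 *		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
 *
 *		kvm_hypercall2(KVM_HC_KICK_CPU, 0, apicid);
 *	}
 */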

#ifdef CONFIG_KVM_GUEST
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);

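/*
 * Paravirtual spinlock support: when the kernel is built with
 * CONFIG_PARAVIRT_SPINLOCKS and the host advertises
 * KVM_FEATURE_PV_UNHALT, kvm_spinlock_init() switches the pv lock
 * operations over to the KVM kick/halt based implementation.
 */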
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_guest_init() do {} while (0)
#define kvm_async_pf_task_wait(T) do {} while (0)
#define kvm_async_pf_task_wake(T) do {} while (0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}

static inline void kvm_disable_steal_time(void)
{
	return;
}
#endif /* CONFIG_KVM_GUEST */

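/*
 * Illustrative sketch, not part of the original header: typical guest
 * setup probes for the paravirt interface before testing individual
 * feature bits (KVM_FEATURE_* from uapi/asm/kvm_para.h).  The consumer
 * use_steal_time_accounting() is hypothetical:
 *
 *	if (!kvm_para_available())
 *		return;
 *	if (kvm_arch_para_features() & (1 << KVM_FEATURE_STEAL_TIME))
 *		use_steal_time_accounting();
 */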
#endif /* _ASM_X86_KVM_PARA_H */