/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE \
	 | X86_CR4_CET)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
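
/*
 * For illustration, each invocation below is a straight macro expansion,
 * e.g. BUILD_KVM_GPR_ACCESSORS(rax, RAX) generates the pair:
 *
 *	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.regs[VCPU_REGS_RAX];
 *	}
 *	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *						  unsigned long val)
 *	{
 *		vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *	}
 *
 * Note that these accessors read/write vcpu->arch.regs directly and do not
 * interact with the availability/dirty tracking defined further down.
 */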
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * Using the register cache from interrupt context is generally not allowed, as
 * caching a register and marking it available/dirty can't be done atomically,
 * i.e. accesses from interrupt context may clobber state or read stale data if
 * the vCPU task is in the process of updating the cache.  The exception is if
 * KVM is handling a PMI IRQ/NMI VM-Exit, as that bounded code sequence doesn't
 * touch the cache, it runs after the cache is reset (post VM-Exit), and PMIs
 * need to access several registers that are cacheable.
 */
#define kvm_assert_register_caching_allowed(vcpu)		\
	lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))

/*
 * avail  dirty
 *   0      0	  register in VMCS/VMCB
 *   0      1	  *INVALID*
 *   1      0	  register in vcpu->arch
 *   1      1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

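/*
 * Worked example of the state table above (hypothetical flow): after a
 * VM-Exit both bitmaps are cleared, i.e. every register starts out as
 * avail=0/dirty=0, "lives in the VMCS/VMCB".  Caching a register's value in
 * vcpu->arch and calling kvm_register_mark_available() moves it to 1/0;
 * writing a new value and calling kvm_register_mark_dirty() moves it to 1/1,
 * which tells the vendor code to propagate the new value back to the
 * VMCS/VMCB before the next VM-Enter.
 */
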
/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops.  This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

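/*
 * Sketch of the intended usage pattern in a noinstr path (hypothetical
 * caller, names assumed for illustration): the helper returns the previous
 * state of the avail bit, so a false return means the cache was cold and
 * must be filled from hardware:
 *
 *	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_CR3))
 *		vcpu->arch.cr3 = <value read from the VMCS/VMCB>;
 */
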
/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other
 * words, odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

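/*
 * The RIP/RSP helpers below build directly on the raw accessors.  For
 * reference, a minimal sketch of the mode-aware wrapper that lives in x86.h
 * (is_64_bit_mode() assumed from that header), which truncates the result to
 * 32 bits unless the vCPU is in 64-bit mode:
 *
 *	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 *						      int reg)
 *	{
 *		unsigned long val = kvm_register_read_raw(vcpu, reg);
 *
 *		return is_64_bit_mode(vcpu) ? val : (u32)val;
 *	}
 */
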
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

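/*
 * A note on the might_sleep() in kvm_pdptr_read() above (explanatory
 * assumption, not taken from the original comments): on SVM, refilling the
 * PDPTR cache can require reading the PAE PDPTEs out of guest memory, which
 * may fault and sleep, hence PDPTR reads are disallowed in atomic context.
 */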
static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

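/*
 * CR0/CR4 bits can be "guest owned", i.e. read/written by the guest without
 * a VM-Exit.  If any requested bit may be guest owned and the register isn't
 * cached yet, pull the current value out of hardware before consulting
 * vcpu->arch.  kvm_read_cr4_bits() below follows the same pattern for CR4.
 */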
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

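/*
 * Reads the EDX:EAX register pair as a single u64, e.g. when emulating
 * instructions like WRMSR that pass a 64-bit operand with the high 32 bits
 * in EDX and the low 32 bits in EAX.  The "& -1u" masks each GPR down to
 * its low 32 bits.
 */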
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

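/*
 * "Guest mode" here refers to nested virtualization: HF_GUEST_MASK is set
 * while the vCPU is running a nested (L2) guest, not merely whenever the
 * pCPU is executing guest code.
 */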
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif