/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "vmcs12.h"
#include "vmx.h"

/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};

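/*
 * Illustrative sketch only (not the actual nested.c code): a caller on
 * the VMLAUNCH/VMRESUME emulation path is expected to dispatch on the
 * status along these lines:
 *
 *	switch (nested_vmx_enter_non_root_mode(vcpu, true)) {
 *	case NVMX_VMENTRY_SUCCESS:		resume in L2;
 *	case NVMX_VMENTRY_VMFAIL:		signal VMFail to L1;
 *	case NVMX_VMENTRY_VMEXIT:		reflect a VM-exit to L1;
 *	case NVMX_VMENTRY_KVM_INTERNAL_ERROR:	report the error to userspace;
 *	}
 */
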
void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
						     bool from_vmentry);
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size);

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

/*
 * Note: the same condition is checked against the state provided by userspace
 * in vmx_set_nested_state; if it is satisfied, the nested state must include
 * the VMCS12.
 */
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If we do two consecutive get/set_nested_state()s while L2 is
	 * running, hv_evmcs may end up not being mapped (it is mapped from
	 * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode(), as a
	 * vmcs12 always exists when it is true.
	 */
	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
		vmx->nested.hv_evmcs;
}

static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}

static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

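/*
 * Worked example with made-up values: if cr0_guest_host_mask == 0x1
 * (L1 owns only CR0.PE), guest_cr0 == 0x80000031 and
 * cr0_read_shadow == 0, then L2 reads
 * (0x80000031 & ~0x1) | (0 & 0x1) == 0x80000030: the PE bit comes
 * from the shadow, everything else from the real guest_cr0.
 */
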
static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}

static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
			CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

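/*
 * Note the guard in nested_cpu_has2(): per the SDM, the secondary
 * controls field is ignored unless CPU_BASED_ACTIVATE_SECONDARY_CONTROLS
 * is set in the primary controls, so e.g.
 * nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT) is false if L1
 * enabled EPT but left the activate bit clear.
 */
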
static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
{
	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
	    VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

static inline bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
}

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}

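/*
 * Worked example with made-up values: fixed0 == 0x20 (bit 5 must be 1)
 * and fixed1 == ~0x2ull (bit 1 must be 0).  val == 0x21 is valid:
 * ((0x21 & ~0x2) | 0x20) == 0x21.  val == 0x01 is rejected because
 * bit 5 is clear; val == 0x23 is rejected because bit 1 is set.
 */
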
static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

extern struct kvm_x86_nested_ops vmx_nested_ops;

#endif /* __KVM_X86_VMX_NESTED_H */