/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>
#include <asm/posted_intr.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "pmu_intel.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
#include "../mmu.h"
#include "common.h"

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

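/*
 * Per-segment fields tracked by the vcpu_vmx segment_cache below; the cache
 * keeps one validity bit per field per segment (hence its "4 bits per
 * segment" bitmask), so SEG_FIELD_NR is also the number of bits per segment.
 */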
enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE		4

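/*
 * One copy (host or guest) of the Intel PT (RTIT) MSR state: ctl, status,
 * output_base, output_mask and cr3_match mirror the corresponding
 * IA32_RTIT_* MSRs, and addr_a/addr_b hold the IA32_RTIT_ADDRn_A/B
 * address-range pairs.
 */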
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;
	bool update_vmcs01_apicv_status;
	bool update_vmcs01_hwapic_isr;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct kvm_host_map apic_access_page_map;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/*
	 * Used to snapshot MSRs that are conditionally loaded on VM-Enter in
	 * order to propagate the guest's pre-VM-Enter value into vmcs02. For
	 * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value.
	 * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_
	 * userspace restores MSRs before nested state. If userspace restores
	 * MSRs after nested state, the snapshot holds garbage, but KVM can't
	 * detect that, and the garbage value in vmcs02 will be overwritten by
	 * MSR restoration in any case.
	 */
	u64 pre_vmenter_debugctl;
	u64 pre_vmenter_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

#ifdef CONFIG_KVM_HYPERV
	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
#endif
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	struct vcpu_vt vt;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;

	u32 idt_vectoring_info;
	ulong rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware in those cases.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	/* Support for PML */
#define PML_LOG_NR_ENTRIES	512
	/* PML is written backwards: this is the first entry written by the CPU */
#define PML_HEAD_INDEX		(PML_LOG_NR_ENTRIES-1)

	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* ve_info must be page aligned. */
	struct vmx_ve_information *ve_info;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
	/* Posted Interrupt Descriptor (PID) table for IPI virtualization */
	u64 *pid_table;
};

static __always_inline struct vcpu_vt *to_vt(struct kvm_vcpu *vcpu)
{
	return &(container_of(vcpu, struct vcpu_vmx, vcpu)->vt);
}

static __always_inline struct kvm_vcpu *vt_to_vcpu(struct vcpu_vt *vt)
{
	return &(container_of(vt, struct vcpu_vmx, vt)->vcpu);
}

static __always_inline union vmx_exit_reason vmx_get_exit_reason(struct kvm_vcpu *vcpu)
{
	return to_vt(vcpu)->exit_reason;
}

static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vt *vt = to_vt(vcpu);

	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1) &&
	    !WARN_ON_ONCE(is_td_vcpu(vcpu)))
		vt->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	return vt->exit_qualification;
}

static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vt *vt = to_vt(vcpu);

	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2) &&
	    !WARN_ON_ONCE(is_td_vcpu(vcpu)))
		vt->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	return vt->exit_intr_info;
}

void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
int vmx_get_cpl_no_cache(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);

static inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
						 u32 msr, int type)
{
	vmx_set_intercept_for_msr(vcpu, msr, type, false);
}

static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
						u32 msr, int type)
{
	vmx_set_intercept_for_msr(vcpu, msr, type, true);
}

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);
bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated);

#define VMX_HOST_OWNED_DEBUGCTL_BITS	(DEBUGCTLMSR_FREEZE_IN_SMM)

static inline void vmx_guest_debugctl_write(struct kvm_vcpu *vcpu, u64 val)
{
	WARN_ON_ONCE(val & VMX_HOST_OWNED_DEBUGCTL_BITS);

	val |= vcpu->arch.host_debugctl & VMX_HOST_OWNED_DEBUGCTL_BITS;
	vmcs_write64(GUEST_IA32_DEBUGCTL, val);
}

static inline u64 vmx_guest_debugctl_read(void)
{
	return vmcs_read64(GUEST_IA32_DEBUGCTL) & ~VMX_HOST_OWNED_DEBUGCTL_BITS;
}

static inline void vmx_reload_guest_debugctl(struct kvm_vcpu *vcpu)
{
	u64 val = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (!((val ^ vcpu->arch.host_debugctl) & VMX_HOST_OWNED_DEBUGCTL_BITS))
		return;

	vmx_guest_debugctl_write(vcpu, val & ~VMX_HOST_OWNED_DEBUGCTL_BITS);
}

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)	\
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,	\
							u32 msr)		\
{										\
	int f = sizeof(unsigned long);						\
										\
	if (msr <= 0x1fff)							\
		return bitop##_bit(msr, bitmap + base / f);			\
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))			\
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f);	\
	return (rtype)true;							\
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)	\
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
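
/*
 * For example, the helpers above expand to
 * vmx_{test,clear,set}_msr_bitmap_{read,write}().  For a high-range MSR such
 * as 0xc0000082 (LSTAR), the read-intercept bit lives in the 0x400-0x7ff
 * region at byte 0x400 + (0x82 >> 3) = 0x410, bit (0x82 & 7), and the
 * write-intercept bit at byte 0xc00 + 0x10 = 0xc10.  MSRs outside both
 * ranges fall through to the "(rtype)true" return, i.e. they always VM-Exit.
 */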

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS		\
		(__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS |		\
		 VM_ENTRY_IA32E_MODE)
#else
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS		\
		__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS			\
	(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |			\
	 VM_ENTRY_LOAD_IA32_PAT |				\
	 VM_ENTRY_LOAD_IA32_EFER |				\
	 VM_ENTRY_LOAD_BNDCFGS |				\
	 VM_ENTRY_PT_CONCEAL_PIP |				\
	 VM_ENTRY_LOAD_IA32_RTIT_CTL)

#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |				\
	 VM_EXIT_ACK_INTR_ON_EXIT)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS		\
		(__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS |		\
		 VM_EXIT_HOST_ADDR_SPACE_SIZE)
#else
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS		\
		__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS			\
	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |			\
	 VM_EXIT_SAVE_IA32_PAT |				\
	 VM_EXIT_LOAD_IA32_PAT |				\
	 VM_EXIT_SAVE_IA32_EFER |				\
	 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER |			\
	 VM_EXIT_LOAD_IA32_EFER |				\
	 VM_EXIT_CLEAR_BNDCFGS |				\
	 VM_EXIT_PT_CONCEAL_PIP |				\
	 VM_EXIT_CLEAR_IA32_RTIT_CTL)

#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL		\
	(PIN_BASED_EXT_INTR_MASK |				\
	 PIN_BASED_NMI_EXITING)
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL		\
	(PIN_BASED_VIRTUAL_NMIS |				\
	 PIN_BASED_POSTED_INTR |				\
	 PIN_BASED_VMX_PREEMPTION_TIMER)

#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
	(CPU_BASED_HLT_EXITING |				\
	 CPU_BASED_CR3_LOAD_EXITING |				\
	 CPU_BASED_CR3_STORE_EXITING |				\
	 CPU_BASED_UNCOND_IO_EXITING |				\
	 CPU_BASED_MOV_DR_EXITING |				\
	 CPU_BASED_USE_TSC_OFFSETTING |				\
	 CPU_BASED_MWAIT_EXITING |				\
	 CPU_BASED_MONITOR_EXITING |				\
	 CPU_BASED_INVLPG_EXITING |				\
	 CPU_BASED_RDPMC_EXITING |				\
	 CPU_BASED_INTR_WINDOW_EXITING)

#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL	\
		(__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL |	\
		 CPU_BASED_CR8_LOAD_EXITING |			\
		 CPU_BASED_CR8_STORE_EXITING)
#else
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL	\
		__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
#endif

#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL		\
	(CPU_BASED_RDTSC_EXITING |				\
	 CPU_BASED_TPR_SHADOW |					\
	 CPU_BASED_USE_IO_BITMAPS |				\
	 CPU_BASED_MONITOR_TRAP_FLAG |				\
	 CPU_BASED_USE_MSR_BITMAPS |				\
	 CPU_BASED_NMI_WINDOW_EXITING |				\
	 CPU_BASED_PAUSE_EXITING |				\
	 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |		\
	 CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)

#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL		\
	(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |		\
	 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |		\
	 SECONDARY_EXEC_WBINVD_EXITING |			\
	 SECONDARY_EXEC_ENABLE_VPID |				\
	 SECONDARY_EXEC_ENABLE_EPT |				\
	 SECONDARY_EXEC_UNRESTRICTED_GUEST |			\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING |			\
	 SECONDARY_EXEC_DESC |					\
	 SECONDARY_EXEC_ENABLE_RDTSCP |				\
	 SECONDARY_EXEC_ENABLE_INVPCID |			\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |			\
	 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |			\
	 SECONDARY_EXEC_SHADOW_VMCS |				\
	 SECONDARY_EXEC_ENABLE_XSAVES |				\
	 SECONDARY_EXEC_RDSEED_EXITING |			\
	 SECONDARY_EXEC_RDRAND_EXITING |			\
	 SECONDARY_EXEC_ENABLE_PML |				\
	 SECONDARY_EXEC_TSC_SCALING |				\
	 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |			\
	 SECONDARY_EXEC_PT_USE_GPA |				\
	 SECONDARY_EXEC_PT_CONCEAL_VMX |			\
	 SECONDARY_EXEC_ENABLE_VMFUNC |				\
	 SECONDARY_EXEC_BUS_LOCK_DETECTION |			\
	 SECONDARY_EXEC_NOTIFY_VM_EXITING |			\
	 SECONDARY_EXEC_ENCLS_EXITING |				\
	 SECONDARY_EXEC_EPT_VIOLATION_VE)

#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL		\
	(TERTIARY_EXEC_IPI_VIRT)

#define BUILD_CONTROLS_SHADOW(lname, uname, bits)					\
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val)		\
{											\
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {				\
		vmcs_write##bits(uname, val);						\
		vmx->loaded_vmcs->controls_shadow.lname = val;				\
	}										\
}											\
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs)		\
{											\
	return vmcs->controls_shadow.lname;						\
}											\
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)			\
{											\
	return __##lname##_controls_get(vmx->loaded_vmcs);				\
}											\
static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val)	\
{											\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));	\
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);			\
}											\
static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{											\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));	\
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);			\
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
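
/*
 * Each instantiation above generates a lazy accessor family, e.g. the "exec"
 * shadow provides exec_controls_get()/exec_controls_set() plus
 * exec_controls_setbit()/exec_controls_clearbit(), and only writes
 * CPU_BASED_VM_EXEC_CONTROL to the VMCS when the cached shadow value changes.
 */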

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand. Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |		\
				 (1 << VCPU_REGS_RSP) |		\
				 (1 << VCPU_EXREG_RFLAGS) |	\
				 (1 << VCPU_EXREG_PDPTR) |	\
				 (1 << VCPU_EXREG_SEGMENTS) |	\
				 (1 << VCPU_EXREG_CR0) |	\
				 (1 << VCPU_EXREG_CR3) |	\
				 (1 << VCPU_EXREG_CR4) |	\
				 (1 << VCPU_EXREG_EXIT_INFO_1) |\
				 (1 << VCPU_EXREG_EXIT_INFO_2))
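
/*
 * Registers in this set are read from the VMCS and marked available only on
 * first use after a VM-Exit, e.g. vmx_get_exit_qual() and vmx_get_intr_info()
 * above call vmcs_readl()/vmcs_read32() only when VCPU_EXREG_EXIT_INFO_1/2
 * isn't already cached.
 */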

static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{
	unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;

	/*
	 * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
	 * in order to construct shadow PTEs with the correct protections.
	 * Note! CR0.WP technically can be passed through to the guest if
	 * paging is disabled, but checking CR0.PG would generate a cyclical
	 * dependency of sorts due to forcing the caller to ensure CR0 holds
	 * the correct value prior to determining which CR0 bits can be owned
	 * by L1. Keep it simple and limit the optimization to EPT.
	 */
	if (!enable_ept)
		bits &= ~X86_CR0_WP;
	return bits;
}

static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < kvm_host.maxphyaddr;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) && enable_ipiv;
}

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

int vmx_init(void);
void vmx_exit(void);

#endif /* __KVM_X86_VMX_H */