1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Avi Kivity <avi@qumranet.com>
13 * Yaniv Kamay <yaniv@qumranet.com>
14 */
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/highmem.h>
18 #include <linux/hrtimer.h>
19 #include <linux/kernel.h>
20 #include <linux/kvm_host.h>
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/mod_devicetable.h>
24 #include <linux/mm.h>
25 #include <linux/objtool.h>
26 #include <linux/sched.h>
27 #include <linux/sched/smt.h>
28 #include <linux/slab.h>
29 #include <linux/tboot.h>
30 #include <linux/trace_events.h>
31 #include <linux/entry-kvm.h>
32
33 #include <asm/apic.h>
34 #include <asm/asm.h>
35 #include <asm/cpu.h>
36 #include <asm/cpu_device_id.h>
37 #include <asm/debugreg.h>
38 #include <asm/desc.h>
39 #include <asm/fpu/api.h>
40 #include <asm/fpu/xstate.h>
41 #include <asm/fred.h>
42 #include <asm/idtentry.h>
43 #include <asm/io.h>
44 #include <asm/irq_remapping.h>
45 #include <asm/reboot.h>
46 #include <asm/perf_event.h>
47 #include <asm/mmu_context.h>
48 #include <asm/mshyperv.h>
49 #include <asm/mwait.h>
50 #include <asm/spec-ctrl.h>
51 #include <asm/vmx.h>
52
53 #include <trace/events/ipi.h>
54
55 #include "capabilities.h"
56 #include "cpuid.h"
57 #include "hyperv.h"
58 #include "kvm_onhyperv.h"
59 #include "irq.h"
60 #include "kvm_cache_regs.h"
61 #include "lapic.h"
62 #include "mmu.h"
63 #include "nested.h"
64 #include "pmu.h"
65 #include "sgx.h"
66 #include "trace.h"
67 #include "vmcs.h"
68 #include "vmcs12.h"
69 #include "vmx.h"
70 #include "x86.h"
71 #include "x86_ops.h"
72 #include "smm.h"
73 #include "vmx_onhyperv.h"
74 #include "posted_intr.h"
75
76 MODULE_AUTHOR("Qumranet");
77 MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
78 MODULE_LICENSE("GPL");
79
80 #ifdef MODULE
81 static const struct x86_cpu_id vmx_cpu_id[] = {
82 X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
83 {}
84 };
85 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
86 #endif
87
88 bool __read_mostly enable_vpid = 1;
89 module_param_named(vpid, enable_vpid, bool, 0444);
90
91 static bool __read_mostly enable_vnmi = 1;
92 module_param_named(vnmi, enable_vnmi, bool, 0444);
93
94 bool __read_mostly flexpriority_enabled = 1;
95 module_param_named(flexpriority, flexpriority_enabled, bool, 0444);
96
97 bool __read_mostly enable_ept = 1;
98 module_param_named(ept, enable_ept, bool, 0444);
99
100 bool __read_mostly enable_unrestricted_guest = 1;
101 module_param_named(unrestricted_guest,
102 enable_unrestricted_guest, bool, 0444);
103
104 bool __read_mostly enable_ept_ad_bits = 1;
105 module_param_named(eptad, enable_ept_ad_bits, bool, 0444);
106
107 static bool __read_mostly emulate_invalid_guest_state = true;
108 module_param(emulate_invalid_guest_state, bool, 0444);
109
110 static bool __read_mostly fasteoi = 1;
111 module_param(fasteoi, bool, 0444);
112
113 module_param(enable_apicv, bool, 0444);
114
115 bool __read_mostly enable_ipiv = true;
116 module_param(enable_ipiv, bool, 0444);
117
118 /*
* If nested=1, nested virtualization is supported, i.e., guests may use
* VMX and be hypervisors for their own guests. If nested=0, guests may not
* use VMX instructions.
122 */
123 static bool __read_mostly nested = 1;
124 module_param(nested, bool, 0444);
125
126 bool __read_mostly enable_pml = 1;
127 module_param_named(pml, enable_pml, bool, 0444);
128
129 static bool __read_mostly error_on_inconsistent_vmcs_config = true;
130 module_param(error_on_inconsistent_vmcs_config, bool, 0444);
131
132 static bool __read_mostly dump_invalid_vmcs = 0;
133 module_param(dump_invalid_vmcs, bool, 0644);
134
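/*
 * Bit flags describing how the x2APIC MSR range is currently mapped in the
 * vCPU's MSR bitmap (plain x2APIC vs. x2APIC with APICv).
 */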
135 #define MSR_BITMAP_MODE_X2APIC 1
136 #define MSR_BITMAP_MODE_X2APIC_APICV 2
137
138 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
139
140 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */
141 static int __read_mostly cpu_preemption_timer_multi;
142 static bool __read_mostly enable_preemption_timer = 1;
143 #ifdef CONFIG_X86_64
144 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
145 #endif
146
147 extern bool __read_mostly allow_smaller_maxphyaddr;
148 module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
149
150 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
151 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
152 #define KVM_VM_CR0_ALWAYS_ON \
153 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
154
155 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
156 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
157 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
158
159 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
160
161 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
162 RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
163 RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
164 RTIT_STATUS_BYTECNT))
165
166 /*
167 * List of MSRs that can be directly passed to the guest.
* In addition to these, x2APIC, PT and LBR MSRs are handled specially.
169 */
170 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
171 MSR_IA32_SPEC_CTRL,
172 MSR_IA32_PRED_CMD,
173 MSR_IA32_FLUSH_CMD,
174 MSR_IA32_TSC,
175 #ifdef CONFIG_X86_64
176 MSR_FS_BASE,
177 MSR_GS_BASE,
178 MSR_KERNEL_GS_BASE,
179 MSR_IA32_XFD,
180 MSR_IA32_XFD_ERR,
181 #endif
182 MSR_IA32_SYSENTER_CS,
183 MSR_IA32_SYSENTER_ESP,
184 MSR_IA32_SYSENTER_EIP,
185 MSR_CORE_C1_RES,
186 MSR_CORE_C3_RESIDENCY,
187 MSR_CORE_C6_RESIDENCY,
188 MSR_CORE_C7_RESIDENCY,
189 };
190
191 /*
* These two parameters are used to configure the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
* executions of PAUSE in a loop. Also indicates whether PLE is enabled.
* Testing shows this time is usually smaller than 128 cycles.
* ple_window: upper bound on the amount of time a guest is allowed to execute
* in a PAUSE loop. Tests indicate that most spinlocks are held for
* less than 2^12 cycles.
* Time is measured based on a counter that runs at the same rate as the TSC,
* refer to SDM volume 3B sections 21.6.13 and 22.1.3.
201 */
202 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
203 module_param(ple_gap, uint, 0444);
204
205 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
206 module_param(ple_window, uint, 0444);
207
208 /* Default doubles per-vcpu window every exit. */
209 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
210 module_param(ple_window_grow, uint, 0444);
211
212 /* Default resets per-vcpu window every exit to ple_window. */
213 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
214 module_param(ple_window_shrink, uint, 0444);
215
216 /* Default is to compute the maximum so we can never overflow. */
217 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
218 module_param(ple_window_max, uint, 0444);
219
220 /* Default is SYSTEM mode, 1 for host-guest mode */
221 int __read_mostly pt_mode = PT_MODE_SYSTEM;
222 module_param(pt_mode, int, S_IRUGO);
223
224 struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
225
226 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
227 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
228 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
229
230 /* Storage for pre module init parameter parsing */
231 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
232
233 static const struct {
234 const char *option;
235 bool for_parse;
236 } vmentry_l1d_param[] = {
237 [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
238 [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
239 [VMENTER_L1D_FLUSH_COND] = {"cond", true},
240 [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
241 [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
242 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
243 };
244
245 #define L1D_CACHE_ORDER 4
246 static void *vmx_l1d_flush_pages;
247
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
249 {
250 struct page *page;
251 unsigned int i;
252
253 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
254 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
255 return 0;
256 }
257
258 if (!enable_ept) {
259 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
260 return 0;
261 }
262
263 if (kvm_host.arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
264 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
265 return 0;
266 }
267
/* If set to auto, use the default L1TF mitigation method */
269 if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
270 switch (l1tf_mitigation) {
271 case L1TF_MITIGATION_OFF:
272 l1tf = VMENTER_L1D_FLUSH_NEVER;
273 break;
274 case L1TF_MITIGATION_FLUSH_NOWARN:
275 case L1TF_MITIGATION_FLUSH:
276 case L1TF_MITIGATION_FLUSH_NOSMT:
277 l1tf = VMENTER_L1D_FLUSH_COND;
278 break;
279 case L1TF_MITIGATION_FULL:
280 case L1TF_MITIGATION_FULL_FORCE:
281 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
282 break;
283 }
284 } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
285 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
286 }
287
288 if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
289 !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
290 /*
291 * This allocation for vmx_l1d_flush_pages is not tied to a VM
292 * lifetime and so should not be charged to a memcg.
293 */
294 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
295 if (!page)
296 return -ENOMEM;
297 vmx_l1d_flush_pages = page_address(page);
298
299 /*
300 * Initialize each page with a different pattern in
301 * order to protect against KSM in the nested
302 * virtualization case.
303 */
304 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
305 memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
306 PAGE_SIZE);
307 }
308 }
309
310 l1tf_vmx_mitigation = l1tf;
311
312 if (l1tf != VMENTER_L1D_FLUSH_NEVER)
313 static_branch_enable(&vmx_l1d_should_flush);
314 else
315 static_branch_disable(&vmx_l1d_should_flush);
316
317 if (l1tf == VMENTER_L1D_FLUSH_COND)
318 static_branch_enable(&vmx_l1d_flush_cond);
319 else
320 static_branch_disable(&vmx_l1d_flush_cond);
321 return 0;
322 }
323
static int vmentry_l1d_flush_parse(const char *s)
325 {
326 unsigned int i;
327
328 if (s) {
329 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
330 if (vmentry_l1d_param[i].for_parse &&
331 sysfs_streq(s, vmentry_l1d_param[i].option))
332 return i;
333 }
334 }
335 return -EINVAL;
336 }
337
static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
339 {
340 int l1tf, ret;
341
342 l1tf = vmentry_l1d_flush_parse(s);
343 if (l1tf < 0)
344 return l1tf;
345
346 if (!boot_cpu_has(X86_BUG_L1TF))
347 return 0;
348
349 /*
350 * Has vmx_init() run already? If not then this is the pre init
351 * parameter parsing. In that case just store the value and let
352 * vmx_init() do the proper setup after enable_ept has been
353 * established.
354 */
355 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
356 vmentry_l1d_flush_param = l1tf;
357 return 0;
358 }
359
360 mutex_lock(&vmx_l1d_flush_mutex);
361 ret = vmx_setup_l1d_flush(l1tf);
362 mutex_unlock(&vmx_l1d_flush_mutex);
363 return ret;
364 }
365
static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
367 {
368 if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
369 return sysfs_emit(s, "???\n");
370
371 return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
372 }
373
static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
375 {
376 u64 msr;
377
378 if (!vmx->disable_fb_clear)
379 return;
380
381 msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
382 msr |= FB_CLEAR_DIS;
383 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
384 /* Cache the MSR value to avoid reading it later */
385 vmx->msr_ia32_mcu_opt_ctrl = msr;
386 }
387
static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
389 {
390 if (!vmx->disable_fb_clear)
391 return;
392
393 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
394 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
395 }
396
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
398 {
399 /*
400 * Disable VERW's behavior of clearing CPU buffers for the guest if the
401 * CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
402 * the mitigation. Disabling the clearing behavior provides a
403 * performance boost for guests that aren't aware that manually clearing
404 * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
405 * and VM-Exit.
406 */
407 vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
408 (kvm_host.arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
409 !boot_cpu_has_bug(X86_BUG_MDS) &&
410 !boot_cpu_has_bug(X86_BUG_TAA);
411
412 /*
413 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
414 * at VMEntry. Skip the MSR read/write when a guest has no use case to
415 * execute VERW.
416 */
417 if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
418 ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
419 (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
420 (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
421 (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
422 (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
423 vmx->disable_fb_clear = false;
424 }
425
426 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
427 .set = vmentry_l1d_flush_set,
428 .get = vmentry_l1d_flush_get,
429 };
430 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
431
432 static u32 vmx_segment_access_rights(struct kvm_segment *var);
433
434 void vmx_vmexit(void);
435
436 #define vmx_insn_failed(fmt...) \
437 do { \
438 WARN_ONCE(1, fmt); \
439 pr_warn_ratelimited(fmt); \
440 } while (0)
441
noinline void vmread_error(unsigned long field)
443 {
444 vmx_insn_failed("vmread failed: field=%lx\n", field);
445 }
446
447 #ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
noinstr void vmread_error_trampoline2(unsigned long field, bool fault)
449 {
450 if (fault) {
451 kvm_spurious_fault();
452 } else {
453 instrumentation_begin();
454 vmread_error(field);
455 instrumentation_end();
456 }
457 }
458 #endif
459
noinline void vmwrite_error(unsigned long field, unsigned long value)
461 {
462 vmx_insn_failed("vmwrite failed: field=%lx val=%lx err=%u\n",
463 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
464 }
465
noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
467 {
468 vmx_insn_failed("vmclear failed: %p/%llx err=%u\n",
469 vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
470 }
471
noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
473 {
474 vmx_insn_failed("vmptrld failed: %p/%llx err=%u\n",
475 vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
476 }
477
noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
479 {
480 vmx_insn_failed("invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
481 ext, vpid, gva);
482 }
483
noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
485 {
486 vmx_insn_failed("invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
487 ext, eptp, gpa);
488 }
489
490 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
491 DEFINE_PER_CPU(struct vmcs *, current_vmcs);
492 /*
* We maintain a per-CPU linked list of the VMCSs loaded on that CPU. This is needed
494 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
495 */
496 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
497
498 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
499 static DEFINE_SPINLOCK(vmx_vpid_lock);
500
501 struct vmcs_config vmcs_config __ro_after_init;
502 struct vmx_capability vmx_capability __ro_after_init;
503
504 #define VMX_SEGMENT_FIELD(seg) \
505 [VCPU_SREG_##seg] = { \
506 .selector = GUEST_##seg##_SELECTOR, \
507 .base = GUEST_##seg##_BASE, \
508 .limit = GUEST_##seg##_LIMIT, \
509 .ar_bytes = GUEST_##seg##_AR_BYTES, \
510 }
511
512 static const struct kvm_vmx_segment_field {
513 unsigned selector;
514 unsigned base;
515 unsigned limit;
516 unsigned ar_bytes;
517 } kvm_vmx_segment_fields[] = {
518 VMX_SEGMENT_FIELD(CS),
519 VMX_SEGMENT_FIELD(DS),
520 VMX_SEGMENT_FIELD(ES),
521 VMX_SEGMENT_FIELD(FS),
522 VMX_SEGMENT_FIELD(GS),
523 VMX_SEGMENT_FIELD(SS),
524 VMX_SEGMENT_FIELD(TR),
525 VMX_SEGMENT_FIELD(LDTR),
526 };
527
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
529 {
530 vmx->segment_cache.bitmask = 0;
531 }
532
533 static unsigned long host_idt_base;
534
535 #if IS_ENABLED(CONFIG_HYPERV)
536 static bool __read_mostly enlightened_vmcs = true;
537 module_param(enlightened_vmcs, bool, 0444);
538
static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
540 {
541 struct hv_enlightened_vmcs *evmcs;
542 hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
543
544 if (partition_assist_page == INVALID_PAGE)
545 return -ENOMEM;
546
547 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
548
549 evmcs->partition_assist_page = partition_assist_page;
550 evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
551 evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
552
553 return 0;
554 }
555
static __init void hv_init_evmcs(void)
557 {
558 int cpu;
559
560 if (!enlightened_vmcs)
561 return;
562
563 /*
564 * Enlightened VMCS usage should be recommended and the host needs
565 * to support eVMCS v1 or above.
566 */
567 if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
568 (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
569 KVM_EVMCS_VERSION) {
570
571 /* Check that we have assist pages on all online CPUs */
572 for_each_online_cpu(cpu) {
573 if (!hv_get_vp_assist_page(cpu)) {
574 enlightened_vmcs = false;
575 break;
576 }
577 }
578
579 if (enlightened_vmcs) {
580 pr_info("Using Hyper-V Enlightened VMCS\n");
581 static_branch_enable(&__kvm_is_using_evmcs);
582 }
583
584 if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
585 vt_x86_ops.enable_l2_tlb_flush
586 = hv_enable_l2_tlb_flush;
587 } else {
588 enlightened_vmcs = false;
589 }
590 }
591
static void hv_reset_evmcs(void)
593 {
594 struct hv_vp_assist_page *vp_ap;
595
596 if (!kvm_is_using_evmcs())
597 return;
598
599 /*
600 * KVM should enable eVMCS if and only if all CPUs have a VP assist
* page, and should reject CPU onlining if eVMCS is enabled but the CPU
* doesn't have a VP assist page allocated.
603 */
604 vp_ap = hv_get_vp_assist_page(smp_processor_id());
605 if (WARN_ON_ONCE(!vp_ap))
606 return;
607
608 /*
609 * Reset everything to support using non-enlightened VMCS access later
610 * (e.g. when we reload the module with enlightened_vmcs=0)
611 */
612 vp_ap->nested_control.features.directhypercall = 0;
613 vp_ap->current_nested_vmcs = 0;
614 vp_ap->enlighten_vmentry = 0;
615 }
616
617 #else /* IS_ENABLED(CONFIG_HYPERV) */
static void hv_init_evmcs(void) {}
static void hv_reset_evmcs(void) {}
620 #endif /* IS_ENABLED(CONFIG_HYPERV) */
621
622 /*
* Comment format: document - errata name - stepping - processor name.
* Taken from
625 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
626 */
627 static u32 vmx_preemption_cpu_tfms[] = {
628 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
629 0x000206E6,
630 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
631 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
632 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
633 0x00020652,
634 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
635 0x00020655,
636 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
637 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
638 /*
639 * 320767.pdf - AAP86 - B1 -
640 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
641 */
642 0x000106E5,
643 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
644 0x000106A0,
645 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
646 0x000106A1,
647 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
648 0x000106A4,
649 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
650 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
651 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
652 0x000106A5,
653 /* Xeon E3-1220 V2 */
654 0x000306A8,
655 };
656
static inline bool cpu_has_broken_vmx_preemption_timer(void)
658 {
659 u32 eax = cpuid_eax(0x00000001), i;
660
661 /* Clear the reserved bits */
662 eax &= ~(0x3U << 14 | 0xfU << 28);
663 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
664 if (eax == vmx_preemption_cpu_tfms[i])
665 return true;
666
667 return false;
668 }
669
static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
671 {
672 return flexpriority_enabled && lapic_in_kernel(vcpu);
673 }
674
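/*
 * Map an MSR onto its slot in vmx_possible_passthrough_msrs[]. MSRs whose
 * intercepts are managed elsewhere (x2APIC, PT, LBR) return -ENOENT, as do
 * MSRs that are missing from the possible-passthrough list entirely.
 */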
static int vmx_get_passthrough_msr_slot(u32 msr)
676 {
677 int i;
678
679 switch (msr) {
680 case 0x800 ... 0x8ff:
681 /* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
682 return -ENOENT;
683 case MSR_IA32_RTIT_STATUS:
684 case MSR_IA32_RTIT_OUTPUT_BASE:
685 case MSR_IA32_RTIT_OUTPUT_MASK:
686 case MSR_IA32_RTIT_CR3_MATCH:
687 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
688 /* PT MSRs. These are handled in pt_update_intercept_for_msr() */
689 case MSR_LBR_SELECT:
690 case MSR_LBR_TOS:
691 case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
692 case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
693 case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
694 case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
695 case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
696 /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
697 return -ENOENT;
698 }
699
700 for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
701 if (vmx_possible_passthrough_msrs[i] == msr)
702 return i;
703 }
704
705 WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
706 return -ENOENT;
707 }
708
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
710 {
711 int i;
712
713 i = kvm_find_user_return_msr(msr);
714 if (i >= 0)
715 return &vmx->guest_uret_msrs[i];
716 return NULL;
717 }
718
static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
720 struct vmx_uret_msr *msr, u64 data)
721 {
722 unsigned int slot = msr - vmx->guest_uret_msrs;
723 int ret = 0;
724
725 if (msr->load_into_hardware) {
726 preempt_disable();
727 ret = kvm_set_user_return_msr(slot, data, msr->mask);
728 preempt_enable();
729 }
730 if (!ret)
731 msr->data = data;
732 return ret;
733 }
734
735 /*
736 * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
737 *
738 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
739 * atomically track post-VMXON state, e.g. this may be called in NMI context.
* Eat all faults, as all other VMXOFF faults are mode related, i.e.
741 * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
742 * magically in RM, VM86, compat mode, or at CPL>0.
743 */
static int kvm_cpu_vmxoff(void)
745 {
746 asm goto("1: vmxoff\n\t"
747 _ASM_EXTABLE(1b, %l[fault])
748 ::: "cc", "memory" : fault);
749
750 cr4_clear_bits(X86_CR4_VMXE);
751 return 0;
752
753 fault:
754 cr4_clear_bits(X86_CR4_VMXE);
755 return -EIO;
756 }
757
static void vmx_emergency_disable(void)
759 {
760 int cpu = raw_smp_processor_id();
761 struct loaded_vmcs *v;
762
763 kvm_rebooting = true;
764
765 /*
766 * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
* set in task context. If this races with VMX being disabled by an NMI,
* VMCLEAR and VMXOFF may #UD, but KVM will eat those faults because
* kvm_rebooting is set.
770 */
771 if (!(__read_cr4() & X86_CR4_VMXE))
772 return;
773
774 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
775 loaded_vmcss_on_cpu_link)
776 vmcs_clear(v->vmcs);
777
778 kvm_cpu_vmxoff();
779 }
780
static void __loaded_vmcs_clear(void *arg)
782 {
783 struct loaded_vmcs *loaded_vmcs = arg;
784 int cpu = raw_smp_processor_id();
785
786 if (loaded_vmcs->cpu != cpu)
787 return; /* vcpu migration can race with cpu offline */
788 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
789 per_cpu(current_vmcs, cpu) = NULL;
790
791 vmcs_clear(loaded_vmcs->vmcs);
792 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
793 vmcs_clear(loaded_vmcs->shadow_vmcs);
794
795 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
796
797 /*
798 * Ensure all writes to loaded_vmcs, including deleting it from its
799 * current percpu list, complete before setting loaded_vmcs->cpu to
800 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
801 * and add loaded_vmcs to its percpu list before it's deleted from this
802 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
803 */
804 smp_wmb();
805
806 loaded_vmcs->cpu = -1;
807 loaded_vmcs->launched = 0;
808 }
809
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
811 {
812 int cpu = loaded_vmcs->cpu;
813
814 if (cpu != -1)
815 smp_call_function_single(cpu,
816 __loaded_vmcs_clear, loaded_vmcs, 1);
817 }
818
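/*
 * The segment cache avoids redundant VMREADs: each (segment, field) pair
 * owns one bit in segment_cache.bitmask. Returns true if the field is
 * already cached, and marks it as cached either way.
 */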
static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
820 unsigned field)
821 {
822 bool ret;
823 u32 mask = 1 << (seg * SEG_FIELD_NR + field);
824
825 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
826 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
827 vmx->segment_cache.bitmask = 0;
828 }
829 ret = vmx->segment_cache.bitmask & mask;
830 vmx->segment_cache.bitmask |= mask;
831 return ret;
832 }
833
static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
835 {
836 u16 *p = &vmx->segment_cache.seg[seg].selector;
837
838 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
839 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
840 return *p;
841 }
842
static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
844 {
845 ulong *p = &vmx->segment_cache.seg[seg].base;
846
847 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
848 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
849 return *p;
850 }
851
static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
853 {
854 u32 *p = &vmx->segment_cache.seg[seg].limit;
855
856 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
857 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
858 return *p;
859 }
860
static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
862 {
863 u32 *p = &vmx->segment_cache.seg[seg].ar;
864
865 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
866 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
867 return *p;
868 }
869
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
871 {
872 u32 eb;
873
874 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
875 (1u << DB_VECTOR) | (1u << AC_VECTOR);
876 /*
877 * #VE isn't used for VMX. To test against unexpected changes
878 * related to #VE for VMX, intercept unexpected #VE and warn on it.
879 */
880 if (IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
881 eb |= 1u << VE_VECTOR;
882 /*
883 * Guest access to VMware backdoor ports could legitimately
884 * trigger #GP because of TSS I/O permission bitmap.
885 * We intercept those #GP and allow access to them anyway
886 * as VMware does.
887 */
888 if (enable_vmware_backdoor)
889 eb |= (1u << GP_VECTOR);
890 if ((vcpu->guest_debug &
891 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
892 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
893 eb |= 1u << BP_VECTOR;
894 if (to_vmx(vcpu)->rmode.vm86_active)
895 eb = ~0;
896 if (!vmx_need_pf_intercept(vcpu))
897 eb &= ~(1u << PF_VECTOR);
898
899 /* When we are running a nested L2 guest and L1 specified for it a
900 * certain exception bitmap, we must trap the same exceptions and pass
901 * them to L1. When running L2, we will only handle the exceptions
902 * specified above if L1 did not want them.
903 */
904 if (is_guest_mode(vcpu))
905 eb |= get_vmcs12(vcpu)->exception_bitmap;
906 else {
907 int mask = 0, match = 0;
908
909 if (enable_ept && (eb & (1u << PF_VECTOR))) {
910 /*
911 * If EPT is enabled, #PF is currently only intercepted
912 * if MAXPHYADDR is smaller on the guest than on the
913 * host. In that case we only care about present,
914 * non-reserved faults. For vmcs02, however, PFEC_MASK
915 * and PFEC_MATCH are set in prepare_vmcs02_rare.
916 */
917 mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
918 match = PFERR_PRESENT_MASK;
919 }
920 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
921 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
922 }
923
924 /*
925 * Disabling xfd interception indicates that dynamic xfeatures
926 * might be used in the guest. Always trap #NM in this case
927 * to save guest xfd_err timely.
928 */
929 if (vcpu->arch.xfd_no_write_intercept)
930 eb |= (1u << NM_VECTOR);
931
932 vmcs_write32(EXCEPTION_BITMAP, eb);
933 }
934
935 /*
936 * Check if MSR is intercepted for currently loaded MSR bitmap.
937 */
static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
939 {
940 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
941 return true;
942
943 return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
944 }
945
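/*
 * Compute the VMX_RUN_* flags consumed by the low-level VM-entry assembly
 * (__vmx_vcpu_run), e.g. whether to VMRESUME vs. VMLAUNCH and whether
 * SPEC_CTRL must be read back after VM-Exit.
 */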
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
947 {
948 unsigned int flags = 0;
949
950 if (vmx->loaded_vmcs->launched)
951 flags |= VMX_RUN_VMRESUME;
952
953 /*
954 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
955 * to change it directly without causing a vmexit. In that case read
956 * it after vmexit and store it in vmx->spec_ctrl.
957 */
958 if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
959 flags |= VMX_RUN_SAVE_SPEC_CTRL;
960
961 return flags;
962 }
963
static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
965 unsigned long entry, unsigned long exit)
966 {
967 vm_entry_controls_clearbit(vmx, entry);
968 vm_exit_controls_clearbit(vmx, exit);
969 }
970
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
972 {
973 unsigned int i;
974
975 for (i = 0; i < m->nr; ++i) {
976 if (m->val[i].index == msr)
977 return i;
978 }
979 return -ENOENT;
980 }
981
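/*
 * Remove an MSR from the VM-Entry/VM-Exit autoload lists, or clear the
 * dedicated entry/exit controls for MSRs that have them. A vacated list
 * slot is backfilled with the last entry so the lists stay dense.
 */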
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
983 {
984 int i;
985 struct msr_autoload *m = &vmx->msr_autoload;
986
987 switch (msr) {
988 case MSR_EFER:
989 if (cpu_has_load_ia32_efer()) {
990 clear_atomic_switch_msr_special(vmx,
991 VM_ENTRY_LOAD_IA32_EFER,
992 VM_EXIT_LOAD_IA32_EFER);
993 return;
994 }
995 break;
996 case MSR_CORE_PERF_GLOBAL_CTRL:
997 if (cpu_has_load_perf_global_ctrl()) {
998 clear_atomic_switch_msr_special(vmx,
999 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1000 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1001 return;
1002 }
1003 break;
1004 }
1005 i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1006 if (i < 0)
1007 goto skip_guest;
1008 --m->guest.nr;
1009 m->guest.val[i] = m->guest.val[m->guest.nr];
1010 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1011
1012 skip_guest:
1013 i = vmx_find_loadstore_msr_slot(&m->host, msr);
1014 if (i < 0)
1015 return;
1016
1017 --m->host.nr;
1018 m->host.val[i] = m->host.val[m->host.nr];
1019 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1020 }
1021
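/*
 * Switch an MSR via its dedicated VM-Entry/VM-Exit controls. HOST_IA32_EFER
 * is not rewritten here because the host value is constant and is written
 * once when the constant host state is set up.
 */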
static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1023 unsigned long entry, unsigned long exit,
1024 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1025 u64 guest_val, u64 host_val)
1026 {
1027 vmcs_write64(guest_val_vmcs, guest_val);
1028 if (host_val_vmcs != HOST_IA32_EFER)
1029 vmcs_write64(host_val_vmcs, host_val);
1030 vm_entry_controls_setbit(vmx, entry);
1031 vm_exit_controls_setbit(vmx, exit);
1032 }
1033
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1035 u64 guest_val, u64 host_val, bool entry_only)
1036 {
1037 int i, j = 0;
1038 struct msr_autoload *m = &vmx->msr_autoload;
1039
1040 switch (msr) {
1041 case MSR_EFER:
1042 if (cpu_has_load_ia32_efer()) {
1043 add_atomic_switch_msr_special(vmx,
1044 VM_ENTRY_LOAD_IA32_EFER,
1045 VM_EXIT_LOAD_IA32_EFER,
1046 GUEST_IA32_EFER,
1047 HOST_IA32_EFER,
1048 guest_val, host_val);
1049 return;
1050 }
1051 break;
1052 case MSR_CORE_PERF_GLOBAL_CTRL:
1053 if (cpu_has_load_perf_global_ctrl()) {
1054 add_atomic_switch_msr_special(vmx,
1055 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1056 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1057 GUEST_IA32_PERF_GLOBAL_CTRL,
1058 HOST_IA32_PERF_GLOBAL_CTRL,
1059 guest_val, host_val);
1060 return;
1061 }
1062 break;
1063 case MSR_IA32_PEBS_ENABLE:
1064 /* PEBS needs a quiescent period after being disabled (to write
1065 * a record). Disabling PEBS through VMX MSR swapping doesn't
1066 * provide that period, so a CPU could write host's record into
1067 * guest's memory.
1068 */
1069 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1070 }
1071
1072 i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1073 if (!entry_only)
1074 j = vmx_find_loadstore_msr_slot(&m->host, msr);
1075
1076 if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
1077 (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
1078 printk_once(KERN_WARNING "Not enough msr switch entries. "
1079 "Can't add msr %x\n", msr);
1080 return;
1081 }
1082 if (i < 0) {
1083 i = m->guest.nr++;
1084 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1085 }
1086 m->guest.val[i].index = msr;
1087 m->guest.val[i].value = guest_val;
1088
1089 if (entry_only)
1090 return;
1091
1092 if (j < 0) {
1093 j = m->host.nr++;
1094 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1095 }
1096 m->host.val[j].index = msr;
1097 m->host.val[j].value = host_val;
1098 }
1099
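/*
 * Configure how guest EFER is switched across VM transitions: atomically via
 * the VMCS (dedicated controls or the autoload lists), or via the lazily
 * restored user-return MSR machinery. Returns true if the user-return path
 * is used and the uret MSR needs updating.
 */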
static bool update_transition_efer(struct vcpu_vmx *vmx)
1101 {
1102 u64 guest_efer = vmx->vcpu.arch.efer;
1103 u64 ignore_bits = 0;
1104 int i;
1105
1106 /* Shadow paging assumes NX to be available. */
1107 if (!enable_ept)
1108 guest_efer |= EFER_NX;
1109
1110 /*
1111 * LMA and LME handled by hardware; SCE meaningless outside long mode.
1112 */
1113 ignore_bits |= EFER_SCE;
1114 #ifdef CONFIG_X86_64
1115 ignore_bits |= EFER_LMA | EFER_LME;
1116 /* SCE is meaningful only in long mode on Intel */
1117 if (guest_efer & EFER_LMA)
1118 ignore_bits &= ~(u64)EFER_SCE;
1119 #endif
1120
1121 /*
1122 * On EPT, we can't emulate NX, so we must switch EFER atomically.
1123 * On CPUs that support "load IA32_EFER", always switch EFER
1124 * atomically, since it's faster than switching it manually.
1125 */
1126 if (cpu_has_load_ia32_efer() ||
1127 (enable_ept && ((vmx->vcpu.arch.efer ^ kvm_host.efer) & EFER_NX))) {
1128 if (!(guest_efer & EFER_LMA))
1129 guest_efer &= ~EFER_LME;
1130 if (guest_efer != kvm_host.efer)
1131 add_atomic_switch_msr(vmx, MSR_EFER,
1132 guest_efer, kvm_host.efer, false);
1133 else
1134 clear_atomic_switch_msr(vmx, MSR_EFER);
1135 return false;
1136 }
1137
1138 i = kvm_find_user_return_msr(MSR_EFER);
1139 if (i < 0)
1140 return false;
1141
1142 clear_atomic_switch_msr(vmx, MSR_EFER);
1143
1144 guest_efer &= ~ignore_bits;
1145 guest_efer |= kvm_host.efer & ignore_bits;
1146
1147 vmx->guest_uret_msrs[i].data = guest_efer;
1148 vmx->guest_uret_msrs[i].mask = ~ignore_bits;
1149
1150 return true;
1151 }
1152
1153 #ifdef CONFIG_X86_32
1154 /*
1155 * On 32-bit kernels, VM exits still load the FS and GS bases from the
1156 * VMCS rather than the segment table. KVM uses this helper to figure
1157 * out the current bases to poke them into the VMCS before entry.
1158 */
static unsigned long segment_base(u16 selector)
1160 {
1161 struct desc_struct *table;
1162 unsigned long v;
1163
1164 if (!(selector & ~SEGMENT_RPL_MASK))
1165 return 0;
1166
1167 table = get_current_gdt_ro();
1168
1169 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1170 u16 ldt_selector = kvm_read_ldt();
1171
1172 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
1173 return 0;
1174
1175 table = (struct desc_struct *)segment_base(ldt_selector);
1176 }
1177 v = get_desc_base(&table[selector >> 3]);
1178 return v;
1179 }
1180 #endif
1181
static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1183 {
1184 return vmx_pt_mode_is_host_guest() &&
1185 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1186 }
1187
static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
1189 {
1190 /* The base must be 128-byte aligned and a legal physical address. */
1191 return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
1192 }
1193
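/*
 * Load a saved Intel PT context into hardware: the status/output/CR3-match
 * MSRs plus addr_range pairs of address-range MSRs (ADDRn_A/ADDRn_B).
 * pt_save_msr() below does the inverse.
 */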
static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
1195 {
1196 u32 i;
1197
1198 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1199 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1200 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1201 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1202 for (i = 0; i < addr_range; i++) {
1203 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1204 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1205 }
1206 }
1207
static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
1209 {
1210 u32 i;
1211
1212 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1213 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1214 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1215 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1216 for (i = 0; i < addr_range; i++) {
1217 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1218 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1219 }
1220 }
1221
static void pt_guest_enter(struct vcpu_vmx *vmx)
1223 {
1224 if (vmx_pt_mode_is_system())
1225 return;
1226
1227 /*
1228 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
1229 * Save host state before VM entry.
1230 */
1231 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1232 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1233 wrmsrl(MSR_IA32_RTIT_CTL, 0);
1234 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1235 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1236 }
1237 }
1238
static void pt_guest_exit(struct vcpu_vmx *vmx)
1240 {
1241 if (vmx_pt_mode_is_system())
1242 return;
1243
1244 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1245 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1246 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1247 }
1248
1249 /*
1250 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
1251 * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary.
1252 */
1253 if (vmx->pt_desc.host.ctl)
1254 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1255 }
1256
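/*
 * Update the host FS/GS selectors and bases in the VMCS, but only when they
 * changed since the last VM-Entry, to avoid redundant VMWRITEs.
 */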
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
1258 unsigned long fs_base, unsigned long gs_base)
1259 {
1260 if (unlikely(fs_sel != host->fs_sel)) {
1261 if (!(fs_sel & 7))
1262 vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1263 else
1264 vmcs_write16(HOST_FS_SELECTOR, 0);
1265 host->fs_sel = fs_sel;
1266 }
1267 if (unlikely(gs_sel != host->gs_sel)) {
1268 if (!(gs_sel & 7))
1269 vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1270 else
1271 vmcs_write16(HOST_GS_SELECTOR, 0);
1272 host->gs_sel = gs_sel;
1273 }
1274 if (unlikely(fs_base != host->fs_base)) {
1275 vmcs_writel(HOST_FS_BASE, fs_base);
1276 host->fs_base = fs_base;
1277 }
1278 if (unlikely(gs_base != host->gs_base)) {
1279 vmcs_writel(HOST_GS_BASE, gs_base);
1280 host->gs_base = gs_base;
1281 }
1282 }
1283
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1285 {
1286 struct vcpu_vmx *vmx = to_vmx(vcpu);
1287 struct vmcs_host_state *host_state;
1288 #ifdef CONFIG_X86_64
1289 int cpu = raw_smp_processor_id();
1290 #endif
1291 unsigned long fs_base, gs_base;
1292 u16 fs_sel, gs_sel;
1293 int i;
1294
1295 /*
1296 * Note that guest MSRs to be saved/restored can also be changed
1297 * when guest state is loaded. This happens when guest transitions
1298 * to/from long-mode by setting MSR_EFER.LMA.
1299 */
1300 if (!vmx->guest_uret_msrs_loaded) {
1301 vmx->guest_uret_msrs_loaded = true;
1302 for (i = 0; i < kvm_nr_uret_msrs; ++i) {
1303 if (!vmx->guest_uret_msrs[i].load_into_hardware)
1304 continue;
1305
1306 kvm_set_user_return_msr(i,
1307 vmx->guest_uret_msrs[i].data,
1308 vmx->guest_uret_msrs[i].mask);
1309 }
1310 }
1311
1312 if (vmx->nested.need_vmcs12_to_shadow_sync)
1313 nested_sync_vmcs12_to_shadow(vcpu);
1314
1315 if (vmx->guest_state_loaded)
1316 return;
1317
1318 host_state = &vmx->loaded_vmcs->host_state;
1319
1320 /*
1321 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
1322 * allow segment selectors with cpl > 0 or ti == 1.
1323 */
1324 host_state->ldt_sel = kvm_read_ldt();
1325
1326 #ifdef CONFIG_X86_64
1327 savesegment(ds, host_state->ds_sel);
1328 savesegment(es, host_state->es_sel);
1329
1330 gs_base = cpu_kernelmode_gs_base(cpu);
1331 if (likely(is_64bit_mm(current->mm))) {
1332 current_save_fsgs();
1333 fs_sel = current->thread.fsindex;
1334 gs_sel = current->thread.gsindex;
1335 fs_base = current->thread.fsbase;
1336 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1337 } else {
1338 savesegment(fs, fs_sel);
1339 savesegment(gs, gs_sel);
1340 fs_base = read_msr(MSR_FS_BASE);
1341 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1342 }
1343
1344 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1345 #else
1346 savesegment(fs, fs_sel);
1347 savesegment(gs, gs_sel);
1348 fs_base = segment_base(fs_sel);
1349 gs_base = segment_base(gs_sel);
1350 #endif
1351
1352 vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
1353 vmx->guest_state_loaded = true;
1354 }
1355
static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1357 {
1358 struct vmcs_host_state *host_state;
1359
1360 if (!vmx->guest_state_loaded)
1361 return;
1362
1363 host_state = &vmx->loaded_vmcs->host_state;
1364
1365 ++vmx->vcpu.stat.host_state_reload;
1366
1367 #ifdef CONFIG_X86_64
1368 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1369 #endif
1370 if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
1371 kvm_load_ldt(host_state->ldt_sel);
1372 #ifdef CONFIG_X86_64
1373 load_gs_index(host_state->gs_sel);
1374 #else
1375 loadsegment(gs, host_state->gs_sel);
1376 #endif
1377 }
1378 if (host_state->fs_sel & 7)
1379 loadsegment(fs, host_state->fs_sel);
1380 #ifdef CONFIG_X86_64
1381 if (unlikely(host_state->ds_sel | host_state->es_sel)) {
1382 loadsegment(ds, host_state->ds_sel);
1383 loadsegment(es, host_state->es_sel);
1384 }
1385 #endif
1386 invalidate_tss_limit();
1387 #ifdef CONFIG_X86_64
1388 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1389 #endif
1390 load_fixmap_gdt(raw_smp_processor_id());
1391 vmx->guest_state_loaded = false;
1392 vmx->guest_uret_msrs_loaded = false;
1393 }
1394
1395 #ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1397 {
1398 preempt_disable();
1399 if (vmx->guest_state_loaded)
1400 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1401 preempt_enable();
1402 return vmx->msr_guest_kernel_gs_base;
1403 }
1404
static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1406 {
1407 preempt_disable();
1408 if (vmx->guest_state_loaded)
1409 wrmsrl(MSR_KERNEL_GS_BASE, data);
1410 preempt_enable();
1411 vmx->msr_guest_kernel_gs_base = data;
1412 }
1413 #endif
1414
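/*
 * Scale the per-vCPU PLE window: it is grown on PAUSE-loop exits and shrunk
 * again when the vCPU is rescheduled, within the module-parameter bounds
 * defined above.
 */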
static void grow_ple_window(struct kvm_vcpu *vcpu)
1416 {
1417 struct vcpu_vmx *vmx = to_vmx(vcpu);
1418 unsigned int old = vmx->ple_window;
1419
1420 vmx->ple_window = __grow_ple_window(old, ple_window,
1421 ple_window_grow,
1422 ple_window_max);
1423
1424 if (vmx->ple_window != old) {
1425 vmx->ple_window_dirty = true;
1426 trace_kvm_ple_window_update(vcpu->vcpu_id,
1427 vmx->ple_window, old);
1428 }
1429 }
1430
static void shrink_ple_window(struct kvm_vcpu *vcpu)
1432 {
1433 struct vcpu_vmx *vmx = to_vmx(vcpu);
1434 unsigned int old = vmx->ple_window;
1435
1436 vmx->ple_window = __shrink_ple_window(old, ple_window,
1437 ple_window_shrink,
1438 ple_window);
1439
1440 if (vmx->ple_window != old) {
1441 vmx->ple_window_dirty = true;
1442 trace_kvm_ple_window_update(vcpu->vcpu_id,
1443 vmx->ple_window, old);
1444 }
1445 }
1446
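/*
 * Load the vCPU's VMCS on the target pCPU: migrate it off any previous pCPU
 * (VMCLEAR via IPI), add it to this pCPU's loaded-VMCS list, and make it the
 * current VMCS if it isn't already.
 */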
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1448 struct loaded_vmcs *buddy)
1449 {
1450 struct vcpu_vmx *vmx = to_vmx(vcpu);
1451 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1452 struct vmcs *prev;
1453
1454 if (!already_loaded) {
1455 loaded_vmcs_clear(vmx->loaded_vmcs);
1456 local_irq_disable();
1457
1458 /*
1459 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1460 * this cpu's percpu list, otherwise it may not yet be deleted
1461 * from its previous cpu's percpu list. Pairs with the
* smp_wmb() in __loaded_vmcs_clear().
1463 */
1464 smp_rmb();
1465
1466 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1467 &per_cpu(loaded_vmcss_on_cpu, cpu));
1468 local_irq_enable();
1469 }
1470
1471 prev = per_cpu(current_vmcs, cpu);
1472 if (prev != vmx->loaded_vmcs->vmcs) {
1473 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1474 vmcs_load(vmx->loaded_vmcs->vmcs);
1475
1476 /*
1477 * No indirect branch prediction barrier needed when switching
1478 * the active VMCS within a vCPU, unless IBRS is advertised to
1479 * the vCPU. To minimize the number of IBPBs executed, KVM
1480 * performs IBPB on nested VM-Exit (a single nested transition
1481 * may switch the active VMCS multiple times).
1482 */
1483 if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
1484 indirect_branch_prediction_barrier();
1485 }
1486
1487 if (!already_loaded) {
1488 void *gdt = get_current_gdt_ro();
1489
1490 /*
1491 * Flush all EPTP/VPID contexts, the new pCPU may have stale
1492 * TLB entries from its previous association with the vCPU.
1493 */
1494 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1495
1496 /*
1497 * Linux uses per-cpu TSS and GDT, so set these when switching
1498 * processors. See 22.2.4.
1499 */
1500 vmcs_writel(HOST_TR_BASE,
1501 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1502 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
1503
1504 if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
1505 /* 22.2.3 */
1506 vmcs_writel(HOST_IA32_SYSENTER_ESP,
1507 (unsigned long)(cpu_entry_stack(cpu) + 1));
1508 }
1509
1510 vmx->loaded_vmcs->cpu = cpu;
1511 }
1512 }
1513
1514 /*
1515 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1516 * vcpu mutex is already taken.
1517 */
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1519 {
1520 struct vcpu_vmx *vmx = to_vmx(vcpu);
1521
1522 if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
1523 shrink_ple_window(vcpu);
1524
1525 vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1526
1527 vmx_vcpu_pi_load(vcpu, cpu);
1528
1529 vmx->host_debugctlmsr = get_debugctlmsr();
1530 }
1531
void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1533 {
1534 vmx_vcpu_pi_put(vcpu);
1535
1536 vmx_prepare_switch_to_host(to_vmx(vcpu));
1537 }
1538
bool vmx_emulation_required(struct kvm_vcpu *vcpu)
1540 {
1541 return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1542 }
1543
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1545 {
1546 struct vcpu_vmx *vmx = to_vmx(vcpu);
1547 unsigned long rflags, save_rflags;
1548
1549 if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1550 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1551 rflags = vmcs_readl(GUEST_RFLAGS);
1552 if (vmx->rmode.vm86_active) {
1553 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1554 save_rflags = vmx->rmode.save_rflags;
1555 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1556 }
1557 vmx->rflags = rflags;
1558 }
1559 return vmx->rflags;
1560 }
1561
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1563 {
1564 struct vcpu_vmx *vmx = to_vmx(vcpu);
1565 unsigned long old_rflags;
1566
1567 /*
1568 * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
1569 * is an unrestricted guest in order to mark L2 as needing emulation
1570 * if L1 runs L2 as a restricted guest.
1571 */
1572 if (is_unrestricted_guest(vcpu)) {
1573 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1574 vmx->rflags = rflags;
1575 vmcs_writel(GUEST_RFLAGS, rflags);
1576 return;
1577 }
1578
1579 old_rflags = vmx_get_rflags(vcpu);
1580 vmx->rflags = rflags;
1581 if (vmx->rmode.vm86_active) {
1582 vmx->rmode.save_rflags = rflags;
1583 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1584 }
1585 vmcs_writel(GUEST_RFLAGS, rflags);
1586
1587 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1588 vmx->emulation_required = vmx_emulation_required(vcpu);
1589 }
1590
bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
1592 {
1593 return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
1594 }
1595
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1597 {
1598 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1599 int ret = 0;
1600
1601 if (interruptibility & GUEST_INTR_STATE_STI)
1602 ret |= KVM_X86_SHADOW_INT_STI;
1603 if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1604 ret |= KVM_X86_SHADOW_INT_MOV_SS;
1605
1606 return ret;
1607 }
1608
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1610 {
1611 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1612 u32 interruptibility = interruptibility_old;
1613
1614 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1615
1616 if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1617 interruptibility |= GUEST_INTR_STATE_MOV_SS;
1618 else if (mask & KVM_X86_SHADOW_INT_STI)
1619 interruptibility |= GUEST_INTR_STATE_STI;
1620
1621 if ((interruptibility != interruptibility_old))
1622 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1623 }
1624
static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1626 {
1627 struct vcpu_vmx *vmx = to_vmx(vcpu);
1628 unsigned long value;
1629
1630 /*
1631 * Any MSR write that attempts to change bits marked reserved will
* cause a #GP fault.
1633 */
1634 if (data & vmx->pt_desc.ctl_bitmask)
1635 return 1;
1636
1637 /*
1638 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
1639 * result in a #GP unless the same write also clears TraceEn.
1640 */
1641 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1642 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
1643 return 1;
1644
1645 /*
* A WRMSR to IA32_RTIT_CTL that sets TraceEn but clears ToPA
* and FabricEn will cause a #GP if
* CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0.
1649 */
1650 if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
1651 !(data & RTIT_CTL_FABRIC_EN) &&
1652 !intel_pt_validate_cap(vmx->pt_desc.caps,
1653 PT_CAP_single_range_output))
1654 return 1;
1655
1656 /*
* MTCFreq, CycThresh and PSBFreq encoding checks: any MSR write that
* uses encodings marked reserved will cause a #GP fault.
1659 */
1660 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1661 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1662 !test_bit((data & RTIT_CTL_MTC_RANGE) >>
1663 RTIT_CTL_MTC_RANGE_OFFSET, &value))
1664 return 1;
1665 value = intel_pt_validate_cap(vmx->pt_desc.caps,
1666 PT_CAP_cycle_thresholds);
1667 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1668 !test_bit((data & RTIT_CTL_CYC_THRESH) >>
1669 RTIT_CTL_CYC_THRESH_OFFSET, &value))
1670 return 1;
1671 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1672 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1673 !test_bit((data & RTIT_CTL_PSB_FREQ) >>
1674 RTIT_CTL_PSB_FREQ_OFFSET, &value))
1675 return 1;
1676
1677 /*
* A non-zero ADDRx_CFG for a non-existent address range, or an
* encoding greater than 2, will cause a #GP fault.
1680 */
1681 value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1682 if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
1683 return 1;
1684 value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1685 if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
1686 return 1;
1687 value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1688 if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
1689 return 1;
1690 value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
1691 if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
1692 return 1;
1693
1694 return 0;
1695 }
1696
int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1698 void *insn, int insn_len)
1699 {
1700 /*
1701 * Emulation of instructions in SGX enclaves is impossible as RIP does
1702 * not point at the failing instruction, and even if it did, the code
1703 * stream is inaccessible. Inject #UD instead of exiting to userspace
1704 * so that guest userspace can't DoS the guest simply by triggering
1705 * emulation (enclaves are CPL3 only).
1706 */
1707 if (to_vmx(vcpu)->exit_reason.enclave_mode) {
1708 kvm_queue_exception(vcpu, UD_VECTOR);
1709 return X86EMUL_PROPAGATE_FAULT;
1710 }
1711 return X86EMUL_CONTINUE;
1712 }
1713
static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
1715 {
1716 union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
1717 unsigned long rip, orig_rip;
1718 u32 instr_len;
1719
1720 /*
1721 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
1722 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
1723 * set when EPT misconfig occurs. In practice, real hardware updates
1724 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
1725 * (namely Hyper-V) don't set it due to it being undefined behavior,
1726 * i.e. we end up advancing IP with some random value.
1727 */
1728 if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
1729 exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1730 instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1731
1732 /*
1733 * Emulating an enclave's instructions isn't supported as KVM
1734 * cannot access the enclave's memory or its true RIP, e.g. the
1735 * vmcs.GUEST_RIP points at the exit point of the enclave, not
1736 * the RIP that actually triggered the VM-Exit. But, because
1737 * most instructions that cause VM-Exit will #UD in an enclave,
1738 * most instruction-based VM-Exits simply do not occur.
1739 *
1740 * There are a few exceptions, notably the debug instructions
1741 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
1742 * and generate #DB/#BP as expected, which KVM might intercept.
1743 * But again, the CPU does the dirty work and saves an instr
1744 * length of zero so VMMs don't shoot themselves in the foot.
1745 * WARN if KVM tries to skip a non-zero length instruction on
1746 * a VM-Exit from an enclave.
1747 */
1748 if (!instr_len)
1749 goto rip_updated;
1750
1751 WARN_ONCE(exit_reason.enclave_mode,
1752 "skipping instruction after SGX enclave VM-Exit");
1753
1754 orig_rip = kvm_rip_read(vcpu);
1755 rip = orig_rip + instr_len;
1756 #ifdef CONFIG_X86_64
1757 /*
1758 * We need to mask out the high 32 bits of RIP if not in 64-bit
1759 * mode, but just finding out that we are in 64-bit mode is
1760 * quite expensive. Only do it if there was a carry.
1761 */
1762 if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1763 rip = (u32)rip;
1764 #endif
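/*
 * Worked example for the carry check above (illustrative values, not from
 * the original source): with orig_rip = 0xffffffff and instr_len = 1, rip
 * becomes 0x100000000, so (rip ^ orig_rip) >> 31 == 3 and the (more
 * expensive) is_64_bit_mode() check runs; outside 64-bit mode the result
 * is truncated back to 0x00000000.  Without a carry out of bit 31 the
 * XOR's upper bits are zero and the truncation is skipped.
 */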
1765 kvm_rip_write(vcpu, rip);
1766 } else {
1767 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
1768 return 0;
1769 }
1770
1771 rip_updated:
1772 /* skipping an emulated instruction also counts */
1773 vmx_set_interrupt_shadow(vcpu, 0);
1774
1775 return 1;
1776 }
1777
1778 /*
1779 * Recognizes a pending MTF VM-exit and records the nested state for later
1780 * delivery.
1781 */
1782 void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
1783 {
1784 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1785 struct vcpu_vmx *vmx = to_vmx(vcpu);
1786
1787 if (!is_guest_mode(vcpu))
1788 return;
1789
1790 /*
1791 * Per the SDM, MTF takes priority over debug-trap exceptions besides
1792 * TSS T-bit traps and ICEBP (INT1). KVM doesn't emulate T-bit traps
1793 * or ICEBP (in the emulator proper), and skipping of ICEBP after an
1794 * intercepted #DB deliberately avoids single-step #DB and MTF updates
1795 * as ICEBP is higher priority than both. As instruction emulation is
1796 * completed at this point (i.e. KVM is at the instruction boundary),
1797 * any #DB exception pending delivery must be a debug-trap of lower
1798 * priority than MTF. Record the pending MTF state to be delivered in
1799 * vmx_check_nested_events().
1800 */
1801 if (nested_cpu_has_mtf(vmcs12) &&
1802 (!vcpu->arch.exception.pending ||
1803 vcpu->arch.exception.vector == DB_VECTOR) &&
1804 (!vcpu->arch.exception_vmexit.pending ||
1805 vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1806 vmx->nested.mtf_pending = true;
1807 kvm_make_request(KVM_REQ_EVENT, vcpu);
1808 } else {
1809 vmx->nested.mtf_pending = false;
1810 }
1811 }
1812
1813 int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
1814 {
1815 vmx_update_emulated_instruction(vcpu);
1816 return skip_emulated_instruction(vcpu);
1817 }
1818
1819 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
1820 {
1821 /*
1822 * Ensure that we clear the HLT state in the VMCS. We don't need to
1823 * explicitly skip the instruction because if the HLT state is set,
1824 * then the instruction is already executing and RIP has already been
1825 * advanced.
1826 */
1827 if (kvm_hlt_in_guest(vcpu->kvm) &&
1828 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1829 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1830 }
1831
1832 void vmx_inject_exception(struct kvm_vcpu *vcpu)
1833 {
1834 struct kvm_queued_exception *ex = &vcpu->arch.exception;
1835 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
1836 struct vcpu_vmx *vmx = to_vmx(vcpu);
1837
1838 kvm_deliver_exception_payload(vcpu, ex);
1839
1840 if (ex->has_error_code) {
1841 /*
1842 * Despite the error code being architecturally defined as 32
1843 * bits, and the VMCS field being 32 bits, Intel CPUs and thus
1844 * VMX don't actually support setting bits 31:16. Hardware
1845 * will (should) never provide a bogus error code, but AMD CPUs
1846 * do generate error codes with bits 31:16 set, and so KVM's
1847 * ABI lets userspace shove in arbitrary 32-bit values. Drop
1848 * the upper bits to avoid VM-Fail; losing information that
1849 * doesn't really exist is preferable to killing the VM.
1850 */
1851 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
1852 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1853 }
1854
1855 if (vmx->rmode.vm86_active) {
1856 int inc_eip = 0;
1857 if (kvm_exception_is_soft(ex->vector))
1858 inc_eip = vcpu->arch.event_exit_inst_len;
1859 kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip);
1860 return;
1861 }
1862
1863 WARN_ON_ONCE(vmx->emulation_required);
1864
1865 if (kvm_exception_is_soft(ex->vector)) {
1866 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1867 vmx->vcpu.arch.event_exit_inst_len);
1868 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1869 } else
1870 intr_info |= INTR_TYPE_HARD_EXCEPTION;
1871
1872 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1873
1874 vmx_clear_hlt(vcpu);
1875 }
1876
1877 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
1878 bool load_into_hardware)
1879 {
1880 struct vmx_uret_msr *uret_msr;
1881
1882 uret_msr = vmx_find_uret_msr(vmx, msr);
1883 if (!uret_msr)
1884 return;
1885
1886 uret_msr->load_into_hardware = load_into_hardware;
1887 }
1888
1889 /*
1890 * Configure the user return MSRs to automatically save, load, and restore
1891 * MSRs that need to be shoved into hardware when running the guest. Note, omitting
1892 * an MSR here does _NOT_ mean it's not emulated, only that it will not be
1893 * loaded into hardware when running the guest.
1894 */
1895 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
1896 {
1897 #ifdef CONFIG_X86_64
1898 bool load_syscall_msrs;
1899
1900 /*
1901 * The SYSCALL MSRs are only needed on long mode guests, and only
1902 * when EFER.SCE is set.
1903 */
1904 load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1905 (vmx->vcpu.arch.efer & EFER_SCE);
1906
1907 vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
1908 vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
1909 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
1910 #endif
1911 vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
1912
1913 vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
1914 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1915 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
1916
1917 /*
1918 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
1919 * kernel and old userspace. If those guests run on a tsx=off host, do
1920 * allow guests to use TSX_CTRL, but don't change the value in hardware
1921 * so that TSX remains always disabled.
1922 */
1923 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
1924
1925 /*
1926 * The set of MSRs to load may have changed; reload MSRs before the
1927 * next VM-Enter.
1928 */
1929 vmx->guest_uret_msrs_loaded = false;
1930 }
1931
1932 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1933 {
1934 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1935
1936 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
1937 return vmcs12->tsc_offset;
1938
1939 return 0;
1940 }
1941
1942 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1943 {
1944 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1945
1946 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
1947 nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
1948 return vmcs12->tsc_multiplier;
1949
1950 return kvm_caps.default_tsc_scaling_ratio;
1951 }
1952
1953 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
1954 {
1955 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
1956 }
1957
1958 void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
1959 {
1960 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
1961 }
1962
1963 /*
1964 * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of
1965 * guest CPUID. Note, KVM allows userspace to set "VMX in SMX" to maintain
1966 * backwards compatibility even though KVM doesn't support emulating SMX. And
1967 * because userspace can set "VMX in SMX", the guest must also be allowed to set it,
1968 * e.g. if the MSR is left unlocked and the guest does a RMW operation.
1969 */
1970 #define KVM_SUPPORTED_FEATURE_CONTROL (FEAT_CTL_LOCKED | \
1971 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | \
1972 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \
1973 FEAT_CTL_SGX_LC_ENABLED | \
1974 FEAT_CTL_SGX_ENABLED | \
1975 FEAT_CTL_LMCE_ENABLED)
1976
1977 static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
1978 struct msr_data *msr)
1979 {
1980 uint64_t valid_bits;
1981
1982 /*
1983 * Ensure KVM_SUPPORTED_FEATURE_CONTROL is updated when new bits are
1984 * exposed to the guest.
1985 */
1986 WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits &
1987 ~KVM_SUPPORTED_FEATURE_CONTROL);
1988
1989 if (!msr->host_initiated &&
1990 (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED))
1991 return false;
1992
1993 if (msr->host_initiated)
1994 valid_bits = KVM_SUPPORTED_FEATURE_CONTROL;
1995 else
1996 valid_bits = vmx->msr_ia32_feature_control_valid_bits;
1997
1998 return !(msr->data & ~valid_bits);
1999 }
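/*
 * Example of the check above (illustrative, not from the original source):
 * a host-initiated write may set any bit in KVM_SUPPORTED_FEATURE_CONTROL,
 * while a guest write is limited to msr_ia32_feature_control_valid_bits and
 * is rejected outright once the MSR has been locked (FEAT_CTL_LOCKED).  So
 * a guest whose CPUID doesn't advertise SGX cannot set FEAT_CTL_SGX_ENABLED
 * even though KVM itself supports the bit.
 */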
2000
2001 int vmx_get_msr_feature(struct kvm_msr_entry *msr)
2002 {
2003 switch (msr->index) {
2004 case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2005 if (!nested)
2006 return 1;
2007 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
2008 default:
2009 return KVM_MSR_RET_INVALID;
2010 }
2011 }
2012
2013 /*
2014 * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
2015 * Returns 0 on success, non-0 otherwise.
2016 * Assumes vcpu_load() was already called.
2017 */
2018 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2019 {
2020 struct vcpu_vmx *vmx = to_vmx(vcpu);
2021 struct vmx_uret_msr *msr;
2022 u32 index;
2023
2024 switch (msr_info->index) {
2025 #ifdef CONFIG_X86_64
2026 case MSR_FS_BASE:
2027 msr_info->data = vmcs_readl(GUEST_FS_BASE);
2028 break;
2029 case MSR_GS_BASE:
2030 msr_info->data = vmcs_readl(GUEST_GS_BASE);
2031 break;
2032 case MSR_KERNEL_GS_BASE:
2033 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
2034 break;
2035 #endif
2036 case MSR_EFER:
2037 return kvm_get_msr_common(vcpu, msr_info);
2038 case MSR_IA32_TSX_CTRL:
2039 if (!msr_info->host_initiated &&
2040 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2041 return 1;
2042 goto find_uret_msr;
2043 case MSR_IA32_UMWAIT_CONTROL:
2044 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2045 return 1;
2046
2047 msr_info->data = vmx->msr_ia32_umwait_control;
2048 break;
2049 case MSR_IA32_SPEC_CTRL:
2050 if (!msr_info->host_initiated &&
2051 !guest_has_spec_ctrl_msr(vcpu))
2052 return 1;
2053
2054 msr_info->data = to_vmx(vcpu)->spec_ctrl;
2055 break;
2056 case MSR_IA32_SYSENTER_CS:
2057 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
2058 break;
2059 case MSR_IA32_SYSENTER_EIP:
2060 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
2061 break;
2062 case MSR_IA32_SYSENTER_ESP:
2063 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2064 break;
2065 case MSR_IA32_BNDCFGS:
2066 if (!kvm_mpx_supported() ||
2067 (!msr_info->host_initiated &&
2068 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2069 return 1;
2070 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2071 break;
2072 case MSR_IA32_MCG_EXT_CTL:
2073 if (!msr_info->host_initiated &&
2074 !(vmx->msr_ia32_feature_control &
2075 FEAT_CTL_LMCE_ENABLED))
2076 return 1;
2077 msr_info->data = vcpu->arch.mcg_ext_ctl;
2078 break;
2079 case MSR_IA32_FEAT_CTL:
2080 msr_info->data = vmx->msr_ia32_feature_control;
2081 break;
2082 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2083 if (!msr_info->host_initiated &&
2084 !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
2085 return 1;
2086 msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
2087 [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
2088 break;
2089 case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2090 if (!guest_can_use(vcpu, X86_FEATURE_VMX))
2091 return 1;
2092 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
2093 &msr_info->data))
2094 return 1;
2095 #ifdef CONFIG_KVM_HYPERV
2096 /*
2097 * Enlightened VMCS v1 doesn't have certain VMCS fields, but
2098 * instead of simply ignoring the missing features, different
2099 * Hyper-V versions either try to use them and fail, or do some
2100 * sanity checking and refuse to boot. Filter all unsupported
2101 * features out.
2102 */
2103 if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
2104 nested_evmcs_filter_control_msr(vcpu, msr_info->index,
2105 &msr_info->data);
2106 #endif
2107 break;
2108 case MSR_IA32_RTIT_CTL:
2109 if (!vmx_pt_mode_is_host_guest())
2110 return 1;
2111 msr_info->data = vmx->pt_desc.guest.ctl;
2112 break;
2113 case MSR_IA32_RTIT_STATUS:
2114 if (!vmx_pt_mode_is_host_guest())
2115 return 1;
2116 msr_info->data = vmx->pt_desc.guest.status;
2117 break;
2118 case MSR_IA32_RTIT_CR3_MATCH:
2119 if (!vmx_pt_mode_is_host_guest() ||
2120 !intel_pt_validate_cap(vmx->pt_desc.caps,
2121 PT_CAP_cr3_filtering))
2122 return 1;
2123 msr_info->data = vmx->pt_desc.guest.cr3_match;
2124 break;
2125 case MSR_IA32_RTIT_OUTPUT_BASE:
2126 if (!vmx_pt_mode_is_host_guest() ||
2127 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2128 PT_CAP_topa_output) &&
2129 !intel_pt_validate_cap(vmx->pt_desc.caps,
2130 PT_CAP_single_range_output)))
2131 return 1;
2132 msr_info->data = vmx->pt_desc.guest.output_base;
2133 break;
2134 case MSR_IA32_RTIT_OUTPUT_MASK:
2135 if (!vmx_pt_mode_is_host_guest() ||
2136 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2137 PT_CAP_topa_output) &&
2138 !intel_pt_validate_cap(vmx->pt_desc.caps,
2139 PT_CAP_single_range_output)))
2140 return 1;
2141 msr_info->data = vmx->pt_desc.guest.output_mask;
2142 break;
2143 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2144 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2145 if (!vmx_pt_mode_is_host_guest() ||
2146 (index >= 2 * vmx->pt_desc.num_address_ranges))
2147 return 1;
2148 if (index % 2)
2149 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2150 else
2151 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2152 break;
2153 case MSR_IA32_DEBUGCTLMSR:
2154 msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
2155 break;
2156 default:
2157 find_uret_msr:
2158 msr = vmx_find_uret_msr(vmx, msr_info->index);
2159 if (msr) {
2160 msr_info->data = msr->data;
2161 break;
2162 }
2163 return kvm_get_msr_common(vcpu, msr_info);
2164 }
2165
2166 return 0;
2167 }
2168
2169 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
2170 u64 data)
2171 {
2172 #ifdef CONFIG_X86_64
2173 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
2174 return (u32)data;
2175 #endif
2176 return (unsigned long)data;
2177 }
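/*
 * Illustrative example (values assumed, not from the original source): a
 * guest whose CPUID lacks X86_FEATURE_LM writing 0xffffffff00401000 to
 * MSR_IA32_SYSENTER_EIP has the value truncated to 0x00401000 before it is
 * propagated to vmcs12, matching what a 32-bit-only CPU would hold.
 */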
2178
2179 static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
2180 {
2181 u64 debugctl = 0;
2182
2183 if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
2184 (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2185 debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
2186
2187 if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
2188 (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
2189 debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
2190
2191 return debugctl;
2192 }
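/*
 * Example of how the mask above is consumed (illustrative, not from the
 * original source): for a guest-initiated write, a vCPU without bus lock
 * detection and without a usable LBR format gets a supported mask of 0, so
 * any non-zero DEBUGCTL value is either stripped (BTF/LBR, with a
 * kvm_pr_unimpl_wrmsr() warning) or rejected with #GP by the
 * MSR_IA32_DEBUGCTLMSR handling in vmx_set_msr() below.
 */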
2193
2194 /*
2195 * Writes msr value into the appropriate "register".
2196 * Returns 0 on success, non-0 otherwise.
2197 * Assumes vcpu_load() was already called.
2198 */
2199 int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2200 {
2201 struct vcpu_vmx *vmx = to_vmx(vcpu);
2202 struct vmx_uret_msr *msr;
2203 int ret = 0;
2204 u32 msr_index = msr_info->index;
2205 u64 data = msr_info->data;
2206 u32 index;
2207
2208 switch (msr_index) {
2209 case MSR_EFER:
2210 ret = kvm_set_msr_common(vcpu, msr_info);
2211 break;
2212 #ifdef CONFIG_X86_64
2213 case MSR_FS_BASE:
2214 vmx_segment_cache_clear(vmx);
2215 vmcs_writel(GUEST_FS_BASE, data);
2216 break;
2217 case MSR_GS_BASE:
2218 vmx_segment_cache_clear(vmx);
2219 vmcs_writel(GUEST_GS_BASE, data);
2220 break;
2221 case MSR_KERNEL_GS_BASE:
2222 vmx_write_guest_kernel_gs_base(vmx, data);
2223 break;
2224 case MSR_IA32_XFD:
2225 ret = kvm_set_msr_common(vcpu, msr_info);
2226 /*
2227 * Always intercepting WRMSR could incur non-negligible
2228 * overhead given that xfd might be changed frequently on
2229 * guest context switches. Disable write interception
2230 * upon the first write with a non-zero value (indicating
2231 * potential usage on dynamic xfeatures). Also update
2232 * exception bitmap to trap #NM for proper virtualization
2233 * of guest xfd_err.
2234 */
2235 if (!ret && data) {
2236 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD,
2237 MSR_TYPE_RW);
2238 vcpu->arch.xfd_no_write_intercept = true;
2239 vmx_update_exception_bitmap(vcpu);
2240 }
2241 break;
2242 #endif
2243 case MSR_IA32_SYSENTER_CS:
2244 if (is_guest_mode(vcpu))
2245 get_vmcs12(vcpu)->guest_sysenter_cs = data;
2246 vmcs_write32(GUEST_SYSENTER_CS, data);
2247 break;
2248 case MSR_IA32_SYSENTER_EIP:
2249 if (is_guest_mode(vcpu)) {
2250 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2251 get_vmcs12(vcpu)->guest_sysenter_eip = data;
2252 }
2253 vmcs_writel(GUEST_SYSENTER_EIP, data);
2254 break;
2255 case MSR_IA32_SYSENTER_ESP:
2256 if (is_guest_mode(vcpu)) {
2257 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2258 get_vmcs12(vcpu)->guest_sysenter_esp = data;
2259 }
2260 vmcs_writel(GUEST_SYSENTER_ESP, data);
2261 break;
2262 case MSR_IA32_DEBUGCTLMSR: {
2263 u64 invalid;
2264
2265 invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
2266 if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
2267 kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);
2268 data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2269 invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2270 }
2271
2272 if (invalid)
2273 return 1;
2274
2275 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
2276 VM_EXIT_SAVE_DEBUG_CONTROLS)
2277 get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2278
2279 vmcs_write64(GUEST_IA32_DEBUGCTL, data);
2280 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
2281 (data & DEBUGCTLMSR_LBR))
2282 intel_pmu_create_guest_lbr_event(vcpu);
2283 return 0;
2284 }
2285 case MSR_IA32_BNDCFGS:
2286 if (!kvm_mpx_supported() ||
2287 (!msr_info->host_initiated &&
2288 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2289 return 1;
2290 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
2291 (data & MSR_IA32_BNDCFGS_RSVD))
2292 return 1;
2293
2294 if (is_guest_mode(vcpu) &&
2295 ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||
2296 (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))
2297 get_vmcs12(vcpu)->guest_bndcfgs = data;
2298
2299 vmcs_write64(GUEST_BNDCFGS, data);
2300 break;
2301 case MSR_IA32_UMWAIT_CONTROL:
2302 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2303 return 1;
2304
2305 /* Reserved bit 1 and the upper bits [63:32] must be zero. */
2306 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2307 return 1;
2308
2309 vmx->msr_ia32_umwait_control = data;
2310 break;
2311 case MSR_IA32_SPEC_CTRL:
2312 if (!msr_info->host_initiated &&
2313 !guest_has_spec_ctrl_msr(vcpu))
2314 return 1;
2315
2316 if (kvm_spec_ctrl_test_value(data))
2317 return 1;
2318
2319 vmx->spec_ctrl = data;
2320 if (!data)
2321 break;
2322
2323 /*
2324 * For non-nested:
2325 * When it's written (to non-zero) for the first time, pass
2326 * it through.
2327 *
2328 * For nested:
2329 * The handling of the MSR bitmap for L2 guests is done in
2330 * nested_vmx_prepare_msr_bitmap. We should not touch the
2331 * vmcs02.msr_bitmap here since it gets completely overwritten
2332 * in the merging. We update the vmcs01 here for L1 as well
2333 * since it will end up touching the MSR anyway now.
2334 */
2335 vmx_disable_intercept_for_msr(vcpu,
2336 MSR_IA32_SPEC_CTRL,
2337 MSR_TYPE_RW);
2338 break;
2339 case MSR_IA32_TSX_CTRL:
2340 if (!msr_info->host_initiated &&
2341 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2342 return 1;
2343 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2344 return 1;
2345 goto find_uret_msr;
2346 case MSR_IA32_CR_PAT:
2347 ret = kvm_set_msr_common(vcpu, msr_info);
2348 if (ret)
2349 break;
2350
2351 if (is_guest_mode(vcpu) &&
2352 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2353 get_vmcs12(vcpu)->guest_ia32_pat = data;
2354
2355 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
2356 vmcs_write64(GUEST_IA32_PAT, data);
2357 break;
2358 case MSR_IA32_MCG_EXT_CTL:
2359 if ((!msr_info->host_initiated &&
2360 !(to_vmx(vcpu)->msr_ia32_feature_control &
2361 FEAT_CTL_LMCE_ENABLED)) ||
2362 (data & ~MCG_EXT_CTL_LMCE_EN))
2363 return 1;
2364 vcpu->arch.mcg_ext_ctl = data;
2365 break;
2366 case MSR_IA32_FEAT_CTL:
2367 if (!is_vmx_feature_control_msr_valid(vmx, msr_info))
2368 return 1;
2369
2370 vmx->msr_ia32_feature_control = data;
2371 if (msr_info->host_initiated && data == 0)
2372 vmx_leave_nested(vcpu);
2373
2374 /* SGX may be enabled/disabled by guest's firmware */
2375 vmx_write_encls_bitmap(vcpu, NULL);
2376 break;
2377 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2378 /*
2379 * On real hardware, the LE hash MSRs are writable before
2380 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
2381 * at which point SGX related bits in IA32_FEATURE_CONTROL
2382 * become writable.
2383 *
2384 * KVM does not emulate SGX activation for simplicity, so
2385 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
2386 * is unlocked. This is technically not architectural
2387 * behavior, but it's close enough.
2388 */
2389 if (!msr_info->host_initiated &&
2390 (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) ||
2391 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
2392 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
2393 return 1;
2394 vmx->msr_ia32_sgxlepubkeyhash
2395 [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
2396 break;
2397 case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2398 if (!msr_info->host_initiated)
2399 return 1; /* they are read-only */
2400 if (!guest_can_use(vcpu, X86_FEATURE_VMX))
2401 return 1;
2402 return vmx_set_vmx_msr(vcpu, msr_index, data);
2403 case MSR_IA32_RTIT_CTL:
2404 if (!vmx_pt_mode_is_host_guest() ||
2405 vmx_rtit_ctl_check(vcpu, data) ||
2406 vmx->nested.vmxon)
2407 return 1;
2408 vmcs_write64(GUEST_IA32_RTIT_CTL, data);
2409 vmx->pt_desc.guest.ctl = data;
2410 pt_update_intercept_for_msr(vcpu);
2411 break;
2412 case MSR_IA32_RTIT_STATUS:
2413 if (!pt_can_write_msr(vmx))
2414 return 1;
2415 if (data & MSR_IA32_RTIT_STATUS_MASK)
2416 return 1;
2417 vmx->pt_desc.guest.status = data;
2418 break;
2419 case MSR_IA32_RTIT_CR3_MATCH:
2420 if (!pt_can_write_msr(vmx))
2421 return 1;
2422 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2423 PT_CAP_cr3_filtering))
2424 return 1;
2425 vmx->pt_desc.guest.cr3_match = data;
2426 break;
2427 case MSR_IA32_RTIT_OUTPUT_BASE:
2428 if (!pt_can_write_msr(vmx))
2429 return 1;
2430 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2431 PT_CAP_topa_output) &&
2432 !intel_pt_validate_cap(vmx->pt_desc.caps,
2433 PT_CAP_single_range_output))
2434 return 1;
2435 if (!pt_output_base_valid(vcpu, data))
2436 return 1;
2437 vmx->pt_desc.guest.output_base = data;
2438 break;
2439 case MSR_IA32_RTIT_OUTPUT_MASK:
2440 if (!pt_can_write_msr(vmx))
2441 return 1;
2442 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2443 PT_CAP_topa_output) &&
2444 !intel_pt_validate_cap(vmx->pt_desc.caps,
2445 PT_CAP_single_range_output))
2446 return 1;
2447 vmx->pt_desc.guest.output_mask = data;
2448 break;
2449 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2450 if (!pt_can_write_msr(vmx))
2451 return 1;
2452 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2453 if (index >= 2 * vmx->pt_desc.num_address_ranges)
2454 return 1;
2455 if (is_noncanonical_address(data, vcpu))
2456 return 1;
2457 if (index % 2)
2458 vmx->pt_desc.guest.addr_b[index / 2] = data;
2459 else
2460 vmx->pt_desc.guest.addr_a[index / 2] = data;
2461 break;
2462 case MSR_IA32_PERF_CAPABILITIES:
2463 if (data && !vcpu_to_pmu(vcpu)->version)
2464 return 1;
2465 if (data & PMU_CAP_LBR_FMT) {
2466 if ((data & PMU_CAP_LBR_FMT) !=
2467 (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
2468 return 1;
2469 if (!cpuid_model_is_consistent(vcpu))
2470 return 1;
2471 }
2472 if (data & PERF_CAP_PEBS_FORMAT) {
2473 if ((data & PERF_CAP_PEBS_MASK) !=
2474 (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
2475 return 1;
2476 if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
2477 return 1;
2478 if (!guest_cpuid_has(vcpu, X86_FEATURE_DTES64))
2479 return 1;
2480 if (!cpuid_model_is_consistent(vcpu))
2481 return 1;
2482 }
2483 ret = kvm_set_msr_common(vcpu, msr_info);
2484 break;
2485
2486 default:
2487 find_uret_msr:
2488 msr = vmx_find_uret_msr(vmx, msr_index);
2489 if (msr)
2490 ret = vmx_set_guest_uret_msr(vmx, msr, data);
2491 else
2492 ret = kvm_set_msr_common(vcpu, msr_info);
2493 }
2494
2495 /* FB_CLEAR may have changed; also update the FB_CLEAR_DIS behavior. */
2496 if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
2497 vmx_update_fb_clear_dis(vcpu, vmx);
2498
2499 return ret;
2500 }
2501
2502 void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2503 {
2504 unsigned long guest_owned_bits;
2505
2506 kvm_register_mark_available(vcpu, reg);
2507
2508 switch (reg) {
2509 case VCPU_REGS_RSP:
2510 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2511 break;
2512 case VCPU_REGS_RIP:
2513 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2514 break;
2515 case VCPU_EXREG_PDPTR:
2516 if (enable_ept)
2517 ept_save_pdptrs(vcpu);
2518 break;
2519 case VCPU_EXREG_CR0:
2520 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2521
2522 vcpu->arch.cr0 &= ~guest_owned_bits;
2523 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2524 break;
2525 case VCPU_EXREG_CR3:
2526 /*
2527 * When intercepting CR3 loads, e.g. for shadow paging, KVM's
2528 * CR3 is loaded into hardware, not the guest's CR3.
2529 */
2530 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
2531 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2532 break;
2533 case VCPU_EXREG_CR4:
2534 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2535
2536 vcpu->arch.cr4 &= ~guest_owned_bits;
2537 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2538 break;
2539 default:
2540 KVM_BUG_ON(1, vcpu->kvm);
2541 break;
2542 }
2543 }
2544
2545 /*
2546 * There is no X86_FEATURE for SGX yet, so query CPUID directly instead of
2547 * going through cpu_has(), to ensure KVM is trapping
2548 * ENCLS whenever it's supported in hardware. It does not matter whether
2549 * the host OS supports or has enabled SGX.
2550 */
2551 static bool cpu_has_sgx(void)
2552 {
2553 return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
2554 }
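/*
 * Background note (not in the original source): leaf 0x12 is the SGX
 * capability leaf, and CPUID.(EAX=12H, ECX=0):EAX[0] is the SGX1 bit, so
 * the check above requires both that the leaf exists (max basic leaf >=
 * 0x12) and that SGX1 is enumerated.
 */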
2555
2556 /*
2557 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
2558 * can't be used due to errata where VM Exit may incorrectly clear
2559 * IA32_PERF_GLOBAL_CTRL[34:32]. Work around the errata by using the
2560 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2561 */
2562 static bool cpu_has_perf_global_ctrl_bug(void)
2563 {
2564 switch (boot_cpu_data.x86_vfm) {
2565 case INTEL_NEHALEM_EP: /* AAK155 */
2566 case INTEL_NEHALEM: /* AAP115 */
2567 case INTEL_WESTMERE: /* AAT100 */
2568 case INTEL_WESTMERE_EP: /* BC86,AAY89,BD102 */
2569 case INTEL_NEHALEM_EX: /* BA97 */
2570 return true;
2571 default:
2572 break;
2573 }
2574
2575 return false;
2576 }
2577
2578 static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
2579 {
2580 u32 vmx_msr_low, vmx_msr_high;
2581 u32 ctl = ctl_min | ctl_opt;
2582
2583 rdmsr(msr, vmx_msr_low, vmx_msr_high);
2584
2585 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2586 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
2587
2588 /* Ensure minimum (required) set of control bits are supported. */
2589 if (ctl_min & ~ctl)
2590 return -EIO;
2591
2592 *result = ctl;
2593 return 0;
2594 }
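/*
 * Worked example (illustrative values, not from the original source): with
 * ctl_min = 0x02, ctl_opt = 0x10, and a capability MSR that reads
 * low = 0x04 / high = 0x1f, the result is ((0x12 & 0x1f) | 0x04) = 0x16:
 * optional bits the CPU allows are kept, mandatory-1 bits are added, and
 * the function only fails if a ctl_min bit is forced to 0 by the high word.
 */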
2595
2596 static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
2597 {
2598 u64 allowed;
2599
2600 rdmsrl(msr, allowed);
2601
2602 return ctl_opt & allowed;
2603 }
2604
2605 static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2606 struct vmx_capability *vmx_cap)
2607 {
2608 u32 vmx_msr_low, vmx_msr_high;
2609 u32 _pin_based_exec_control = 0;
2610 u32 _cpu_based_exec_control = 0;
2611 u32 _cpu_based_2nd_exec_control = 0;
2612 u64 _cpu_based_3rd_exec_control = 0;
2613 u32 _vmexit_control = 0;
2614 u32 _vmentry_control = 0;
2615 u64 misc_msr;
2616 int i;
2617
2618 /*
2619 * LOAD/SAVE_DEBUG_CONTROLS are absent because both are mandatory.
2620 * SAVE_IA32_PAT and SAVE_IA32_EFER are absent because KVM always
2621 * intercepts writes to PAT and EFER, i.e. never enables those controls.
2622 */
2623 struct {
2624 u32 entry_control;
2625 u32 exit_control;
2626 } const vmcs_entry_exit_pairs[] = {
2627 { VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL },
2628 { VM_ENTRY_LOAD_IA32_PAT, VM_EXIT_LOAD_IA32_PAT },
2629 { VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER },
2630 { VM_ENTRY_LOAD_BNDCFGS, VM_EXIT_CLEAR_BNDCFGS },
2631 { VM_ENTRY_LOAD_IA32_RTIT_CTL, VM_EXIT_CLEAR_IA32_RTIT_CTL },
2632 };
2633
2634 memset(vmcs_conf, 0, sizeof(*vmcs_conf));
2635
2636 if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL,
2637 KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL,
2638 MSR_IA32_VMX_PROCBASED_CTLS,
2639 &_cpu_based_exec_control))
2640 return -EIO;
2641 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2642 if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
2643 KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
2644 MSR_IA32_VMX_PROCBASED_CTLS2,
2645 &_cpu_based_2nd_exec_control))
2646 return -EIO;
2647 }
2648 if (!IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
2649 _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
2650
2651 #ifndef CONFIG_X86_64
2652 if (!(_cpu_based_2nd_exec_control &
2653 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2654 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2655 #endif
2656
2657 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2658 _cpu_based_2nd_exec_control &= ~(
2659 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2660 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2661 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2662
2663 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
2664 &vmx_cap->ept, &vmx_cap->vpid);
2665
2666 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
2667 vmx_cap->ept) {
2668 pr_warn_once("EPT capabilities should not exist when the 'enable EPT' "
2669 "VM-execution control cannot be set to 1\n");
2670
2671 if (error_on_inconsistent_vmcs_config)
2672 return -EIO;
2673
2674 vmx_cap->ept = 0;
2675 _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
2676 }
2677 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
2678 vmx_cap->vpid) {
2679 pr_warn_once("VPID capabilities should not exist when the 'enable VPID' "
2680 "VM-execution control cannot be set to 1\n");
2681
2682 if (error_on_inconsistent_vmcs_config)
2683 return -EIO;
2684
2685 vmx_cap->vpid = 0;
2686 }
2687
2688 if (!cpu_has_sgx())
2689 _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
2690
2691 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
2692 _cpu_based_3rd_exec_control =
2693 adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL,
2694 MSR_IA32_VMX_PROCBASED_CTLS3);
2695
2696 if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS,
2697 KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS,
2698 MSR_IA32_VMX_EXIT_CTLS,
2699 &_vmexit_control))
2700 return -EIO;
2701
2702 if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL,
2703 KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL,
2704 MSR_IA32_VMX_PINBASED_CTLS,
2705 &_pin_based_exec_control))
2706 return -EIO;
2707
2708 if (cpu_has_broken_vmx_preemption_timer())
2709 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2710 if (!(_cpu_based_2nd_exec_control &
2711 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
2712 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2713
2714 if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS,
2715 KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS,
2716 MSR_IA32_VMX_ENTRY_CTLS,
2717 &_vmentry_control))
2718 return -EIO;
2719
2720 for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {
2721 u32 n_ctrl = vmcs_entry_exit_pairs[i].entry_control;
2722 u32 x_ctrl = vmcs_entry_exit_pairs[i].exit_control;
2723
2724 if (!(_vmentry_control & n_ctrl) == !(_vmexit_control & x_ctrl))
2725 continue;
2726
2727 pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n",
2728 _vmentry_control & n_ctrl, _vmexit_control & x_ctrl);
2729
2730 if (error_on_inconsistent_vmcs_config)
2731 return -EIO;
2732
2733 _vmentry_control &= ~n_ctrl;
2734 _vmexit_control &= ~x_ctrl;
2735 }
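/*
 * Example of the pairing fix-up above (illustrative): if the CPU advertises
 * VM_ENTRY_LOAD_BNDCFGS but not VM_EXIT_CLEAR_BNDCFGS, the pair is
 * inconsistent, so either setup fails (when error_on_inconsistent_vmcs_config
 * is set) or both controls are cleared and KVM simply doesn't use them.
 */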
2736
2737 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2738
2739 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2740 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2741 return -EIO;
2742
2743 #ifdef CONFIG_X86_64
2744 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2745 if (vmx_msr_high & (1u<<16))
2746 return -EIO;
2747 #endif
2748
2749 /* Require Write-Back (WB) memory type for VMCS accesses. */
2750 if (((vmx_msr_high >> 18) & 15) != 6)
2751 return -EIO;
2752
2753 rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
2754
2755 vmcs_conf->size = vmx_msr_high & 0x1fff;
2756 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
2757
2758 vmcs_conf->revision_id = vmx_msr_low;
2759
2760 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2761 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2762 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2763 vmcs_conf->cpu_based_3rd_exec_ctrl = _cpu_based_3rd_exec_control;
2764 vmcs_conf->vmexit_ctrl = _vmexit_control;
2765 vmcs_conf->vmentry_ctrl = _vmentry_control;
2766 vmcs_conf->misc = misc_msr;
2767
2768 #if IS_ENABLED(CONFIG_HYPERV)
2769 if (enlightened_vmcs)
2770 evmcs_sanitize_exec_ctrls(vmcs_conf);
2771 #endif
2772
2773 return 0;
2774 }
2775
2776 static bool __kvm_is_vmx_supported(void)
2777 {
2778 int cpu = smp_processor_id();
2779
2780 if (!(cpuid_ecx(1) & feature_bit(VMX))) {
2781 pr_err("VMX not supported by CPU %d\n", cpu);
2782 return false;
2783 }
2784
2785 if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2786 !this_cpu_has(X86_FEATURE_VMX)) {
2787 pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
2788 return false;
2789 }
2790
2791 return true;
2792 }
2793
2794 static bool kvm_is_vmx_supported(void)
2795 {
2796 bool supported;
2797
2798 migrate_disable();
2799 supported = __kvm_is_vmx_supported();
2800 migrate_enable();
2801
2802 return supported;
2803 }
2804
2805 int vmx_check_processor_compat(void)
2806 {
2807 int cpu = raw_smp_processor_id();
2808 struct vmcs_config vmcs_conf;
2809 struct vmx_capability vmx_cap;
2810
2811 if (!__kvm_is_vmx_supported())
2812 return -EIO;
2813
2814 if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
2815 pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
2816 return -EIO;
2817 }
2818 if (nested)
2819 nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept);
2820 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config))) {
2821 pr_err("Inconsistent VMCS config on CPU %d\n", cpu);
2822 return -EIO;
2823 }
2824 return 0;
2825 }
2826
2827 static int kvm_cpu_vmxon(u64 vmxon_pointer)
2828 {
2829 u64 msr;
2830
2831 cr4_set_bits(X86_CR4_VMXE);
2832
2833 asm goto("1: vmxon %[vmxon_pointer]\n\t"
2834 _ASM_EXTABLE(1b, %l[fault])
2835 : : [vmxon_pointer] "m"(vmxon_pointer)
2836 : : fault);
2837 return 0;
2838
2839 fault:
2840 WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
2841 rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
2842 cr4_clear_bits(X86_CR4_VMXE);
2843
2844 return -EFAULT;
2845 }
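/*
 * Note on the fault handling above (not in the original source): VMXON can
 * fault, e.g. with #GP if the feature-control MSR is locked with VMX
 * disabled, and the _ASM_EXTABLE entry routes that fault to the "fault"
 * label via the asm goto, where CR4.VMXE is rolled back and -EFAULT is
 * returned instead of crashing the host.
 */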
2846
2847 int vmx_hardware_enable(void)
2848 {
2849 int cpu = raw_smp_processor_id();
2850 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2851 int r;
2852
2853 if (cr4_read_shadow() & X86_CR4_VMXE)
2854 return -EBUSY;
2855
2856 /*
2857 * This can happen if we hot-added a CPU but failed to allocate
2858 * the VP assist page for it.
2859 */
2860 if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
2861 return -EFAULT;
2862
2863 intel_pt_handle_vmx(1);
2864
2865 r = kvm_cpu_vmxon(phys_addr);
2866 if (r) {
2867 intel_pt_handle_vmx(0);
2868 return r;
2869 }
2870
2871 return 0;
2872 }
2873
2874 static void vmclear_local_loaded_vmcss(void)
2875 {
2876 int cpu = raw_smp_processor_id();
2877 struct loaded_vmcs *v, *n;
2878
2879 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2880 loaded_vmcss_on_cpu_link)
2881 __loaded_vmcs_clear(v);
2882 }
2883
2884 void vmx_hardware_disable(void)
2885 {
2886 vmclear_local_loaded_vmcss();
2887
2888 if (kvm_cpu_vmxoff())
2889 kvm_spurious_fault();
2890
2891 hv_reset_evmcs();
2892
2893 intel_pt_handle_vmx(0);
2894 }
2895
2896 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
2897 {
2898 int node = cpu_to_node(cpu);
2899 struct page *pages;
2900 struct vmcs *vmcs;
2901
2902 pages = __alloc_pages_node(node, flags, 0);
2903 if (!pages)
2904 return NULL;
2905 vmcs = page_address(pages);
2906 memset(vmcs, 0, vmcs_config.size);
2907
2908 /* KVM supports Enlightened VMCS v1 only */
2909 if (kvm_is_using_evmcs())
2910 vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
2911 else
2912 vmcs->hdr.revision_id = vmcs_config.revision_id;
2913
2914 if (shadow)
2915 vmcs->hdr.shadow_vmcs = 1;
2916 return vmcs;
2917 }
2918
2919 void free_vmcs(struct vmcs *vmcs)
2920 {
2921 free_page((unsigned long)vmcs);
2922 }
2923
2924 /*
2925 * Free a VMCS, but first VMCLEAR it on the CPU where it was last loaded.
2926 */
2927 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2928 {
2929 if (!loaded_vmcs->vmcs)
2930 return;
2931 loaded_vmcs_clear(loaded_vmcs);
2932 free_vmcs(loaded_vmcs->vmcs);
2933 loaded_vmcs->vmcs = NULL;
2934 if (loaded_vmcs->msr_bitmap)
2935 free_page((unsigned long)loaded_vmcs->msr_bitmap);
2936 WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
2937 }
2938
2939 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2940 {
2941 loaded_vmcs->vmcs = alloc_vmcs(false);
2942 if (!loaded_vmcs->vmcs)
2943 return -ENOMEM;
2944
2945 vmcs_clear(loaded_vmcs->vmcs);
2946
2947 loaded_vmcs->shadow_vmcs = NULL;
2948 loaded_vmcs->hv_timer_soft_disabled = false;
2949 loaded_vmcs->cpu = -1;
2950 loaded_vmcs->launched = 0;
2951
2952 if (cpu_has_vmx_msr_bitmap()) {
2953 loaded_vmcs->msr_bitmap = (unsigned long *)
2954 __get_free_page(GFP_KERNEL_ACCOUNT);
2955 if (!loaded_vmcs->msr_bitmap)
2956 goto out_vmcs;
2957 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
2958 }
2959
2960 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
2961 memset(&loaded_vmcs->controls_shadow, 0,
2962 sizeof(struct vmcs_controls_shadow));
2963
2964 return 0;
2965
2966 out_vmcs:
2967 free_loaded_vmcs(loaded_vmcs);
2968 return -ENOMEM;
2969 }
2970
2971 static void free_kvm_area(void)
2972 {
2973 int cpu;
2974
2975 for_each_possible_cpu(cpu) {
2976 free_vmcs(per_cpu(vmxarea, cpu));
2977 per_cpu(vmxarea, cpu) = NULL;
2978 }
2979 }
2980
2981 static __init int alloc_kvm_area(void)
2982 {
2983 int cpu;
2984
2985 for_each_possible_cpu(cpu) {
2986 struct vmcs *vmcs;
2987
2988 vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
2989 if (!vmcs) {
2990 free_kvm_area();
2991 return -ENOMEM;
2992 }
2993
2994 /*
2995 * When eVMCS is enabled, alloc_vmcs_cpu() sets
2996 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
2997 * revision_id reported by MSR_IA32_VMX_BASIC.
2998 *
2999 * However, even though not explicitly documented by
3000 * TLFS, VMXArea passed as VMXON argument should
3001 * still be marked with revision_id reported by
3002 * physical CPU.
3003 */
3004 if (kvm_is_using_evmcs())
3005 vmcs->hdr.revision_id = vmcs_config.revision_id;
3006
3007 per_cpu(vmxarea, cpu) = vmcs;
3008 }
3009 return 0;
3010 }
3011
3012 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3013 struct kvm_segment *save)
3014 {
3015 if (!emulate_invalid_guest_state) {
3016 /*
3017 * CS and SS RPL should be equal during guest entry according
3018 * to VMX spec, but in reality it is not always so. Since vcpu
3019 * is in the middle of the transition from real mode to
3020 * protected mode it is safe to assume that RPL 0 is a good
3021 * default value.
3022 */
3023 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3024 save->selector &= ~SEGMENT_RPL_MASK;
3025 save->dpl = save->selector & SEGMENT_RPL_MASK;
3026 save->s = 1;
3027 }
3028 __vmx_set_segment(vcpu, save, seg);
3029 }
3030
3031 static void enter_pmode(struct kvm_vcpu *vcpu)
3032 {
3033 unsigned long flags;
3034 struct vcpu_vmx *vmx = to_vmx(vcpu);
3035
3036 /*
3037 * Update the real mode segment cache. It may not be up to date if a segment
3038 * register was written while the vcpu was in guest mode.
3039 */
3040 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3041 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3042 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3043 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3044 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3045 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3046
3047 vmx->rmode.vm86_active = 0;
3048
3049 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3050
3051 flags = vmcs_readl(GUEST_RFLAGS);
3052 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3053 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3054 vmcs_writel(GUEST_RFLAGS, flags);
3055
3056 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3057 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3058
3059 vmx_update_exception_bitmap(vcpu);
3060
3061 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3062 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3063 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3064 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3065 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3066 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3067 }
3068
3069 static void fix_rmode_seg(int seg, struct kvm_segment *save)
3070 {
3071 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3072 struct kvm_segment var = *save;
3073
3074 var.dpl = 0x3;
3075 if (seg == VCPU_SREG_CS)
3076 var.type = 0x3;
3077
3078 if (!emulate_invalid_guest_state) {
3079 var.selector = var.base >> 4;
3080 var.base = var.base & 0xffff0;
3081 var.limit = 0xffff;
3082 var.g = 0;
3083 var.db = 0;
3084 var.present = 1;
3085 var.s = 1;
3086 var.l = 0;
3087 var.unusable = 0;
3088 var.type = 0x3;
3089 var.avl = 0;
3090 if (save->base & 0xf)
3091 pr_warn_once("segment base is not paragraph aligned "
3092 "when entering protected mode (seg=%d)", seg);
3093 }
3094
3095 vmcs_write16(sf->selector, var.selector);
3096 vmcs_writel(sf->base, var.base);
3097 vmcs_write32(sf->limit, var.limit);
3098 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3099 }
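/*
 * Worked example (illustrative values, not from the original source): a
 * protected mode ES with base 0xb8000 becomes the real mode segment with
 * selector 0xb800, base 0xb8000 and limit 0xffff; a base of 0x1234f is not
 * paragraph aligned, so it is rounded down to 0x12340 (selector 0x1234) and
 * the pr_warn_once() above fires.
 */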
3100
3101 static void enter_rmode(struct kvm_vcpu *vcpu)
3102 {
3103 unsigned long flags;
3104 struct vcpu_vmx *vmx = to_vmx(vcpu);
3105 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
3106
3107 /*
3108 * KVM should never use VM86 to virtualize Real Mode when L2 is active,
3109 * as using VM86 is unnecessary if unrestricted guest is enabled, and
3110 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
3111 * should VM-Fail and KVM should reject userspace attempts to stuff
3112 * CR0.PG=0 when L2 is active.
3113 */
3114 WARN_ON_ONCE(is_guest_mode(vcpu));
3115
3116 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3117 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3118 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3119 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3120 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3121 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3122 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3123
3124 vmx->rmode.vm86_active = 1;
3125
3126 vmx_segment_cache_clear(vmx);
3127
3128 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
3129 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3130 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3131
3132 flags = vmcs_readl(GUEST_RFLAGS);
3133 vmx->rmode.save_rflags = flags;
3134
3135 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3136
3137 vmcs_writel(GUEST_RFLAGS, flags);
3138 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3139 vmx_update_exception_bitmap(vcpu);
3140
3141 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3142 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3143 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3144 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3145 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3146 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3147 }
3148
3149 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3150 {
3151 struct vcpu_vmx *vmx = to_vmx(vcpu);
3152
3153 /* Nothing to do if hardware doesn't support EFER. */
3154 if (!vmx_find_uret_msr(vmx, MSR_EFER))
3155 return 0;
3156
3157 vcpu->arch.efer = efer;
3158 #ifdef CONFIG_X86_64
3159 if (efer & EFER_LMA)
3160 vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3161 else
3162 vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3163 #else
3164 if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3165 return 1;
3166 #endif
3167
3168 vmx_setup_uret_msrs(vmx);
3169 return 0;
3170 }
3171
3172 #ifdef CONFIG_X86_64
3173
3174 static void enter_lmode(struct kvm_vcpu *vcpu)
3175 {
3176 u32 guest_tr_ar;
3177
3178 vmx_segment_cache_clear(to_vmx(vcpu));
3179
3180 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3181 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
3182 pr_debug_ratelimited("%s: tss fixup for long mode.\n",
3183 __func__);
3184 vmcs_write32(GUEST_TR_AR_BYTES,
3185 (guest_tr_ar & ~VMX_AR_TYPE_MASK)
3186 | VMX_AR_TYPE_BUSY_64_TSS);
3187 }
3188 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3189 }
3190
3191 static void exit_lmode(struct kvm_vcpu *vcpu)
3192 {
3193 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3194 }
3195
3196 #endif
3197
3198 void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
3199 {
3200 struct vcpu_vmx *vmx = to_vmx(vcpu);
3201
3202 /*
3203 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
3204 * the CPU is not required to invalidate guest-physical mappings on
3205 * VM-Entry, even if VPID is disabled. Guest-physical mappings are
3206 * associated with the root EPT structure and not any particular VPID
3207 * (INVVPID also isn't required to invalidate guest-physical mappings).
3208 */
3209 if (enable_ept) {
3210 ept_sync_global();
3211 } else if (enable_vpid) {
3212 if (cpu_has_vmx_invvpid_global()) {
3213 vpid_sync_vcpu_global();
3214 } else {
3215 vpid_sync_vcpu_single(vmx->vpid);
3216 vpid_sync_vcpu_single(vmx->nested.vpid02);
3217 }
3218 }
3219 }
3220
3221 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
3222 {
3223 if (is_guest_mode(vcpu))
3224 return nested_get_vpid02(vcpu);
3225 return to_vmx(vcpu)->vpid;
3226 }
3227
3228 void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
3229 {
3230 struct kvm_mmu *mmu = vcpu->arch.mmu;
3231 u64 root_hpa = mmu->root.hpa;
3232
3233 /* No flush required if the current context is invalid. */
3234 if (!VALID_PAGE(root_hpa))
3235 return;
3236
3237 if (enable_ept)
3238 ept_sync_context(construct_eptp(vcpu, root_hpa,
3239 mmu->root_role.level));
3240 else
3241 vpid_sync_context(vmx_get_current_vpid(vcpu));
3242 }
3243
3244 void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
3245 {
3246 /*
3247 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
3248 * vmx_flush_tlb_guest() for an explanation of why this is ok.
3249 */
3250 vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
3251 }
3252
3253 void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
3254 {
3255 /*
3256 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
3257 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are
3258 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
3259 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
3260 * i.e. no explicit INVVPID is necessary.
3261 */
3262 vpid_sync_context(vmx_get_current_vpid(vcpu));
3263 }
3264
3265 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
3266 {
3267 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3268
3269 if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3270 return;
3271
3272 if (is_pae_paging(vcpu)) {
3273 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3274 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3275 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3276 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3277 }
3278 }
3279
3280 void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3281 {
3282 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3283
3284 if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3285 return;
3286
3287 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3288 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3289 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3290 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3291
3292 kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
3293 }
3294
3295 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
3296 CPU_BASED_CR3_STORE_EXITING)
3297
3298 bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3299 {
3300 if (is_guest_mode(vcpu))
3301 return nested_guest_cr0_valid(vcpu, cr0);
3302
3303 if (to_vmx(vcpu)->nested.vmxon)
3304 return nested_host_cr0_valid(vcpu, cr0);
3305
3306 return true;
3307 }
3308
3309 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3310 {
3311 struct vcpu_vmx *vmx = to_vmx(vcpu);
3312 unsigned long hw_cr0, old_cr0_pg;
3313 u32 tmp;
3314
3315 old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
3316
3317 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
3318 if (enable_unrestricted_guest)
3319 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3320 else {
3321 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3322 if (!enable_ept)
3323 hw_cr0 |= X86_CR0_WP;
3324
3325 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3326 enter_pmode(vcpu);
3327
3328 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3329 enter_rmode(vcpu);
3330 }
3331
3332 vmcs_writel(CR0_READ_SHADOW, cr0);
3333 vmcs_writel(GUEST_CR0, hw_cr0);
3334 vcpu->arch.cr0 = cr0;
3335 kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3336
3337 #ifdef CONFIG_X86_64
3338 if (vcpu->arch.efer & EFER_LME) {
3339 if (!old_cr0_pg && (cr0 & X86_CR0_PG))
3340 enter_lmode(vcpu);
3341 else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
3342 exit_lmode(vcpu);
3343 }
3344 #endif
3345
3346 if (enable_ept && !enable_unrestricted_guest) {
3347 /*
3348 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If
3349 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
3350 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
3351 * KVM's CR3 is installed.
3352 */
3353 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3354 vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3355
3356 /*
3357 * When running with EPT but not unrestricted guest, KVM must
3358 * intercept CR3 accesses when paging is _disabled_. This is
3359 * necessary because restricted guests can't actually run with
3360 * paging disabled, and so KVM stuffs its own CR3 in order to
3361 * run the guest with identity mapped page tables.
3362 *
3363 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
3364 * update, it may be stale with respect to CR3 interception,
3365 * e.g. after nested VM-Enter.
3366 *
3367 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
3368 * stores to forward them to L1, even if KVM does not need to
3369 * intercept them to preserve its identity mapped page tables.
3370 */
3371 if (!(cr0 & X86_CR0_PG)) {
3372 exec_controls_setbit(vmx, CR3_EXITING_BITS);
3373 } else if (!is_guest_mode(vcpu)) {
3374 exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3375 } else {
3376 tmp = exec_controls_get(vmx);
3377 tmp &= ~CR3_EXITING_BITS;
3378 tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
3379 exec_controls_set(vmx, tmp);
3380 }
3381
3382 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3383 if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
3384 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3385
3386 /*
3387 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3388 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3389 */
3390 if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
3391 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
3392 }
3393
3394 /* depends on vcpu->arch.cr0 to be set to a new value */
3395 vmx->emulation_required = vmx_emulation_required(vcpu);
3396 }
3397
3398 static int vmx_get_max_ept_level(void)
3399 {
3400 if (cpu_has_vmx_ept_5levels())
3401 return 5;
3402 return 4;
3403 }
3404
3405 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3406 {
3407 u64 eptp = VMX_EPTP_MT_WB;
3408
3409 eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3410
3411 if (enable_ept_ad_bits &&
3412 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
3413 eptp |= VMX_EPTP_AD_ENABLE_BIT;
3414 eptp |= root_hpa;
3415
3416 return eptp;
3417 }
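/*
 * Worked example (illustrative values, not from the original source): for
 * root_hpa = 0x12345000 with a 4-level walk and A/D bits enabled, the EPTP
 * is 0x12345000 | VMX_EPTP_MT_WB (0x6) | VMX_EPTP_PWL_4 (0x18) |
 * VMX_EPTP_AD_ENABLE_BIT (0x40) = 0x1234505e.
 */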
3418
3419 void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3420 {
3421 struct kvm *kvm = vcpu->kvm;
3422 bool update_guest_cr3 = true;
3423 unsigned long guest_cr3;
3424 u64 eptp;
3425
3426 if (enable_ept) {
3427 eptp = construct_eptp(vcpu, root_hpa, root_level);
3428 vmcs_write64(EPT_POINTER, eptp);
3429
3430 hv_track_root_tdp(vcpu, root_hpa);
3431
3432 if (!enable_unrestricted_guest && !is_paging(vcpu))
3433 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3434 else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
3435 guest_cr3 = vcpu->arch.cr3;
3436 else /* vmcs.GUEST_CR3 is already up-to-date. */
3437 update_guest_cr3 = false;
3438 vmx_ept_load_pdptrs(vcpu);
3439 } else {
3440 guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
3441 kvm_get_active_cr3_lam_bits(vcpu);
3442 }
3443
3444 if (update_guest_cr3)
3445 vmcs_writel(GUEST_CR3, guest_cr3);
3446 }
3447
3448 bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3449 {
3450 /*
3451 * We operate under the default treatment of SMM, so VMX cannot be
3452 * enabled under SMM. Note, whether or not VMXE is allowed at all,
3453 * i.e. is a reserved bit, is handled by common x86 code.
3454 */
3455 if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
3456 return false;
3457
3458 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3459 return false;
3460
3461 return true;
3462 }
3463
3464 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3465 {
3466 unsigned long old_cr4 = kvm_read_cr4(vcpu);
3467 struct vcpu_vmx *vmx = to_vmx(vcpu);
3468 unsigned long hw_cr4;
3469
3470 /*
3471 * Pass through host's Machine Check Enable value to hw_cr4, which
3472 * is in force while we are in guest mode. Do not let guests control
3473 * this bit, even if host CR4.MCE == 0.
3474 */
3475 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3476 if (enable_unrestricted_guest)
3477 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
3478 else if (vmx->rmode.vm86_active)
3479 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
3480 else
3481 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
3482
3483 if (vmx_umip_emulated()) {
3484 if (cr4 & X86_CR4_UMIP) {
3485 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3486 hw_cr4 &= ~X86_CR4_UMIP;
3487 } else if (!is_guest_mode(vcpu) ||
3488 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3489 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3490 }
3491 }
3492
3493 vcpu->arch.cr4 = cr4;
3494 kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3495
3496 if (!enable_unrestricted_guest) {
3497 if (enable_ept) {
3498 if (!is_paging(vcpu)) {
3499 hw_cr4 &= ~X86_CR4_PAE;
3500 hw_cr4 |= X86_CR4_PSE;
3501 } else if (!(cr4 & X86_CR4_PAE)) {
3502 hw_cr4 &= ~X86_CR4_PAE;
3503 }
3504 }
3505
3506 /*
3507 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
3508 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs
3509 * to be manually disabled when guest switches to non-paging
3510 * mode.
3511 *
3512 * If !enable_unrestricted_guest, the CPU is always running
3513 * with CR0.PG=1 and CR4 needs to be modified.
3514 * If enable_unrestricted_guest, the CPU automatically
3515 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3516 */
3517 if (!is_paging(vcpu))
3518 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3519 }
3520
3521 vmcs_writel(CR4_READ_SHADOW, cr4);
3522 vmcs_writel(GUEST_CR4, hw_cr4);
3523
3524 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
3525 kvm_update_cpuid_runtime(vcpu);
3526 }
3527
3528 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3529 {
3530 struct vcpu_vmx *vmx = to_vmx(vcpu);
3531 u32 ar;
3532
3533 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3534 *var = vmx->rmode.segs[seg];
3535 if (seg == VCPU_SREG_TR
3536 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3537 return;
3538 var->base = vmx_read_guest_seg_base(vmx, seg);
3539 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3540 return;
3541 }
3542 var->base = vmx_read_guest_seg_base(vmx, seg);
3543 var->limit = vmx_read_guest_seg_limit(vmx, seg);
3544 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3545 ar = vmx_read_guest_seg_ar(vmx, seg);
3546 var->unusable = (ar >> 16) & 1;
3547 var->type = ar & 15;
3548 var->s = (ar >> 4) & 1;
3549 var->dpl = (ar >> 5) & 3;
3550 /*
3551 * Some userspaces do not preserve the unusable property. Since a usable
3552 * segment has to be present according to the VMX spec, we can use the
3553 * present property to work around the userspace bug by making an unusable
3554 * segment always nonpresent. vmx_segment_access_rights() already marks a
3555 * nonpresent segment as unusable.
3556 */
3557 var->present = !var->unusable;
3558 var->avl = (ar >> 12) & 1;
3559 var->l = (ar >> 13) & 1;
3560 var->db = (ar >> 14) & 1;
3561 var->g = (ar >> 15) & 1;
3562 }
3563
3564 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3565 {
3566 struct kvm_segment s;
3567
3568 if (to_vmx(vcpu)->rmode.vm86_active) {
3569 vmx_get_segment(vcpu, &s, seg);
3570 return s.base;
3571 }
3572 return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3573 }
3574
3575 int vmx_get_cpl(struct kvm_vcpu *vcpu)
3576 {
3577 struct vcpu_vmx *vmx = to_vmx(vcpu);
3578
3579 if (unlikely(vmx->rmode.vm86_active))
3580 return 0;
3581 else {
3582 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3583 return VMX_AR_DPL(ar);
3584 }
3585 }
3586
3587 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3588 {
3589 u32 ar;
3590
3591 ar = var->type & 15;
3592 ar |= (var->s & 1) << 4;
3593 ar |= (var->dpl & 3) << 5;
3594 ar |= (var->present & 1) << 7;
3595 ar |= (var->avl & 1) << 12;
3596 ar |= (var->l & 1) << 13;
3597 ar |= (var->db & 1) << 14;
3598 ar |= (var->g & 1) << 15;
3599 ar |= (var->unusable || !var->present) << 16;
3600
3601 return ar;
3602 }
3603
3604 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3605 {
3606 struct vcpu_vmx *vmx = to_vmx(vcpu);
3607 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3608
3609 vmx_segment_cache_clear(vmx);
3610
3611 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3612 vmx->rmode.segs[seg] = *var;
3613 if (seg == VCPU_SREG_TR)
3614 vmcs_write16(sf->selector, var->selector);
3615 else if (var->s)
3616 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3617 return;
3618 }
3619
3620 vmcs_writel(sf->base, var->base);
3621 vmcs_write32(sf->limit, var->limit);
3622 vmcs_write16(sf->selector, var->selector);
3623
3624 /*
3625 * Fix the "Accessed" bit in AR field of segment registers for older
3626 * qemu binaries.
3627 * IA32 arch specifies that at the time of processor reset the
3628 * "Accessed" bit in the AR field of segment registers is 1. And qemu
3629 * is setting it to 0 in the userland code. This causes invalid guest
3630 * state vmexit when "unrestricted guest" mode is turned on.
3631 * Fix for this setup issue in cpu_reset is being pushed in the qemu
3632 * tree. Newer qemu binaries with that qemu fix would not need this
3633 * kvm hack.
3634 */
3635 if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3636 var->type |= 0x1; /* Accessed */
3637
3638 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3639 }
3640
3641 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3642 {
3643 __vmx_set_segment(vcpu, var, seg);
3644
3645 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
3646 }
3647
3648 void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3649 {
3650 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3651
3652 *db = (ar >> 14) & 1;
3653 *l = (ar >> 13) & 1;
3654 }
3655
3656 void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3657 {
3658 dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3659 dt->address = vmcs_readl(GUEST_IDTR_BASE);
3660 }
3661
3662 void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3663 {
3664 vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3665 vmcs_writel(GUEST_IDTR_BASE, dt->address);
3666 }
3667
3668 void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3669 {
3670 dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3671 dt->address = vmcs_readl(GUEST_GDTR_BASE);
3672 }
3673
3674 void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3675 {
3676 vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3677 vmcs_writel(GUEST_GDTR_BASE, dt->address);
3678 }
3679
3680 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3681 {
3682 struct kvm_segment var;
3683 u32 ar;
3684
3685 vmx_get_segment(vcpu, &var, seg);
3686 var.dpl = 0x3;
3687 if (seg == VCPU_SREG_CS)
3688 var.type = 0x3;
3689 ar = vmx_segment_access_rights(&var);
3690
3691 if (var.base != (var.selector << 4))
3692 return false;
3693 if (var.limit != 0xffff)
3694 return false;
3695 if (ar != 0xf3)
3696 return false;
3697
3698 return true;
3699 }
3700
3701 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3702 {
3703 struct kvm_segment cs;
3704 unsigned int cs_rpl;
3705
3706 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3707 cs_rpl = cs.selector & SEGMENT_RPL_MASK;
3708
3709 if (cs.unusable)
3710 return false;
3711 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
3712 return false;
3713 if (!cs.s)
3714 return false;
3715 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
3716 if (cs.dpl > cs_rpl)
3717 return false;
3718 } else {
3719 if (cs.dpl != cs_rpl)
3720 return false;
3721 }
3722 if (!cs.present)
3723 return false;
3724
3725 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3726 return true;
3727 }
3728
3729 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3730 {
3731 struct kvm_segment ss;
3732 unsigned int ss_rpl;
3733
3734 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3735 ss_rpl = ss.selector & SEGMENT_RPL_MASK;
3736
3737 if (ss.unusable)
3738 return true;
3739 if (ss.type != 3 && ss.type != 7)
3740 return false;
3741 if (!ss.s)
3742 return false;
3743 if (ss.dpl != ss_rpl) /* DPL != RPL */
3744 return false;
3745 if (!ss.present)
3746 return false;
3747
3748 return true;
3749 }
3750
3751 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3752 {
3753 struct kvm_segment var;
3754 unsigned int rpl;
3755
3756 vmx_get_segment(vcpu, &var, seg);
3757 rpl = var.selector & SEGMENT_RPL_MASK;
3758
3759 if (var.unusable)
3760 return true;
3761 if (!var.s)
3762 return false;
3763 if (!var.present)
3764 return false;
3765 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
3766 if (var.dpl < rpl) /* DPL < RPL */
3767 return false;
3768 }
3769
3770 /* TODO: Add other members to kvm_segment_field to allow checking for other access
3771 * rights flags
3772 */
3773 return true;
3774 }
3775
3776 static bool tr_valid(struct kvm_vcpu *vcpu)
3777 {
3778 struct kvm_segment tr;
3779
3780 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3781
3782 if (tr.unusable)
3783 return false;
3784 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
3785 return false;
3786 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3787 return false;
3788 if (!tr.present)
3789 return false;
3790
3791 return true;
3792 }
3793
3794 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3795 {
3796 struct kvm_segment ldtr;
3797
3798 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3799
3800 if (ldtr.unusable)
3801 return true;
3802 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
3803 return false;
3804 if (ldtr.type != 2)
3805 return false;
3806 if (!ldtr.present)
3807 return false;
3808
3809 return true;
3810 }
3811
3812 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3813 {
3814 struct kvm_segment cs, ss;
3815
3816 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3817 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3818
3819 return ((cs.selector & SEGMENT_RPL_MASK) ==
3820 (ss.selector & SEGMENT_RPL_MASK));
3821 }
3822
3823 /*
3824 * Check if guest state is valid. Returns true if valid, false if not.
3825 * We assume that registers are always usable.
3827 */
3828 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
3829 {
3830 /* real mode guest state checks */
3831 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3832 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3833 return false;
3834 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3835 return false;
3836 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3837 return false;
3838 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3839 return false;
3840 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3841 return false;
3842 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3843 return false;
3844 } else {
3845 /* protected mode guest state checks */
3846 if (!cs_ss_rpl_check(vcpu))
3847 return false;
3848 if (!code_segment_valid(vcpu))
3849 return false;
3850 if (!stack_segment_valid(vcpu))
3851 return false;
3852 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3853 return false;
3854 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3855 return false;
3856 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3857 return false;
3858 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3859 return false;
3860 if (!tr_valid(vcpu))
3861 return false;
3862 if (!ldtr_valid(vcpu))
3863 return false;
3864 }
3865 /* TODO:
3866 * - Add checks on RIP
3867 * - Add checks on RFLAGS
3868 */
3869
3870 return true;
3871 }
3872
3873 static int init_rmode_tss(struct kvm *kvm, void __user *ua)
3874 {
3875 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3876 u16 data;
3877 int i;
3878
3879 for (i = 0; i < 3; i++) {
3880 if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
3881 return -EFAULT;
3882 }
3883
3884 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3885 if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16)))
3886 return -EFAULT;
3887
3888 data = ~0;
3889 if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8)))
3890 return -EFAULT;
3891
3892 return 0;
3893 }
3894
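/*
 * Lazily allocate and populate the identity-mapped page table (1024 4 MiB
 * PSE entries) used to run restricted guests with EPT while guest paging is
 * disabled. Serialized by slots_lock and done at most once per VM.
 */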
3895 static int init_rmode_identity_map(struct kvm *kvm)
3896 {
3897 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
3898 int i, r = 0;
3899 void __user *uaddr;
3900 u32 tmp;
3901
3902 /* Protect kvm_vmx->ept_identity_pagetable_done. */
3903 mutex_lock(&kvm->slots_lock);
3904
3905 if (likely(kvm_vmx->ept_identity_pagetable_done))
3906 goto out;
3907
3908 if (!kvm_vmx->ept_identity_map_addr)
3909 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3910
3911 uaddr = __x86_set_memory_region(kvm,
3912 IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
3913 kvm_vmx->ept_identity_map_addr,
3914 PAGE_SIZE);
3915 if (IS_ERR(uaddr)) {
3916 r = PTR_ERR(uaddr);
3917 goto out;
3918 }
3919
3920 /* Set up identity-mapping pagetable for EPT in real mode */
3921 for (i = 0; i < (PAGE_SIZE / sizeof(tmp)); i++) {
3922 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3923 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3924 if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) {
3925 r = -EFAULT;
3926 goto out;
3927 }
3928 }
3929 kvm_vmx->ept_identity_pagetable_done = true;
3930
3931 out:
3932 mutex_unlock(&kvm->slots_lock);
3933 return r;
3934 }
3935
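/*
 * Reset a segment register to its architectural RESET state: selector and
 * base 0, limit 0xffff, and a present, accessed read/write data segment
 * (execute/read code segment for CS).
 */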
3936 static void seg_setup(int seg)
3937 {
3938 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3939 unsigned int ar;
3940
3941 vmcs_write16(sf->selector, 0);
3942 vmcs_writel(sf->base, 0);
3943 vmcs_write32(sf->limit, 0xffff);
3944 ar = 0x93;
3945 if (seg == VCPU_SREG_CS)
3946 ar |= 0x08; /* code segment */
3947
3948 vmcs_write32(sf->ar_bytes, ar);
3949 }
3950
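/*
 * Allocate a VPID from the global bitmap. Returns 0 (no dedicated VPID) if
 * VPIDs are disabled or the pool is exhausted.
 */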
3951 int allocate_vpid(void)
3952 {
3953 int vpid;
3954
3955 if (!enable_vpid)
3956 return 0;
3957 spin_lock(&vmx_vpid_lock);
3958 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3959 if (vpid < VMX_NR_VPIDS)
3960 __set_bit(vpid, vmx_vpid_bitmap);
3961 else
3962 vpid = 0;
3963 spin_unlock(&vmx_vpid_lock);
3964 return vpid;
3965 }
3966
3967 void free_vpid(int vpid)
3968 {
3969 if (!enable_vpid || vpid == 0)
3970 return;
3971 spin_lock(&vmx_vpid_lock);
3972 __clear_bit(vpid, vmx_vpid_bitmap);
3973 spin_unlock(&vmx_vpid_lock);
3974 }
3975
3976 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
3977 {
3978 /*
3979 * When KVM is a nested hypervisor on top of Hyper-V and uses the
3980 * 'Enlightened MSR Bitmap' feature, L0 needs to know that the MSR
3981 * bitmap has changed.
3982 */
3983 if (kvm_is_using_evmcs()) {
3984 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3985
3986 if (evmcs->hv_enlightenments_control.msr_bitmap)
3987 evmcs->hv_clean_fields &=
3988 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
3989 }
3990
3991 vmx->nested.force_msr_bitmap_recalc = true;
3992 }
3993
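/*
 * Disable read and/or write interception for an MSR in vmcs01's bitmap,
 * unless userspace's MSR filter demands interception. The shadow bitmap
 * records the desired state so intercepts can be recomputed when the filter
 * changes.
 */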
3994 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
3995 {
3996 struct vcpu_vmx *vmx = to_vmx(vcpu);
3997 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3998 int idx;
3999
4000 if (!cpu_has_vmx_msr_bitmap())
4001 return;
4002
4003 vmx_msr_bitmap_l01_changed(vmx);
4004
4005 /*
4006 * Mark the desired intercept state in the shadow bitmap; this is needed
4007 * for resync when the MSR filters change.
4008 */
4009 idx = vmx_get_passthrough_msr_slot(msr);
4010 if (idx >= 0) {
4011 if (type & MSR_TYPE_R)
4012 clear_bit(idx, vmx->shadow_msr_intercept.read);
4013 if (type & MSR_TYPE_W)
4014 clear_bit(idx, vmx->shadow_msr_intercept.write);
4015 }
4016
4017 if ((type & MSR_TYPE_R) &&
4018 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
4019 vmx_set_msr_bitmap_read(msr_bitmap, msr);
4020 type &= ~MSR_TYPE_R;
4021 }
4022
4023 if ((type & MSR_TYPE_W) &&
4024 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
4025 vmx_set_msr_bitmap_write(msr_bitmap, msr);
4026 type &= ~MSR_TYPE_W;
4027 }
4028
4029 if (type & MSR_TYPE_R)
4030 vmx_clear_msr_bitmap_read(msr_bitmap, msr);
4031
4032 if (type & MSR_TYPE_W)
4033 vmx_clear_msr_bitmap_write(msr_bitmap, msr);
4034 }
4035
4036 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
4037 {
4038 struct vcpu_vmx *vmx = to_vmx(vcpu);
4039 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4040 int idx;
4041
4042 if (!cpu_has_vmx_msr_bitmap())
4043 return;
4044
4045 vmx_msr_bitmap_l01_changed(vmx);
4046
4047 /*
4048 * Mark the desired intercept state in the shadow bitmap; this is needed
4049 * for resync when the MSR filter changes.
4050 */
4051 idx = vmx_get_passthrough_msr_slot(msr);
4052 if (idx >= 0) {
4053 if (type & MSR_TYPE_R)
4054 set_bit(idx, vmx->shadow_msr_intercept.read);
4055 if (type & MSR_TYPE_W)
4056 set_bit(idx, vmx->shadow_msr_intercept.write);
4057 }
4058
4059 if (type & MSR_TYPE_R)
4060 vmx_set_msr_bitmap_read(msr_bitmap, msr);
4061
4062 if (type & MSR_TYPE_W)
4063 vmx_set_msr_bitmap_write(msr_bitmap, msr);
4064 }
4065
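/*
 * Recompute interception for the x2APIC MSR range (0x800 - 0x83f) based on
 * whether x2APIC virtualization and APICv are currently active for the vCPU.
 */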
4066 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
4067 {
4068 /*
4069 * x2APIC indices for 64-bit accesses into the RDMSR and WRMSR halves
4070 * of the MSR bitmap. KVM emulates APIC registers up through 0x3f0,
4071 * i.e. MSR 0x83f, and so only needs to dynamically manipulate 64 bits.
4072 */
4073 const int read_idx = APIC_BASE_MSR / BITS_PER_LONG_LONG;
4074 const int write_idx = read_idx + (0x800 / sizeof(u64));
4075 struct vcpu_vmx *vmx = to_vmx(vcpu);
4076 u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap;
4077 u8 mode;
4078
4079 if (!cpu_has_vmx_msr_bitmap() || WARN_ON_ONCE(!lapic_in_kernel(vcpu)))
4080 return;
4081
4082 if (cpu_has_secondary_exec_ctrls() &&
4083 (secondary_exec_controls_get(vmx) &
4084 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
4085 mode = MSR_BITMAP_MODE_X2APIC;
4086 if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
4087 mode |= MSR_BITMAP_MODE_X2APIC_APICV;
4088 } else {
4089 mode = 0;
4090 }
4091
4092 if (mode == vmx->x2apic_msr_bitmap_mode)
4093 return;
4094
4095 vmx->x2apic_msr_bitmap_mode = mode;
4096
4097 /*
4098 * Reset the bitmap for MSRs 0x800 - 0x83f. Leave AMD's uber-extended
4099 * registers (0x840 and above) intercepted, KVM doesn't support them.
4100 * Intercept all writes by default and poke holes as needed. Pass
4101 * through reads for all valid registers by default in x2APIC+APICv
4102 * mode, only the current timer count needs on-demand emulation by KVM.
4103 */
4104 if (mode & MSR_BITMAP_MODE_X2APIC_APICV)
4105 msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic);
4106 else
4107 msr_bitmap[read_idx] = ~0ull;
4108 msr_bitmap[write_idx] = ~0ull;
4109
4110 /*
4111 * TPR reads and writes can be virtualized even if virtual interrupt
4112 * delivery is not in use.
4113 */
4114 vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
4115 !(mode & MSR_BITMAP_MODE_X2APIC));
4116
4117 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
4118 vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
4119 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
4120 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
4121 if (enable_ipiv)
4122 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_ICR), MSR_TYPE_RW);
4123 }
4124 }
4125
4126 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
4127 {
4128 struct vcpu_vmx *vmx = to_vmx(vcpu);
4129 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4130 u32 i;
4131
4132 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
4133 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
4134 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
4135 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
4136 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4137 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
4138 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
4139 }
4140 }
4141
4142 void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
4143 {
4144 struct vcpu_vmx *vmx = to_vmx(vcpu);
4145 u32 i;
4146
4147 if (!cpu_has_vmx_msr_bitmap())
4148 return;
4149
4150 /*
4151 * Redo intercept permissions for MSRs that KVM is passing through to
4152 * the guest. Disabling interception will check the new MSR filter and
4153 * ensure that KVM enables interception if userspace wants to filter
4154 * the MSR. MSRs that KVM is already intercepting don't need to be
4155 * refreshed since KVM is going to intercept them regardless of what
4156 * userspace wants.
4157 */
4158 for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
4159 u32 msr = vmx_possible_passthrough_msrs[i];
4160
4161 if (!test_bit(i, vmx->shadow_msr_intercept.read))
4162 vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
4163
4164 if (!test_bit(i, vmx->shadow_msr_intercept.write))
4165 vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
4166 }
4167
4168 /* PT MSRs can be passed through iff PT is exposed to the guest. */
4169 if (vmx_pt_mode_is_host_guest())
4170 pt_update_intercept_for_msr(vcpu);
4171 }
4172
4173 static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
4174 int pi_vec)
4175 {
4176 #ifdef CONFIG_SMP
4177 if (vcpu->mode == IN_GUEST_MODE) {
4178 /*
4179 * The vector of the virtual interrupt has already been set in the PIR.
4180 * Send a notification event to deliver the virtual interrupt
4181 * unless the vCPU is the currently running vCPU, i.e. the
4182 * event is being sent from a fastpath VM-Exit handler, in
4183 * which case the PIR will be synced to the vIRR before
4184 * re-entering the guest.
4185 *
4186 * When the target is not the running vCPU, the following
4187 * possibilities emerge:
4188 *
4189 * Case 1: vCPU stays in non-root mode. Sending a notification
4190 * event posts the interrupt to the vCPU.
4191 *
4192 * Case 2: vCPU exits to root mode and is still runnable. The
4193 * PIR will be synced to the vIRR before re-entering the guest.
4194 * Sending a notification event is ok as the host IRQ handler
4195 * will ignore the spurious event.
4196 *
4197 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
4198 * has already synced PIR to vIRR and never blocks the vCPU if
4199 * the vIRR is not empty. Therefore, a blocked vCPU here does
4200 * not wait for any requested interrupts in PIR, and sending a
4201 * notification event also results in a benign, spurious event.
4202 */
4203
4204 if (vcpu != kvm_get_running_vcpu())
4205 __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
4206 return;
4207 }
4208 #endif
4209 /*
4210 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
4211 * otherwise do nothing as KVM will grab the highest priority pending
4212 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
4213 */
4214 kvm_vcpu_wake_up(vcpu);
4215 }
4216
4217 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4218 int vector)
4219 {
4220 struct vcpu_vmx *vmx = to_vmx(vcpu);
4221
4222 if (is_guest_mode(vcpu) &&
4223 vector == vmx->nested.posted_intr_nv) {
4224 /*
4225 * If a posted intr is not recognized by hardware,
4226 * we will accomplish it in the next vmentry.
4227 */
4228 vmx->nested.pi_pending = true;
4229 kvm_make_request(KVM_REQ_EVENT, vcpu);
4230
4231 /*
4232 * This pairs with the smp_mb_*() after setting vcpu->mode in
4233 * vcpu_enter_guest() to guarantee the vCPU sees the event
4234 * request if triggering a posted interrupt "fails" because
4235 * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as
4236 * the smp_wmb() in kvm_make_request() only ensures everything
4237 * done before making the request is visible when the request
4238 * is visible, it doesn't ensure ordering between the store to
4239 * vcpu->requests and the load from vcpu->mode.
4240 */
4241 smp_mb__after_atomic();
4242
4243 /* the PIR and ON have been set by L1. */
4244 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
4245 return 0;
4246 }
4247 return -1;
4248 }
4249 /*
4250 * Send an interrupt to the vCPU via posted interrupt.
4251 * 1. If the target vCPU is running (non-root mode), send a posted interrupt
4252 * notification and hardware will sync the PIR to the vIRR atomically.
4253 * 2. If the target vCPU isn't running (root mode), kick it to pick up the
4254 * interrupt from the PIR on the next VM-Entry.
4255 */
4256 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4257 {
4258 struct vcpu_vmx *vmx = to_vmx(vcpu);
4259 int r;
4260
4261 r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4262 if (!r)
4263 return 0;
4264
4265 /* Note, this is called iff the local APIC is in-kernel. */
4266 if (!vcpu->arch.apic->apicv_active)
4267 return -1;
4268
4269 if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4270 return 0;
4271
4272 /* If a previous notification has sent the IPI, nothing to do. */
4273 if (pi_test_and_set_on(&vmx->pi_desc))
4274 return 0;
4275
4276 /*
4277 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
4278 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
4279 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
4280 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
4281 */
4282 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
4283 return 0;
4284 }
4285
4286 void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
4287 int trig_mode, int vector)
4288 {
4289 struct kvm_vcpu *vcpu = apic->vcpu;
4290
4291 if (vmx_deliver_posted_interrupt(vcpu, vector)) {
4292 kvm_lapic_set_irr(vector, apic);
4293 kvm_make_request(KVM_REQ_EVENT, vcpu);
4294 kvm_vcpu_kick(vcpu);
4295 } else {
4296 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
4297 trig_mode, vector);
4298 }
4299 }
4300
4301 /*
4302 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4303 * will not change in the lifetime of the guest.
4304 * Note that host-state that does change is set elsewhere. E.g., host-state
4305 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4306 */
4307 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4308 {
4309 u32 low32, high32;
4310 unsigned long tmpl;
4311 unsigned long cr0, cr3, cr4;
4312
4313 cr0 = read_cr0();
4314 WARN_ON(cr0 & X86_CR0_TS);
4315 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
4316
4317 /*
4318 * Save the most likely value for this task's CR3 in the VMCS.
4319 * We can't use __get_current_cr3_fast() because we're not atomic.
4320 */
4321 cr3 = __read_cr3();
4322 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
4323 vmx->loaded_vmcs->host_state.cr3 = cr3;
4324
4325 /* Save the most likely value for this task's CR4 in the VMCS. */
4326 cr4 = cr4_read_shadow();
4327 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
4328 vmx->loaded_vmcs->host_state.cr4 = cr4;
4329
4330 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
4331 #ifdef CONFIG_X86_64
4332 /*
4333 * Load null selectors, so we can avoid reloading them in
4334 * vmx_prepare_switch_to_host(), in case userspace uses
4335 * the null selectors too (the expected case).
4336 */
4337 vmcs_write16(HOST_DS_SELECTOR, 0);
4338 vmcs_write16(HOST_ES_SELECTOR, 0);
4339 #else
4340 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4341 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4342 #endif
4343 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4344 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
4345
4346 vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */
4347
4348 vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
4349
4350 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4351 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4352
4353 /*
4354 * SYSENTER is used for 32-bit system calls on either 32-bit or
4355 * 64-bit kernels. It is always zero if neither is allowed, otherwise
4356 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
4357 * have already done so!).
4358 */
4359 if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
4360 vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
4361
4362 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4363 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
4364
4365 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4366 rdmsr(MSR_IA32_CR_PAT, low32, high32);
4367 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4368 }
4369
4370 if (cpu_has_load_ia32_efer())
4371 vmcs_write64(HOST_IA32_EFER, kvm_host.efer);
4372 }
4373
4374 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4375 {
4376 struct kvm_vcpu *vcpu = &vmx->vcpu;
4377
4378 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4379 ~vcpu->arch.cr4_guest_rsvd_bits;
4380 if (!enable_ept) {
4381 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4382 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4383 }
4384 if (is_guest_mode(&vmx->vcpu))
4385 vcpu->arch.cr4_guest_owned_bits &=
4386 ~get_vmcs12(vcpu)->cr4_guest_host_mask;
4387 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4388 }
4389
4390 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4391 {
4392 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4393
4394 if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4395 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4396
4397 if (!enable_vnmi)
4398 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
4399
4400 if (!enable_preemption_timer)
4401 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4402
4403 return pin_based_exec_ctrl;
4404 }
4405
4406 static u32 vmx_vmentry_ctrl(void)
4407 {
4408 u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
4409
4410 if (vmx_pt_mode_is_system())
4411 vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
4412 VM_ENTRY_LOAD_IA32_RTIT_CTL);
4413 /*
4414 * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
4415 */
4416 vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
4417 VM_ENTRY_LOAD_IA32_EFER |
4418 VM_ENTRY_IA32E_MODE);
4419
4420 if (cpu_has_perf_global_ctrl_bug())
4421 vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4422
4423 return vmentry_ctrl;
4424 }
4425
4426 static u32 vmx_vmexit_ctrl(void)
4427 {
4428 u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
4429
4430 /*
4431 * Not used by KVM and never set in vmcs01 or vmcs02, but emulated for
4432 * nested virtualization and thus allowed to be set in vmcs12.
4433 */
4434 vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
4435 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);
4436
4437 if (vmx_pt_mode_is_system())
4438 vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
4439 VM_EXIT_CLEAR_IA32_RTIT_CTL);
4440
4441 if (cpu_has_perf_global_ctrl_bug())
4442 vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4443
4444 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
4445 return vmexit_ctrl &
4446 ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
4447 }
4448
4449 void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4450 {
4451 struct vcpu_vmx *vmx = to_vmx(vcpu);
4452
4453 if (is_guest_mode(vcpu)) {
4454 vmx->nested.update_vmcs01_apicv_status = true;
4455 return;
4456 }
4457
4458 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4459
4460 if (kvm_vcpu_apicv_active(vcpu)) {
4461 secondary_exec_controls_setbit(vmx,
4462 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4463 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4464 if (enable_ipiv)
4465 tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4466 } else {
4467 secondary_exec_controls_clearbit(vmx,
4468 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4469 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4470 if (enable_ipiv)
4471 tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4472 }
4473
4474 vmx_update_msr_bitmap_x2apic(vcpu);
4475 }
4476
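/*
 * Compute the primary processor-based execution controls for vmcs01, clearing
 * exits that KVM doesn't use and exits that the VM has opted out of (e.g.
 * MWAIT/MONITOR, HLT), and leaving the dynamically toggled IRQ/NMI window
 * bits clear.
 */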
4477 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4478 {
4479 u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4480
4481 /*
4482 * Not used by KVM, but fully supported for nesting, i.e. are allowed in
4483 * vmcs12 and propagated to vmcs02 when set in vmcs12.
4484 */
4485 exec_control &= ~(CPU_BASED_RDTSC_EXITING |
4486 CPU_BASED_USE_IO_BITMAPS |
4487 CPU_BASED_MONITOR_TRAP_FLAG |
4488 CPU_BASED_PAUSE_EXITING);
4489
4490 /* INTR_WINDOW_EXITING and NMI_WINDOW_EXITING are toggled dynamically */
4491 exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
4492 CPU_BASED_NMI_WINDOW_EXITING);
4493
4494 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4495 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4496
4497 if (!cpu_need_tpr_shadow(&vmx->vcpu))
4498 exec_control &= ~CPU_BASED_TPR_SHADOW;
4499
4500 #ifdef CONFIG_X86_64
4501 if (exec_control & CPU_BASED_TPR_SHADOW)
4502 exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
4503 CPU_BASED_CR8_STORE_EXITING);
4504 else
4505 exec_control |= CPU_BASED_CR8_STORE_EXITING |
4506 CPU_BASED_CR8_LOAD_EXITING;
4507 #endif
4508 /* No need to intercept CR3 access or INVLPG when using EPT. */
4509 if (enable_ept)
4510 exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4511 CPU_BASED_CR3_STORE_EXITING |
4512 CPU_BASED_INVLPG_EXITING);
4513 if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4514 exec_control &= ~(CPU_BASED_MWAIT_EXITING |
4515 CPU_BASED_MONITOR_EXITING);
4516 if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4517 exec_control &= ~CPU_BASED_HLT_EXITING;
4518 return exec_control;
4519 }
4520
4521 static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
4522 {
4523 u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;
4524
4525 /*
4526 * IPI virtualization relies on APICv. Disable IPI virtualization if
4527 * APICv is inhibited.
4528 */
4529 if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4530 exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
4531
4532 return exec_control;
4533 }
4534
4535 /*
4536 * Adjust a single secondary execution control bit to intercept/allow an
4537 * instruction in the guest. This is usually done based on whether or not a
4538 * feature has been exposed to the guest in order to correctly emulate faults.
4539 */
4540 static inline void
4541 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4542 u32 control, bool enabled, bool exiting)
4543 {
4544 /*
4545 * If the control is for an opt-in feature, clear the control if the
4546 * feature is not exposed to the guest, i.e. not enabled. If the
4547 * control is opt-out, i.e. an exiting control, clear the control if
4548 * the feature _is_ exposed to the guest, i.e. exiting/interception is
4549 * disabled for the associated instruction. Note, the caller is
4550 * responsible for presetting exec_control to set all supported bits.
4551 */
4552 if (enabled == exiting)
4553 *exec_control &= ~control;
4554
4555 /*
4556 * Update the nested MSR settings so that a nested VMM can/can't set
4557 * controls for features that are/aren't exposed to the guest.
4558 */
4559 if (nested) {
4560 /*
4561 * All features that can be added or removed to VMX MSRs must
4562 * be supported in the first place for nested virtualization.
4563 */
4564 if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
4565 enabled = false;
4566
4567 if (enabled)
4568 vmx->nested.msrs.secondary_ctls_high |= control;
4569 else
4570 vmx->nested.msrs.secondary_ctls_high &= ~control;
4571 }
4572 }
4573
4574 /*
4575 * Wrapper macro for the common case of adjusting a secondary execution control
4576 * based on a single guest CPUID bit, with a dedicated feature bit. This also
4577 * verifies that the control is actually supported by KVM and hardware.
4578 */
4579 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
4580 ({ \
4581 struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \
4582 bool __enabled; \
4583 \
4584 if (cpu_has_vmx_##name()) { \
4585 if (kvm_is_governed_feature(X86_FEATURE_##feat_name)) \
4586 __enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name); \
4587 else \
4588 __enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name); \
4589 vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
4590 __enabled, exiting); \
4591 } \
4592 })
4593
4594 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4595 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4596 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4597
4598 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4599 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4600
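/*
 * Compute the secondary processor-based execution controls for vmcs01,
 * masking off features that are unsupported, not exposed to the guest, or
 * enabled/disabled dynamically elsewhere (e.g. DESC, shadow VMCS, PML).
 */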
4601 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4602 {
4603 struct kvm_vcpu *vcpu = &vmx->vcpu;
4604
4605 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4606
4607 if (vmx_pt_mode_is_system())
4608 exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
4609 if (!cpu_need_virtualize_apic_accesses(vcpu))
4610 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4611 if (vmx->vpid == 0)
4612 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4613 if (!enable_ept) {
4614 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4615 exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
4616 enable_unrestricted_guest = 0;
4617 }
4618 if (!enable_unrestricted_guest)
4619 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4620 if (kvm_pause_in_guest(vmx->vcpu.kvm))
4621 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4622 if (!kvm_vcpu_apicv_active(vcpu))
4623 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4624 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4625 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4626
4627 /*
4628 * KVM doesn't support VMFUNC for L1, but the control is set in KVM's
4629 * base configuration as KVM emulates VMFUNC[EPTP_SWITCHING] for L2.
4630 */
4631 exec_control &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
4632
4633 /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, in vmx_set_cr4(). */
4635 exec_control &= ~SECONDARY_EXEC_DESC;
4636
4637 /*
4638 * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4639 * (handle_vmptrld). Shadow VMCS can't be enabled here because there
4640 * is no current VMCS12 yet.
4641 */
4642 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4643
4644 /*
4645 * PML is enabled/disabled when dirty logging of memslots changes, but
4646 * it needs to be set here when dirty logging is already active, e.g.
4647 * if this vCPU was created after dirty logging was enabled.
4648 */
4649 if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
4650 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4651
4652 vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
4653
4654 /*
4655 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
4656 * feature is exposed to the guest. This creates a virtualization hole
4657 * if both are supported in hardware but only one is exposed to the
4658 * guest, but letting the guest execute RDTSCP or RDPID when either one
4659 * is advertised is preferable to emulating the advertised instruction
4660 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
4661 */
4662 if (cpu_has_vmx_rdtscp()) {
4663 bool rdpid_or_rdtscp_enabled =
4664 guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
4665 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
4666
4667 vmx_adjust_secondary_exec_control(vmx, &exec_control,
4668 SECONDARY_EXEC_ENABLE_RDTSCP,
4669 rdpid_or_rdtscp_enabled, false);
4670 }
4671
4672 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4673
4674 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4675 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4676
4677 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4678 ENABLE_USR_WAIT_PAUSE, false);
4679
4680 if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4681 exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;
4682
4683 if (!kvm_notify_vmexit_enabled(vcpu->kvm))
4684 exec_control &= ~SECONDARY_EXEC_NOTIFY_VM_EXITING;
4685
4686 return exec_control;
4687 }
4688
4689 static inline int vmx_get_pid_table_order(struct kvm *kvm)
4690 {
4691 return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
4692 }
4693
4694 static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
4695 {
4696 struct page *pages;
4697 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4698
4699 if (!irqchip_in_kernel(kvm) || !enable_ipiv)
4700 return 0;
4701
4702 if (kvm_vmx->pid_table)
4703 return 0;
4704
4705 pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
4706 vmx_get_pid_table_order(kvm));
4707 if (!pages)
4708 return -ENOMEM;
4709
4710 kvm_vmx->pid_table = (void *)page_address(pages);
4711 return 0;
4712 }
4713
4714 int vmx_vcpu_precreate(struct kvm *kvm)
4715 {
4716 return vmx_alloc_ipiv_pid_table(kvm);
4717 }
4718
4719 #define VMX_XSS_EXIT_BITMAP 0
4720
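/*
 * One-time setup of vmcs01 for a vCPU: execution controls, APICv/IPIv state,
 * constant host state, MSR load/store areas, and the initial guest fields.
 * Per-CPU and runtime-variable state is programmed elsewhere.
 */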
4721 static void init_vmcs(struct vcpu_vmx *vmx)
4722 {
4723 struct kvm *kvm = vmx->vcpu.kvm;
4724 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4725
4726 if (nested)
4727 nested_vmx_set_vmcs_shadowing_bitmap();
4728
4729 if (cpu_has_vmx_msr_bitmap())
4730 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4731
4732 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */
4733
4734 /* Control */
4735 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4736
4737 exec_controls_set(vmx, vmx_exec_control(vmx));
4738
4739 if (cpu_has_secondary_exec_ctrls()) {
4740 secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
4741 if (vmx->ve_info)
4742 vmcs_write64(VE_INFORMATION_ADDRESS,
4743 __pa(vmx->ve_info));
4744 }
4745
4746 if (cpu_has_tertiary_exec_ctrls())
4747 tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
4748
4749 if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4750 vmcs_write64(EOI_EXIT_BITMAP0, 0);
4751 vmcs_write64(EOI_EXIT_BITMAP1, 0);
4752 vmcs_write64(EOI_EXIT_BITMAP2, 0);
4753 vmcs_write64(EOI_EXIT_BITMAP3, 0);
4754
4755 vmcs_write16(GUEST_INTR_STATUS, 0);
4756
4757 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4758 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4759 }
4760
4761 if (vmx_can_use_ipiv(&vmx->vcpu)) {
4762 vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
4763 vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
4764 }
4765
4766 if (!kvm_pause_in_guest(kvm)) {
4767 vmcs_write32(PLE_GAP, ple_gap);
4768 vmx->ple_window = ple_window;
4769 vmx->ple_window_dirty = true;
4770 }
4771
4772 if (kvm_notify_vmexit_enabled(kvm))
4773 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
4774
4775 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4776 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4777 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
4778
4779 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
4780 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
4781 vmx_set_constant_host_state(vmx);
4782 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4783 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4784
4785 if (cpu_has_vmx_vmfunc())
4786 vmcs_write64(VM_FUNCTION_CONTROL, 0);
4787
4788 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4789 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4790 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4791 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4792 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4793
4794 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
4795 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4796
4797 vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4798
4799 /* 22.2.1, 20.8.1 */
4800 vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4801
4802 vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4803 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4804
4805 set_cr4_guest_host_mask(vmx);
4806
4807 if (vmx->vpid != 0)
4808 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4809
4810 if (cpu_has_vmx_xsaves())
4811 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
4812
4813 if (enable_pml) {
4814 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4815 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
4816 }
4817
4818 vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4819
4820 if (vmx_pt_mode_is_host_guest()) {
4821 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4822 /* Bits 6:0 are forced to 1, writes are ignored. */
4823 vmx->pt_desc.guest.output_mask = 0x7F;
4824 vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
4825 }
4826
4827 vmcs_write32(GUEST_SYSENTER_CS, 0);
4828 vmcs_writel(GUEST_SYSENTER_ESP, 0);
4829 vmcs_writel(GUEST_SYSENTER_EIP, 0);
4830 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4831
4832 if (cpu_has_vmx_tpr_shadow()) {
4833 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4834 if (cpu_need_tpr_shadow(&vmx->vcpu))
4835 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4836 __pa(vmx->vcpu.arch.apic->regs));
4837 vmcs_write32(TPR_THRESHOLD, 0);
4838 }
4839
4840 vmx_setup_uret_msrs(vmx);
4841 }
4842
4843 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4844 {
4845 struct vcpu_vmx *vmx = to_vmx(vcpu);
4846
4847 init_vmcs(vmx);
4848
4849 if (nested)
4850 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
4851
4852 vcpu_setup_sgx_lepubkeyhash(vcpu);
4853
4854 vmx->nested.posted_intr_nv = -1;
4855 vmx->nested.vmxon_ptr = INVALID_GPA;
4856 vmx->nested.current_vmptr = INVALID_GPA;
4857
4858 #ifdef CONFIG_KVM_HYPERV
4859 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
4860 #endif
4861
4862 vcpu->arch.microcode_version = 0x100000000ULL;
4863 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
4864
4865 /*
4866 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
4867 * or POSTED_INTR_WAKEUP_VECTOR.
4868 */
4869 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
4870 __pi_set_sn(&vmx->pi_desc);
4871 }
4872
4873 void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4874 {
4875 struct vcpu_vmx *vmx = to_vmx(vcpu);
4876
4877 if (!init_event)
4878 __vmx_vcpu_reset(vcpu);
4879
4880 vmx->rmode.vm86_active = 0;
4881 vmx->spec_ctrl = 0;
4882
4883 vmx->msr_ia32_umwait_control = 0;
4884
4885 vmx->hv_deadline_tsc = -1;
4886 kvm_set_cr8(vcpu, 0);
4887
4888 vmx_segment_cache_clear(vmx);
4889 kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS);
4890
4891 seg_setup(VCPU_SREG_CS);
4892 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4893 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
4894
4895 seg_setup(VCPU_SREG_DS);
4896 seg_setup(VCPU_SREG_ES);
4897 seg_setup(VCPU_SREG_FS);
4898 seg_setup(VCPU_SREG_GS);
4899 seg_setup(VCPU_SREG_SS);
4900
4901 vmcs_write16(GUEST_TR_SELECTOR, 0);
4902 vmcs_writel(GUEST_TR_BASE, 0);
4903 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4904 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4905
4906 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4907 vmcs_writel(GUEST_LDTR_BASE, 0);
4908 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4909 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4910
4911 vmcs_writel(GUEST_GDTR_BASE, 0);
4912 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4913
4914 vmcs_writel(GUEST_IDTR_BASE, 0);
4915 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4916
4917 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4918 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4919 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4920 if (kvm_mpx_supported())
4921 vmcs_write64(GUEST_BNDCFGS, 0);
4922
4923 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
4924
4925 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4926
4927 vpid_sync_context(vmx->vpid);
4928
4929 vmx_update_fb_clear_dis(vcpu, vmx);
4930 }
4931
4932 void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
4933 {
4934 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4935 }
4936
4937 void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
4938 {
4939 if (!enable_vnmi ||
4940 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4941 vmx_enable_irq_window(vcpu);
4942 return;
4943 }
4944
4945 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4946 }
4947
4948 void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
4949 {
4950 struct vcpu_vmx *vmx = to_vmx(vcpu);
4951 uint32_t intr;
4952 int irq = vcpu->arch.interrupt.nr;
4953
4954 trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
4955
4956 ++vcpu->stat.irq_injections;
4957 if (vmx->rmode.vm86_active) {
4958 int inc_eip = 0;
4959 if (vcpu->arch.interrupt.soft)
4960 inc_eip = vcpu->arch.event_exit_inst_len;
4961 kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
4962 return;
4963 }
4964 intr = irq | INTR_INFO_VALID_MASK;
4965 if (vcpu->arch.interrupt.soft) {
4966 intr |= INTR_TYPE_SOFT_INTR;
4967 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4968 vmx->vcpu.arch.event_exit_inst_len);
4969 } else
4970 intr |= INTR_TYPE_EXT_INTR;
4971 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4972
4973 vmx_clear_hlt(vcpu);
4974 }
4975
4976 void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4977 {
4978 struct vcpu_vmx *vmx = to_vmx(vcpu);
4979
4980 if (!enable_vnmi) {
4981 /*
4982 * Tracking the NMI-blocked state in software is built upon
4983 * finding the next open IRQ window. This, in turn, depends on
4984 * well-behaving guests: They have to keep IRQs disabled at
4985 * least as long as the NMI handler runs. Otherwise we may
4986 * cause NMI nesting, maybe breaking the guest. But as this is
4987 * highly unlikely, we can live with the residual risk.
4988 */
4989 vmx->loaded_vmcs->soft_vnmi_blocked = 1;
4990 vmx->loaded_vmcs->vnmi_blocked_time = 0;
4991 }
4992
4993 ++vcpu->stat.nmi_injections;
4994 vmx->loaded_vmcs->nmi_known_unmasked = false;
4995
4996 if (vmx->rmode.vm86_active) {
4997 kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
4998 return;
4999 }
5000
5001 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
5002 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
5003
5004 vmx_clear_hlt(vcpu);
5005 }
5006
5007 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
5008 {
5009 struct vcpu_vmx *vmx = to_vmx(vcpu);
5010 bool masked;
5011
5012 if (!enable_vnmi)
5013 return vmx->loaded_vmcs->soft_vnmi_blocked;
5014 if (vmx->loaded_vmcs->nmi_known_unmasked)
5015 return false;
5016 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5017 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5018 return masked;
5019 }
5020
5021 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5022 {
5023 struct vcpu_vmx *vmx = to_vmx(vcpu);
5024
5025 if (!enable_vnmi) {
5026 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
5027 vmx->loaded_vmcs->soft_vnmi_blocked = masked;
5028 vmx->loaded_vmcs->vnmi_blocked_time = 0;
5029 }
5030 } else {
5031 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5032 if (masked)
5033 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5034 GUEST_INTR_STATE_NMI);
5035 else
5036 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
5037 GUEST_INTR_STATE_NMI);
5038 }
5039 }
5040
5041 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
5042 {
5043 if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5044 return false;
5045
5046 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
5047 return true;
5048
5049 return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5050 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5051 GUEST_INTR_STATE_NMI));
5052 }
5053
5054 int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5055 {
5056 if (to_vmx(vcpu)->nested.nested_run_pending)
5057 return -EBUSY;
5058
5059 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
5060 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5061 return -EBUSY;
5062
5063 return !vmx_nmi_blocked(vcpu);
5064 }
5065
5066 bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5067 {
5068 return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5069 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5070 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5071 }
5072
5073 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5074 {
5075 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5076 return false;
5077
5078 return __vmx_interrupt_blocked(vcpu);
5079 }
5080
5081 int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5082 {
5083 if (to_vmx(vcpu)->nested.nested_run_pending)
5084 return -EBUSY;
5085
5086 /*
5087 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
5088 * e.g. if the IRQ arrived asynchronously after checking nested events.
5089 */
5090 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5091 return -EBUSY;
5092
5093 return !vmx_interrupt_blocked(vcpu);
5094 }
5095
5096 int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
5097 {
5098 void __user *ret;
5099
5100 if (enable_unrestricted_guest)
5101 return 0;
5102
5103 mutex_lock(&kvm->slots_lock);
5104 ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
5105 PAGE_SIZE * 3);
5106 mutex_unlock(&kvm->slots_lock);
5107
5108 if (IS_ERR(ret))
5109 return PTR_ERR(ret);
5110
5111 to_kvm_vmx(kvm)->tss_addr = addr;
5112
5113 return init_rmode_tss(kvm, ret);
5114 }
5115
5116 int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5117 {
5118 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
5119 return 0;
5120 }
5121
5122 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
5123 {
5124 switch (vec) {
5125 case BP_VECTOR:
5126 /*
5127 * Update instruction length as we may reinject the exception
5128 * from user space while in guest debugging mode.
5129 */
5130 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5131 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5132 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5133 return false;
5134 fallthrough;
5135 case DB_VECTOR:
5136 return !(vcpu->guest_debug &
5137 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
5138 case DE_VECTOR:
5139 case OF_VECTOR:
5140 case BR_VECTOR:
5141 case UD_VECTOR:
5142 case DF_VECTOR:
5143 case SS_VECTOR:
5144 case GP_VECTOR:
5145 case MF_VECTOR:
5146 return true;
5147 }
5148 return false;
5149 }
5150
5151 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
5152 int vec, u32 err_code)
5153 {
5154 /*
5155 * An instruction with the address-size override prefix (opcode 0x67)
5156 * causes a #SS fault with error code 0 in VM86 mode.
5157 */
5158 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
5159 if (kvm_emulate_instruction(vcpu, 0)) {
5160 if (vcpu->arch.halt_request) {
5161 vcpu->arch.halt_request = 0;
5162 return kvm_emulate_halt_noskip(vcpu);
5163 }
5164 return 1;
5165 }
5166 return 0;
5167 }
5168
5169 /*
5170 * Forward all other exceptions that are valid in real mode.
5171 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
5172 * the required debugging infrastructure rework.
5173 */
5174 kvm_queue_exception(vcpu, vec);
5175 return 1;
5176 }
5177
5178 static int handle_machine_check(struct kvm_vcpu *vcpu)
5179 {
5180 /* handled by vmx_vcpu_run() */
5181 return 1;
5182 }
5183
5184 /*
5185 * If the host has split lock detection disabled, then #AC is
5186 * unconditionally injected into the guest, which matches the behaviour
5187 * before split lock detection existed.
5188 *
5189 * If the host has split lock detection enabled then #AC is
5190 * only injected into the guest when:
5191 * - Guest CPL == 3 (user mode)
5192 * - Guest has #AC detection enabled in CR0
5193 * - Guest EFLAGS has AC bit set
5194 */
5195 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
5196 {
5197 if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
5198 return true;
5199
5200 return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
5201 (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
5202 }
5203
5204 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
5205 {
5206 struct vcpu_vmx *vmx = to_vmx(vcpu);
5207 struct kvm_run *kvm_run = vcpu->run;
5208 u32 intr_info, ex_no, error_code;
5209 unsigned long cr2, dr6;
5210 u32 vect_info;
5211
5212 vect_info = vmx->idt_vectoring_info;
5213 intr_info = vmx_get_intr_info(vcpu);
5214
5215 /*
5216 * Machine checks are handled by handle_exception_irqoff(), or by
5217 * vmx_vcpu_run() if a #MC occurs on VM-Entry. NMIs are handled by
5218 * vmx_vcpu_enter_exit().
5219 */
5220 if (is_machine_check(intr_info) || is_nmi(intr_info))
5221 return 1;
5222
5223 /*
5224 * Queue the exception here instead of in handle_nm_fault_irqoff().
5225 * This ensures the nested_vmx check is not skipped so vmexit can
5226 * be reflected to L1 (when it intercepts #NM) before reaching this
5227 * point.
5228 */
5229 if (is_nm_fault(intr_info)) {
5230 kvm_queue_exception(vcpu, NM_VECTOR);
5231 return 1;
5232 }
5233
5234 if (is_invalid_opcode(intr_info))
5235 return handle_ud(vcpu);
5236
5237 if (WARN_ON_ONCE(is_ve_fault(intr_info))) {
5238 struct vmx_ve_information *ve_info = vmx->ve_info;
5239
5240 WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION,
5241 "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason);
5242 dump_vmcs(vcpu);
5243 kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE");
5244 return 1;
5245 }
5246
5247 error_code = 0;
5248 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
5249 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5250
5251 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
5252 WARN_ON_ONCE(!enable_vmware_backdoor);
5253
5254 /*
5255 * VMware backdoor emulation on #GP interception only handles
5256 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
5257 * error code on #GP.
5258 */
5259 if (error_code) {
5260 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
5261 return 1;
5262 }
5263 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
5264 }
5265
5266 /*
5267 * A #PF with PFEC.RSVD = 1 indicates the guest is accessing MMIO,
5268 * in which case it is better to report an internal error.
5269 * See the comments in vmx_handle_exit.
5270 */
5271 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
5272 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
5273 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5274 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5275 vcpu->run->internal.ndata = 4;
5276 vcpu->run->internal.data[0] = vect_info;
5277 vcpu->run->internal.data[1] = intr_info;
5278 vcpu->run->internal.data[2] = error_code;
5279 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5280 return 0;
5281 }
5282
5283 if (is_page_fault(intr_info)) {
5284 cr2 = vmx_get_exit_qual(vcpu);
5285 if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
5286 /*
5287 * EPT will cause page fault only if we need to
5288 * detect illegal GPAs.
5289 */
5290 WARN_ON_ONCE(!allow_smaller_maxphyaddr);
5291 kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
5292 return 1;
5293 } else
5294 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
5295 }
5296
5297 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
5298
5299 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5300 return handle_rmode_exception(vcpu, ex_no, error_code);
5301
5302 switch (ex_no) {
5303 case DB_VECTOR:
5304 dr6 = vmx_get_exit_qual(vcpu);
5305 if (!(vcpu->guest_debug &
5306 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5307 /*
5308 * If the #DB was due to ICEBP, a.k.a. INT1, skip the
5309 * instruction. ICEBP generates a trap-like #DB, but
5310 * despite its interception control being tied to #DB,
5311 * is an instruction intercept, i.e. the VM-Exit occurs
5312 * on the ICEBP itself. Use the inner "skip" helper to
5313 * avoid single-step #DB and MTF updates, as ICEBP is
5314 * higher priority. Note, skipping ICEBP still clears
5315 * STI and MOVSS blocking.
5316 *
5317 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
5318 * if single-step is enabled in RFLAGS and STI or MOVSS
5319 * blocking is active, as the CPU doesn't set the bit
5320 * on VM-Exit due to #DB interception. VM-Entry has a
5321 * consistency check that a single-step #DB is pending
5322 * in this scenario as the previous instruction cannot
5323 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
5324 * don't modify RFLAGS), therefore the one instruction
5325 * delay when activating single-step breakpoints must
5326 * have already expired. Note, the CPU sets/clears BS
5327 * as appropriate for all other VM-Exits types.
5328 */
5329 if (is_icebp(intr_info))
5330 WARN_ON(!skip_emulated_instruction(vcpu));
5331 else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
5332 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5333 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
5334 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
5335 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
5336
5337 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
5338 return 1;
5339 }
5340 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
5341 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5342 fallthrough;
5343 case BP_VECTOR:
5344 /*
5345 * Update instruction length as we may reinject #BP from
5346 * user space while in guest debugging mode. Reading it for
5347 * #DB as well causes no harm; it is not used in that case.
5348 */
5349 vmx->vcpu.arch.event_exit_inst_len =
5350 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5351 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5352 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5353 kvm_run->debug.arch.exception = ex_no;
5354 break;
5355 case AC_VECTOR:
5356 if (vmx_guest_inject_ac(vcpu)) {
5357 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
5358 return 1;
5359 }
5360
5361 /*
5362 * Handle split lock. Depending on detection mode this will
5363 * either warn and disable split lock detection for this
5364 * task or force SIGBUS on it.
5365 */
5366 if (handle_guest_split_lock(kvm_rip_read(vcpu)))
5367 return 1;
5368 fallthrough;
5369 default:
5370 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
5371 kvm_run->ex.exception = ex_no;
5372 kvm_run->ex.error_code = error_code;
5373 break;
5374 }
5375 return 0;
5376 }
5377
5378 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
5379 {
5380 ++vcpu->stat.irq_exits;
5381 return 1;
5382 }
5383
5384 static int handle_triple_fault(struct kvm_vcpu *vcpu)
5385 {
5386 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5387 vcpu->mmio_needed = 0;
5388 return 0;
5389 }
5390
5391 static int handle_io(struct kvm_vcpu *vcpu)
5392 {
5393 unsigned long exit_qualification;
5394 int size, in, string;
5395 unsigned port;
5396
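/*
 * I/O-instruction exit qualification layout (per the Intel SDM): bits 2:0
 * hold the access size minus one, bit 3 the direction (1 = IN), bit 4 is
 * set for string instructions (INS/OUTS), and bits 31:16 hold the port.
 */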
5397 exit_qualification = vmx_get_exit_qual(vcpu);
5398 string = (exit_qualification & 16) != 0;
5399
5400 ++vcpu->stat.io_exits;
5401
5402 if (string)
5403 return kvm_emulate_instruction(vcpu, 0);
5404
5405 port = exit_qualification >> 16;
5406 size = (exit_qualification & 7) + 1;
5407 in = (exit_qualification & 8) != 0;
5408
5409 return kvm_fast_pio(vcpu, size, port, in);
5410 }
5411
5412 void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5413 {
5414 /*
5415 * Patch in the VMCALL instruction:
5416 */
5417 hypercall[0] = 0x0f;
5418 hypercall[1] = 0x01;
5419 hypercall[2] = 0xc1;
5420 }
5421
5422 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5423 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5424 {
5425 if (is_guest_mode(vcpu)) {
5426 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5427 unsigned long orig_val = val;
5428
5429 /*
5430 * We get here when L2 changed cr0 in a way that did not change
5431 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5432 * but did change L0 shadowed bits. So we first calculate the
5433 * effective cr0 value that L1 would like to write into the
5434 * hardware. It consists of the L2-owned bits from the new
5435 * value combined with the L1-owned bits from L1's guest_cr0.
5436 */
5437 val = (val & ~vmcs12->cr0_guest_host_mask) |
5438 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5439
5440 if (kvm_set_cr0(vcpu, val))
5441 return 1;
5442 vmcs_writel(CR0_READ_SHADOW, orig_val);
5443 return 0;
5444 } else {
5445 return kvm_set_cr0(vcpu, val);
5446 }
5447 }
5448
5449 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5450 {
5451 if (is_guest_mode(vcpu)) {
5452 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5453 unsigned long orig_val = val;
5454
5455 /* analogously to handle_set_cr0 */
5456 val = (val & ~vmcs12->cr4_guest_host_mask) |
5457 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5458 if (kvm_set_cr4(vcpu, val))
5459 return 1;
5460 vmcs_writel(CR4_READ_SHADOW, orig_val);
5461 return 0;
5462 } else
5463 return kvm_set_cr4(vcpu, val);
5464 }
5465
5466 static int handle_desc(struct kvm_vcpu *vcpu)
5467 {
5468 /*
5469 * UMIP emulation relies on intercepting writes to CR4.UMIP, i.e. this
5470 * and other code needs to be updated if UMIP can be guest owned.
5471 */
5472 BUILD_BUG_ON(KVM_POSSIBLE_CR4_GUEST_BITS & X86_CR4_UMIP);
5473
5474 WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu, X86_CR4_UMIP));
5475 return kvm_emulate_instruction(vcpu, 0);
5476 }
5477
5478 static int handle_cr(struct kvm_vcpu *vcpu)
5479 {
5480 unsigned long exit_qualification, val;
5481 int cr;
5482 int reg;
5483 int err;
5484 int ret;
5485
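/*
 * CR-access exit qualification layout (per the Intel SDM): bits 3:0 hold
 * the control register number, bits 5:4 the access type (0 = MOV to CR,
 * 1 = MOV from CR, 2 = CLTS, 3 = LMSW) and bits 11:8 the GPR operand.
 */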
5486 exit_qualification = vmx_get_exit_qual(vcpu);
5487 cr = exit_qualification & 15;
5488 reg = (exit_qualification >> 8) & 15;
5489 switch ((exit_qualification >> 4) & 3) {
5490 case 0: /* mov to cr */
5491 val = kvm_register_read(vcpu, reg);
5492 trace_kvm_cr_write(cr, val);
5493 switch (cr) {
5494 case 0:
5495 err = handle_set_cr0(vcpu, val);
5496 return kvm_complete_insn_gp(vcpu, err);
5497 case 3:
5498 WARN_ON_ONCE(enable_unrestricted_guest);
5499
5500 err = kvm_set_cr3(vcpu, val);
5501 return kvm_complete_insn_gp(vcpu, err);
5502 case 4:
5503 err = handle_set_cr4(vcpu, val);
5504 return kvm_complete_insn_gp(vcpu, err);
5505 case 8: {
5506 u8 cr8_prev = kvm_get_cr8(vcpu);
5507 u8 cr8 = (u8)val;
5508 err = kvm_set_cr8(vcpu, cr8);
5509 ret = kvm_complete_insn_gp(vcpu, err);
5510 if (lapic_in_kernel(vcpu))
5511 return ret;
5512 if (cr8_prev <= cr8)
5513 return ret;
5514 /*
5515 * TODO: we might be squashing a
5516 * KVM_GUESTDBG_SINGLESTEP-triggered
5517 * KVM_EXIT_DEBUG here.
5518 */
5519 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5520 return 0;
5521 }
5522 }
5523 break;
5524 case 2: /* clts */
5525 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
5526 return -EIO;
5527 case 1: /*mov from cr*/
5528 switch (cr) {
5529 case 3:
5530 WARN_ON_ONCE(enable_unrestricted_guest);
5531
5532 val = kvm_read_cr3(vcpu);
5533 kvm_register_write(vcpu, reg, val);
5534 trace_kvm_cr_read(cr, val);
5535 return kvm_skip_emulated_instruction(vcpu);
5536 case 8:
5537 val = kvm_get_cr8(vcpu);
5538 kvm_register_write(vcpu, reg, val);
5539 trace_kvm_cr_read(cr, val);
5540 return kvm_skip_emulated_instruction(vcpu);
5541 }
5542 break;
5543 case 3: /* lmsw */
5544 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5545 trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
5546 kvm_lmsw(vcpu, val);
5547
5548 return kvm_skip_emulated_instruction(vcpu);
5549 default:
5550 break;
5551 }
5552 vcpu->run->exit_reason = 0;
5553 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5554 (int)(exit_qualification >> 4) & 3, cr);
5555 return 0;
5556 }
5557
5558 static int handle_dr(struct kvm_vcpu *vcpu)
5559 {
5560 unsigned long exit_qualification;
5561 int dr, dr7, reg;
5562 int err = 1;
5563
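/*
 * DR-access exit qualification layout (per the Intel SDM): bits 2:0 hold
 * the debug register number, bit 4 the direction (set = MOV from DR) and
 * bits 11:8 the GPR operand.
 */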
5564 exit_qualification = vmx_get_exit_qual(vcpu);
5565 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5566
5567 /* First, if DR does not exist, trigger UD */
5568 if (!kvm_require_dr(vcpu, dr))
5569 return 1;
5570
5571 if (vmx_get_cpl(vcpu) > 0)
5572 goto out;
5573
5574 dr7 = vmcs_readl(GUEST_DR7);
5575 if (dr7 & DR7_GD) {
5576 /*
5577 * As the vm-exit takes precedence over the debug trap, we
5578 * need to emulate the latter, either for the host or the
5579 * guest debugging itself.
5580 */
5581 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5582 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5583 vcpu->run->debug.arch.dr7 = dr7;
5584 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5585 vcpu->run->debug.arch.exception = DB_VECTOR;
5586 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5587 return 0;
5588 } else {
5589 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
5590 return 1;
5591 }
5592 }
5593
5594 if (vcpu->guest_debug == 0) {
5595 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5596
5597 /*
5598 * No more DR vmexits; force a reload of the debug registers
5599 * and reenter on this instruction. The next vmexit will
5600 * retrieve the full state of the debug registers.
5601 */
5602 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5603 return 1;
5604 }
5605
5606 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5607 if (exit_qualification & TYPE_MOV_FROM_DR) {
5608 kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
5609 err = 0;
5610 } else {
5611 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
5612 }
5613
5614 out:
5615 return kvm_complete_insn_gp(vcpu, err);
5616 }
5617
5618 void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5619 {
5620 get_debugreg(vcpu->arch.db[0], 0);
5621 get_debugreg(vcpu->arch.db[1], 1);
5622 get_debugreg(vcpu->arch.db[2], 2);
5623 get_debugreg(vcpu->arch.db[3], 3);
5624 get_debugreg(vcpu->arch.dr6, 6);
5625 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5626
5627 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5628 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5629
5630 /*
5631 * exc_debug expects dr6 to be cleared after it runs; avoid letting it
5632 * see a stale dr6 from the guest.
5633 */
5634 set_debugreg(DR6_RESERVED, 6);
5635 }
5636
5637 void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5638 {
5639 vmcs_writel(GUEST_DR7, val);
5640 }
5641
5642 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5643 {
5644 kvm_apic_update_ppr(vcpu);
5645 return 1;
5646 }
5647
5648 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5649 {
5650 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5651
5652 kvm_make_request(KVM_REQ_EVENT, vcpu);
5653
5654 ++vcpu->stat.irq_window_exits;
5655 return 1;
5656 }
5657
5658 static int handle_invlpg(struct kvm_vcpu *vcpu)
5659 {
5660 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5661
5662 kvm_mmu_invlpg(vcpu, exit_qualification);
5663 return kvm_skip_emulated_instruction(vcpu);
5664 }
5665
5666 static int handle_apic_access(struct kvm_vcpu *vcpu)
5667 {
5668 if (likely(fasteoi)) {
5669 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5670 int access_type, offset;
5671
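/*
 * APIC-access exit qualification: bits 11:0 give the offset into the
 * APIC page and bits 15:12 the access type.
 */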
5672 access_type = exit_qualification & APIC_ACCESS_TYPE;
5673 offset = exit_qualification & APIC_ACCESS_OFFSET;
5674 /*
5675 * A sane guest uses MOV to write EOI, and the written value
5676 * doesn't matter. Short-circuit that case here and avoid the
5677 * heavy instruction emulation.
5678 */
5679 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5680 (offset == APIC_EOI)) {
5681 kvm_lapic_set_eoi(vcpu);
5682 return kvm_skip_emulated_instruction(vcpu);
5683 }
5684 }
5685 return kvm_emulate_instruction(vcpu, 0);
5686 }
5687
5688 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5689 {
5690 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5691 int vector = exit_qualification & 0xff;
5692
5693 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5694 kvm_apic_set_eoi_accelerated(vcpu, vector);
5695 return 1;
5696 }
5697
5698 static int handle_apic_write(struct kvm_vcpu *vcpu)
5699 {
5700 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5701
5702 /*
5703 * APIC-write VM-Exit is trap-like, KVM doesn't need to advance RIP and
5704 * hardware has done any necessary aliasing, offset adjustments, etc...
5705 * for the access. I.e. the correct value has already been written to
5706 * the vAPIC page for the correct 16-byte chunk. KVM needs only to
5707 * retrieve the register value and emulate the access.
5708 */
5709 u32 offset = exit_qualification & 0xff0;
5710
5711 kvm_apic_write_nodecode(vcpu, offset);
5712 return 1;
5713 }
5714
5715 static int handle_task_switch(struct kvm_vcpu *vcpu)
5716 {
5717 struct vcpu_vmx *vmx = to_vmx(vcpu);
5718 unsigned long exit_qualification;
5719 bool has_error_code = false;
5720 u32 error_code = 0;
5721 u16 tss_selector;
5722 int reason, type, idt_v, idt_index;
5723
5724 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5725 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5726 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5727
5728 exit_qualification = vmx_get_exit_qual(vcpu);
5729
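/*
 * Task-switch exit qualification: bits 15:0 hold the new TSS selector and
 * bits 31:30 the source of the task switch (CALL, IRET, JMP or task gate
 * in the IDT).
 */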
5730 reason = (u32)exit_qualification >> 30;
5731 if (reason == TASK_SWITCH_GATE && idt_v) {
5732 switch (type) {
5733 case INTR_TYPE_NMI_INTR:
5734 vcpu->arch.nmi_injected = false;
5735 vmx_set_nmi_mask(vcpu, true);
5736 break;
5737 case INTR_TYPE_EXT_INTR:
5738 case INTR_TYPE_SOFT_INTR:
5739 kvm_clear_interrupt_queue(vcpu);
5740 break;
5741 case INTR_TYPE_HARD_EXCEPTION:
5742 if (vmx->idt_vectoring_info &
5743 VECTORING_INFO_DELIVER_CODE_MASK) {
5744 has_error_code = true;
5745 error_code =
5746 vmcs_read32(IDT_VECTORING_ERROR_CODE);
5747 }
5748 fallthrough;
5749 case INTR_TYPE_SOFT_EXCEPTION:
5750 kvm_clear_exception_queue(vcpu);
5751 break;
5752 default:
5753 break;
5754 }
5755 }
5756 tss_selector = exit_qualification;
5757
5758 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5759 type != INTR_TYPE_EXT_INTR &&
5760 type != INTR_TYPE_NMI_INTR))
5761 WARN_ON(!skip_emulated_instruction(vcpu));
5762
5763 /*
5764 * TODO: What about debug traps on tss switch?
5765 * Are we supposed to inject them and update dr6?
5766 */
5767 return kvm_task_switch(vcpu, tss_selector,
5768 type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
5769 reason, has_error_code, error_code);
5770 }
5771
5772 static int handle_ept_violation(struct kvm_vcpu *vcpu)
5773 {
5774 unsigned long exit_qualification;
5775 gpa_t gpa;
5776 u64 error_code;
5777
5778 exit_qualification = vmx_get_exit_qual(vcpu);
5779
5780 /*
5781 * If the EPT violation happened while executing IRET from an NMI, the
5782 * "blocked by NMI" bit has to be set before the next VM entry.
5783 * There are errata that may cause this bit to not be set:
5784 * AAK134, BY25.
5785 */
5786 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5787 enable_vnmi &&
5788 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5789 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5790
5791 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5792 trace_kvm_page_fault(vcpu, gpa, exit_qualification);
5793
5794 /* Is it a read fault? */
5795 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
5796 ? PFERR_USER_MASK : 0;
5797 /* Is it a write fault? */
5798 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
5799 ? PFERR_WRITE_MASK : 0;
5800 /* Is it a fetch fault? */
5801 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
5802 ? PFERR_FETCH_MASK : 0;
5803 /* ept page table entry is present? */
5804 error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
5805 ? PFERR_PRESENT_MASK : 0;
5806
5807 error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
5808 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
5809
5810 /*
5811 * Check that the GPA doesn't exceed physical memory limits, as that is
5812 * a guest page fault. We have to emulate the instruction here, because
5813 * if the illegal address is that of a paging structure, then
5814 * EPT_VIOLATION_ACC_WRITE bit is set. Alternatively, if supported we
5815 * would also use advanced VM-exit information for EPT violations to
5816 * reconstruct the page fault error code.
5817 */
5818 if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
5819 return kvm_emulate_instruction(vcpu, 0);
5820
5821 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5822 }
5823
5824 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5825 {
5826 gpa_t gpa;
5827
5828 if (vmx_check_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
5829 return 1;
5830
5831 /*
5832 * A nested guest cannot optimize MMIO vmexits, because we have an
5833 * nGPA here instead of the required GPA.
5834 */
5835 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5836 if (!is_guest_mode(vcpu) &&
5837 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5838 trace_kvm_fast_mmio(gpa);
5839 return kvm_skip_emulated_instruction(vcpu);
5840 }
5841
5842 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
5843 }
5844
5845 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5846 {
5847 if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
5848 return -EIO;
5849
5850 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5851 ++vcpu->stat.nmi_window_exits;
5852 kvm_make_request(KVM_REQ_EVENT, vcpu);
5853
5854 return 1;
5855 }
5856
5857 static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
5858 {
5859 struct vcpu_vmx *vmx = to_vmx(vcpu);
5860
5861 return vmx->emulation_required && !vmx->rmode.vm86_active &&
5862 (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
5863 }
5864
5865 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5866 {
5867 struct vcpu_vmx *vmx = to_vmx(vcpu);
5868 bool intr_window_requested;
5869 unsigned count = 130;
5870
5871 intr_window_requested = exec_controls_get(vmx) &
5872 CPU_BASED_INTR_WINDOW_EXITING;
5873
5874 while (vmx->emulation_required && count-- != 0) {
5875 if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
5876 return handle_interrupt_window(&vmx->vcpu);
5877
5878 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
5879 return 1;
5880
5881 if (!kvm_emulate_instruction(vcpu, 0))
5882 return 0;
5883
5884 if (vmx_emulation_required_with_pending_exception(vcpu)) {
5885 kvm_prepare_emulation_failure_exit(vcpu);
5886 return 0;
5887 }
5888
5889 if (vcpu->arch.halt_request) {
5890 vcpu->arch.halt_request = 0;
5891 return kvm_emulate_halt_noskip(vcpu);
5892 }
5893
5894 /*
5895 * Note, return 1 and not 0, vcpu_run() will invoke
5896 * xfer_to_guest_mode() which will create a proper return
5897 * code.
5898 */
5899 if (__xfer_to_guest_mode_work_pending())
5900 return 1;
5901 }
5902
5903 return 1;
5904 }
5905
5906 int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
5907 {
5908 if (vmx_emulation_required_with_pending_exception(vcpu)) {
5909 kvm_prepare_emulation_failure_exit(vcpu);
5910 return 0;
5911 }
5912
5913 return 1;
5914 }
5915
5916 /*
5917 * Indicate a vCPU that is busy-waiting on a spinlock. We do not enable
5918 * PAUSE exiting, so we only get here on a CPU with PAUSE-loop exiting.
5919 */
5920 static int handle_pause(struct kvm_vcpu *vcpu)
5921 {
5922 if (!kvm_pause_in_guest(vcpu->kvm))
5923 grow_ple_window(vcpu);
5924
5925 /*
5926 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
5927 * VM-execution control is ignored if CPL > 0. OTOH, KVM
5928 * never sets PAUSE_EXITING and only sets PLE if supported,
5929 * so the vCPU must be CPL=0 if it gets a PAUSE exit.
5930 */
5931 kvm_vcpu_on_spin(vcpu, true);
5932 return kvm_skip_emulated_instruction(vcpu);
5933 }
5934
5935 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
5936 {
5937 return 1;
5938 }
5939
5940 static int handle_invpcid(struct kvm_vcpu *vcpu)
5941 {
5942 u32 vmx_instruction_info;
5943 unsigned long type;
5944 gva_t gva;
5945 struct {
5946 u64 pcid;
5947 u64 gla;
5948 } operand;
5949 int gpr_index;
5950
5951 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
5952 kvm_queue_exception(vcpu, UD_VECTOR);
5953 return 1;
5954 }
5955
5956 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5957 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5958 type = kvm_register_read(vcpu, gpr_index);
5959
5960 /* According to the Intel instruction reference, the memory operand
5961 * is read even if it isn't needed (e.g., for type==all)
5962 */
5963 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5964 vmx_instruction_info, false,
5965 sizeof(operand), &gva))
5966 return 1;
5967
5968 return kvm_handle_invpcid(vcpu, type, gva);
5969 }
5970
5971 static int handle_pml_full(struct kvm_vcpu *vcpu)
5972 {
5973 unsigned long exit_qualification;
5974
5975 trace_kvm_pml_full(vcpu->vcpu_id);
5976
5977 exit_qualification = vmx_get_exit_qual(vcpu);
5978
5979 /*
5980 * If the PML-buffer-full exit happened while executing IRET from an NMI,
5981 * the "blocked by NMI" bit has to be set before the next VM entry.
5982 */
5983 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5984 enable_vnmi &&
5985 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5986 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5987 GUEST_INTR_STATE_NMI);
5988
5989 /*
5990 * The PML buffer was already flushed at the beginning of the VM-exit;
5991 * nothing to do here, and no userspace involvement is needed for PML.
5992 */
5993 return 1;
5994 }
5995
5996 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu,
5997 bool force_immediate_exit)
5998 {
5999 struct vcpu_vmx *vmx = to_vmx(vcpu);
6000
6001 /*
6002 * In the *extremely* unlikely scenario that this is a spurious VM-Exit
6003 * due to the timer expiring while it was "soft" disabled, just eat the
6004 * exit and re-enter the guest.
6005 */
6006 if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled))
6007 return EXIT_FASTPATH_REENTER_GUEST;
6008
6009 /*
6010 * If the timer expired because KVM used it to force an immediate exit,
6011 * then mission accomplished.
6012 */
6013 if (force_immediate_exit)
6014 return EXIT_FASTPATH_EXIT_HANDLED;
6015
6016 /*
6017 * If L2 is active, go down the slow path as emulating the guest timer
6018 * expiration likely requires synthesizing a nested VM-Exit.
6019 */
6020 if (is_guest_mode(vcpu))
6021 return EXIT_FASTPATH_NONE;
6022
6023 kvm_lapic_expired_hv_timer(vcpu);
6024 return EXIT_FASTPATH_REENTER_GUEST;
6025 }
6026
6027 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
6028 {
6029 /*
6030 * This non-fastpath handler is reached if and only if the preemption
6031 * timer was being used to emulate a guest timer while L2 is active.
6032 * All other scenarios are supposed to be handled in the fastpath.
6033 */
6034 WARN_ON_ONCE(!is_guest_mode(vcpu));
6035 kvm_lapic_expired_hv_timer(vcpu);
6036 return 1;
6037 }
6038
6039 /*
6040 * When nested=0, all VMX instruction VM Exits filter here. The handlers
6041 * are overwritten by nested_vmx_setup() when nested=1.
6042 */
6043 static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
6044 {
6045 kvm_queue_exception(vcpu, UD_VECTOR);
6046 return 1;
6047 }
6048
6049 #ifndef CONFIG_X86_SGX_KVM
6050 static int handle_encls(struct kvm_vcpu *vcpu)
6051 {
6052 /*
6053 * SGX virtualization is disabled. There is no software enable bit for
6054 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
6055 * the guest from executing ENCLS (when SGX is supported by hardware).
6056 */
6057 kvm_queue_exception(vcpu, UD_VECTOR);
6058 return 1;
6059 }
6060 #endif /* CONFIG_X86_SGX_KVM */
6061
6062 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
6063 {
6064 /*
6065 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
6066 * VM-Exits. Unconditionally set the flag here and leave the handling to
6067 * vmx_handle_exit().
6068 */
6069 to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
6070 return 1;
6071 }
6072
6073 static int handle_notify(struct kvm_vcpu *vcpu)
6074 {
6075 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
6076 bool context_invalid = exit_qual & NOTIFY_VM_CONTEXT_INVALID;
6077
6078 ++vcpu->stat.notify_window_exits;
6079
6080 /*
6081 * If the notify VM exit happened while executing IRET from an NMI, the
6082 * "blocked by NMI" bit has to be set before the next VM entry.
6083 */
6084 if (enable_vnmi && (exit_qual & INTR_INFO_UNBLOCK_NMI))
6085 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6086 GUEST_INTR_STATE_NMI);
6087
6088 if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6089 context_invalid) {
6090 vcpu->run->exit_reason = KVM_EXIT_NOTIFY;
6091 vcpu->run->notify.flags = context_invalid ?
6092 KVM_NOTIFY_CONTEXT_INVALID : 0;
6093 return 0;
6094 }
6095
6096 return 1;
6097 }
6098
6099 /*
6100 * The exit handlers return 1 if the exit was handled fully and guest execution
6101 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
6102 * to be done to userspace and return 0.
6103 */
6104 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6105 [EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi,
6106 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
6107 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
6108 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
6109 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6110 [EXIT_REASON_CR_ACCESS] = handle_cr,
6111 [EXIT_REASON_DR_ACCESS] = handle_dr,
6112 [EXIT_REASON_CPUID] = kvm_emulate_cpuid,
6113 [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr,
6114 [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr,
6115 [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window,
6116 [EXIT_REASON_HLT] = kvm_emulate_halt,
6117 [EXIT_REASON_INVD] = kvm_emulate_invd,
6118 [EXIT_REASON_INVLPG] = handle_invlpg,
6119 [EXIT_REASON_RDPMC] = kvm_emulate_rdpmc,
6120 [EXIT_REASON_VMCALL] = kvm_emulate_hypercall,
6121 [EXIT_REASON_VMCLEAR] = handle_vmx_instruction,
6122 [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction,
6123 [EXIT_REASON_VMPTRLD] = handle_vmx_instruction,
6124 [EXIT_REASON_VMPTRST] = handle_vmx_instruction,
6125 [EXIT_REASON_VMREAD] = handle_vmx_instruction,
6126 [EXIT_REASON_VMRESUME] = handle_vmx_instruction,
6127 [EXIT_REASON_VMWRITE] = handle_vmx_instruction,
6128 [EXIT_REASON_VMOFF] = handle_vmx_instruction,
6129 [EXIT_REASON_VMON] = handle_vmx_instruction,
6130 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
6131 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
6132 [EXIT_REASON_APIC_WRITE] = handle_apic_write,
6133 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
6134 [EXIT_REASON_WBINVD] = kvm_emulate_wbinvd,
6135 [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv,
6136 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
6137 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
6138 [EXIT_REASON_GDTR_IDTR] = handle_desc,
6139 [EXIT_REASON_LDTR_TR] = handle_desc,
6140 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
6141 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
6142 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
6143 [EXIT_REASON_MWAIT_INSTRUCTION] = kvm_emulate_mwait,
6144 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
6145 [EXIT_REASON_MONITOR_INSTRUCTION] = kvm_emulate_monitor,
6146 [EXIT_REASON_INVEPT] = handle_vmx_instruction,
6147 [EXIT_REASON_INVVPID] = handle_vmx_instruction,
6148 [EXIT_REASON_RDRAND] = kvm_handle_invalid_op,
6149 [EXIT_REASON_RDSEED] = kvm_handle_invalid_op,
6150 [EXIT_REASON_PML_FULL] = handle_pml_full,
6151 [EXIT_REASON_INVPCID] = handle_invpcid,
6152 [EXIT_REASON_VMFUNC] = handle_vmx_instruction,
6153 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
6154 [EXIT_REASON_ENCLS] = handle_encls,
6155 [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit,
6156 [EXIT_REASON_NOTIFY] = handle_notify,
6157 };
6158
6159 static const int kvm_vmx_max_exit_handlers =
6160 ARRAY_SIZE(kvm_vmx_exit_handlers);
6161
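/*
 * Collect the exit details (reason, qualification, IDT-vectoring info,
 * interrupt info and error code) that common KVM code uses for tracing
 * and exit reporting.
 */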
6162 void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
6163 u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
6164 {
6165 struct vcpu_vmx *vmx = to_vmx(vcpu);
6166
6167 *reason = vmx->exit_reason.full;
6168 *info1 = vmx_get_exit_qual(vcpu);
6169 if (!(vmx->exit_reason.failed_vmentry)) {
6170 *info2 = vmx->idt_vectoring_info;
6171 *intr_info = vmx_get_intr_info(vcpu);
6172 if (is_exception_with_error_code(*intr_info))
6173 *error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6174 else
6175 *error_code = 0;
6176 } else {
6177 *info2 = 0;
6178 *intr_info = 0;
6179 *error_code = 0;
6180 }
6181 }
6182
6183 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
6184 {
6185 if (vmx->pml_pg) {
6186 __free_page(vmx->pml_pg);
6187 vmx->pml_pg = NULL;
6188 }
6189 }
6190
6191 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
6192 {
6193 struct vcpu_vmx *vmx = to_vmx(vcpu);
6194 u64 *pml_buf;
6195 u16 pml_idx;
6196
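/*
 * Hardware fills the PML buffer from the highest index downward and
 * GUEST_PML_INDEX points at the next free slot, so the valid entries
 * are pml_idx + 1 through PML_ENTITY_NUM - 1.
 */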
6197 pml_idx = vmcs_read16(GUEST_PML_INDEX);
6198
6199 /* Do nothing if PML buffer is empty */
6200 if (pml_idx == (PML_ENTITY_NUM - 1))
6201 return;
6202
6203 /* PML index always points to next available PML buffer entity */
6204 if (pml_idx >= PML_ENTITY_NUM)
6205 pml_idx = 0;
6206 else
6207 pml_idx++;
6208
6209 pml_buf = page_address(vmx->pml_pg);
6210 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
6211 u64 gpa;
6212
6213 gpa = pml_buf[pml_idx];
6214 WARN_ON(gpa & (PAGE_SIZE - 1));
6215 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
6216 }
6217
6218 /* reset PML index */
6219 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
6220 }
6221
6222 static void vmx_dump_sel(char *name, uint32_t sel)
6223 {
6224 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
6225 name, vmcs_read16(sel),
6226 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
6227 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
6228 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
6229 }
6230
6231 static void vmx_dump_dtsel(char *name, uint32_t limit)
6232 {
6233 pr_err("%s limit=0x%08x, base=0x%016lx\n",
6234 name, vmcs_read32(limit),
6235 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
6236 }
6237
6238 static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
6239 {
6240 unsigned int i;
6241 struct vmx_msr_entry *e;
6242
6243 pr_err("MSR %s:\n", name);
6244 for (i = 0, e = m->val; i < m->nr; ++i, ++e)
6245 pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value);
6246 }
6247
6248 void dump_vmcs(struct kvm_vcpu *vcpu)
6249 {
6250 struct vcpu_vmx *vmx = to_vmx(vcpu);
6251 u32 vmentry_ctl, vmexit_ctl;
6252 u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
6253 u64 tertiary_exec_control;
6254 unsigned long cr4;
6255 int efer_slot;
6256
6257 if (!dump_invalid_vmcs) {
6258 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6259 return;
6260 }
6261
6262 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
6263 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
6264 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6265 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
6266 cr4 = vmcs_readl(GUEST_CR4);
6267
6268 if (cpu_has_secondary_exec_ctrls())
6269 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6270 else
6271 secondary_exec_control = 0;
6272
6273 if (cpu_has_tertiary_exec_ctrls())
6274 tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
6275 else
6276 tertiary_exec_control = 0;
6277
6278 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6279 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6280 pr_err("*** Guest State ***\n");
6281 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6282 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
6283 vmcs_readl(CR0_GUEST_HOST_MASK));
6284 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6285 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
6286 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
6287 if (cpu_has_vmx_ept()) {
6288 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
6289 vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
6290 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
6291 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
6292 }
6293 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
6294 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
6295 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
6296 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
6297 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6298 vmcs_readl(GUEST_SYSENTER_ESP),
6299 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
6300 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
6301 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
6302 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
6303 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
6304 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
6305 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
6306 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
6307 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
6308 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
6309 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
6310 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6311 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
6312 pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
6313 else if (efer_slot >= 0)
6314 pr_err("EFER= 0x%016llx (autoload)\n",
6315 vmx->msr_autoload.guest.val[efer_slot].value);
6316 else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
6317 pr_err("EFER= 0x%016llx (effective)\n",
6318 vcpu->arch.efer | (EFER_LMA | EFER_LME));
6319 else
6320 pr_err("EFER= 0x%016llx (effective)\n",
6321 vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6322 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
6323 pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
6324 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
6325 vmcs_read64(GUEST_IA32_DEBUGCTL),
6326 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
6327 if (cpu_has_load_perf_global_ctrl() &&
6328 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
6329 pr_err("PerfGlobCtl = 0x%016llx\n",
6330 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
6331 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
6332 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
6333 pr_err("Interruptibility = %08x ActivityState = %08x\n",
6334 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
6335 vmcs_read32(GUEST_ACTIVITY_STATE));
6336 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
6337 pr_err("InterruptStatus = %04x\n",
6338 vmcs_read16(GUEST_INTR_STATUS));
6339 if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
6340 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6341 if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
6342 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
6343
6344 pr_err("*** Host State ***\n");
6345 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
6346 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
6347 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6348 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
6349 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
6350 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
6351 vmcs_read16(HOST_TR_SELECTOR));
6352 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6353 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
6354 vmcs_readl(HOST_TR_BASE));
6355 pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6356 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
6357 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6358 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
6359 vmcs_readl(HOST_CR4));
6360 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6361 vmcs_readl(HOST_IA32_SYSENTER_ESP),
6362 vmcs_read32(HOST_IA32_SYSENTER_CS),
6363 vmcs_readl(HOST_IA32_SYSENTER_EIP));
6364 if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
6365 pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
6366 if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
6367 pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
6368 if (cpu_has_load_perf_global_ctrl() &&
6369 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
6370 pr_err("PerfGlobCtl = 0x%016llx\n",
6371 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
6372 if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
6373 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6374
6375 pr_err("*** Control State ***\n");
6376 pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6377 cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
6378 pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6379 pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
6380 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6381 vmcs_read32(EXCEPTION_BITMAP),
6382 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
6383 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
6384 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6385 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6386 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
6387 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
6388 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6389 vmcs_read32(VM_EXIT_INTR_INFO),
6390 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
6391 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
6392 pr_err(" reason=%08x qualification=%016lx\n",
6393 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
6394 pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6395 vmcs_read32(IDT_VECTORING_INFO_FIELD),
6396 vmcs_read32(IDT_VECTORING_ERROR_CODE));
6397 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
6398 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
6399 pr_err("TSC Multiplier = 0x%016llx\n",
6400 vmcs_read64(TSC_MULTIPLIER));
6401 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
6402 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
6403 u16 status = vmcs_read16(GUEST_INTR_STATUS);
6404 pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
6405 }
6406 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
6407 if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
6408 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
6409 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
6410 }
6411 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6412 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6413 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6414 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6415 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6416 pr_err("PLE Gap=%08x Window=%08x\n",
6417 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
6418 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6419 pr_err("Virtual processor ID = 0x%04x\n",
6420 vmcs_read16(VIRTUAL_PROCESSOR_ID));
6421 if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
6422 struct vmx_ve_information *ve_info = vmx->ve_info;
6423 u64 ve_info_pa = vmcs_read64(VE_INFORMATION_ADDRESS);
6424
6425 /*
6426 * If KVM is dumping the VMCS, then something has gone wrong
6427 * already. Dereferencing an address from the VMCS, which could
6428 * very well be corrupted, is a terrible idea. The virtual
6429 * address is known so use it.
6430 */
6431 pr_err("VE info address = 0x%016llx%s\n", ve_info_pa,
6432 ve_info_pa == __pa(ve_info) ? "" : "(corrupted!)");
6433 pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
6434 ve_info->exit_reason, ve_info->delivery,
6435 ve_info->exit_qualification,
6436 ve_info->guest_linear_address,
6437 ve_info->guest_physical_address, ve_info->eptp_index);
6438 }
6439 }
6440
6441 /*
6442 * The guest has exited. See if we can fix it or if we need userspace
6443 * assistance.
6444 */
6445 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6446 {
6447 struct vcpu_vmx *vmx = to_vmx(vcpu);
6448 union vmx_exit_reason exit_reason = vmx->exit_reason;
6449 u32 vectoring_info = vmx->idt_vectoring_info;
6450 u16 exit_handler_index;
6451
6452 /*
6453 * Flush the PML buffer of logged GPAs; this keeps dirty_bitmap more up
6454 * to date. Another benefit: in kvm_vm_ioctl_get_dirty_log, before
6455 * querying dirty_bitmap, we only need to kick all vCPUs out of guest
6456 * mode, because once a vCPU is in root mode its PML buffer must already
6457 * have been flushed. Note, PML is never enabled in hardware while
6458 * running L2.
6459 */
6460 if (enable_pml && !is_guest_mode(vcpu))
6461 vmx_flush_pml_buffer(vcpu);
6462
6463 /*
6464 * KVM should never reach this point with a pending nested VM-Enter.
6465 * More specifically, short-circuiting VM-Entry to emulate L2 due to
6466 * invalid guest state should never happen as that means KVM knowingly
6467 * allowed a nested VM-Enter with an invalid vmcs12. More below.
6468 */
6469 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
6470 return -EIO;
6471
6472 if (is_guest_mode(vcpu)) {
6473 /*
6474 * PML is never enabled when running L2, bail immediately if a
6475 * PML full exit occurs as something is horribly wrong.
6476 */
6477 if (exit_reason.basic == EXIT_REASON_PML_FULL)
6478 goto unexpected_vmexit;
6479
6480 /*
6481 * The host physical addresses of some pages of guest memory
6482 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
6483 * Page). The CPU may write to these pages via their host
6484 * physical address while L2 is running, bypassing any
6485 * address-translation-based dirty tracking (e.g. EPT write
6486 * protection).
6487 *
6488 * Mark them dirty on every exit from L2 to prevent them from
6489 * getting out of sync with dirty tracking.
6490 */
6491 nested_mark_vmcs12_pages_dirty(vcpu);
6492
6493 /*
6494 * Synthesize a triple fault if L2 state is invalid. In normal
6495 * operation, nested VM-Enter rejects any attempt to enter L2
6496 * with invalid state. However, those checks are skipped if
6497 * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If
6498 * L2 state is invalid, it means either L1 modified SMRAM state
6499 * or userspace provided bad state. Synthesize TRIPLE_FAULT as
6500 * doing so is architecturally allowed in the RSM case, and is
6501 * the least awful solution for the userspace case without
6502 * risking false positives.
6503 */
6504 if (vmx->emulation_required) {
6505 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
6506 return 1;
6507 }
6508
6509 if (nested_vmx_reflect_vmexit(vcpu))
6510 return 1;
6511 }
6512
6513 /* If guest state is invalid, start emulating. L2 is handled above. */
6514 if (vmx->emulation_required)
6515 return handle_invalid_guest_state(vcpu);
6516
6517 if (exit_reason.failed_vmentry) {
6518 dump_vmcs(vcpu);
6519 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6520 vcpu->run->fail_entry.hardware_entry_failure_reason
6521 = exit_reason.full;
6522 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6523 return 0;
6524 }
6525
6526 if (unlikely(vmx->fail)) {
6527 dump_vmcs(vcpu);
6528 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6529 vcpu->run->fail_entry.hardware_entry_failure_reason
6530 = vmcs_read32(VM_INSTRUCTION_ERROR);
6531 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6532 return 0;
6533 }
6534
6535 /*
6536 * Note:
6537 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a
6538 * delivered event, since that indicates the guest is accessing MMIO.
6539 * The VM-exit would simply be triggered again after returning to the
6540 * guest, causing an infinite loop.
6541 */
6542 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6543 (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
6544 exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
6545 exit_reason.basic != EXIT_REASON_PML_FULL &&
6546 exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
6547 exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
6548 exit_reason.basic != EXIT_REASON_NOTIFY)) {
6549 int ndata = 3;
6550
6551 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6552 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6553 vcpu->run->internal.data[0] = vectoring_info;
6554 vcpu->run->internal.data[1] = exit_reason.full;
6555 vcpu->run->internal.data[2] = vmx_get_exit_qual(vcpu);
6556 if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
6557 vcpu->run->internal.data[ndata++] =
6558 vmcs_read64(GUEST_PHYSICAL_ADDRESS);
6559 }
6560 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6561 vcpu->run->internal.ndata = ndata;
6562 return 0;
6563 }
6564
6565 if (unlikely(!enable_vnmi &&
6566 vmx->loaded_vmcs->soft_vnmi_blocked)) {
6567 if (!vmx_interrupt_blocked(vcpu)) {
6568 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6569 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6570 vcpu->arch.nmi_pending) {
6571 /*
6572 * This CPU doesn't help us find the end of an
6573 * NMI-blocked window if the guest runs with IRQs
6574 * disabled. So we pull the trigger after 1 s of
6575 * futile waiting, but inform the user about this.
6576 */
6577 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6578 "state on VCPU %d after 1 s timeout\n",
6579 __func__, vcpu->vcpu_id);
6580 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6581 }
6582 }
6583
6584 if (exit_fastpath != EXIT_FASTPATH_NONE)
6585 return 1;
6586
6587 if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
6588 goto unexpected_vmexit;
6589 #ifdef CONFIG_MITIGATION_RETPOLINE
6590 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6591 return kvm_emulate_wrmsr(vcpu);
6592 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
6593 return handle_preemption_timer(vcpu);
6594 else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
6595 return handle_interrupt_window(vcpu);
6596 else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6597 return handle_external_interrupt(vcpu);
6598 else if (exit_reason.basic == EXIT_REASON_HLT)
6599 return kvm_emulate_halt(vcpu);
6600 else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
6601 return handle_ept_misconfig(vcpu);
6602 #endif
6603
6604 exit_handler_index = array_index_nospec((u16)exit_reason.basic,
6605 kvm_vmx_max_exit_handlers);
6606 if (!kvm_vmx_exit_handlers[exit_handler_index])
6607 goto unexpected_vmexit;
6608
6609 return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
6610
6611 unexpected_vmexit:
6612 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6613 exit_reason.full);
6614 dump_vmcs(vcpu);
6615 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6616 vcpu->run->internal.suberror =
6617 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
6618 vcpu->run->internal.ndata = 2;
6619 vcpu->run->internal.data[0] = exit_reason.full;
6620 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6621 return 0;
6622 }
6623
6624 int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6625 {
6626 int ret = __vmx_handle_exit(vcpu, exit_fastpath);
6627
6628 /*
6629 * Exit to user space when a bus lock is detected, to inform userspace
6630 * that there is a bus lock in the guest.
6631 */
6632 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
6633 if (ret > 0)
6634 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
6635
6636 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
6637 return 0;
6638 }
6639 return ret;
6640 }
6641
6642 /*
6643 * Software based L1D cache flush which is used when microcode providing
6644 * the cache control MSR is not loaded.
6645 *
6646 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
6647 * flushing it requires reading in 64 KiB because the replacement algorithm
6648 * is not exactly LRU. This could be sized at runtime via topology
6649 * information, but as all relevant affected CPUs have a 32 KiB L1D cache
6650 * there is no point in doing so.
6651 */
6652 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
6653 {
6654 int size = PAGE_SIZE << L1D_CACHE_ORDER;
6655
6656 /*
6657 * This code is only executed when the flush mode is 'cond' or
6658 * 'always'
6659 */
6660 if (static_branch_likely(&vmx_l1d_flush_cond)) {
6661 bool flush_l1d;
6662
6663 /*
6664 * Clear the per-vcpu flush bit, it gets set again if the vCPU
6665 * is reloaded, i.e. if the vCPU is scheduled out or if KVM
6666 * exits to userspace, or if KVM reaches one of the unsafe
6667 * VMEXIT handlers, e.g. if KVM calls into the emulator.
6668 */
6669 flush_l1d = vcpu->arch.l1tf_flush_l1d;
6670 vcpu->arch.l1tf_flush_l1d = false;
6671
6672 /*
6673 * Clear the per-cpu flush bit, it gets set again from
6674 * the interrupt handlers.
6675 */
6676 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
6677 kvm_clear_cpu_l1tf_flush_l1d();
6678
6679 if (!flush_l1d)
6680 return;
6681 }
6682
6683 vcpu->stat.l1d_flush++;
6684
6685 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
6686 native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
6687 return;
6688 }
6689
6690 asm volatile(
6691 /* First ensure the pages are in the TLB */
6692 "xorl %%eax, %%eax\n"
6693 ".Lpopulate_tlb:\n\t"
6694 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6695 "addl $4096, %%eax\n\t"
6696 "cmpl %%eax, %[size]\n\t"
6697 "jne .Lpopulate_tlb\n\t"
6698 "xorl %%eax, %%eax\n\t"
6699 "cpuid\n\t"
6700 /* Now fill the cache */
6701 "xorl %%eax, %%eax\n"
6702 ".Lfill_cache:\n"
6703 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6704 "addl $64, %%eax\n\t"
6705 "cmpl %%eax, %[size]\n\t"
6706 "jne .Lfill_cache\n\t"
6707 "lfence\n"
6708 :: [flush_pages] "r" (vmx_l1d_flush_pages),
6709 [size] "r" (size)
6710 : "eax", "ebx", "ecx", "edx");
6711 }
6712
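/*
 * Program the TPR threshold used for TPR shadowing.  If a pending interrupt
 * (irr) is currently blocked by the guest's TPR, arm the threshold so that
 * lowering the TPR below the pending priority triggers a VM-Exit and lets
 * KVM inject the interrupt; otherwise leave the threshold at zero.
 */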
6713 void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6714 {
6715 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6716 int tpr_threshold;
6717
6718 if (is_guest_mode(vcpu) &&
6719 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
6720 return;
6721
6722 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
6723 if (is_guest_mode(vcpu))
6724 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
6725 else
6726 vmcs_write32(TPR_THRESHOLD, tpr_threshold);
6727 }
6728
6729 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
6730 {
6731 struct vcpu_vmx *vmx = to_vmx(vcpu);
6732 u32 sec_exec_control;
6733
6734 if (!lapic_in_kernel(vcpu))
6735 return;
6736
6737 if (!flexpriority_enabled &&
6738 !cpu_has_vmx_virtualize_x2apic_mode())
6739 return;
6740
6741 /* Postpone execution until vmcs01 is the current VMCS. */
6742 if (is_guest_mode(vcpu)) {
6743 vmx->nested.change_vmcs01_virtual_apic_mode = true;
6744 return;
6745 }
6746
6747 sec_exec_control = secondary_exec_controls_get(vmx);
6748 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6749 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6750
6751 switch (kvm_get_apic_mode(vcpu)) {
6752 case LAPIC_MODE_INVALID:
6753 WARN_ONCE(true, "Invalid local APIC state");
6754 break;
6755 case LAPIC_MODE_DISABLED:
6756 break;
6757 case LAPIC_MODE_XAPIC:
6758 if (flexpriority_enabled) {
6759 sec_exec_control |=
6760 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6761 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6762
6763 /*
6764 * Flush the TLB; reloading the APIC access page will
6765 * only do so if its physical address has changed, but
6766 * the guest may have inserted a non-APIC mapping into
6767 * the TLB while the APIC access page was disabled.
6768 */
6769 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6770 }
6771 break;
6772 case LAPIC_MODE_X2APIC:
6773 if (cpu_has_vmx_virtualize_x2apic_mode())
6774 sec_exec_control |=
6775 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6776 break;
6777 }
6778 secondary_exec_controls_set(vmx, sec_exec_control);
6779
6780 vmx_update_msr_bitmap_x2apic(vcpu);
6781 }
6782
6783 void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
6784 {
6785 const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
6786 struct kvm *kvm = vcpu->kvm;
6787 struct kvm_memslots *slots = kvm_memslots(kvm);
6788 struct kvm_memory_slot *slot;
6789 unsigned long mmu_seq;
6790 kvm_pfn_t pfn;
6791
6792 /* Defer reload until vmcs01 is the current VMCS. */
6793 if (is_guest_mode(vcpu)) {
6794 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
6795 return;
6796 }
6797
6798 if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6799 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
6800 return;
6801
6802 /*
6803 * Explicitly grab the memslot using KVM's internal slot ID to ensure
6804 * KVM doesn't unintentionally grab a userspace memslot. It _should_
6805 * be impossible for userspace to create a memslot for the APIC when
6806 * APICv is enabled, but paranoia won't hurt in this case.
6807 */
6808 slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
6809 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
6810 return;
6811
6812 /*
6813 * Ensure that the mmu_notifier sequence count is read before KVM
6814 * retrieves the pfn from the primary MMU. Note, the memslot is
6815 * protected by SRCU, not the mmu_notifier. Pairs with the smp_wmb()
6816 * in kvm_mmu_invalidate_end().
6817 */
6818 mmu_seq = kvm->mmu_invalidate_seq;
6819 smp_rmb();
6820
6821 /*
6822 * No need to retry if the memslot does not exist or is invalid. KVM
6823 * controls the APIC-access page memslot, and only deletes the memslot
6824 * if APICv is permanently inhibited, i.e. the memslot won't reappear.
6825 */
6826 pfn = gfn_to_pfn_memslot(slot, gfn);
6827 if (is_error_noslot_pfn(pfn))
6828 return;
6829
6830 read_lock(&vcpu->kvm->mmu_lock);
6831 if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
6832 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6833 read_unlock(&vcpu->kvm->mmu_lock);
6834 goto out;
6835 }
6836
6837 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
6838 read_unlock(&vcpu->kvm->mmu_lock);
6839
6840 /*
6841 * No need for a manual TLB flush at this point, KVM has already done a
6842 * flush if there were SPTEs pointing at the previous page.
6843 */
6844 out:
6845 /*
6846 * Do not pin the APIC access page in memory; the MMU notifier
6847 * will call us again if it is migrated or swapped out.
6848 */
6849 kvm_release_pfn_clean(pfn);
6850 }
6851
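/*
 * Mirror the highest in-service vector into SVI, the high byte of the guest
 * interrupt status field, so virtual-EOI processing uses the right vector.
 */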
6852 void vmx_hwapic_isr_update(int max_isr)
6853 {
6854 u16 status;
6855 u8 old;
6856
6857 if (max_isr == -1)
6858 max_isr = 0;
6859
6860 status = vmcs_read16(GUEST_INTR_STATUS);
6861 old = status >> 8;
6862 if (max_isr != old) {
6863 status &= 0xff;
6864 status |= max_isr << 8;
6865 vmcs_write16(GUEST_INTR_STATUS, status);
6866 }
6867 }
6868
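/* Set RVI, the low byte of the guest interrupt status (requesting vector). */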
6869 static void vmx_set_rvi(int vector)
6870 {
6871 u16 status;
6872 u8 old;
6873
6874 if (vector == -1)
6875 vector = 0;
6876
6877 status = vmcs_read16(GUEST_INTR_STATUS);
6878 old = (u8)status & 0xff;
6879 if ((u8)vector != old) {
6880 status &= ~0xff;
6881 status |= (u8)vector;
6882 vmcs_write16(GUEST_INTR_STATUS, status);
6883 }
6884 }
6885
6886 void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
6887 {
6888 /*
6889 * When running L2, updating RVI is only relevant when
6890 * vmcs12 virtual-interrupt-delivery is enabled.
6891 * However, it can be enabled only when L1 also
6892 * intercepts external interrupts, and in that case
6893 * we should not update vmcs02's RVI but instead intercept
6894 * the interrupt. Therefore, do nothing when running L2.
6895 */
6896 if (!is_guest_mode(vcpu))
6897 vmx_set_rvi(max_irr);
6898 }
6899
6900 int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
6901 {
6902 struct vcpu_vmx *vmx = to_vmx(vcpu);
6903 int max_irr;
6904 bool got_posted_interrupt;
6905
6906 if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
6907 return -EIO;
6908
6909 if (pi_test_on(&vmx->pi_desc)) {
6910 pi_clear_on(&vmx->pi_desc);
6911 /*
6912 * IOMMU can write to PID.ON, so the barrier matters even on UP.
6913 * But on x86 this is just a compiler barrier anyway.
6914 */
6915 smp_mb__after_atomic();
6916 got_posted_interrupt =
6917 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6918 } else {
6919 max_irr = kvm_lapic_find_highest_irr(vcpu);
6920 got_posted_interrupt = false;
6921 }
6922
6923 /*
6924 * Newly recognized interrupts are injected via either virtual interrupt
6925 * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is
6926 * disabled in two cases:
6927 *
6928 * 1) If L2 is running and the vCPU has a new pending interrupt. If L1
6929 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
6930 * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected
6931 * into L2, but KVM doesn't use virtual interrupt delivery to inject
6932 * interrupts into L2, and so KVM_REQ_EVENT is again needed.
6933 *
6934 * 2) If APICv is disabled for this vCPU, assigned devices may still
6935 * attempt to post interrupts. The posted interrupt vector will cause
6936 * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
6937 */
6938 if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
6939 vmx_set_rvi(max_irr);
6940 else if (got_posted_interrupt)
6941 kvm_make_request(KVM_REQ_EVENT, vcpu);
6942
6943 return max_irr;
6944 }
6945
6946 void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6947 {
6948 if (!kvm_vcpu_apicv_active(vcpu))
6949 return;
6950
6951 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6952 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6953 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
6954 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
6955 }
6956
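/* Clear the posted-interrupt descriptor (ON bit and PIR) before APIC state is restored. */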
6957 void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
6958 {
6959 struct vcpu_vmx *vmx = to_vmx(vcpu);
6960
6961 pi_clear_on(&vmx->pi_desc);
6962 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
6963 }
6964
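/* Out-of-line helpers that invoke the host IRQ/NMI handlers with IRQs disabled. */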
6965 void vmx_do_interrupt_irqoff(unsigned long entry);
6966 void vmx_do_nmi_irqoff(void);
6967
6968 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
6969 {
6970 /*
6971 * Save xfd_err to guest_fpu before interrupts are enabled, so the
6972 * MSR value is not clobbered by host activity before the guest
6973 * has a chance to consume it.
6974 *
6975 * Do not blindly read xfd_err here, since this exception might
6976 * be caused by L1 interception on a platform which doesn't
6977 * support xfd at all.
6978 *
6979 * Do it conditionally upon guest_fpu::xfd. xfd_err matters
6980 * only when xfd contains a non-zero value.
6981 *
6982 * Queuing exception is done in vmx_handle_exit. See comment there.
6983 */
6984 if (vcpu->arch.guest_fpu.fpstate->xfd)
6985 rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
6986 }
6987
6988 static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
6989 {
6990 /* if exit due to PF check for async PF */
6991 if (is_page_fault(intr_info))
6992 vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6993 /* if exit due to NM, handle before interrupts are enabled */
6994 else if (is_nm_fault(intr_info))
6995 handle_nm_fault_irqoff(vcpu);
6996 /* Handle machine checks before interrupts are enabled */
6997 else if (is_machine_check(intr_info))
6998 kvm_machine_check();
6999 }
7000
7001 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
7002 u32 intr_info)
7003 {
7004 unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
7005
7006 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
7007 "unexpected VM-Exit interrupt info: 0x%x", intr_info))
7008 return;
7009
7010 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
7011 if (cpu_feature_enabled(X86_FEATURE_FRED))
7012 fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
7013 else
7014 vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
7015 kvm_after_interrupt(vcpu);
7016
7017 vcpu->arch.at_instruction_boundary = true;
7018 }
7019
7020 void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
7021 {
7022 struct vcpu_vmx *vmx = to_vmx(vcpu);
7023
7024 if (vmx->emulation_required)
7025 return;
7026
7027 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
7028 handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
7029 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
7030 handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
7031 }
7032
7033 /*
7034 * The kvm parameter can be NULL (module initialization, or invocation before
7035 * VM creation). Be sure to check the kvm parameter before using it.
7036 */
7037 bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
7038 {
7039 switch (index) {
7040 case MSR_IA32_SMBASE:
7041 if (!IS_ENABLED(CONFIG_KVM_SMM))
7042 return false;
7043 /*
7044 * We cannot do SMM unless we can run the guest in big
7045 * real mode.
7046 */
7047 return enable_unrestricted_guest || emulate_invalid_guest_state;
7048 case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
7049 return nested;
7050 case MSR_AMD64_VIRT_SPEC_CTRL:
7051 case MSR_AMD64_TSC_RATIO:
7052 /* This is AMD only. */
7053 return false;
7054 default:
7055 return true;
7056 }
7057 }
7058
7059 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
7060 {
7061 u32 exit_intr_info;
7062 bool unblock_nmi;
7063 u8 vector;
7064 bool idtv_info_valid;
7065
7066 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7067
7068 if (enable_vnmi) {
7069 if (vmx->loaded_vmcs->nmi_known_unmasked)
7070 return;
7071
7072 exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
7073 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
7074 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
7075 /*
7076 * SDM 3: 27.7.1.2 (September 2008)
7077 * Re-set bit "block by NMI" before VM entry if vmexit caused by
7078 * a guest IRET fault.
7079 * SDM 3: 23.2.2 (September 2008)
7080 * Bit 12 is undefined in any of the following cases:
7081 * If the VM exit sets the valid bit in the IDT-vectoring
7082 * information field.
7083 * If the VM exit is due to a double fault.
7084 */
7085 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
7086 vector != DF_VECTOR && !idtv_info_valid)
7087 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7088 GUEST_INTR_STATE_NMI);
7089 else
7090 vmx->loaded_vmcs->nmi_known_unmasked =
7091 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
7092 & GUEST_INTR_STATE_NMI);
7093 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
7094 vmx->loaded_vmcs->vnmi_blocked_time +=
7095 ktime_to_ns(ktime_sub(ktime_get(),
7096 vmx->loaded_vmcs->entry_time));
7097 }
7098
7099 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7100 u32 idt_vectoring_info,
7101 int instr_len_field,
7102 int error_code_field)
7103 {
7104 u8 vector;
7105 int type;
7106 bool idtv_info_valid;
7107
7108 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7109
7110 vcpu->arch.nmi_injected = false;
7111 kvm_clear_exception_queue(vcpu);
7112 kvm_clear_interrupt_queue(vcpu);
7113
7114 if (!idtv_info_valid)
7115 return;
7116
7117 kvm_make_request(KVM_REQ_EVENT, vcpu);
7118
7119 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
7120 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
7121
7122 switch (type) {
7123 case INTR_TYPE_NMI_INTR:
7124 vcpu->arch.nmi_injected = true;
7125 /*
7126 * SDM 3: 27.7.1.2 (September 2008)
7127 * Clear bit "block by NMI" before VM entry if a NMI
7128 * delivery faulted.
7129 */
7130 vmx_set_nmi_mask(vcpu, false);
7131 break;
7132 case INTR_TYPE_SOFT_EXCEPTION:
7133 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7134 fallthrough;
7135 case INTR_TYPE_HARD_EXCEPTION:
7136 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
7137 u32 err = vmcs_read32(error_code_field);
7138 kvm_requeue_exception_e(vcpu, vector, err);
7139 } else
7140 kvm_requeue_exception(vcpu, vector);
7141 break;
7142 case INTR_TYPE_SOFT_INTR:
7143 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7144 fallthrough;
7145 case INTR_TYPE_EXT_INTR:
7146 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7147 break;
7148 default:
7149 break;
7150 }
7151 }
7152
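/*
 * Re-queue an event that was being delivered when the VM-Exit occurred,
 * based on the IDT-vectoring information field.
 */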
7153 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7154 {
7155 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7156 VM_EXIT_INSTRUCTION_LEN,
7157 IDT_VECTORING_ERROR_CODE);
7158 }
7159
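/*
 * Pull a pending but not yet delivered event out of VM_ENTRY_INTR_INFO back
 * into KVM's queues, then clear the field so the event isn't injected.
 */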
7160 void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7161 {
7162 __vmx_complete_interrupts(vcpu,
7163 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7164 VM_ENTRY_INSTRUCTION_LEN,
7165 VM_ENTRY_EXCEPTION_ERROR_CODE);
7166
7167 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
7168 }
7169
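/*
 * Use the VMCS MSR load/store lists to atomically switch the perf MSRs
 * reported by perf_guest_get_msrs() across VM-Entry/VM-Exit; entries whose
 * host and guest values already match are dropped from the lists.
 */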
7170 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7171 {
7172 int i, nr_msrs;
7173 struct perf_guest_switch_msr *msrs;
7174 struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7175
7176 pmu->host_cross_mapped_mask = 0;
7177 if (pmu->pebs_enable & pmu->global_ctrl)
7178 intel_pmu_cross_mapped_check(pmu);
7179
7180 /* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
7181 msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
7182 if (!msrs)
7183 return;
7184
7185 for (i = 0; i < nr_msrs; i++)
7186 if (msrs[i].host == msrs[i].guest)
7187 clear_atomic_switch_msr(vmx, msrs[i].msr);
7188 else
7189 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7190 msrs[i].host, false);
7191 }
7192
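/*
 * Program the VMX preemption timer: zero to force an immediate exit, the
 * remaining TSC delta (scaled by the preemption timer rate) when a deadline
 * is armed, or the maximum value to effectively disable it otherwise.
 */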
7193 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7194 {
7195 struct vcpu_vmx *vmx = to_vmx(vcpu);
7196 u64 tscl;
7197 u32 delta_tsc;
7198
7199 if (force_immediate_exit) {
7200 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
7201 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7202 } else if (vmx->hv_deadline_tsc != -1) {
7203 tscl = rdtsc();
7204 if (vmx->hv_deadline_tsc > tscl)
7205 /* set_hv_timer ensures the delta fits in 32-bits */
7206 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
7207 cpu_preemption_timer_multi);
7208 else
7209 delta_tsc = 0;
7210
7211 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
7212 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7213 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
7214 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
7215 vmx->loaded_vmcs->hv_timer_soft_disabled = true;
7216 }
7217 }
7218
7219 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
7220 {
7221 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7222 vmx->loaded_vmcs->host_state.rsp = host_rsp;
7223 vmcs_writel(HOST_RSP, host_rsp);
7224 }
7225 }
7226
7227 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
7228 unsigned int flags)
7229 {
7230 u64 hostval = this_cpu_read(x86_spec_ctrl_current);
7231
7232 if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
7233 return;
7234
7235 if (flags & VMX_RUN_SAVE_SPEC_CTRL)
7236 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
7237
7238 /*
7239 * If the guest/host SPEC_CTRL values differ, restore the host value.
7240 *
7241 * For legacy IBRS, the IBRS bit always needs to be written after
7242 * transitioning from a less privileged predictor mode, regardless of
7243 * whether the guest/host values differ.
7244 */
7245 if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
7246 vmx->spec_ctrl != hostval)
7247 native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
7248
7249 barrier_nospec();
7250 }
7251
7252 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
7253 bool force_immediate_exit)
7254 {
7255 /*
7256 * If L2 is active, only VMX preemption timer exits can be handled in
7257 * the fastpath; all other exits must use the slow path.
7258 */
7259 if (is_guest_mode(vcpu) &&
7260 to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER)
7261 return EXIT_FASTPATH_NONE;
7262
7263 switch (to_vmx(vcpu)->exit_reason.basic) {
7264 case EXIT_REASON_MSR_WRITE:
7265 return handle_fastpath_set_msr_irqoff(vcpu);
7266 case EXIT_REASON_PREEMPTION_TIMER:
7267 return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
7268 default:
7269 return EXIT_FASTPATH_NONE;
7270 }
7271 }
7272
7273 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
7274 unsigned int flags)
7275 {
7276 struct vcpu_vmx *vmx = to_vmx(vcpu);
7277
7278 guest_state_enter_irqoff();
7279
7280 /*
7281 * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
7282 * mitigation for MDS is done late in VMentry and is still
7283 * executed in spite of L1D Flush. This is because an extra VERW
7284 * should not matter much after the big hammer L1D Flush.
7285 */
7286 if (static_branch_unlikely(&vmx_l1d_should_flush))
7287 vmx_l1d_flush(vcpu);
7288 else if (static_branch_unlikely(&mmio_stale_data_clear) &&
7289 kvm_arch_has_assigned_device(vcpu->kvm))
7290 mds_clear_cpu_buffers();
7291
7292 vmx_disable_fb_clear(vmx);
7293
7294 if (vcpu->arch.cr2 != native_read_cr2())
7295 native_write_cr2(vcpu->arch.cr2);
7296
7297 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7298 flags);
7299
7300 vcpu->arch.cr2 = native_read_cr2();
7301 vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
7302
7303 vmx->idt_vectoring_info = 0;
7304
7305 vmx_enable_fb_clear(vmx);
7306
7307 if (unlikely(vmx->fail)) {
7308 vmx->exit_reason.full = 0xdead;
7309 goto out;
7310 }
7311
7312 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
7313 if (likely(!vmx->exit_reason.failed_vmentry))
7314 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7315
7316 if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
7317 is_nmi(vmx_get_intr_info(vcpu))) {
7318 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
7319 if (cpu_feature_enabled(X86_FEATURE_FRED))
7320 fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
7321 else
7322 vmx_do_nmi_irqoff();
7323 kvm_after_interrupt(vcpu);
7324 }
7325
7326 out:
7327 guest_state_exit_irqoff();
7328 }
7329
7330 fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7331 {
7332 struct vcpu_vmx *vmx = to_vmx(vcpu);
7333 unsigned long cr3, cr4;
7334
7335 /* Record the guest's net vcpu time for enforced NMI injections. */
7336 if (unlikely(!enable_vnmi &&
7337 vmx->loaded_vmcs->soft_vnmi_blocked))
7338 vmx->loaded_vmcs->entry_time = ktime_get();
7339
7340 /*
7341 * Don't enter VMX if guest state is invalid; let the exit handler
7342 * start emulation until we arrive back at a valid state. Synthesize a
7343 * consistency check VM-Exit due to invalid guest state and bail.
7344 */
7345 if (unlikely(vmx->emulation_required)) {
7346 vmx->fail = 0;
7347
7348 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
7349 vmx->exit_reason.failed_vmentry = 1;
7350 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
7351 vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
7352 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
7353 vmx->exit_intr_info = 0;
7354 return EXIT_FASTPATH_NONE;
7355 }
7356
7357 trace_kvm_entry(vcpu, force_immediate_exit);
7358
7359 if (vmx->ple_window_dirty) {
7360 vmx->ple_window_dirty = false;
7361 vmcs_write32(PLE_WINDOW, vmx->ple_window);
7362 }
7363
7364 /*
7365 * We did this in prepare_switch_to_guest, because it needs to
7366 * be within srcu_read_lock.
7367 */
7368 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
7369
7370 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
7371 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7372 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
7373 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7374 vcpu->arch.regs_dirty = 0;
7375
7376 /*
7377 * Refresh vmcs.HOST_CR3 if necessary. This must be done immediately
7378 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
7379 * it switches back to the current->mm, which can occur in KVM context
7380 * when switching to a temporary mm to patch kernel code, e.g. if KVM
7381 * toggles a static key while handling a VM-Exit.
7382 */
7383 cr3 = __get_current_cr3_fast();
7384 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
7385 vmcs_writel(HOST_CR3, cr3);
7386 vmx->loaded_vmcs->host_state.cr3 = cr3;
7387 }
7388
7389 cr4 = cr4_read_shadow();
7390 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
7391 vmcs_writel(HOST_CR4, cr4);
7392 vmx->loaded_vmcs->host_state.cr4 = cr4;
7393 }
7394
7395 /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
7396 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
7397 set_debugreg(vcpu->arch.dr6, 6);
7398
7399 /* When single-stepping over STI and MOV SS, we must clear the
7400 * corresponding interruptibility bits in the guest state. Otherwise
7401 * vmentry fails as it then expects bit 14 (BS) in pending debug
7402 * exceptions to be set, but that's not correct for the guest debugging
7403 * case. */
7404 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7405 vmx_set_interrupt_shadow(vcpu, 0);
7406
7407 kvm_load_guest_xsave_state(vcpu);
7408
7409 pt_guest_enter(vmx);
7410
7411 atomic_switch_perf_msrs(vmx);
7412 if (intel_pmu_lbr_is_enabled(vcpu))
7413 vmx_passthrough_lbr_msrs(vcpu);
7414
7415 if (enable_preemption_timer)
7416 vmx_update_hv_timer(vcpu, force_immediate_exit);
7417 else if (force_immediate_exit)
7418 smp_send_reschedule(vcpu->cpu);
7419
7420 kvm_wait_lapic_expire(vcpu);
7421
7422 /* The actual VMENTER/EXIT is in the .noinstr.text section. */
7423 vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7424
7425 /* All fields are clean at this point */
7426 if (kvm_is_using_evmcs()) {
7427 current_evmcs->hv_clean_fields |=
7428 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
7429
7430 current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
7431 }
7432
7433 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7434 if (vmx->host_debugctlmsr)
7435 update_debugctlmsr(vmx->host_debugctlmsr);
7436
7437 #ifndef CONFIG_X86_64
7438 /*
7439 * The sysexit path does not restore ds/es, so we must set them to
7440 * a reasonable value ourselves.
7441 *
7442 * We can't defer this to vmx_prepare_switch_to_host() since that
7443 * function may be executed in interrupt context, which saves and
7444 * restores segments around it, nullifying its effect.
7445 */
7446 loadsegment(ds, __USER_DS);
7447 loadsegment(es, __USER_DS);
7448 #endif
7449
7450 pt_guest_exit(vmx);
7451
7452 kvm_load_host_xsave_state(vcpu);
7453
7454 if (is_guest_mode(vcpu)) {
7455 /*
7456 * Track VMLAUNCH/VMRESUME that have made it past guest state
7457 * checking.
7458 */
7459 if (vmx->nested.nested_run_pending &&
7460 !vmx->exit_reason.failed_vmentry)
7461 ++vcpu->stat.nested_run;
7462
7463 vmx->nested.nested_run_pending = 0;
7464 }
7465
7466 if (unlikely(vmx->fail))
7467 return EXIT_FASTPATH_NONE;
7468
7469 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
7470 kvm_machine_check();
7471
7472 trace_kvm_exit(vcpu, KVM_ISA_VMX);
7473
7474 if (unlikely(vmx->exit_reason.failed_vmentry))
7475 return EXIT_FASTPATH_NONE;
7476
7477 vmx->loaded_vmcs->launched = 1;
7478
7479 vmx_recover_nmi_blocking(vmx);
7480 vmx_complete_interrupts(vmx);
7481
7482 return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
7483 }
7484
7485 void vmx_vcpu_free(struct kvm_vcpu *vcpu)
7486 {
7487 struct vcpu_vmx *vmx = to_vmx(vcpu);
7488
7489 if (enable_pml)
7490 vmx_destroy_pml_buffer(vmx);
7491 free_vpid(vmx->vpid);
7492 nested_vmx_free_vcpu(vcpu);
7493 free_loaded_vmcs(vmx->loaded_vmcs);
7494 free_page((unsigned long)vmx->ve_info);
7495 }
7496
7497 int vmx_vcpu_create(struct kvm_vcpu *vcpu)
7498 {
7499 struct vmx_uret_msr *tsx_ctrl;
7500 struct vcpu_vmx *vmx;
7501 int i, err;
7502
7503 BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
7504 vmx = to_vmx(vcpu);
7505
7506 INIT_LIST_HEAD(&vmx->pi_wakeup_list);
7507
7508 err = -ENOMEM;
7509
7510 vmx->vpid = allocate_vpid();
7511
7512 /*
7513 * If PML is turned on, a failure to enable PML simply results in a
7514 * failure to create the vCPU, so the PML logic can be kept simple
7515 * (e.g. no need to deal with PML being enabled for only a subset of
7516 * the guest's vCPUs).
7517 */
7518 if (enable_pml) {
7519 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7520 if (!vmx->pml_pg)
7521 goto free_vpid;
7522 }
7523
7524 for (i = 0; i < kvm_nr_uret_msrs; ++i)
7525 vmx->guest_uret_msrs[i].mask = -1ull;
7526 if (boot_cpu_has(X86_FEATURE_RTM)) {
7527 /*
7528 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
7529 * Keep the host value unchanged to avoid changing CPUID bits
7530 * under the host kernel's feet.
7531 */
7532 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7533 if (tsx_ctrl)
7534 tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
7535 }
7536
7537 err = alloc_loaded_vmcs(&vmx->vmcs01);
7538 if (err < 0)
7539 goto free_pml;
7540
7541 /*
7542 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
7543 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
7544 * feature only for vmcs01, KVM currently isn't equipped to realize any
7545 * performance benefits from enabling it for vmcs02.
7546 */
7547 if (kvm_is_using_evmcs() &&
7548 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
7549 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7550
7551 evmcs->hv_enlightenments_control.msr_bitmap = 1;
7552 }
7553
7554 /* The MSR bitmap starts with all ones */
7555 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7556 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7557
7558 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
7559 #ifdef CONFIG_X86_64
7560 vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
7561 vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
7562 vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
7563 #endif
7564 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
7565 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
7566 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
7567 if (kvm_cstate_in_guest(vcpu->kvm)) {
7568 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
7569 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
7570 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
7571 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
7572 }
7573
7574 vmx->loaded_vmcs = &vmx->vmcs01;
7575
7576 if (cpu_need_virtualize_apic_accesses(vcpu)) {
7577 err = kvm_alloc_apic_access_page(vcpu->kvm);
7578 if (err)
7579 goto free_vmcs;
7580 }
7581
7582 if (enable_ept && !enable_unrestricted_guest) {
7583 err = init_rmode_identity_map(vcpu->kvm);
7584 if (err)
7585 goto free_vmcs;
7586 }
7587
7588 err = -ENOMEM;
7589 if (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_EPT_VIOLATION_VE) {
7590 struct page *page;
7591
7592 BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
7593
7594 /* ve_info must be page aligned. */
7595 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7596 if (!page)
7597 goto free_vmcs;
7598
7599 vmx->ve_info = page_to_virt(page);
7600 }
7601
7602 if (vmx_can_use_ipiv(vcpu))
7603 WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
7604 __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
7605
7606 return 0;
7607
7608 free_vmcs:
7609 free_loaded_vmcs(vmx->loaded_vmcs);
7610 free_pml:
7611 vmx_destroy_pml_buffer(vmx);
7612 free_vpid:
7613 free_vpid(vmx->vpid);
7614 return err;
7615 }
7616
7617 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7618 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7619
7620 int vmx_vm_init(struct kvm *kvm)
7621 {
7622 if (!ple_gap)
7623 kvm->arch.pause_in_guest = true;
7624
7625 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
7626 switch (l1tf_mitigation) {
7627 case L1TF_MITIGATION_OFF:
7628 case L1TF_MITIGATION_FLUSH_NOWARN:
7629 /* 'I explicitly don't care' is set */
7630 break;
7631 case L1TF_MITIGATION_FLUSH:
7632 case L1TF_MITIGATION_FLUSH_NOSMT:
7633 case L1TF_MITIGATION_FULL:
7634 /*
7635 * Warn upon starting the first VM in a potentially
7636 * insecure environment.
7637 */
7638 if (sched_smt_active())
7639 pr_warn_once(L1TF_MSG_SMT);
7640 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
7641 pr_warn_once(L1TF_MSG_L1D);
7642 break;
7643 case L1TF_MITIGATION_FULL_FORCE:
7644 /* Flush is enforced */
7645 break;
7646 }
7647 }
7648 return 0;
7649 }
7650
7651 u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7652 {
7653 /*
7654 * Force UC for host MMIO regions, as allowing the guest to access MMIO
7655 * with cacheable accesses will result in Machine Checks.
7656 */
7657 if (is_mmio)
7658 return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7659
7660 /*
7661 * Force WB and ignore guest PAT if the VM does NOT have a non-coherent
7662 * device attached. Letting the guest control memory types on Intel
7663 * CPUs may result in unexpected behavior, and so KVM's ABI is to trust
7664 * the guest to behave only as a last resort.
7665 */
7666 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
7667 return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7668
7669 return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT);
7670 }
7671
7672 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
7673 {
7674 /*
7675 * These bits in the secondary execution controls field
7676 * are dynamic; the others are mostly based on the hypervisor
7677 * architecture and the guest's CPUID. Do not touch the
7678 * dynamic bits.
7679 */
7680 u32 mask =
7681 SECONDARY_EXEC_SHADOW_VMCS |
7682 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
7683 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7684 SECONDARY_EXEC_DESC;
7685
7686 u32 cur_ctl = secondary_exec_controls_get(vmx);
7687
7688 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7689 }
7690
7691 /*
7692 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
7693 * (indicating "allowed-1") if they are supported in the guest's CPUID.
7694 */
7695 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
7696 {
7697 struct vcpu_vmx *vmx = to_vmx(vcpu);
7698 struct kvm_cpuid_entry2 *entry;
7699
7700 vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7701 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7702
7703 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
7704 if (entry && (entry->_reg & (_cpuid_mask))) \
7705 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
7706 } while (0)
7707
7708 entry = kvm_find_cpuid_entry(vcpu, 0x1);
7709 cr4_fixed1_update(X86_CR4_VME, edx, feature_bit(VME));
7710 cr4_fixed1_update(X86_CR4_PVI, edx, feature_bit(VME));
7711 cr4_fixed1_update(X86_CR4_TSD, edx, feature_bit(TSC));
7712 cr4_fixed1_update(X86_CR4_DE, edx, feature_bit(DE));
7713 cr4_fixed1_update(X86_CR4_PSE, edx, feature_bit(PSE));
7714 cr4_fixed1_update(X86_CR4_PAE, edx, feature_bit(PAE));
7715 cr4_fixed1_update(X86_CR4_MCE, edx, feature_bit(MCE));
7716 cr4_fixed1_update(X86_CR4_PGE, edx, feature_bit(PGE));
7717 cr4_fixed1_update(X86_CR4_OSFXSR, edx, feature_bit(FXSR));
7718 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
7719 cr4_fixed1_update(X86_CR4_VMXE, ecx, feature_bit(VMX));
7720 cr4_fixed1_update(X86_CR4_SMXE, ecx, feature_bit(SMX));
7721 cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID));
7722 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, feature_bit(XSAVE));
7723
7724 entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 0);
7725 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, feature_bit(FSGSBASE));
7726 cr4_fixed1_update(X86_CR4_SMEP, ebx, feature_bit(SMEP));
7727 cr4_fixed1_update(X86_CR4_SMAP, ebx, feature_bit(SMAP));
7728 cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU));
7729 cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP));
7730 cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57));
7731
7732 entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
7733 cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM));
7734
7735 #undef cr4_fixed1_update
7736 }
7737
7738 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7739 {
7740 struct vcpu_vmx *vmx = to_vmx(vcpu);
7741 struct kvm_cpuid_entry2 *best = NULL;
7742 int i;
7743
7744 for (i = 0; i < PT_CPUID_LEAVES; i++) {
7745 best = kvm_find_cpuid_entry_index(vcpu, 0x14, i);
7746 if (!best)
7747 return;
7748 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7749 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7750 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7751 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7752 }
7753
7754 /* Get the number of configurable Address Ranges for filtering */
7755 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps,
7756 PT_CAP_num_address_ranges);
7757
7758 /* Initialize and clear the no dependency bits */
7759 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7760 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC |
7761 RTIT_CTL_BRANCH_EN);
7762
7763 /*
7764 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1, CR3Filter can be set; otherwise
7765 * setting it will inject a #GP.
7766 */
7767 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7768 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7769
7770 /*
7771 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
7772 * PSBFreq can be set
7773 */
7774 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7775 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7776 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
7777
7778 /*
7779 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set
7780 */
7781 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7782 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7783 RTIT_CTL_MTC_RANGE);
7784
7785 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
7786 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7787 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7788 RTIT_CTL_PTW_EN);
7789
7790 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
7791 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7792 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7793
7794 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
7795 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7796 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7797
7798 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
7799 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7800 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7801
7802 /* unmask address range configure area */
7803 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++)
7804 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7805 }
7806
7807 void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
7808 {
7809 struct vcpu_vmx *vmx = to_vmx(vcpu);
7810
7811 /*
7812 * XSAVES is effectively enabled if and only if XSAVE is also exposed
7813 * to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
7814 * set if and only if XSAVE is supported.
7815 */
7816 if (boot_cpu_has(X86_FEATURE_XSAVE) &&
7817 guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
7818 kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
7819
7820 kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
7821 kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
7822
7823 vmx_setup_uret_msrs(vmx);
7824
7825 if (cpu_has_secondary_exec_ctrls())
7826 vmcs_set_secondary_exec_control(vmx,
7827 vmx_secondary_exec_control(vmx));
7828
7829 if (guest_can_use(vcpu, X86_FEATURE_VMX))
7830 vmx->msr_ia32_feature_control_valid_bits |=
7831 FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7832 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
7833 else
7834 vmx->msr_ia32_feature_control_valid_bits &=
7835 ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7836 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
7837
7838 if (guest_can_use(vcpu, X86_FEATURE_VMX))
7839 nested_vmx_cr_fixed1_bits_update(vcpu);
7840
7841 if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
7842 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
7843 update_intel_pt_cfg(vcpu);
7844
7845 if (boot_cpu_has(X86_FEATURE_RTM)) {
7846 struct vmx_uret_msr *msr;
7847 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7848 if (msr) {
7849 bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
7850 vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7851 }
7852 }
7853
7854 if (kvm_cpu_cap_has(X86_FEATURE_XFD))
7855 vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
7856 !guest_cpuid_has(vcpu, X86_FEATURE_XFD));
7857
7858 if (boot_cpu_has(X86_FEATURE_IBPB))
7859 vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
7860 !guest_has_pred_cmd_msr(vcpu));
7861
7862 if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
7863 vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
7864 !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
7865
7866 set_cr4_guest_host_mask(vmx);
7867
7868 vmx_write_encls_bitmap(vcpu, NULL);
7869 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
7870 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
7871 else
7872 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
7873
7874 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
7875 vmx->msr_ia32_feature_control_valid_bits |=
7876 FEAT_CTL_SGX_LC_ENABLED;
7877 else
7878 vmx->msr_ia32_feature_control_valid_bits &=
7879 ~FEAT_CTL_SGX_LC_ENABLED;
7880
7881 /* Refresh #PF interception to account for MAXPHYADDR changes. */
7882 vmx_update_exception_bitmap(vcpu);
7883 }
7884
7885 static __init u64 vmx_get_perf_capabilities(void)
7886 {
7887 u64 perf_cap = PMU_CAP_FW_WRITES;
7888 u64 host_perf_cap = 0;
7889
7890 if (!enable_pmu)
7891 return 0;
7892
7893 if (boot_cpu_has(X86_FEATURE_PDCM))
7894 rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
7895
7896 if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
7897 x86_perf_get_lbr(&vmx_lbr_caps);
7898
7899 /*
7900 * KVM requires LBR callstack support, as the overhead due to
7901 * context switching LBRs without said support is too high.
7902 * See intel_pmu_create_guest_lbr_event() for more info.
7903 */
7904 if (!vmx_lbr_caps.has_callstack)
7905 memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
7906 else if (vmx_lbr_caps.nr)
7907 perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
7908 }
7909
7910 if (vmx_pebs_supported()) {
7911 perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
7912
7913 /*
7914 * Disallow adaptive PEBS as it is functionally broken, can be
7915 * used by the guest to read *host* LBRs, and can be used to
7916 * bypass userspace event filters. To correctly and safely
7917 * support adaptive PEBS, KVM needs to:
7918 *
7919 * 1. Account for the ADAPTIVE flag when (re)programming fixed
7920 * counters.
7921 *
7922 * 2. Gain support from perf (or take direct control of counter
7923 * programming) to support events without adaptive PEBS
7924 * enabled for the hardware counter.
7925 *
7926 * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
7927 * adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
7928 *
7929 * 4. Document which PMU events are effectively exposed to the
7930 * guest via adaptive PEBS, and make adaptive PEBS mutually
7931 * exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
7932 */
7933 perf_cap &= ~PERF_CAP_PEBS_BASELINE;
7934 }
7935
7936 return perf_cap;
7937 }
7938
7939 static __init void vmx_set_cpu_caps(void)
7940 {
7941 kvm_set_cpu_caps();
7942
7943 /* CPUID 0x1 */
7944 if (nested)
7945 kvm_cpu_cap_set(X86_FEATURE_VMX);
7946
7947 /* CPUID 0x7 */
7948 if (kvm_mpx_supported())
7949 kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
7950 if (!cpu_has_vmx_invpcid())
7951 kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
7952 if (vmx_pt_mode_is_host_guest())
7953 kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
7954 if (vmx_pebs_supported()) {
7955 kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
7956 kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
7957 }
7958
7959 if (!enable_pmu)
7960 kvm_cpu_cap_clear(X86_FEATURE_PDCM);
7961 kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
7962
7963 if (!enable_sgx) {
7964 kvm_cpu_cap_clear(X86_FEATURE_SGX);
7965 kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
7966 kvm_cpu_cap_clear(X86_FEATURE_SGX1);
7967 kvm_cpu_cap_clear(X86_FEATURE_SGX2);
7968 }
7969
7970 if (vmx_umip_emulated())
7971 kvm_cpu_cap_set(X86_FEATURE_UMIP);
7972
7973 /* CPUID 0xD.1 */
7974 kvm_caps.supported_xss = 0;
7975 if (!cpu_has_vmx_xsaves())
7976 kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
7977
7978 /* CPUID 0x80000001 and 0x7 (RDPID) */
7979 if (!cpu_has_vmx_rdtscp()) {
7980 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
7981 kvm_cpu_cap_clear(X86_FEATURE_RDPID);
7982 }
7983
7984 if (cpu_has_vmx_waitpkg())
7985 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
7986 }
7987
7988 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
7989 struct x86_instruction_info *info)
7990 {
7991 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7992 unsigned short port;
7993 bool intercept;
7994 int size;
7995
7996 if (info->intercept == x86_intercept_in ||
7997 info->intercept == x86_intercept_ins) {
7998 port = info->src_val;
7999 size = info->dst_bytes;
8000 } else {
8001 port = info->dst_val;
8002 size = info->src_bytes;
8003 }
8004
8005 /*
8006 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
8007 * VM-exits depend on the 'unconditional IO exiting' VM-execution
8008 * control.
8009 *
8010 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
8011 */
8012 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
8013 intercept = nested_cpu_has(vmcs12,
8014 CPU_BASED_UNCOND_IO_EXITING);
8015 else
8016 intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
8017
8018 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
8019 return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
8020 }
8021
8022 int vmx_check_intercept(struct kvm_vcpu *vcpu,
8023 struct x86_instruction_info *info,
8024 enum x86_intercept_stage stage,
8025 struct x86_exception *exception)
8026 {
8027 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8028
8029 switch (info->intercept) {
8030 /*
8031 * RDPID causes #UD if disabled through secondary execution controls.
8032 * Because it is marked as EmulateOnUD, we need to intercept it here.
8033 * Note, RDPID is hidden behind ENABLE_RDTSCP.
8034 */
8035 case x86_intercept_rdpid:
8036 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
8037 exception->vector = UD_VECTOR;
8038 exception->error_code_valid = false;
8039 return X86EMUL_PROPAGATE_FAULT;
8040 }
8041 break;
8042
8043 case x86_intercept_in:
8044 case x86_intercept_ins:
8045 case x86_intercept_out:
8046 case x86_intercept_outs:
8047 return vmx_check_intercept_io(vcpu, info);
8048
8049 case x86_intercept_lgdt:
8050 case x86_intercept_lidt:
8051 case x86_intercept_lldt:
8052 case x86_intercept_ltr:
8053 case x86_intercept_sgdt:
8054 case x86_intercept_sidt:
8055 case x86_intercept_sldt:
8056 case x86_intercept_str:
8057 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
8058 return X86EMUL_CONTINUE;
8059
8060 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
8061 break;
8062
8063 case x86_intercept_pause:
8064 /*
8065 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
8066 * with vanilla NOPs in the emulator. Apply the interception
8067 * check only to actual PAUSE instructions. Don't check
8068 * PAUSE-loop-exiting, software can't expect a given PAUSE to
8069 * exit, i.e. KVM is within its rights to allow L2 to execute
8070 * the PAUSE.
8071 */
8072 if ((info->rep_prefix != REPE_PREFIX) ||
8073 !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
8074 return X86EMUL_CONTINUE;
8075
8076 break;
8077
8078 /* TODO: check more intercepts... */
8079 default:
8080 break;
8081 }
8082
8083 return X86EMUL_UNHANDLEABLE;
8084 }
8085
8086 #ifdef CONFIG_X86_64
8087 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */
8088 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
8089 u64 divisor, u64 *result)
8090 {
8091 u64 low = a << shift, high = a >> (64 - shift);
8092
8093 /* To avoid the overflow on divq */
8094 if (high >= divisor)
8095 return 1;
8096
8097 /* low holds the result, high holds the remainder, which is discarded */
8098 asm("divq %2\n\t" : "=a" (low), "=d" (high) :
8099 "rm" (divisor), "0" (low), "1" (high));
8100 *result = low;
8101
8102 return 0;
8103 }
8104
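/*
 * Arm the VMX preemption timer for the given guest TSC deadline.  Sets
 * *expired if the deadline has already passed, and returns -ERANGE if the
 * (scaled) delta doesn't fit in the 32-bit preemption timer.
 */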
8105 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
8106 bool *expired)
8107 {
8108 struct vcpu_vmx *vmx;
8109 u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
8110 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
8111
8112 vmx = to_vmx(vcpu);
8113 tscl = rdtsc();
8114 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
8115 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
8116 lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
8117 ktimer->timer_advance_ns);
8118
8119 if (delta_tsc > lapic_timer_advance_cycles)
8120 delta_tsc -= lapic_timer_advance_cycles;
8121 else
8122 delta_tsc = 0;
8123
8124 /* Convert to host delta tsc if tsc scaling is enabled */
8125 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
8126 delta_tsc && u64_shl_div_u64(delta_tsc,
8127 kvm_caps.tsc_scaling_ratio_frac_bits,
8128 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
8129 return -ERANGE;
8130
8131 /*
8132 * If the delta tsc can't fit in 32 bits after the preemption timer
8133 * we can't use the preemption timer.
8134 * It's possible that it fits on later vmentries, but checking
8135 * on every vmentry is costly so we just use an hrtimer.
8136 */
8137 if (delta_tsc >> (cpu_preemption_timer_multi + 32))
8138 return -ERANGE;
8139
8140 vmx->hv_deadline_tsc = tscl + delta_tsc;
8141 *expired = !delta_tsc;
8142 return 0;
8143 }
8144
8145 void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
8146 {
8147 to_vmx(vcpu)->hv_deadline_tsc = -1;
8148 }
8149 #endif
8150
8151 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
8152 {
8153 struct vcpu_vmx *vmx = to_vmx(vcpu);
8154
8155 if (WARN_ON_ONCE(!enable_pml))
8156 return;
8157
8158 if (is_guest_mode(vcpu)) {
8159 vmx->nested.update_vmcs01_cpu_dirty_logging = true;
8160 return;
8161 }
8162
8163 /*
8164 * Note, nr_memslots_dirty_logging can be changed concurrently with this
8165 * code, but in that case another update request will be made and so
8166 * the guest will never run with a stale PML value.
8167 */
8168 if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8169 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8170 else
8171 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8172 }
8173
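/* Expose or hide the LMCE enable bit in IA32_FEATURE_CONTROL based on the vCPU's MCG_CAP. */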
8174 void vmx_setup_mce(struct kvm_vcpu *vcpu)
8175 {
8176 if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8177 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
8178 FEAT_CTL_LMCE_ENABLED;
8179 else
8180 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
8181 ~FEAT_CTL_LMCE_ENABLED;
8182 }
8183
8184 #ifdef CONFIG_KVM_SMM
8185 int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
8186 {
8187 /* we need a nested vmexit to enter SMM, postpone if run is pending */
8188 if (to_vmx(vcpu)->nested.nested_run_pending)
8189 return -EBUSY;
8190 return !is_smm(vcpu);
8191 }
8192
8193 int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
8194 {
8195 struct vcpu_vmx *vmx = to_vmx(vcpu);
8196
8197 /*
8198 * TODO: Implement custom flows for forcing the vCPU out/in of L2 on
8199 * SMI and RSM. Using the common VM-Exit + VM-Enter routines is wrong;
8200 * SMI and RSM only modify state that is saved and restored via SMRAM.
8201 * E.g. most MSRs are left untouched, but many are modified by VM-Exit
8202 * and VM-Enter, and thus L2's values may be corrupted on SMI+RSM.
8203 */
8204 vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8205 if (vmx->nested.smm.guest_mode)
8206 nested_vmx_vmexit(vcpu, -1, 0, 0);
8207
8208 vmx->nested.smm.vmxon = vmx->nested.vmxon;
8209 vmx->nested.vmxon = false;
8210 vmx_clear_hlt(vcpu);
8211 return 0;
8212 }
8213
8214 int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
8215 {
8216 struct vcpu_vmx *vmx = to_vmx(vcpu);
8217 int ret;
8218
8219 if (vmx->nested.smm.vmxon) {
8220 vmx->nested.vmxon = true;
8221 vmx->nested.smm.vmxon = false;
8222 }
8223
8224 if (vmx->nested.smm.guest_mode) {
8225 ret = nested_vmx_enter_non_root_mode(vcpu, false);
8226 if (ret)
8227 return ret;
8228
8229 vmx->nested.nested_run_pending = 1;
8230 vmx->nested.smm.guest_mode = false;
8231 }
8232 return 0;
8233 }
8234
8235 void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
8236 {
8237 /* RSM will cause a vmexit anyway. */
8238 }
8239 #endif
8240
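/* INIT/SIPI is blocked while the vCPU is in VMX root operation (post-VMXON, not running L2). */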
8241 bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
8242 {
8243 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
8244 }
8245
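/*
 * When a vCPU is migrated to a new pCPU, restart the nested preemption timer
 * hrtimer so that it is pinned to, and fires on, the new CPU.
 */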
8246 void vmx_migrate_timers(struct kvm_vcpu *vcpu)
8247 {
8248 if (is_guest_mode(vcpu)) {
8249 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
8250
8251 if (hrtimer_try_to_cancel(timer) == 1)
8252 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
8253 }
8254 }
8255
8256 void vmx_hardware_unsetup(void)
8257 {
8258 kvm_set_posted_intr_wakeup_handler(NULL);
8259
8260 if (nested)
8261 nested_vmx_hardware_unsetup();
8262
8263 free_kvm_area();
8264 }
8265
8266 void vmx_vm_destroy(struct kvm *kvm)
8267 {
8268 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
8269
8270 free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
8271 }
8272
8273 /*
8274 * Note, the SDM states that the linear address is masked *after* the modified
8275 * canonicality check, whereas KVM masks (untags) the address and then performs
8276 * a "normal" canonicality check. Functionally, the two methods are identical,
8277 * and when the masking occurs relative to the canonicality check isn't visible
8278 * to software, i.e. KVM's behavior doesn't violate the SDM.
8279 */
8280 gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
8281 {
8282 int lam_bit;
8283 unsigned long cr3_bits;
8284
8285 if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
8286 return gva;
8287
8288 if (!is_64_bit_mode(vcpu))
8289 return gva;
8290
8291 /*
8292 * Bit 63 determines if the address should be treated as a user address
8293 * or a supervisor address.
8294 */
8295 if (!(gva & BIT_ULL(63))) {
8296 cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
8297 if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
8298 return gva;
8299
8300 /* LAM_U48 is ignored if LAM_U57 is set. */
8301 lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
8302 } else {
8303 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
8304 return gva;
8305
8306 lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
8307 }
8308
8309 /*
8310 * Untag the address by sign-extending the lam_bit, but NOT to bit 63.
8311 * Bit 63 is retained from the raw virtual address so that untagging
8312 * doesn't change a user access to a supervisor access, and vice versa.
8313 */
	return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
}

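/*
 * Handle a Processor Trace ToPA PMI that fires while KVM is handling an NMI
 * that originated in the guest: request a PMI for the vCPU and set the ToPA
 * PMI bit in the guest's PERF_GLOBAL_STATUS.
 */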
static unsigned int vmx_handle_intel_pt_intr(void)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	/* '0' on failure so that the !PT case can use a RET0 static call. */
	if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
		return 0;

	kvm_make_request(KVM_REQ_PMI, vcpu);
	__set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
		  (unsigned long *)&vcpu->arch.pmu.global_status);
	return 1;
}

static __init void vmx_setup_user_return_msrs(void)
{
	/*
	 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
	 * will emulate SYSCALL in legacy mode if the vendor string in guest
	 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
	 * support this emulation, MSR_STAR is included in the list for i386,
	 * but is never loaded into hardware.  MSR_CSTAR is also never loaded
	 * into hardware and is here purely for emulation purposes.
	 */
	const u32 vmx_uret_msrs_list[] = {
	#ifdef CONFIG_X86_64
		MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
	#endif
		MSR_EFER, MSR_TSC_AUX, MSR_STAR,
		MSR_IA32_TSX_CTRL,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);

	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
}

static void __init vmx_setup_me_spte_mask(void)
{
	u64 me_mask = 0;

	/*
	 * On pre-MKTME systems, boot_cpu_data.x86_phys_bits equals
	 * kvm_host.maxphyaddr.  On MKTME and/or TDX capable systems,
	 * boot_cpu_data.x86_phys_bits holds the actual physical address
	 * width w/o the KeyID bits, and kvm_host.maxphyaddr equals
	 * MAXPHYADDR reported by CPUID.  The bits in between are KeyID bits.
	 */
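	/*
	 * Illustrative (hypothetical) numbers: x86_phys_bits == 46 and
	 * kvm_host.maxphyaddr == 52 would make bits 51:46 the KeyID bits,
	 * i.e. me_mask = rsvd_bits(46, 51).
	 */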
	if (boot_cpu_data.x86_phys_bits != kvm_host.maxphyaddr)
		me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
				    kvm_host.maxphyaddr - 1);

	/*
	 * Unlike SME, the host kernel doesn't support setting up any
	 * MKTME KeyID on Intel platforms.  No memory encryption
	 * bits should be included in the SPTE.
	 */
	kvm_mmu_set_me_spte_mask(0, me_mask);
}

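/*
 * One-time, module-load-time hardware setup: read the VMCS/VMX capabilities
 * and clear any module parameters (EPT, VPID, APICv, etc.) that the CPU
 * cannot actually support.
 */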
__init int vmx_hardware_setup(void)
{
	unsigned long host_bndcfgs;
	struct desc_ptr dt;
	int r;

	store_idt(&dt);
	host_idt_base = dt.address;

	vmx_setup_user_return_msrs();

	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
		return -EIO;

	if (cpu_has_perf_global_ctrl_bug())
		pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
			     "does not work properly. Using workaround\n");

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_MPX)) {
		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
		WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
	}

	if (!cpu_has_vmx_mpx())
		kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
					     XFEATURE_MASK_BNDCSR);

	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
		enable_vpid = 0;

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels() ||
	    !cpu_has_vmx_ept_mt_wb() ||
	    !cpu_has_vmx_invept_global())
		enable_ept = 0;

	/* NX support is required for shadow paging. */
	if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
		pr_err_ratelimited("NX (Execute Disable) not supported\n");
		return -EOPNOTSUPP;
	}

	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
		enable_ept_ad_bits = 0;

	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_virtual_nmis())
		enable_vnmi = 0;

#ifdef CONFIG_X86_SGX_KVM
	if (!cpu_has_vmx_encls_vmexit())
		enable_sgx = false;
#endif

	/*
	 * set_apic_access_page_addr() is used to reload the APIC access
	 * page upon invalidation.  No need to do anything if not
	 * using the APIC_ACCESS_ADDR VMCS field.
	 */
	if (!flexpriority_enabled)
		vt_x86_ops.set_apic_access_page_addr = NULL;

	if (!cpu_has_vmx_tpr_shadow())
		vt_x86_ops.update_cr8_intercept = NULL;

#if IS_ENABLED(CONFIG_HYPERV)
	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
	    && enable_ept) {
		vt_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
		vt_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
	}
#endif

	if (!cpu_has_vmx_ple()) {
		ple_gap = 0;
		ple_window = 0;
		ple_window_grow = 0;
		ple_window_max = 0;
		ple_window_shrink = 0;
	}

	if (!cpu_has_vmx_apicv())
		enable_apicv = 0;
	if (!enable_apicv)
		vt_x86_ops.sync_pir_to_irr = NULL;

	if (!enable_apicv || !cpu_has_vmx_ipiv())
		enable_ipiv = false;

	if (cpu_has_vmx_tsc_scaling())
		kvm_caps.has_tsc_control = true;

	kvm_caps.max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
	kvm_caps.tsc_scaling_ratio_frac_bits = 48;
	kvm_caps.has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
	kvm_caps.has_notify_vmexit = cpu_has_notify_vmexit();

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	if (enable_ept)
		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
				      cpu_has_vmx_ept_execute_only());

	/*
	 * Set up shadow_me_value/shadow_me_mask so that MKTME KeyID bits
	 * are added to shadow_zero_check.
	 */
	vmx_setup_me_spte_mask();

	kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
			  ept_caps_to_lpage_level(vmx_capability.ept));

	/*
	 * Only enable PML when hardware supports PML feature, and both EPT
	 * and EPT A/D bit features are enabled -- PML depends on them to work.
	 */
	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
		enable_pml = 0;

	if (!enable_pml)
		vt_x86_ops.cpu_dirty_log_size = 0;

	if (!cpu_has_vmx_preemption_timer())
		enable_preemption_timer = false;

	if (enable_preemption_timer) {
		u64 use_timer_freq = 5000ULL * 1000 * 1000;

		cpu_preemption_timer_multi =
			vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;

		if (tsc_khz)
			use_timer_freq = (u64)tsc_khz * 1000;
		use_timer_freq >>= cpu_preemption_timer_multi;

		/*
		 * KVM "disables" the preemption timer by setting it to its max
		 * value.  Don't use the timer if it might cause spurious exits
		 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
		 */
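		/*
		 * Illustrative (hypothetical) numbers: a 2 GHz TSC with a
		 * preemption timer rate shift of 5 yields use_timer_freq of
		 * 62.5 MHz, well under the ~429 MHz (0xffffffff / 10) limit.
		 */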
		if (use_timer_freq > 0xffffffffu / 10)
			enable_preemption_timer = false;
	}

	if (!enable_preemption_timer) {
		vt_x86_ops.set_hv_timer = NULL;
		vt_x86_ops.cancel_hv_timer = NULL;
	}

	kvm_caps.supported_mce_cap |= MCG_LMCE_P;
	kvm_caps.supported_mce_cap |= MCG_CMCI_P;

	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
		return -EINVAL;
	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
		pt_mode = PT_MODE_SYSTEM;
	if (pt_mode == PT_MODE_HOST_GUEST)
		vt_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
	else
		vt_init_ops.handle_intel_pt_intr = NULL;

	setup_default_sgx_lepubkeyhash();

	if (nested) {
		nested_vmx_setup_ctls_msrs(&vmcs_config, vmx_capability.ept);

		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
		if (r)
			return r;
	}

	vmx_set_cpu_caps();

	r = alloc_kvm_area();
	if (r && nested)
		nested_vmx_hardware_unsetup();

	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);

	return r;
}

static void vmx_cleanup_l1d_flush(void)
{
	if (vmx_l1d_flush_pages) {
		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
		vmx_l1d_flush_pages = NULL;
	}
	/* Restore state so sysfs ignores VMX */
	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}

static void __vmx_exit(void)
{
	allow_smaller_maxphyaddr = false;

	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);

	vmx_cleanup_l1d_flush();
}

static void vmx_exit(void)
{
	kvm_exit();
	__vmx_exit();
	kvm_x86_vendor_exit();
}
module_exit(vmx_exit);

static int __init vmx_init(void)
{
	int r, cpu;

	if (!kvm_is_vmx_supported())
		return -EOPNOTSUPP;

	/*
	 * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
	 * to unwind if a later step fails.
	 */
	hv_init_evmcs();

	r = kvm_x86_vendor_init(&vt_init_ops);
	if (r)
		return r;

	/*
	 * Must be called after common x86 init so enable_ept is properly set
	 * up.  Hand in the mitigation parameter value that was stored by the
	 * pre-module-init parser.  If no parameter was given, it will contain
	 * 'auto', which is turned into the default 'cond' mitigation mode.
	 */
	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
	if (r)
		goto err_l1d_flush;

	for_each_possible_cpu(cpu) {
		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

		pi_init_cpu(cpu);
	}

	cpu_emergency_register_virt_callback(vmx_emergency_disable);

	vmx_check_vmcs12_offsets();

	/*
	 * Shadow paging doesn't have a (further) performance penalty
	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR, so enable it
	 * by default.
	 */
	if (!enable_ept)
		allow_smaller_maxphyaddr = true;

	/*
	 * Common KVM initialization _must_ come last; after this, /dev/kvm is
	 * exposed to userspace!
	 */
	r = kvm_init(sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx),
		     THIS_MODULE);
	if (r)
		goto err_kvm_init;

	return 0;

err_kvm_init:
	__vmx_exit();
err_l1d_flush:
	kvm_x86_vendor_exit();
	return r;
}
module_init(vmx_init);