xref: /linux/arch/x86/kvm/vmx/vmx.c (revision a382b06d297e78ed7ac67afd0d8e8690406ac4ca)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * Copyright (C) 2006 Qumranet, Inc.
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  */
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/highmem.h>
18 #include <linux/hrtimer.h>
19 #include <linux/kernel.h>
20 #include <linux/kvm_host.h>
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/mod_devicetable.h>
24 #include <linux/mm.h>
25 #include <linux/objtool.h>
26 #include <linux/sched.h>
27 #include <linux/sched/smt.h>
28 #include <linux/slab.h>
29 #include <linux/tboot.h>
30 #include <linux/trace_events.h>
31 #include <linux/entry-kvm.h>
32 
33 #include <asm/apic.h>
34 #include <asm/asm.h>
35 #include <asm/cpu.h>
36 #include <asm/cpu_device_id.h>
37 #include <asm/debugreg.h>
38 #include <asm/desc.h>
39 #include <asm/fpu/api.h>
40 #include <asm/fpu/xstate.h>
41 #include <asm/fred.h>
42 #include <asm/idtentry.h>
43 #include <asm/io.h>
44 #include <asm/irq_remapping.h>
45 #include <asm/reboot.h>
46 #include <asm/perf_event.h>
47 #include <asm/mmu_context.h>
48 #include <asm/mshyperv.h>
49 #include <asm/mwait.h>
50 #include <asm/spec-ctrl.h>
51 #include <asm/vmx.h>
52 
53 #include <trace/events/ipi.h>
54 
55 #include "capabilities.h"
56 #include "cpuid.h"
57 #include "hyperv.h"
58 #include "kvm_onhyperv.h"
59 #include "irq.h"
60 #include "kvm_cache_regs.h"
61 #include "lapic.h"
62 #include "mmu.h"
63 #include "nested.h"
64 #include "pmu.h"
65 #include "sgx.h"
66 #include "trace.h"
67 #include "vmcs.h"
68 #include "vmcs12.h"
69 #include "vmx.h"
70 #include "x86.h"
71 #include "x86_ops.h"
72 #include "smm.h"
73 #include "vmx_onhyperv.h"
74 #include "posted_intr.h"
75 
76 MODULE_AUTHOR("Qumranet");
77 MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
78 MODULE_LICENSE("GPL");
79 
80 #ifdef MODULE
81 static const struct x86_cpu_id vmx_cpu_id[] = {
82 	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
83 	{}
84 };
85 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
86 #endif
87 
88 bool __read_mostly enable_vpid = 1;
89 module_param_named(vpid, enable_vpid, bool, 0444);
90 
91 static bool __read_mostly enable_vnmi = 1;
92 module_param_named(vnmi, enable_vnmi, bool, 0444);
93 
94 bool __read_mostly flexpriority_enabled = 1;
95 module_param_named(flexpriority, flexpriority_enabled, bool, 0444);
96 
97 bool __read_mostly enable_ept = 1;
98 module_param_named(ept, enable_ept, bool, 0444);
99 
100 bool __read_mostly enable_unrestricted_guest = 1;
101 module_param_named(unrestricted_guest,
102 			enable_unrestricted_guest, bool, 0444);
103 
104 bool __read_mostly enable_ept_ad_bits = 1;
105 module_param_named(eptad, enable_ept_ad_bits, bool, 0444);
106 
107 static bool __read_mostly emulate_invalid_guest_state = true;
108 module_param(emulate_invalid_guest_state, bool, 0444);
109 
110 static bool __read_mostly fasteoi = 1;
111 module_param(fasteoi, bool, 0444);
112 
113 module_param(enable_apicv, bool, 0444);
114 
115 bool __read_mostly enable_ipiv = true;
116 module_param(enable_ipiv, bool, 0444);
117 
118 /*
119  * If nested=1, nested virtualization is supported, i.e., guests may use
120  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
121  * use VMX instructions.
122  */
123 static bool __read_mostly nested = 1;
124 module_param(nested, bool, 0444);
125 
126 bool __read_mostly enable_pml = 1;
127 module_param_named(pml, enable_pml, bool, 0444);
128 
129 static bool __read_mostly error_on_inconsistent_vmcs_config = true;
130 module_param(error_on_inconsistent_vmcs_config, bool, 0444);
131 
132 static bool __read_mostly dump_invalid_vmcs = 0;
133 module_param(dump_invalid_vmcs, bool, 0644);
134 
135 #define MSR_BITMAP_MODE_X2APIC		1
136 #define MSR_BITMAP_MODE_X2APIC_APICV	2
137 
138 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
139 
140 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
141 static int __read_mostly cpu_preemption_timer_multi;
142 static bool __read_mostly enable_preemption_timer = 1;
143 #ifdef CONFIG_X86_64
144 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
145 #endif
146 
147 extern bool __read_mostly allow_smaller_maxphyaddr;
148 module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
149 
150 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
151 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
152 #define KVM_VM_CR0_ALWAYS_ON				\
153 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
154 
155 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
156 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
157 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
158 
159 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
160 
161 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
162 	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
163 	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
164 	RTIT_STATUS_BYTECNT))
165 
166 /*
167  * List of MSRs that can be directly passed to the guest.
168  * In addition to these x2apic, PT and LBR MSRs are handled specially.
169  */
170 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
171 	MSR_IA32_SPEC_CTRL,
172 	MSR_IA32_PRED_CMD,
173 	MSR_IA32_FLUSH_CMD,
174 	MSR_IA32_TSC,
175 #ifdef CONFIG_X86_64
176 	MSR_FS_BASE,
177 	MSR_GS_BASE,
178 	MSR_KERNEL_GS_BASE,
179 	MSR_IA32_XFD,
180 	MSR_IA32_XFD_ERR,
181 #endif
182 	MSR_IA32_SYSENTER_CS,
183 	MSR_IA32_SYSENTER_ESP,
184 	MSR_IA32_SYSENTER_EIP,
185 	MSR_CORE_C1_RES,
186 	MSR_CORE_C3_RESIDENCY,
187 	MSR_CORE_C6_RESIDENCY,
188 	MSR_CORE_C7_RESIDENCY,
189 };
190 
191 /*
192  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
193  * ple_gap:    upper bound on the amount of time between two successive
194  *             executions of PAUSE in a loop. Also indicate if ple enabled.
195  *             According to test, this time is usually smaller than 128 cycles.
196  * ple_window: upper bound on the amount of time a guest is allowed to execute
197  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
198  *             less than 2^12 cycles
199  * Time is measured based on a counter that runs at the same rate as the TSC,
200  * refer SDM volume 3b section 21.6.13 & 22.1.3.
201  */
202 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
203 module_param(ple_gap, uint, 0444);
204 
205 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
206 module_param(ple_window, uint, 0444);
207 
208 /* Default doubles per-vcpu window every exit. */
209 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
210 module_param(ple_window_grow, uint, 0444);
211 
212 /* Default resets per-vcpu window every exit to ple_window. */
213 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
214 module_param(ple_window_shrink, uint, 0444);
215 
216 /* Default is to compute the maximum so we can never overflow. */
217 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
218 module_param(ple_window_max, uint, 0444);
219 
220 /* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
221 int __read_mostly pt_mode = PT_MODE_SYSTEM;
222 #ifdef CONFIG_BROKEN
223 module_param(pt_mode, int, S_IRUGO);
224 #endif
225 
226 struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
227 
228 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
229 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
230 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
231 
232 /* Storage for pre module init parameter parsing */
233 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
234 
235 static const struct {
236 	const char *option;
237 	bool for_parse;
238 } vmentry_l1d_param[] = {
239 	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
240 	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
241 	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
242 	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
243 	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
244 	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
245 };
246 
247 #define L1D_CACHE_ORDER 4
248 static void *vmx_l1d_flush_pages;
249 
vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)250 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
251 {
252 	struct page *page;
253 	unsigned int i;
254 
255 	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
256 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
257 		return 0;
258 	}
259 
260 	if (!enable_ept) {
261 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
262 		return 0;
263 	}
264 
265 	if (kvm_host.arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
266 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
267 		return 0;
268 	}
269 
270 	/* If set to auto use the default l1tf mitigation method */
271 	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
272 		switch (l1tf_mitigation) {
273 		case L1TF_MITIGATION_OFF:
274 			l1tf = VMENTER_L1D_FLUSH_NEVER;
275 			break;
276 		case L1TF_MITIGATION_FLUSH_NOWARN:
277 		case L1TF_MITIGATION_FLUSH:
278 		case L1TF_MITIGATION_FLUSH_NOSMT:
279 			l1tf = VMENTER_L1D_FLUSH_COND;
280 			break;
281 		case L1TF_MITIGATION_FULL:
282 		case L1TF_MITIGATION_FULL_FORCE:
283 			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
284 			break;
285 		}
286 	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
287 		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
288 	}
289 
290 	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
291 	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
292 		/*
293 		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
294 		 * lifetime and so should not be charged to a memcg.
295 		 */
296 		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
297 		if (!page)
298 			return -ENOMEM;
299 		vmx_l1d_flush_pages = page_address(page);
300 
301 		/*
302 		 * Initialize each page with a different pattern in
303 		 * order to protect against KSM in the nested
304 		 * virtualization case.
305 		 */
306 		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
307 			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
308 			       PAGE_SIZE);
309 		}
310 	}
311 
312 	l1tf_vmx_mitigation = l1tf;
313 
314 	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
315 		static_branch_enable(&vmx_l1d_should_flush);
316 	else
317 		static_branch_disable(&vmx_l1d_should_flush);
318 
319 	if (l1tf == VMENTER_L1D_FLUSH_COND)
320 		static_branch_enable(&vmx_l1d_flush_cond);
321 	else
322 		static_branch_disable(&vmx_l1d_flush_cond);
323 	return 0;
324 }
325 
vmentry_l1d_flush_parse(const char * s)326 static int vmentry_l1d_flush_parse(const char *s)
327 {
328 	unsigned int i;
329 
330 	if (s) {
331 		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
332 			if (vmentry_l1d_param[i].for_parse &&
333 			    sysfs_streq(s, vmentry_l1d_param[i].option))
334 				return i;
335 		}
336 	}
337 	return -EINVAL;
338 }
339 
vmentry_l1d_flush_set(const char * s,const struct kernel_param * kp)340 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
341 {
342 	int l1tf, ret;
343 
344 	l1tf = vmentry_l1d_flush_parse(s);
345 	if (l1tf < 0)
346 		return l1tf;
347 
348 	if (!boot_cpu_has(X86_BUG_L1TF))
349 		return 0;
350 
351 	/*
352 	 * Has vmx_init() run already? If not then this is the pre init
353 	 * parameter parsing. In that case just store the value and let
354 	 * vmx_init() do the proper setup after enable_ept has been
355 	 * established.
356 	 */
357 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
358 		vmentry_l1d_flush_param = l1tf;
359 		return 0;
360 	}
361 
362 	mutex_lock(&vmx_l1d_flush_mutex);
363 	ret = vmx_setup_l1d_flush(l1tf);
364 	mutex_unlock(&vmx_l1d_flush_mutex);
365 	return ret;
366 }
367 
vmentry_l1d_flush_get(char * s,const struct kernel_param * kp)368 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
369 {
370 	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
371 		return sysfs_emit(s, "???\n");
372 
373 	return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
374 }
375 
vmx_disable_fb_clear(struct vcpu_vmx * vmx)376 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
377 {
378 	u64 msr;
379 
380 	if (!vmx->disable_fb_clear)
381 		return;
382 
383 	msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
384 	msr |= FB_CLEAR_DIS;
385 	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
386 	/* Cache the MSR value to avoid reading it later */
387 	vmx->msr_ia32_mcu_opt_ctrl = msr;
388 }
389 
vmx_enable_fb_clear(struct vcpu_vmx * vmx)390 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
391 {
392 	if (!vmx->disable_fb_clear)
393 		return;
394 
395 	vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
396 	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
397 }
398 
vmx_update_fb_clear_dis(struct kvm_vcpu * vcpu,struct vcpu_vmx * vmx)399 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
400 {
401 	/*
402 	 * Disable VERW's behavior of clearing CPU buffers for the guest if the
403 	 * CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
404 	 * the mitigation. Disabling the clearing behavior provides a
405 	 * performance boost for guests that aren't aware that manually clearing
406 	 * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
407 	 * and VM-Exit.
408 	 */
409 	vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
410 				(kvm_host.arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
411 				!boot_cpu_has_bug(X86_BUG_MDS) &&
412 				!boot_cpu_has_bug(X86_BUG_TAA);
413 
414 	/*
415 	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
416 	 * at VMEntry. Skip the MSR read/write when a guest has no use case to
417 	 * execute VERW.
418 	 */
419 	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
420 	   ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
421 	    (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
422 	    (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
423 	    (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
424 	    (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
425 		vmx->disable_fb_clear = false;
426 }
427 
428 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
429 	.set = vmentry_l1d_flush_set,
430 	.get = vmentry_l1d_flush_get,
431 };
432 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
433 
434 static u32 vmx_segment_access_rights(struct kvm_segment *var);
435 
436 void vmx_vmexit(void);
437 
438 #define vmx_insn_failed(fmt...)		\
439 do {					\
440 	WARN_ONCE(1, fmt);		\
441 	pr_warn_ratelimited(fmt);	\
442 } while (0)
443 
vmread_error(unsigned long field)444 noinline void vmread_error(unsigned long field)
445 {
446 	vmx_insn_failed("vmread failed: field=%lx\n", field);
447 }
448 
449 #ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
vmread_error_trampoline2(unsigned long field,bool fault)450 noinstr void vmread_error_trampoline2(unsigned long field, bool fault)
451 {
452 	if (fault) {
453 		kvm_spurious_fault();
454 	} else {
455 		instrumentation_begin();
456 		vmread_error(field);
457 		instrumentation_end();
458 	}
459 }
460 #endif
461 
vmwrite_error(unsigned long field,unsigned long value)462 noinline void vmwrite_error(unsigned long field, unsigned long value)
463 {
464 	vmx_insn_failed("vmwrite failed: field=%lx val=%lx err=%u\n",
465 			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
466 }
467 
vmclear_error(struct vmcs * vmcs,u64 phys_addr)468 noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
469 {
470 	vmx_insn_failed("vmclear failed: %p/%llx err=%u\n",
471 			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
472 }
473 
vmptrld_error(struct vmcs * vmcs,u64 phys_addr)474 noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
475 {
476 	vmx_insn_failed("vmptrld failed: %p/%llx err=%u\n",
477 			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
478 }
479 
invvpid_error(unsigned long ext,u16 vpid,gva_t gva)480 noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
481 {
482 	vmx_insn_failed("invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
483 			ext, vpid, gva);
484 }
485 
invept_error(unsigned long ext,u64 eptp)486 noinline void invept_error(unsigned long ext, u64 eptp)
487 {
488 	vmx_insn_failed("invept failed: ext=0x%lx eptp=%llx\n", ext, eptp);
489 }
490 
491 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
492 DEFINE_PER_CPU(struct vmcs *, current_vmcs);
493 /*
494  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
495  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
496  */
497 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
498 
499 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
500 static DEFINE_SPINLOCK(vmx_vpid_lock);
501 
502 struct vmcs_config vmcs_config __ro_after_init;
503 struct vmx_capability vmx_capability __ro_after_init;
504 
505 #define VMX_SEGMENT_FIELD(seg)					\
506 	[VCPU_SREG_##seg] = {                                   \
507 		.selector = GUEST_##seg##_SELECTOR,		\
508 		.base = GUEST_##seg##_BASE,		   	\
509 		.limit = GUEST_##seg##_LIMIT,		   	\
510 		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
511 	}
512 
513 static const struct kvm_vmx_segment_field {
514 	unsigned selector;
515 	unsigned base;
516 	unsigned limit;
517 	unsigned ar_bytes;
518 } kvm_vmx_segment_fields[] = {
519 	VMX_SEGMENT_FIELD(CS),
520 	VMX_SEGMENT_FIELD(DS),
521 	VMX_SEGMENT_FIELD(ES),
522 	VMX_SEGMENT_FIELD(FS),
523 	VMX_SEGMENT_FIELD(GS),
524 	VMX_SEGMENT_FIELD(SS),
525 	VMX_SEGMENT_FIELD(TR),
526 	VMX_SEGMENT_FIELD(LDTR),
527 };
528 
529 
530 static unsigned long host_idt_base;
531 
532 #if IS_ENABLED(CONFIG_HYPERV)
533 static bool __read_mostly enlightened_vmcs = true;
534 module_param(enlightened_vmcs, bool, 0444);
535 
hv_enable_l2_tlb_flush(struct kvm_vcpu * vcpu)536 static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
537 {
538 	struct hv_enlightened_vmcs *evmcs;
539 	hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
540 
541 	if (partition_assist_page == INVALID_PAGE)
542 		return -ENOMEM;
543 
544 	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
545 
546 	evmcs->partition_assist_page = partition_assist_page;
547 	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
548 	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
549 
550 	return 0;
551 }
552 
hv_init_evmcs(void)553 static __init void hv_init_evmcs(void)
554 {
555 	int cpu;
556 
557 	if (!enlightened_vmcs)
558 		return;
559 
560 	/*
561 	 * Enlightened VMCS usage should be recommended and the host needs
562 	 * to support eVMCS v1 or above.
563 	 */
564 	if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
565 	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
566 	     KVM_EVMCS_VERSION) {
567 
568 		/* Check that we have assist pages on all online CPUs */
569 		for_each_online_cpu(cpu) {
570 			if (!hv_get_vp_assist_page(cpu)) {
571 				enlightened_vmcs = false;
572 				break;
573 			}
574 		}
575 
576 		if (enlightened_vmcs) {
577 			pr_info("Using Hyper-V Enlightened VMCS\n");
578 			static_branch_enable(&__kvm_is_using_evmcs);
579 		}
580 
581 		if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
582 			vt_x86_ops.enable_l2_tlb_flush
583 				= hv_enable_l2_tlb_flush;
584 	} else {
585 		enlightened_vmcs = false;
586 	}
587 }
588 
hv_reset_evmcs(void)589 static void hv_reset_evmcs(void)
590 {
591 	struct hv_vp_assist_page *vp_ap;
592 
593 	if (!kvm_is_using_evmcs())
594 		return;
595 
596 	/*
597 	 * KVM should enable eVMCS if and only if all CPUs have a VP assist
598 	 * page, and should reject CPU onlining if eVMCS is enabled the CPU
599 	 * doesn't have a VP assist page allocated.
600 	 */
601 	vp_ap = hv_get_vp_assist_page(smp_processor_id());
602 	if (WARN_ON_ONCE(!vp_ap))
603 		return;
604 
605 	/*
606 	 * Reset everything to support using non-enlightened VMCS access later
607 	 * (e.g. when we reload the module with enlightened_vmcs=0)
608 	 */
609 	vp_ap->nested_control.features.directhypercall = 0;
610 	vp_ap->current_nested_vmcs = 0;
611 	vp_ap->enlighten_vmentry = 0;
612 }
613 
614 #else /* IS_ENABLED(CONFIG_HYPERV) */
hv_init_evmcs(void)615 static void hv_init_evmcs(void) {}
hv_reset_evmcs(void)616 static void hv_reset_evmcs(void) {}
617 #endif /* IS_ENABLED(CONFIG_HYPERV) */
618 
619 /*
620  * Comment's format: document - errata name - stepping - processor name.
621  * Refer from
622  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
623  */
624 static u32 vmx_preemption_cpu_tfms[] = {
625 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
626 0x000206E6,
627 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
628 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
629 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
630 0x00020652,
631 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
632 0x00020655,
633 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
634 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
635 /*
636  * 320767.pdf - AAP86  - B1 -
637  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
638  */
639 0x000106E5,
640 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
641 0x000106A0,
642 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
643 0x000106A1,
644 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
645 0x000106A4,
646  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
647  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
648  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
649 0x000106A5,
650  /* Xeon E3-1220 V2 */
651 0x000306A8,
652 };
653 
cpu_has_broken_vmx_preemption_timer(void)654 static inline bool cpu_has_broken_vmx_preemption_timer(void)
655 {
656 	u32 eax = cpuid_eax(0x00000001), i;
657 
658 	/* Clear the reserved bits */
659 	eax &= ~(0x3U << 14 | 0xfU << 28);
660 	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
661 		if (eax == vmx_preemption_cpu_tfms[i])
662 			return true;
663 
664 	return false;
665 }
666 
cpu_need_virtualize_apic_accesses(struct kvm_vcpu * vcpu)667 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
668 {
669 	return flexpriority_enabled && lapic_in_kernel(vcpu);
670 }
671 
vmx_get_passthrough_msr_slot(u32 msr)672 static int vmx_get_passthrough_msr_slot(u32 msr)
673 {
674 	int i;
675 
676 	switch (msr) {
677 	case 0x800 ... 0x8ff:
678 		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
679 		return -ENOENT;
680 	case MSR_IA32_RTIT_STATUS:
681 	case MSR_IA32_RTIT_OUTPUT_BASE:
682 	case MSR_IA32_RTIT_OUTPUT_MASK:
683 	case MSR_IA32_RTIT_CR3_MATCH:
684 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
685 		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
686 	case MSR_LBR_SELECT:
687 	case MSR_LBR_TOS:
688 	case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
689 	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
690 	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
691 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
692 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
693 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
694 		return -ENOENT;
695 	}
696 
697 	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
698 		if (vmx_possible_passthrough_msrs[i] == msr)
699 			return i;
700 	}
701 
702 	WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
703 	return -ENOENT;
704 }
705 
vmx_find_uret_msr(struct vcpu_vmx * vmx,u32 msr)706 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
707 {
708 	int i;
709 
710 	i = kvm_find_user_return_msr(msr);
711 	if (i >= 0)
712 		return &vmx->guest_uret_msrs[i];
713 	return NULL;
714 }
715 
vmx_set_guest_uret_msr(struct vcpu_vmx * vmx,struct vmx_uret_msr * msr,u64 data)716 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
717 				  struct vmx_uret_msr *msr, u64 data)
718 {
719 	unsigned int slot = msr - vmx->guest_uret_msrs;
720 	int ret = 0;
721 
722 	if (msr->load_into_hardware) {
723 		preempt_disable();
724 		ret = kvm_set_user_return_msr(slot, data, msr->mask);
725 		preempt_enable();
726 	}
727 	if (!ret)
728 		msr->data = data;
729 	return ret;
730 }
731 
732 /*
733  * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
734  *
735  * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
736  * atomically track post-VMXON state, e.g. this may be called in NMI context.
737  * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
738  * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
739  * magically in RM, VM86, compat mode, or at CPL>0.
740  */
kvm_cpu_vmxoff(void)741 static int kvm_cpu_vmxoff(void)
742 {
743 	asm goto("1: vmxoff\n\t"
744 			  _ASM_EXTABLE(1b, %l[fault])
745 			  ::: "cc", "memory" : fault);
746 
747 	cr4_clear_bits(X86_CR4_VMXE);
748 	return 0;
749 
750 fault:
751 	cr4_clear_bits(X86_CR4_VMXE);
752 	return -EIO;
753 }
754 
vmx_emergency_disable_virtualization_cpu(void)755 void vmx_emergency_disable_virtualization_cpu(void)
756 {
757 	int cpu = raw_smp_processor_id();
758 	struct loaded_vmcs *v;
759 
760 	kvm_rebooting = true;
761 
762 	/*
763 	 * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
764 	 * set in task context.  If this races with VMX is disabled by an NMI,
765 	 * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults due to
766 	 * kvm_rebooting set.
767 	 */
768 	if (!(__read_cr4() & X86_CR4_VMXE))
769 		return;
770 
771 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
772 			    loaded_vmcss_on_cpu_link)
773 		vmcs_clear(v->vmcs);
774 
775 	kvm_cpu_vmxoff();
776 }
777 
__loaded_vmcs_clear(void * arg)778 static void __loaded_vmcs_clear(void *arg)
779 {
780 	struct loaded_vmcs *loaded_vmcs = arg;
781 	int cpu = raw_smp_processor_id();
782 
783 	if (loaded_vmcs->cpu != cpu)
784 		return; /* vcpu migration can race with cpu offline */
785 	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
786 		per_cpu(current_vmcs, cpu) = NULL;
787 
788 	vmcs_clear(loaded_vmcs->vmcs);
789 	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
790 		vmcs_clear(loaded_vmcs->shadow_vmcs);
791 
792 	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
793 
794 	/*
795 	 * Ensure all writes to loaded_vmcs, including deleting it from its
796 	 * current percpu list, complete before setting loaded_vmcs->cpu to
797 	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
798 	 * and add loaded_vmcs to its percpu list before it's deleted from this
799 	 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
800 	 */
801 	smp_wmb();
802 
803 	loaded_vmcs->cpu = -1;
804 	loaded_vmcs->launched = 0;
805 }
806 
loaded_vmcs_clear(struct loaded_vmcs * loaded_vmcs)807 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
808 {
809 	int cpu = loaded_vmcs->cpu;
810 
811 	if (cpu != -1)
812 		smp_call_function_single(cpu,
813 			 __loaded_vmcs_clear, loaded_vmcs, 1);
814 }
815 
vmx_segment_cache_test_set(struct vcpu_vmx * vmx,unsigned seg,unsigned field)816 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
817 				       unsigned field)
818 {
819 	bool ret;
820 	u32 mask = 1 << (seg * SEG_FIELD_NR + field);
821 
822 	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
823 		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
824 		vmx->segment_cache.bitmask = 0;
825 	}
826 	ret = vmx->segment_cache.bitmask & mask;
827 	vmx->segment_cache.bitmask |= mask;
828 	return ret;
829 }
830 
vmx_read_guest_seg_selector(struct vcpu_vmx * vmx,unsigned seg)831 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
832 {
833 	u16 *p = &vmx->segment_cache.seg[seg].selector;
834 
835 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
836 		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
837 	return *p;
838 }
839 
vmx_read_guest_seg_base(struct vcpu_vmx * vmx,unsigned seg)840 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
841 {
842 	ulong *p = &vmx->segment_cache.seg[seg].base;
843 
844 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
845 		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
846 	return *p;
847 }
848 
vmx_read_guest_seg_limit(struct vcpu_vmx * vmx,unsigned seg)849 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
850 {
851 	u32 *p = &vmx->segment_cache.seg[seg].limit;
852 
853 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
854 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
855 	return *p;
856 }
857 
vmx_read_guest_seg_ar(struct vcpu_vmx * vmx,unsigned seg)858 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
859 {
860 	u32 *p = &vmx->segment_cache.seg[seg].ar;
861 
862 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
863 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
864 	return *p;
865 }
866 
vmx_update_exception_bitmap(struct kvm_vcpu * vcpu)867 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
868 {
869 	u32 eb;
870 
871 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
872 	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
873 	/*
874 	 * #VE isn't used for VMX.  To test against unexpected changes
875 	 * related to #VE for VMX, intercept unexpected #VE and warn on it.
876 	 */
877 	if (IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
878 		eb |= 1u << VE_VECTOR;
879 	/*
880 	 * Guest access to VMware backdoor ports could legitimately
881 	 * trigger #GP because of TSS I/O permission bitmap.
882 	 * We intercept those #GP and allow access to them anyway
883 	 * as VMware does.
884 	 */
885 	if (enable_vmware_backdoor)
886 		eb |= (1u << GP_VECTOR);
887 	if ((vcpu->guest_debug &
888 	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
889 	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
890 		eb |= 1u << BP_VECTOR;
891 	if (to_vmx(vcpu)->rmode.vm86_active)
892 		eb = ~0;
893 	if (!vmx_need_pf_intercept(vcpu))
894 		eb &= ~(1u << PF_VECTOR);
895 
896 	/* When we are running a nested L2 guest and L1 specified for it a
897 	 * certain exception bitmap, we must trap the same exceptions and pass
898 	 * them to L1. When running L2, we will only handle the exceptions
899 	 * specified above if L1 did not want them.
900 	 */
901 	if (is_guest_mode(vcpu))
902 		eb |= get_vmcs12(vcpu)->exception_bitmap;
903 	else {
904 		int mask = 0, match = 0;
905 
906 		if (enable_ept && (eb & (1u << PF_VECTOR))) {
907 			/*
908 			 * If EPT is enabled, #PF is currently only intercepted
909 			 * if MAXPHYADDR is smaller on the guest than on the
910 			 * host.  In that case we only care about present,
911 			 * non-reserved faults.  For vmcs02, however, PFEC_MASK
912 			 * and PFEC_MATCH are set in prepare_vmcs02_rare.
913 			 */
914 			mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
915 			match = PFERR_PRESENT_MASK;
916 		}
917 		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
918 		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
919 	}
920 
921 	/*
922 	 * Disabling xfd interception indicates that dynamic xfeatures
923 	 * might be used in the guest. Always trap #NM in this case
924 	 * to save guest xfd_err timely.
925 	 */
926 	if (vcpu->arch.xfd_no_write_intercept)
927 		eb |= (1u << NM_VECTOR);
928 
929 	vmcs_write32(EXCEPTION_BITMAP, eb);
930 }
931 
932 /*
933  * Check if MSR is intercepted for currently loaded MSR bitmap.
934  */
msr_write_intercepted(struct vcpu_vmx * vmx,u32 msr)935 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
936 {
937 	if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
938 		return true;
939 
940 	return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
941 }
942 
__vmx_vcpu_run_flags(struct vcpu_vmx * vmx)943 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
944 {
945 	unsigned int flags = 0;
946 
947 	if (vmx->loaded_vmcs->launched)
948 		flags |= VMX_RUN_VMRESUME;
949 
950 	/*
951 	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
952 	 * to change it directly without causing a vmexit.  In that case read
953 	 * it after vmexit and store it in vmx->spec_ctrl.
954 	 */
955 	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
956 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
957 
958 	return flags;
959 }
960 
clear_atomic_switch_msr_special(struct vcpu_vmx * vmx,unsigned long entry,unsigned long exit)961 static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
962 		unsigned long entry, unsigned long exit)
963 {
964 	vm_entry_controls_clearbit(vmx, entry);
965 	vm_exit_controls_clearbit(vmx, exit);
966 }
967 
vmx_find_loadstore_msr_slot(struct vmx_msrs * m,u32 msr)968 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
969 {
970 	unsigned int i;
971 
972 	for (i = 0; i < m->nr; ++i) {
973 		if (m->val[i].index == msr)
974 			return i;
975 	}
976 	return -ENOENT;
977 }
978 
clear_atomic_switch_msr(struct vcpu_vmx * vmx,unsigned msr)979 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
980 {
981 	int i;
982 	struct msr_autoload *m = &vmx->msr_autoload;
983 
984 	switch (msr) {
985 	case MSR_EFER:
986 		if (cpu_has_load_ia32_efer()) {
987 			clear_atomic_switch_msr_special(vmx,
988 					VM_ENTRY_LOAD_IA32_EFER,
989 					VM_EXIT_LOAD_IA32_EFER);
990 			return;
991 		}
992 		break;
993 	case MSR_CORE_PERF_GLOBAL_CTRL:
994 		if (cpu_has_load_perf_global_ctrl()) {
995 			clear_atomic_switch_msr_special(vmx,
996 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
997 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
998 			return;
999 		}
1000 		break;
1001 	}
1002 	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1003 	if (i < 0)
1004 		goto skip_guest;
1005 	--m->guest.nr;
1006 	m->guest.val[i] = m->guest.val[m->guest.nr];
1007 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1008 
1009 skip_guest:
1010 	i = vmx_find_loadstore_msr_slot(&m->host, msr);
1011 	if (i < 0)
1012 		return;
1013 
1014 	--m->host.nr;
1015 	m->host.val[i] = m->host.val[m->host.nr];
1016 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1017 }
1018 
add_atomic_switch_msr_special(struct vcpu_vmx * vmx,unsigned long entry,unsigned long exit,unsigned long guest_val_vmcs,unsigned long host_val_vmcs,u64 guest_val,u64 host_val)1019 static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1020 		unsigned long entry, unsigned long exit,
1021 		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1022 		u64 guest_val, u64 host_val)
1023 {
1024 	vmcs_write64(guest_val_vmcs, guest_val);
1025 	if (host_val_vmcs != HOST_IA32_EFER)
1026 		vmcs_write64(host_val_vmcs, host_val);
1027 	vm_entry_controls_setbit(vmx, entry);
1028 	vm_exit_controls_setbit(vmx, exit);
1029 }
1030 
add_atomic_switch_msr(struct vcpu_vmx * vmx,unsigned msr,u64 guest_val,u64 host_val,bool entry_only)1031 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1032 				  u64 guest_val, u64 host_val, bool entry_only)
1033 {
1034 	int i, j = 0;
1035 	struct msr_autoload *m = &vmx->msr_autoload;
1036 
1037 	switch (msr) {
1038 	case MSR_EFER:
1039 		if (cpu_has_load_ia32_efer()) {
1040 			add_atomic_switch_msr_special(vmx,
1041 					VM_ENTRY_LOAD_IA32_EFER,
1042 					VM_EXIT_LOAD_IA32_EFER,
1043 					GUEST_IA32_EFER,
1044 					HOST_IA32_EFER,
1045 					guest_val, host_val);
1046 			return;
1047 		}
1048 		break;
1049 	case MSR_CORE_PERF_GLOBAL_CTRL:
1050 		if (cpu_has_load_perf_global_ctrl()) {
1051 			add_atomic_switch_msr_special(vmx,
1052 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1053 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1054 					GUEST_IA32_PERF_GLOBAL_CTRL,
1055 					HOST_IA32_PERF_GLOBAL_CTRL,
1056 					guest_val, host_val);
1057 			return;
1058 		}
1059 		break;
1060 	case MSR_IA32_PEBS_ENABLE:
1061 		/* PEBS needs a quiescent period after being disabled (to write
1062 		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
1063 		 * provide that period, so a CPU could write host's record into
1064 		 * guest's memory.
1065 		 */
1066 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1067 	}
1068 
1069 	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1070 	if (!entry_only)
1071 		j = vmx_find_loadstore_msr_slot(&m->host, msr);
1072 
1073 	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
1074 	    (j < 0 &&  m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
1075 		printk_once(KERN_WARNING "Not enough msr switch entries. "
1076 				"Can't add msr %x\n", msr);
1077 		return;
1078 	}
1079 	if (i < 0) {
1080 		i = m->guest.nr++;
1081 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1082 	}
1083 	m->guest.val[i].index = msr;
1084 	m->guest.val[i].value = guest_val;
1085 
1086 	if (entry_only)
1087 		return;
1088 
1089 	if (j < 0) {
1090 		j = m->host.nr++;
1091 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1092 	}
1093 	m->host.val[j].index = msr;
1094 	m->host.val[j].value = host_val;
1095 }
1096 
update_transition_efer(struct vcpu_vmx * vmx)1097 static bool update_transition_efer(struct vcpu_vmx *vmx)
1098 {
1099 	u64 guest_efer = vmx->vcpu.arch.efer;
1100 	u64 ignore_bits = 0;
1101 	int i;
1102 
1103 	/* Shadow paging assumes NX to be available.  */
1104 	if (!enable_ept)
1105 		guest_efer |= EFER_NX;
1106 
1107 	/*
1108 	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
1109 	 */
1110 	ignore_bits |= EFER_SCE;
1111 #ifdef CONFIG_X86_64
1112 	ignore_bits |= EFER_LMA | EFER_LME;
1113 	/* SCE is meaningful only in long mode on Intel */
1114 	if (guest_efer & EFER_LMA)
1115 		ignore_bits &= ~(u64)EFER_SCE;
1116 #endif
1117 
1118 	/*
1119 	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
1120 	 * On CPUs that support "load IA32_EFER", always switch EFER
1121 	 * atomically, since it's faster than switching it manually.
1122 	 */
1123 	if (cpu_has_load_ia32_efer() ||
1124 	    (enable_ept && ((vmx->vcpu.arch.efer ^ kvm_host.efer) & EFER_NX))) {
1125 		if (!(guest_efer & EFER_LMA))
1126 			guest_efer &= ~EFER_LME;
1127 		if (guest_efer != kvm_host.efer)
1128 			add_atomic_switch_msr(vmx, MSR_EFER,
1129 					      guest_efer, kvm_host.efer, false);
1130 		else
1131 			clear_atomic_switch_msr(vmx, MSR_EFER);
1132 		return false;
1133 	}
1134 
1135 	i = kvm_find_user_return_msr(MSR_EFER);
1136 	if (i < 0)
1137 		return false;
1138 
1139 	clear_atomic_switch_msr(vmx, MSR_EFER);
1140 
1141 	guest_efer &= ~ignore_bits;
1142 	guest_efer |= kvm_host.efer & ignore_bits;
1143 
1144 	vmx->guest_uret_msrs[i].data = guest_efer;
1145 	vmx->guest_uret_msrs[i].mask = ~ignore_bits;
1146 
1147 	return true;
1148 }
1149 
1150 #ifdef CONFIG_X86_32
1151 /*
1152  * On 32-bit kernels, VM exits still load the FS and GS bases from the
1153  * VMCS rather than the segment table.  KVM uses this helper to figure
1154  * out the current bases to poke them into the VMCS before entry.
1155  */
segment_base(u16 selector)1156 static unsigned long segment_base(u16 selector)
1157 {
1158 	struct desc_struct *table;
1159 	unsigned long v;
1160 
1161 	if (!(selector & ~SEGMENT_RPL_MASK))
1162 		return 0;
1163 
1164 	table = get_current_gdt_ro();
1165 
1166 	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1167 		u16 ldt_selector = kvm_read_ldt();
1168 
1169 		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
1170 			return 0;
1171 
1172 		table = (struct desc_struct *)segment_base(ldt_selector);
1173 	}
1174 	v = get_desc_base(&table[selector >> 3]);
1175 	return v;
1176 }
1177 #endif
1178 
pt_can_write_msr(struct vcpu_vmx * vmx)1179 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1180 {
1181 	return vmx_pt_mode_is_host_guest() &&
1182 	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1183 }
1184 
pt_output_base_valid(struct kvm_vcpu * vcpu,u64 base)1185 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
1186 {
1187 	/* The base must be 128-byte aligned and a legal physical address. */
1188 	return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
1189 }
1190 
pt_load_msr(struct pt_ctx * ctx,u32 addr_range)1191 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
1192 {
1193 	u32 i;
1194 
1195 	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1196 	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1197 	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1198 	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1199 	for (i = 0; i < addr_range; i++) {
1200 		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1201 		wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1202 	}
1203 }
1204 
pt_save_msr(struct pt_ctx * ctx,u32 addr_range)1205 static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
1206 {
1207 	u32 i;
1208 
1209 	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1210 	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1211 	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1212 	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1213 	for (i = 0; i < addr_range; i++) {
1214 		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1215 		rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1216 	}
1217 }
1218 
pt_guest_enter(struct vcpu_vmx * vmx)1219 static void pt_guest_enter(struct vcpu_vmx *vmx)
1220 {
1221 	if (vmx_pt_mode_is_system())
1222 		return;
1223 
1224 	/*
1225 	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
1226 	 * Save host state before VM entry.
1227 	 */
1228 	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1229 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1230 		wrmsrl(MSR_IA32_RTIT_CTL, 0);
1231 		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1232 		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1233 	}
1234 }
1235 
pt_guest_exit(struct vcpu_vmx * vmx)1236 static void pt_guest_exit(struct vcpu_vmx *vmx)
1237 {
1238 	if (vmx_pt_mode_is_system())
1239 		return;
1240 
1241 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1242 		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1243 		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1244 	}
1245 
1246 	/*
1247 	 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
1248 	 * i.e. RTIT_CTL is always cleared on VM-Exit.  Restore it if necessary.
1249 	 */
1250 	if (vmx->pt_desc.host.ctl)
1251 		wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1252 }
1253 
vmx_set_host_fs_gs(struct vmcs_host_state * host,u16 fs_sel,u16 gs_sel,unsigned long fs_base,unsigned long gs_base)1254 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
1255 			unsigned long fs_base, unsigned long gs_base)
1256 {
1257 	if (unlikely(fs_sel != host->fs_sel)) {
1258 		if (!(fs_sel & 7))
1259 			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1260 		else
1261 			vmcs_write16(HOST_FS_SELECTOR, 0);
1262 		host->fs_sel = fs_sel;
1263 	}
1264 	if (unlikely(gs_sel != host->gs_sel)) {
1265 		if (!(gs_sel & 7))
1266 			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1267 		else
1268 			vmcs_write16(HOST_GS_SELECTOR, 0);
1269 		host->gs_sel = gs_sel;
1270 	}
1271 	if (unlikely(fs_base != host->fs_base)) {
1272 		vmcs_writel(HOST_FS_BASE, fs_base);
1273 		host->fs_base = fs_base;
1274 	}
1275 	if (unlikely(gs_base != host->gs_base)) {
1276 		vmcs_writel(HOST_GS_BASE, gs_base);
1277 		host->gs_base = gs_base;
1278 	}
1279 }
1280 
vmx_prepare_switch_to_guest(struct kvm_vcpu * vcpu)1281 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1282 {
1283 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1284 	struct vmcs_host_state *host_state;
1285 #ifdef CONFIG_X86_64
1286 	int cpu = raw_smp_processor_id();
1287 #endif
1288 	unsigned long fs_base, gs_base;
1289 	u16 fs_sel, gs_sel;
1290 	int i;
1291 
1292 	/*
1293 	 * Note that guest MSRs to be saved/restored can also be changed
1294 	 * when guest state is loaded. This happens when guest transitions
1295 	 * to/from long-mode by setting MSR_EFER.LMA.
1296 	 */
1297 	if (!vmx->guest_uret_msrs_loaded) {
1298 		vmx->guest_uret_msrs_loaded = true;
1299 		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
1300 			if (!vmx->guest_uret_msrs[i].load_into_hardware)
1301 				continue;
1302 
1303 			kvm_set_user_return_msr(i,
1304 						vmx->guest_uret_msrs[i].data,
1305 						vmx->guest_uret_msrs[i].mask);
1306 		}
1307 	}
1308 
1309 	if (vmx->nested.need_vmcs12_to_shadow_sync)
1310 		nested_sync_vmcs12_to_shadow(vcpu);
1311 
1312 	if (vmx->guest_state_loaded)
1313 		return;
1314 
1315 	host_state = &vmx->loaded_vmcs->host_state;
1316 
1317 	/*
1318 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
1319 	 * allow segment selectors with cpl > 0 or ti == 1.
1320 	 */
1321 	host_state->ldt_sel = kvm_read_ldt();
1322 
1323 #ifdef CONFIG_X86_64
1324 	savesegment(ds, host_state->ds_sel);
1325 	savesegment(es, host_state->es_sel);
1326 
1327 	gs_base = cpu_kernelmode_gs_base(cpu);
1328 	if (likely(is_64bit_mm(current->mm))) {
1329 		current_save_fsgs();
1330 		fs_sel = current->thread.fsindex;
1331 		gs_sel = current->thread.gsindex;
1332 		fs_base = current->thread.fsbase;
1333 		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1334 	} else {
1335 		savesegment(fs, fs_sel);
1336 		savesegment(gs, gs_sel);
1337 		fs_base = read_msr(MSR_FS_BASE);
1338 		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1339 	}
1340 
1341 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1342 #else
1343 	savesegment(fs, fs_sel);
1344 	savesegment(gs, gs_sel);
1345 	fs_base = segment_base(fs_sel);
1346 	gs_base = segment_base(gs_sel);
1347 #endif
1348 
1349 	vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
1350 	vmx->guest_state_loaded = true;
1351 }
1352 
vmx_prepare_switch_to_host(struct vcpu_vmx * vmx)1353 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1354 {
1355 	struct vmcs_host_state *host_state;
1356 
1357 	if (!vmx->guest_state_loaded)
1358 		return;
1359 
1360 	host_state = &vmx->loaded_vmcs->host_state;
1361 
1362 	++vmx->vcpu.stat.host_state_reload;
1363 
1364 #ifdef CONFIG_X86_64
1365 	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1366 #endif
1367 	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
1368 		kvm_load_ldt(host_state->ldt_sel);
1369 #ifdef CONFIG_X86_64
1370 		load_gs_index(host_state->gs_sel);
1371 #else
1372 		loadsegment(gs, host_state->gs_sel);
1373 #endif
1374 	}
1375 	if (host_state->fs_sel & 7)
1376 		loadsegment(fs, host_state->fs_sel);
1377 #ifdef CONFIG_X86_64
1378 	if (unlikely(host_state->ds_sel | host_state->es_sel)) {
1379 		loadsegment(ds, host_state->ds_sel);
1380 		loadsegment(es, host_state->es_sel);
1381 	}
1382 #endif
1383 	invalidate_tss_limit();
1384 #ifdef CONFIG_X86_64
1385 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1386 #endif
1387 	load_fixmap_gdt(raw_smp_processor_id());
1388 	vmx->guest_state_loaded = false;
1389 	vmx->guest_uret_msrs_loaded = false;
1390 }
1391 
1392 #ifdef CONFIG_X86_64
vmx_read_guest_kernel_gs_base(struct vcpu_vmx * vmx)1393 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1394 {
1395 	preempt_disable();
1396 	if (vmx->guest_state_loaded)
1397 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1398 	preempt_enable();
1399 	return vmx->msr_guest_kernel_gs_base;
1400 }
1401 
vmx_write_guest_kernel_gs_base(struct vcpu_vmx * vmx,u64 data)1402 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1403 {
1404 	preempt_disable();
1405 	if (vmx->guest_state_loaded)
1406 		wrmsrl(MSR_KERNEL_GS_BASE, data);
1407 	preempt_enable();
1408 	vmx->msr_guest_kernel_gs_base = data;
1409 }
1410 #endif
1411 
grow_ple_window(struct kvm_vcpu * vcpu)1412 static void grow_ple_window(struct kvm_vcpu *vcpu)
1413 {
1414 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1415 	unsigned int old = vmx->ple_window;
1416 
1417 	vmx->ple_window = __grow_ple_window(old, ple_window,
1418 					    ple_window_grow,
1419 					    ple_window_max);
1420 
1421 	if (vmx->ple_window != old) {
1422 		vmx->ple_window_dirty = true;
1423 		trace_kvm_ple_window_update(vcpu->vcpu_id,
1424 					    vmx->ple_window, old);
1425 	}
1426 }
1427 
shrink_ple_window(struct kvm_vcpu * vcpu)1428 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1429 {
1430 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1431 	unsigned int old = vmx->ple_window;
1432 
1433 	vmx->ple_window = __shrink_ple_window(old, ple_window,
1434 					      ple_window_shrink,
1435 					      ple_window);
1436 
1437 	if (vmx->ple_window != old) {
1438 		vmx->ple_window_dirty = true;
1439 		trace_kvm_ple_window_update(vcpu->vcpu_id,
1440 					    vmx->ple_window, old);
1441 	}
1442 }
1443 
vmx_vcpu_load_vmcs(struct kvm_vcpu * vcpu,int cpu,struct loaded_vmcs * buddy)1444 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1445 			struct loaded_vmcs *buddy)
1446 {
1447 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1448 	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1449 	struct vmcs *prev;
1450 
1451 	if (!already_loaded) {
1452 		loaded_vmcs_clear(vmx->loaded_vmcs);
1453 		local_irq_disable();
1454 
1455 		/*
1456 		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1457 		 * this cpu's percpu list, otherwise it may not yet be deleted
1458 		 * from its previous cpu's percpu list.  Pairs with the
1459 		 * smb_wmb() in __loaded_vmcs_clear().
1460 		 */
1461 		smp_rmb();
1462 
1463 		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1464 			 &per_cpu(loaded_vmcss_on_cpu, cpu));
1465 		local_irq_enable();
1466 	}
1467 
1468 	prev = per_cpu(current_vmcs, cpu);
1469 	if (prev != vmx->loaded_vmcs->vmcs) {
1470 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1471 		vmcs_load(vmx->loaded_vmcs->vmcs);
1472 
1473 		/*
1474 		 * No indirect branch prediction barrier needed when switching
1475 		 * the active VMCS within a vCPU, unless IBRS is advertised to
1476 		 * the vCPU.  To minimize the number of IBPBs executed, KVM
1477 		 * performs IBPB on nested VM-Exit (a single nested transition
1478 		 * may switch the active VMCS multiple times).
1479 		 */
1480 		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
1481 			indirect_branch_prediction_barrier();
1482 	}
1483 
1484 	if (!already_loaded) {
1485 		void *gdt = get_current_gdt_ro();
1486 
1487 		/*
1488 		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
1489 		 * TLB entries from its previous association with the vCPU.
1490 		 */
1491 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1492 
1493 		/*
1494 		 * Linux uses per-cpu TSS and GDT, so set these when switching
1495 		 * processors.  See 22.2.4.
1496 		 */
1497 		vmcs_writel(HOST_TR_BASE,
1498 			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1499 		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
1500 
1501 		if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
1502 			/* 22.2.3 */
1503 			vmcs_writel(HOST_IA32_SYSENTER_ESP,
1504 				    (unsigned long)(cpu_entry_stack(cpu) + 1));
1505 		}
1506 
1507 		vmx->loaded_vmcs->cpu = cpu;
1508 	}
1509 }
1510 
1511 /*
1512  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1513  * vcpu mutex is already taken.
1514  */
vmx_vcpu_load(struct kvm_vcpu * vcpu,int cpu)1515 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1516 {
1517 	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
1518 		shrink_ple_window(vcpu);
1519 
1520 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1521 
1522 	vmx_vcpu_pi_load(vcpu, cpu);
1523 }
1524 
vmx_vcpu_put(struct kvm_vcpu * vcpu)1525 void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1526 {
1527 	vmx_vcpu_pi_put(vcpu);
1528 
1529 	vmx_prepare_switch_to_host(to_vmx(vcpu));
1530 }
1531 
vmx_emulation_required(struct kvm_vcpu * vcpu)1532 bool vmx_emulation_required(struct kvm_vcpu *vcpu)
1533 {
1534 	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1535 }
1536 
vmx_get_rflags(struct kvm_vcpu * vcpu)1537 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1538 {
1539 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1540 	unsigned long rflags, save_rflags;
1541 
1542 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1543 		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1544 		rflags = vmcs_readl(GUEST_RFLAGS);
1545 		if (vmx->rmode.vm86_active) {
1546 			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1547 			save_rflags = vmx->rmode.save_rflags;
1548 			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1549 		}
1550 		vmx->rflags = rflags;
1551 	}
1552 	return vmx->rflags;
1553 }
1554 
vmx_set_rflags(struct kvm_vcpu * vcpu,unsigned long rflags)1555 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1556 {
1557 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1558 	unsigned long old_rflags;
1559 
1560 	/*
1561 	 * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
1562 	 * is an unrestricted guest in order to mark L2 as needing emulation
1563 	 * if L1 runs L2 as a restricted guest.
1564 	 */
1565 	if (is_unrestricted_guest(vcpu)) {
1566 		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1567 		vmx->rflags = rflags;
1568 		vmcs_writel(GUEST_RFLAGS, rflags);
1569 		return;
1570 	}
1571 
1572 	old_rflags = vmx_get_rflags(vcpu);
1573 	vmx->rflags = rflags;
1574 	if (vmx->rmode.vm86_active) {
1575 		vmx->rmode.save_rflags = rflags;
1576 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1577 	}
1578 	vmcs_writel(GUEST_RFLAGS, rflags);
1579 
1580 	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1581 		vmx->emulation_required = vmx_emulation_required(vcpu);
1582 }
1583 
vmx_get_if_flag(struct kvm_vcpu * vcpu)1584 bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
1585 {
1586 	return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
1587 }
1588 
vmx_get_interrupt_shadow(struct kvm_vcpu * vcpu)1589 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1590 {
1591 	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1592 	int ret = 0;
1593 
1594 	if (interruptibility & GUEST_INTR_STATE_STI)
1595 		ret |= KVM_X86_SHADOW_INT_STI;
1596 	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1597 		ret |= KVM_X86_SHADOW_INT_MOV_SS;
1598 
1599 	return ret;
1600 }
1601 
vmx_set_interrupt_shadow(struct kvm_vcpu * vcpu,int mask)1602 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1603 {
1604 	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1605 	u32 interruptibility = interruptibility_old;
1606 
1607 	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1608 
1609 	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1610 		interruptibility |= GUEST_INTR_STATE_MOV_SS;
1611 	else if (mask & KVM_X86_SHADOW_INT_STI)
1612 		interruptibility |= GUEST_INTR_STATE_STI;
1613 
1614 	if ((interruptibility != interruptibility_old))
1615 		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1616 }
1617 
vmx_rtit_ctl_check(struct kvm_vcpu * vcpu,u64 data)1618 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1619 {
1620 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1621 	unsigned long value;
1622 
1623 	/*
1624 	 * Any MSR write that attempts to change bits marked reserved will
1625 	 * case a #GP fault.
1626 	 */
1627 	if (data & vmx->pt_desc.ctl_bitmask)
1628 		return 1;
1629 
1630 	/*
1631 	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
1632 	 * result in a #GP unless the same write also clears TraceEn.
1633 	 */
1634 	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1635 	    (data & RTIT_CTL_TRACEEN) &&
1636 	    data != vmx->pt_desc.guest.ctl)
1637 		return 1;
1638 
1639 	/*
1640 	 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit
1641 	 * and FabricEn would cause #GP, if
1642 	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
1643 	 */
1644 	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
1645 		!(data & RTIT_CTL_FABRIC_EN) &&
1646 		!intel_pt_validate_cap(vmx->pt_desc.caps,
1647 					PT_CAP_single_range_output))
1648 		return 1;
1649 
1650 	/*
1651 	 * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that
1652 	 * utilize encodings marked reserved will cause a #GP fault.
1653 	 */
1654 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1655 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1656 			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
1657 			RTIT_CTL_MTC_RANGE_OFFSET, &value))
1658 		return 1;
1659 	value = intel_pt_validate_cap(vmx->pt_desc.caps,
1660 						PT_CAP_cycle_thresholds);
1661 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1662 			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
1663 			RTIT_CTL_CYC_THRESH_OFFSET, &value))
1664 		return 1;
1665 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1666 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1667 			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
1668 			RTIT_CTL_PSB_FREQ_OFFSET, &value))
1669 		return 1;
1670 
1671 	/*
1672 	 * If ADDRx_CFG is reserved or the encodings is >2 will
1673 	 * cause a #GP fault.
1674 	 */
1675 	value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1676 	if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
1677 		return 1;
1678 	value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1679 	if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
1680 		return 1;
1681 	value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1682 	if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
1683 		return 1;
1684 	value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
1685 	if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
1686 		return 1;
1687 
1688 	return 0;
1689 }
1690 
vmx_check_emulate_instruction(struct kvm_vcpu * vcpu,int emul_type,void * insn,int insn_len)1691 int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1692 				  void *insn, int insn_len)
1693 {
1694 	/*
1695 	 * Emulation of instructions in SGX enclaves is impossible as RIP does
1696 	 * not point at the failing instruction, and even if it did, the code
1697 	 * stream is inaccessible.  Inject #UD instead of exiting to userspace
1698 	 * so that guest userspace can't DoS the guest simply by triggering
1699 	 * emulation (enclaves are CPL3 only).
1700 	 */
1701 	if (to_vmx(vcpu)->exit_reason.enclave_mode) {
1702 		kvm_queue_exception(vcpu, UD_VECTOR);
1703 		return X86EMUL_PROPAGATE_FAULT;
1704 	}
1705 
1706 	/* Check that emulation is possible during event vectoring */
1707 	if ((to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
1708 	    !kvm_can_emulate_event_vectoring(emul_type))
1709 		return X86EMUL_UNHANDLEABLE_VECTORING;
1710 
1711 	return X86EMUL_CONTINUE;
1712 }
1713 
skip_emulated_instruction(struct kvm_vcpu * vcpu)1714 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
1715 {
1716 	union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
1717 	unsigned long rip, orig_rip;
1718 	u32 instr_len;
1719 
1720 	/*
1721 	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
1722 	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
1723 	 * set when EPT misconfig occurs.  In practice, real hardware updates
1724 	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
1725 	 * (namely Hyper-V) don't set it due to it being undefined behavior,
1726 	 * i.e. we end up advancing IP with some random value.
1727 	 */
1728 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
1729 	    exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1730 		instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1731 
1732 		/*
1733 		 * Emulating an enclave's instructions isn't supported as KVM
1734 		 * cannot access the enclave's memory or its true RIP, e.g. the
1735 		 * vmcs.GUEST_RIP points at the exit point of the enclave, not
1736 		 * the RIP that actually triggered the VM-Exit.  But, because
1737 		 * most instructions that cause VM-Exit will #UD in an enclave,
1738 		 * most instruction-based VM-Exits simply do not occur.
1739 		 *
1740 		 * There are a few exceptions, notably the debug instructions
1741 		 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
1742 		 * and generate #DB/#BP as expected, which KVM might intercept.
1743 		 * But again, the CPU does the dirty work and saves an instr
1744 		 * length of zero so VMMs don't shoot themselves in the foot.
1745 		 * WARN if KVM tries to skip a non-zero length instruction on
1746 		 * a VM-Exit from an enclave.
1747 		 */
1748 		if (!instr_len)
1749 			goto rip_updated;
1750 
1751 		WARN_ONCE(exit_reason.enclave_mode,
1752 			  "skipping instruction after SGX enclave VM-Exit");
1753 
1754 		orig_rip = kvm_rip_read(vcpu);
1755 		rip = orig_rip + instr_len;
1756 #ifdef CONFIG_X86_64
1757 		/*
1758 		 * We need to mask out the high 32 bits of RIP if not in 64-bit
1759 		 * mode, but just finding out that we are in 64-bit mode is
1760 		 * quite expensive.  Only do it if there was a carry.
1761 		 */
1762 		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1763 			rip = (u32)rip;
1764 #endif
1765 		kvm_rip_write(vcpu, rip);
1766 	} else {
1767 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
1768 			return 0;
1769 	}
1770 
1771 rip_updated:
1772 	/* skipping an emulated instruction also counts */
1773 	vmx_set_interrupt_shadow(vcpu, 0);
1774 
1775 	return 1;
1776 }
1777 
1778 /*
1779  * Recognizes a pending MTF VM-exit and records the nested state for later
1780  * delivery.
1781  */
vmx_update_emulated_instruction(struct kvm_vcpu * vcpu)1782 void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
1783 {
1784 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1785 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1786 
1787 	if (!is_guest_mode(vcpu))
1788 		return;
1789 
1790 	/*
1791 	 * Per the SDM, MTF takes priority over debug-trap exceptions besides
1792 	 * TSS T-bit traps and ICEBP (INT1).  KVM doesn't emulate T-bit traps
1793 	 * or ICEBP (in the emulator proper), and skipping of ICEBP after an
1794 	 * intercepted #DB deliberately avoids single-step #DB and MTF updates
1795 	 * as ICEBP is higher priority than both.  As instruction emulation is
1796 	 * completed at this point (i.e. KVM is at the instruction boundary),
1797 	 * any #DB exception pending delivery must be a debug-trap of lower
1798 	 * priority than MTF.  Record the pending MTF state to be delivered in
1799 	 * vmx_check_nested_events().
1800 	 */
1801 	if (nested_cpu_has_mtf(vmcs12) &&
1802 	    (!vcpu->arch.exception.pending ||
1803 	     vcpu->arch.exception.vector == DB_VECTOR) &&
1804 	    (!vcpu->arch.exception_vmexit.pending ||
1805 	     vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1806 		vmx->nested.mtf_pending = true;
1807 		kvm_make_request(KVM_REQ_EVENT, vcpu);
1808 	} else {
1809 		vmx->nested.mtf_pending = false;
1810 	}
1811 }
1812 
vmx_skip_emulated_instruction(struct kvm_vcpu * vcpu)1813 int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
1814 {
1815 	vmx_update_emulated_instruction(vcpu);
1816 	return skip_emulated_instruction(vcpu);
1817 }
1818 
vmx_clear_hlt(struct kvm_vcpu * vcpu)1819 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
1820 {
1821 	/*
1822 	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
1823 	 * explicitly skip the instruction because if the HLT state is set,
1824 	 * then the instruction is already executing and RIP has already been
1825 	 * advanced.
1826 	 */
1827 	if (kvm_hlt_in_guest(vcpu->kvm) &&
1828 			vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1829 		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1830 }
1831 
vmx_inject_exception(struct kvm_vcpu * vcpu)1832 void vmx_inject_exception(struct kvm_vcpu *vcpu)
1833 {
1834 	struct kvm_queued_exception *ex = &vcpu->arch.exception;
1835 	u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
1836 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1837 
1838 	kvm_deliver_exception_payload(vcpu, ex);
1839 
1840 	if (ex->has_error_code) {
1841 		/*
1842 		 * Despite the error code being architecturally defined as 32
1843 		 * bits, and the VMCS field being 32 bits, Intel CPUs and thus
1844 		 * VMX don't actually supporting setting bits 31:16.  Hardware
1845 		 * will (should) never provide a bogus error code, but AMD CPUs
1846 		 * do generate error codes with bits 31:16 set, and so KVM's
1847 		 * ABI lets userspace shove in arbitrary 32-bit values.  Drop
1848 		 * the upper bits to avoid VM-Fail, losing information that
1849 		 * doesn't really exist is preferable to killing the VM.
1850 		 */
1851 		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
1852 		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1853 	}
1854 
1855 	if (vmx->rmode.vm86_active) {
1856 		int inc_eip = 0;
1857 		if (kvm_exception_is_soft(ex->vector))
1858 			inc_eip = vcpu->arch.event_exit_inst_len;
1859 		kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip);
1860 		return;
1861 	}
1862 
1863 	WARN_ON_ONCE(vmx->emulation_required);
1864 
1865 	if (kvm_exception_is_soft(ex->vector)) {
1866 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1867 			     vmx->vcpu.arch.event_exit_inst_len);
1868 		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1869 	} else
1870 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
1871 
1872 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1873 
1874 	vmx_clear_hlt(vcpu);
1875 }
1876 
vmx_setup_uret_msr(struct vcpu_vmx * vmx,unsigned int msr,bool load_into_hardware)1877 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
1878 			       bool load_into_hardware)
1879 {
1880 	struct vmx_uret_msr *uret_msr;
1881 
1882 	uret_msr = vmx_find_uret_msr(vmx, msr);
1883 	if (!uret_msr)
1884 		return;
1885 
1886 	uret_msr->load_into_hardware = load_into_hardware;
1887 }
1888 
1889 /*
1890  * Configuring user return MSRs to automatically save, load, and restore MSRs
1891  * that need to be shoved into hardware when running the guest.  Note, omitting
1892  * an MSR here does _NOT_ mean it's not emulated, only that it will not be
1893  * loaded into hardware when running the guest.
1894  */
vmx_setup_uret_msrs(struct vcpu_vmx * vmx)1895 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
1896 {
1897 #ifdef CONFIG_X86_64
1898 	bool load_syscall_msrs;
1899 
1900 	/*
1901 	 * The SYSCALL MSRs are only needed on long mode guests, and only
1902 	 * when EFER.SCE is set.
1903 	 */
1904 	load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1905 			    (vmx->vcpu.arch.efer & EFER_SCE);
1906 
1907 	vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
1908 	vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
1909 	vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
1910 #endif
1911 	vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
1912 
1913 	vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
1914 			   guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1915 			   guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDPID));
1916 
1917 	/*
1918 	 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
1919 	 * kernel and old userspace.  If those guests run on a tsx=off host, do
1920 	 * allow guests to use TSX_CTRL, but don't change the value in hardware
1921 	 * so that TSX remains always disabled.
1922 	 */
1923 	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
1924 
1925 	/*
1926 	 * The set of MSRs to load may have changed, reload MSRs before the
1927 	 * next VM-Enter.
1928 	 */
1929 	vmx->guest_uret_msrs_loaded = false;
1930 }
1931 
vmx_get_l2_tsc_offset(struct kvm_vcpu * vcpu)1932 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1933 {
1934 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1935 
1936 	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
1937 		return vmcs12->tsc_offset;
1938 
1939 	return 0;
1940 }
1941 
vmx_get_l2_tsc_multiplier(struct kvm_vcpu * vcpu)1942 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1943 {
1944 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1945 
1946 	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
1947 	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
1948 		return vmcs12->tsc_multiplier;
1949 
1950 	return kvm_caps.default_tsc_scaling_ratio;
1951 }
1952 
vmx_write_tsc_offset(struct kvm_vcpu * vcpu)1953 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
1954 {
1955 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
1956 }
1957 
vmx_write_tsc_multiplier(struct kvm_vcpu * vcpu)1958 void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
1959 {
1960 	vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
1961 }
1962 
1963 /*
1964  * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of
1965  * guest CPUID.  Note, KVM allows userspace to set "VMX in SMX" to maintain
1966  * backwards compatibility even though KVM doesn't support emulating SMX.  And
1967  * because userspace set "VMX in SMX", the guest must also be allowed to set it,
1968  * e.g. if the MSR is left unlocked and the guest does a RMW operation.
1969  */
1970 #define KVM_SUPPORTED_FEATURE_CONTROL  (FEAT_CTL_LOCKED			 | \
1971 					FEAT_CTL_VMX_ENABLED_INSIDE_SMX	 | \
1972 					FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \
1973 					FEAT_CTL_SGX_LC_ENABLED		 | \
1974 					FEAT_CTL_SGX_ENABLED		 | \
1975 					FEAT_CTL_LMCE_ENABLED)
1976 
is_vmx_feature_control_msr_valid(struct vcpu_vmx * vmx,struct msr_data * msr)1977 static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
1978 						    struct msr_data *msr)
1979 {
1980 	uint64_t valid_bits;
1981 
1982 	/*
1983 	 * Ensure KVM_SUPPORTED_FEATURE_CONTROL is updated when new bits are
1984 	 * exposed to the guest.
1985 	 */
1986 	WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits &
1987 		     ~KVM_SUPPORTED_FEATURE_CONTROL);
1988 
1989 	if (!msr->host_initiated &&
1990 	    (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED))
1991 		return false;
1992 
1993 	if (msr->host_initiated)
1994 		valid_bits = KVM_SUPPORTED_FEATURE_CONTROL;
1995 	else
1996 		valid_bits = vmx->msr_ia32_feature_control_valid_bits;
1997 
1998 	return !(msr->data & ~valid_bits);
1999 }
2000 
vmx_get_feature_msr(u32 msr,u64 * data)2001 int vmx_get_feature_msr(u32 msr, u64 *data)
2002 {
2003 	switch (msr) {
2004 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2005 		if (!nested)
2006 			return 1;
2007 		return vmx_get_vmx_msr(&vmcs_config.nested, msr, data);
2008 	default:
2009 		return KVM_MSR_RET_UNSUPPORTED;
2010 	}
2011 }
2012 
2013 /*
2014  * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
2015  * Returns 0 on success, non-0 otherwise.
2016  * Assumes vcpu_load() was already called.
2017  */
vmx_get_msr(struct kvm_vcpu * vcpu,struct msr_data * msr_info)2018 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2019 {
2020 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2021 	struct vmx_uret_msr *msr;
2022 	u32 index;
2023 
2024 	switch (msr_info->index) {
2025 #ifdef CONFIG_X86_64
2026 	case MSR_FS_BASE:
2027 		msr_info->data = vmcs_readl(GUEST_FS_BASE);
2028 		break;
2029 	case MSR_GS_BASE:
2030 		msr_info->data = vmcs_readl(GUEST_GS_BASE);
2031 		break;
2032 	case MSR_KERNEL_GS_BASE:
2033 		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
2034 		break;
2035 #endif
2036 	case MSR_EFER:
2037 		return kvm_get_msr_common(vcpu, msr_info);
2038 	case MSR_IA32_TSX_CTRL:
2039 		if (!msr_info->host_initiated &&
2040 		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2041 			return 1;
2042 		goto find_uret_msr;
2043 	case MSR_IA32_UMWAIT_CONTROL:
2044 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2045 			return 1;
2046 
2047 		msr_info->data = vmx->msr_ia32_umwait_control;
2048 		break;
2049 	case MSR_IA32_SPEC_CTRL:
2050 		if (!msr_info->host_initiated &&
2051 		    !guest_has_spec_ctrl_msr(vcpu))
2052 			return 1;
2053 
2054 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
2055 		break;
2056 	case MSR_IA32_SYSENTER_CS:
2057 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
2058 		break;
2059 	case MSR_IA32_SYSENTER_EIP:
2060 		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
2061 		break;
2062 	case MSR_IA32_SYSENTER_ESP:
2063 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2064 		break;
2065 	case MSR_IA32_BNDCFGS:
2066 		if (!kvm_mpx_supported() ||
2067 		    (!msr_info->host_initiated &&
2068 		     !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX)))
2069 			return 1;
2070 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2071 		break;
2072 	case MSR_IA32_MCG_EXT_CTL:
2073 		if (!msr_info->host_initiated &&
2074 		    !(vmx->msr_ia32_feature_control &
2075 		      FEAT_CTL_LMCE_ENABLED))
2076 			return 1;
2077 		msr_info->data = vcpu->arch.mcg_ext_ctl;
2078 		break;
2079 	case MSR_IA32_FEAT_CTL:
2080 		msr_info->data = vmx->msr_ia32_feature_control;
2081 		break;
2082 	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2083 		if (!msr_info->host_initiated &&
2084 		    !guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC))
2085 			return 1;
2086 		msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
2087 			[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
2088 		break;
2089 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2090 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
2091 			return 1;
2092 		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
2093 				    &msr_info->data))
2094 			return 1;
2095 #ifdef CONFIG_KVM_HYPERV
2096 		/*
2097 		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
2098 		 * instead of just ignoring the features, different Hyper-V
2099 		 * versions are either trying to use them and fail or do some
2100 		 * sanity checking and refuse to boot. Filter all unsupported
2101 		 * features out.
2102 		 */
2103 		if (!msr_info->host_initiated && guest_cpu_cap_has_evmcs(vcpu))
2104 			nested_evmcs_filter_control_msr(vcpu, msr_info->index,
2105 							&msr_info->data);
2106 #endif
2107 		break;
2108 	case MSR_IA32_RTIT_CTL:
2109 		if (!vmx_pt_mode_is_host_guest())
2110 			return 1;
2111 		msr_info->data = vmx->pt_desc.guest.ctl;
2112 		break;
2113 	case MSR_IA32_RTIT_STATUS:
2114 		if (!vmx_pt_mode_is_host_guest())
2115 			return 1;
2116 		msr_info->data = vmx->pt_desc.guest.status;
2117 		break;
2118 	case MSR_IA32_RTIT_CR3_MATCH:
2119 		if (!vmx_pt_mode_is_host_guest() ||
2120 			!intel_pt_validate_cap(vmx->pt_desc.caps,
2121 						PT_CAP_cr3_filtering))
2122 			return 1;
2123 		msr_info->data = vmx->pt_desc.guest.cr3_match;
2124 		break;
2125 	case MSR_IA32_RTIT_OUTPUT_BASE:
2126 		if (!vmx_pt_mode_is_host_guest() ||
2127 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
2128 					PT_CAP_topa_output) &&
2129 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
2130 					PT_CAP_single_range_output)))
2131 			return 1;
2132 		msr_info->data = vmx->pt_desc.guest.output_base;
2133 		break;
2134 	case MSR_IA32_RTIT_OUTPUT_MASK:
2135 		if (!vmx_pt_mode_is_host_guest() ||
2136 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
2137 					PT_CAP_topa_output) &&
2138 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
2139 					PT_CAP_single_range_output)))
2140 			return 1;
2141 		msr_info->data = vmx->pt_desc.guest.output_mask;
2142 		break;
2143 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2144 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2145 		if (!vmx_pt_mode_is_host_guest() ||
2146 		    (index >= 2 * vmx->pt_desc.num_address_ranges))
2147 			return 1;
2148 		if (index % 2)
2149 			msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2150 		else
2151 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2152 		break;
2153 	case MSR_IA32_DEBUGCTLMSR:
2154 		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
2155 		break;
2156 	default:
2157 	find_uret_msr:
2158 		msr = vmx_find_uret_msr(vmx, msr_info->index);
2159 		if (msr) {
2160 			msr_info->data = msr->data;
2161 			break;
2162 		}
2163 		return kvm_get_msr_common(vcpu, msr_info);
2164 	}
2165 
2166 	return 0;
2167 }
2168 
nested_vmx_truncate_sysenter_addr(struct kvm_vcpu * vcpu,u64 data)2169 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
2170 						    u64 data)
2171 {
2172 #ifdef CONFIG_X86_64
2173 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
2174 		return (u32)data;
2175 #endif
2176 	return (unsigned long)data;
2177 }
2178 
vmx_get_supported_debugctl(struct kvm_vcpu * vcpu,bool host_initiated)2179 static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
2180 {
2181 	u64 debugctl = 0;
2182 
2183 	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
2184 	    (host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2185 		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
2186 
2187 	if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
2188 	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
2189 		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
2190 
2191 	return debugctl;
2192 }
2193 
2194 /*
2195  * Writes msr value into the appropriate "register".
2196  * Returns 0 on success, non-0 otherwise.
2197  * Assumes vcpu_load() was already called.
2198  */
vmx_set_msr(struct kvm_vcpu * vcpu,struct msr_data * msr_info)2199 int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2200 {
2201 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2202 	struct vmx_uret_msr *msr;
2203 	int ret = 0;
2204 	u32 msr_index = msr_info->index;
2205 	u64 data = msr_info->data;
2206 	u32 index;
2207 
2208 	switch (msr_index) {
2209 	case MSR_EFER:
2210 		ret = kvm_set_msr_common(vcpu, msr_info);
2211 		break;
2212 #ifdef CONFIG_X86_64
2213 	case MSR_FS_BASE:
2214 		vmx_segment_cache_clear(vmx);
2215 		vmcs_writel(GUEST_FS_BASE, data);
2216 		break;
2217 	case MSR_GS_BASE:
2218 		vmx_segment_cache_clear(vmx);
2219 		vmcs_writel(GUEST_GS_BASE, data);
2220 		break;
2221 	case MSR_KERNEL_GS_BASE:
2222 		vmx_write_guest_kernel_gs_base(vmx, data);
2223 		break;
2224 	case MSR_IA32_XFD:
2225 		ret = kvm_set_msr_common(vcpu, msr_info);
2226 		/*
2227 		 * Always intercepting WRMSR could incur non-negligible
2228 		 * overhead given xfd might be changed frequently in
2229 		 * guest context switch. Disable write interception
2230 		 * upon the first write with a non-zero value (indicating
2231 		 * potential usage on dynamic xfeatures). Also update
2232 		 * exception bitmap to trap #NM for proper virtualization
2233 		 * of guest xfd_err.
2234 		 */
2235 		if (!ret && data) {
2236 			vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD,
2237 						      MSR_TYPE_RW);
2238 			vcpu->arch.xfd_no_write_intercept = true;
2239 			vmx_update_exception_bitmap(vcpu);
2240 		}
2241 		break;
2242 #endif
2243 	case MSR_IA32_SYSENTER_CS:
2244 		if (is_guest_mode(vcpu))
2245 			get_vmcs12(vcpu)->guest_sysenter_cs = data;
2246 		vmcs_write32(GUEST_SYSENTER_CS, data);
2247 		break;
2248 	case MSR_IA32_SYSENTER_EIP:
2249 		if (is_guest_mode(vcpu)) {
2250 			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2251 			get_vmcs12(vcpu)->guest_sysenter_eip = data;
2252 		}
2253 		vmcs_writel(GUEST_SYSENTER_EIP, data);
2254 		break;
2255 	case MSR_IA32_SYSENTER_ESP:
2256 		if (is_guest_mode(vcpu)) {
2257 			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2258 			get_vmcs12(vcpu)->guest_sysenter_esp = data;
2259 		}
2260 		vmcs_writel(GUEST_SYSENTER_ESP, data);
2261 		break;
2262 	case MSR_IA32_DEBUGCTLMSR: {
2263 		u64 invalid;
2264 
2265 		invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
2266 		if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
2267 			kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);
2268 			data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2269 			invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2270 		}
2271 
2272 		if (invalid)
2273 			return 1;
2274 
2275 		if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
2276 						VM_EXIT_SAVE_DEBUG_CONTROLS)
2277 			get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2278 
2279 		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
2280 		if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
2281 		    (data & DEBUGCTLMSR_LBR))
2282 			intel_pmu_create_guest_lbr_event(vcpu);
2283 		return 0;
2284 	}
2285 	case MSR_IA32_BNDCFGS:
2286 		if (!kvm_mpx_supported() ||
2287 		    (!msr_info->host_initiated &&
2288 		     !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX)))
2289 			return 1;
2290 		if (is_noncanonical_msr_address(data & PAGE_MASK, vcpu) ||
2291 		    (data & MSR_IA32_BNDCFGS_RSVD))
2292 			return 1;
2293 
2294 		if (is_guest_mode(vcpu) &&
2295 		    ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||
2296 		     (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))
2297 			get_vmcs12(vcpu)->guest_bndcfgs = data;
2298 
2299 		vmcs_write64(GUEST_BNDCFGS, data);
2300 		break;
2301 	case MSR_IA32_UMWAIT_CONTROL:
2302 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2303 			return 1;
2304 
2305 		/* The reserved bit 1 and non-32 bit [63:32] should be zero */
2306 		if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2307 			return 1;
2308 
2309 		vmx->msr_ia32_umwait_control = data;
2310 		break;
2311 	case MSR_IA32_SPEC_CTRL:
2312 		if (!msr_info->host_initiated &&
2313 		    !guest_has_spec_ctrl_msr(vcpu))
2314 			return 1;
2315 
2316 		if (kvm_spec_ctrl_test_value(data))
2317 			return 1;
2318 
2319 		vmx->spec_ctrl = data;
2320 		if (!data)
2321 			break;
2322 
2323 		/*
2324 		 * For non-nested:
2325 		 * When it's written (to non-zero) for the first time, pass
2326 		 * it through.
2327 		 *
2328 		 * For nested:
2329 		 * The handling of the MSR bitmap for L2 guests is done in
2330 		 * nested_vmx_prepare_msr_bitmap. We should not touch the
2331 		 * vmcs02.msr_bitmap here since it gets completely overwritten
2332 		 * in the merging. We update the vmcs01 here for L1 as well
2333 		 * since it will end up touching the MSR anyway now.
2334 		 */
2335 		vmx_disable_intercept_for_msr(vcpu,
2336 					      MSR_IA32_SPEC_CTRL,
2337 					      MSR_TYPE_RW);
2338 		break;
2339 	case MSR_IA32_TSX_CTRL:
2340 		if (!msr_info->host_initiated &&
2341 		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2342 			return 1;
2343 		if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2344 			return 1;
2345 		goto find_uret_msr;
2346 	case MSR_IA32_CR_PAT:
2347 		ret = kvm_set_msr_common(vcpu, msr_info);
2348 		if (ret)
2349 			break;
2350 
2351 		if (is_guest_mode(vcpu) &&
2352 		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2353 			get_vmcs12(vcpu)->guest_ia32_pat = data;
2354 
2355 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
2356 			vmcs_write64(GUEST_IA32_PAT, data);
2357 		break;
2358 	case MSR_IA32_MCG_EXT_CTL:
2359 		if ((!msr_info->host_initiated &&
2360 		     !(to_vmx(vcpu)->msr_ia32_feature_control &
2361 		       FEAT_CTL_LMCE_ENABLED)) ||
2362 		    (data & ~MCG_EXT_CTL_LMCE_EN))
2363 			return 1;
2364 		vcpu->arch.mcg_ext_ctl = data;
2365 		break;
2366 	case MSR_IA32_FEAT_CTL:
2367 		if (!is_vmx_feature_control_msr_valid(vmx, msr_info))
2368 			return 1;
2369 
2370 		vmx->msr_ia32_feature_control = data;
2371 		if (msr_info->host_initiated && data == 0)
2372 			vmx_leave_nested(vcpu);
2373 
2374 		/* SGX may be enabled/disabled by guest's firmware */
2375 		vmx_write_encls_bitmap(vcpu, NULL);
2376 		break;
2377 	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2378 		/*
2379 		 * On real hardware, the LE hash MSRs are writable before
2380 		 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
2381 		 * at which point SGX related bits in IA32_FEATURE_CONTROL
2382 		 * become writable.
2383 		 *
2384 		 * KVM does not emulate SGX activation for simplicity, so
2385 		 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
2386 		 * is unlocked.  This is technically not architectural
2387 		 * behavior, but it's close enough.
2388 		 */
2389 		if (!msr_info->host_initiated &&
2390 		    (!guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC) ||
2391 		    ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
2392 		    !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
2393 			return 1;
2394 		vmx->msr_ia32_sgxlepubkeyhash
2395 			[msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
2396 		break;
2397 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2398 		if (!msr_info->host_initiated)
2399 			return 1; /* they are read-only */
2400 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
2401 			return 1;
2402 		return vmx_set_vmx_msr(vcpu, msr_index, data);
2403 	case MSR_IA32_RTIT_CTL:
2404 		if (!vmx_pt_mode_is_host_guest() ||
2405 			vmx_rtit_ctl_check(vcpu, data) ||
2406 			vmx->nested.vmxon)
2407 			return 1;
2408 		vmcs_write64(GUEST_IA32_RTIT_CTL, data);
2409 		vmx->pt_desc.guest.ctl = data;
2410 		pt_update_intercept_for_msr(vcpu);
2411 		break;
2412 	case MSR_IA32_RTIT_STATUS:
2413 		if (!pt_can_write_msr(vmx))
2414 			return 1;
2415 		if (data & MSR_IA32_RTIT_STATUS_MASK)
2416 			return 1;
2417 		vmx->pt_desc.guest.status = data;
2418 		break;
2419 	case MSR_IA32_RTIT_CR3_MATCH:
2420 		if (!pt_can_write_msr(vmx))
2421 			return 1;
2422 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2423 					   PT_CAP_cr3_filtering))
2424 			return 1;
2425 		vmx->pt_desc.guest.cr3_match = data;
2426 		break;
2427 	case MSR_IA32_RTIT_OUTPUT_BASE:
2428 		if (!pt_can_write_msr(vmx))
2429 			return 1;
2430 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2431 					   PT_CAP_topa_output) &&
2432 		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2433 					   PT_CAP_single_range_output))
2434 			return 1;
2435 		if (!pt_output_base_valid(vcpu, data))
2436 			return 1;
2437 		vmx->pt_desc.guest.output_base = data;
2438 		break;
2439 	case MSR_IA32_RTIT_OUTPUT_MASK:
2440 		if (!pt_can_write_msr(vmx))
2441 			return 1;
2442 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2443 					   PT_CAP_topa_output) &&
2444 		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2445 					   PT_CAP_single_range_output))
2446 			return 1;
2447 		vmx->pt_desc.guest.output_mask = data;
2448 		break;
2449 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2450 		if (!pt_can_write_msr(vmx))
2451 			return 1;
2452 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2453 		if (index >= 2 * vmx->pt_desc.num_address_ranges)
2454 			return 1;
2455 		if (is_noncanonical_msr_address(data, vcpu))
2456 			return 1;
2457 		if (index % 2)
2458 			vmx->pt_desc.guest.addr_b[index / 2] = data;
2459 		else
2460 			vmx->pt_desc.guest.addr_a[index / 2] = data;
2461 		break;
2462 	case MSR_IA32_PERF_CAPABILITIES:
2463 		if (data & PMU_CAP_LBR_FMT) {
2464 			if ((data & PMU_CAP_LBR_FMT) !=
2465 			    (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
2466 				return 1;
2467 			if (!cpuid_model_is_consistent(vcpu))
2468 				return 1;
2469 		}
2470 		if (data & PERF_CAP_PEBS_FORMAT) {
2471 			if ((data & PERF_CAP_PEBS_MASK) !=
2472 			    (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
2473 				return 1;
2474 			if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DS))
2475 				return 1;
2476 			if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DTES64))
2477 				return 1;
2478 			if (!cpuid_model_is_consistent(vcpu))
2479 				return 1;
2480 		}
2481 		ret = kvm_set_msr_common(vcpu, msr_info);
2482 		break;
2483 
2484 	default:
2485 	find_uret_msr:
2486 		msr = vmx_find_uret_msr(vmx, msr_index);
2487 		if (msr)
2488 			ret = vmx_set_guest_uret_msr(vmx, msr, data);
2489 		else
2490 			ret = kvm_set_msr_common(vcpu, msr_info);
2491 	}
2492 
2493 	/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
2494 	if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
2495 		vmx_update_fb_clear_dis(vcpu, vmx);
2496 
2497 	return ret;
2498 }
2499 
vmx_cache_reg(struct kvm_vcpu * vcpu,enum kvm_reg reg)2500 void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2501 {
2502 	unsigned long guest_owned_bits;
2503 
2504 	kvm_register_mark_available(vcpu, reg);
2505 
2506 	switch (reg) {
2507 	case VCPU_REGS_RSP:
2508 		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2509 		break;
2510 	case VCPU_REGS_RIP:
2511 		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2512 		break;
2513 	case VCPU_EXREG_PDPTR:
2514 		if (enable_ept)
2515 			ept_save_pdptrs(vcpu);
2516 		break;
2517 	case VCPU_EXREG_CR0:
2518 		guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2519 
2520 		vcpu->arch.cr0 &= ~guest_owned_bits;
2521 		vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2522 		break;
2523 	case VCPU_EXREG_CR3:
2524 		/*
2525 		 * When intercepting CR3 loads, e.g. for shadowing paging, KVM's
2526 		 * CR3 is loaded into hardware, not the guest's CR3.
2527 		 */
2528 		if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
2529 			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2530 		break;
2531 	case VCPU_EXREG_CR4:
2532 		guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2533 
2534 		vcpu->arch.cr4 &= ~guest_owned_bits;
2535 		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2536 		break;
2537 	default:
2538 		KVM_BUG_ON(1, vcpu->kvm);
2539 		break;
2540 	}
2541 }
2542 
2543 /*
2544  * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
2545  * directly instead of going through cpu_has(), to ensure KVM is trapping
2546  * ENCLS whenever it's supported in hardware.  It does not matter whether
2547  * the host OS supports or has enabled SGX.
2548  */
cpu_has_sgx(void)2549 static bool cpu_has_sgx(void)
2550 {
2551 	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
2552 }
2553 
adjust_vmx_controls(u32 ctl_min,u32 ctl_opt,u32 msr,u32 * result)2554 static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
2555 {
2556 	u32 vmx_msr_low, vmx_msr_high;
2557 	u32 ctl = ctl_min | ctl_opt;
2558 
2559 	rdmsr(msr, vmx_msr_low, vmx_msr_high);
2560 
2561 	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2562 	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2563 
2564 	/* Ensure minimum (required) set of control bits are supported. */
2565 	if (ctl_min & ~ctl)
2566 		return -EIO;
2567 
2568 	*result = ctl;
2569 	return 0;
2570 }
2571 
adjust_vmx_controls64(u64 ctl_opt,u32 msr)2572 static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
2573 {
2574 	u64 allowed;
2575 
2576 	rdmsrl(msr, allowed);
2577 
2578 	return  ctl_opt & allowed;
2579 }
2580 
setup_vmcs_config(struct vmcs_config * vmcs_conf,struct vmx_capability * vmx_cap)2581 static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2582 			     struct vmx_capability *vmx_cap)
2583 {
2584 	u32 _pin_based_exec_control = 0;
2585 	u32 _cpu_based_exec_control = 0;
2586 	u32 _cpu_based_2nd_exec_control = 0;
2587 	u64 _cpu_based_3rd_exec_control = 0;
2588 	u32 _vmexit_control = 0;
2589 	u32 _vmentry_control = 0;
2590 	u64 basic_msr;
2591 	u64 misc_msr;
2592 	int i;
2593 
2594 	/*
2595 	 * LOAD/SAVE_DEBUG_CONTROLS are absent because both are mandatory.
2596 	 * SAVE_IA32_PAT and SAVE_IA32_EFER are absent because KVM always
2597 	 * intercepts writes to PAT and EFER, i.e. never enables those controls.
2598 	 */
2599 	struct {
2600 		u32 entry_control;
2601 		u32 exit_control;
2602 	} const vmcs_entry_exit_pairs[] = {
2603 		{ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,	VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL },
2604 		{ VM_ENTRY_LOAD_IA32_PAT,		VM_EXIT_LOAD_IA32_PAT },
2605 		{ VM_ENTRY_LOAD_IA32_EFER,		VM_EXIT_LOAD_IA32_EFER },
2606 		{ VM_ENTRY_LOAD_BNDCFGS,		VM_EXIT_CLEAR_BNDCFGS },
2607 		{ VM_ENTRY_LOAD_IA32_RTIT_CTL,		VM_EXIT_CLEAR_IA32_RTIT_CTL },
2608 	};
2609 
2610 	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
2611 
2612 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL,
2613 				KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL,
2614 				MSR_IA32_VMX_PROCBASED_CTLS,
2615 				&_cpu_based_exec_control))
2616 		return -EIO;
2617 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2618 		if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
2619 					KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
2620 					MSR_IA32_VMX_PROCBASED_CTLS2,
2621 					&_cpu_based_2nd_exec_control))
2622 			return -EIO;
2623 	}
2624 	if (!IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
2625 		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
2626 
2627 #ifndef CONFIG_X86_64
2628 	if (!(_cpu_based_2nd_exec_control &
2629 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2630 		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2631 #endif
2632 
2633 	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2634 		_cpu_based_2nd_exec_control &= ~(
2635 				SECONDARY_EXEC_APIC_REGISTER_VIRT |
2636 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2637 				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2638 
2639 	rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
2640 		&vmx_cap->ept, &vmx_cap->vpid);
2641 
2642 	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
2643 	    vmx_cap->ept) {
2644 		pr_warn_once("EPT CAP should not exist if not support "
2645 				"1-setting enable EPT VM-execution control\n");
2646 
2647 		if (error_on_inconsistent_vmcs_config)
2648 			return -EIO;
2649 
2650 		vmx_cap->ept = 0;
2651 		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
2652 	}
2653 	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
2654 	    vmx_cap->vpid) {
2655 		pr_warn_once("VPID CAP should not exist if not support "
2656 				"1-setting enable VPID VM-execution control\n");
2657 
2658 		if (error_on_inconsistent_vmcs_config)
2659 			return -EIO;
2660 
2661 		vmx_cap->vpid = 0;
2662 	}
2663 
2664 	if (!cpu_has_sgx())
2665 		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
2666 
2667 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
2668 		_cpu_based_3rd_exec_control =
2669 			adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL,
2670 					      MSR_IA32_VMX_PROCBASED_CTLS3);
2671 
2672 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS,
2673 				KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS,
2674 				MSR_IA32_VMX_EXIT_CTLS,
2675 				&_vmexit_control))
2676 		return -EIO;
2677 
2678 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL,
2679 				KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL,
2680 				MSR_IA32_VMX_PINBASED_CTLS,
2681 				&_pin_based_exec_control))
2682 		return -EIO;
2683 
2684 	if (cpu_has_broken_vmx_preemption_timer())
2685 		_pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2686 	if (!(_cpu_based_2nd_exec_control &
2687 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
2688 		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2689 
2690 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS,
2691 				KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS,
2692 				MSR_IA32_VMX_ENTRY_CTLS,
2693 				&_vmentry_control))
2694 		return -EIO;
2695 
2696 	for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {
2697 		u32 n_ctrl = vmcs_entry_exit_pairs[i].entry_control;
2698 		u32 x_ctrl = vmcs_entry_exit_pairs[i].exit_control;
2699 
2700 		if (!(_vmentry_control & n_ctrl) == !(_vmexit_control & x_ctrl))
2701 			continue;
2702 
2703 		pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n",
2704 			     _vmentry_control & n_ctrl, _vmexit_control & x_ctrl);
2705 
2706 		if (error_on_inconsistent_vmcs_config)
2707 			return -EIO;
2708 
2709 		_vmentry_control &= ~n_ctrl;
2710 		_vmexit_control &= ~x_ctrl;
2711 	}
2712 
2713 	/*
2714 	 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
2715 	 * can't be used due to an errata where VM Exit may incorrectly clear
2716 	 * IA32_PERF_GLOBAL_CTRL[34:32].  Workaround the errata by using the
2717 	 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2718 	 */
2719 	switch (boot_cpu_data.x86_vfm) {
2720 	case INTEL_NEHALEM_EP:	/* AAK155 */
2721 	case INTEL_NEHALEM:	/* AAP115 */
2722 	case INTEL_WESTMERE:	/* AAT100 */
2723 	case INTEL_WESTMERE_EP:	/* BC86,AAY89,BD102 */
2724 	case INTEL_NEHALEM_EX:	/* BA97 */
2725 		_vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
2726 		_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
2727 		pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2728 			     "does not work properly. Using workaround\n");
2729 		break;
2730 	default:
2731 		break;
2732 	}
2733 
2734 	rdmsrl(MSR_IA32_VMX_BASIC, basic_msr);
2735 
2736 	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2737 	if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE)
2738 		return -EIO;
2739 
2740 #ifdef CONFIG_X86_64
2741 	/*
2742 	 * KVM expects to be able to shove all legal physical addresses into
2743 	 * VMCS fields for 64-bit kernels, and per the SDM, "This bit is always
2744 	 * 0 for processors that support Intel 64 architecture".
2745 	 */
2746 	if (basic_msr & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
2747 		return -EIO;
2748 #endif
2749 
2750 	/* Require Write-Back (WB) memory type for VMCS accesses. */
2751 	if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB)
2752 		return -EIO;
2753 
2754 	rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
2755 
2756 	vmcs_conf->basic = basic_msr;
2757 	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2758 	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2759 	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2760 	vmcs_conf->cpu_based_3rd_exec_ctrl = _cpu_based_3rd_exec_control;
2761 	vmcs_conf->vmexit_ctrl         = _vmexit_control;
2762 	vmcs_conf->vmentry_ctrl        = _vmentry_control;
2763 	vmcs_conf->misc	= misc_msr;
2764 
2765 #if IS_ENABLED(CONFIG_HYPERV)
2766 	if (enlightened_vmcs)
2767 		evmcs_sanitize_exec_ctrls(vmcs_conf);
2768 #endif
2769 
2770 	return 0;
2771 }
2772 
__kvm_is_vmx_supported(void)2773 static bool __kvm_is_vmx_supported(void)
2774 {
2775 	int cpu = smp_processor_id();
2776 
2777 	if (!(cpuid_ecx(1) & feature_bit(VMX))) {
2778 		pr_err("VMX not supported by CPU %d\n", cpu);
2779 		return false;
2780 	}
2781 
2782 	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2783 	    !this_cpu_has(X86_FEATURE_VMX)) {
2784 		pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
2785 		return false;
2786 	}
2787 
2788 	return true;
2789 }
2790 
kvm_is_vmx_supported(void)2791 static bool kvm_is_vmx_supported(void)
2792 {
2793 	bool supported;
2794 
2795 	migrate_disable();
2796 	supported = __kvm_is_vmx_supported();
2797 	migrate_enable();
2798 
2799 	return supported;
2800 }
2801 
vmx_check_processor_compat(void)2802 int vmx_check_processor_compat(void)
2803 {
2804 	int cpu = raw_smp_processor_id();
2805 	struct vmcs_config vmcs_conf;
2806 	struct vmx_capability vmx_cap;
2807 
2808 	if (!__kvm_is_vmx_supported())
2809 		return -EIO;
2810 
2811 	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
2812 		pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
2813 		return -EIO;
2814 	}
2815 	if (nested)
2816 		nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept);
2817 	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config))) {
2818 		pr_err("Inconsistent VMCS config on CPU %d\n", cpu);
2819 		return -EIO;
2820 	}
2821 	return 0;
2822 }
2823 
kvm_cpu_vmxon(u64 vmxon_pointer)2824 static int kvm_cpu_vmxon(u64 vmxon_pointer)
2825 {
2826 	u64 msr;
2827 
2828 	cr4_set_bits(X86_CR4_VMXE);
2829 
2830 	asm goto("1: vmxon %[vmxon_pointer]\n\t"
2831 			  _ASM_EXTABLE(1b, %l[fault])
2832 			  : : [vmxon_pointer] "m"(vmxon_pointer)
2833 			  : : fault);
2834 	return 0;
2835 
2836 fault:
2837 	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
2838 		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
2839 	cr4_clear_bits(X86_CR4_VMXE);
2840 
2841 	return -EFAULT;
2842 }
2843 
vmx_enable_virtualization_cpu(void)2844 int vmx_enable_virtualization_cpu(void)
2845 {
2846 	int cpu = raw_smp_processor_id();
2847 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2848 	int r;
2849 
2850 	if (cr4_read_shadow() & X86_CR4_VMXE)
2851 		return -EBUSY;
2852 
2853 	/*
2854 	 * This can happen if we hot-added a CPU but failed to allocate
2855 	 * VP assist page for it.
2856 	 */
2857 	if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
2858 		return -EFAULT;
2859 
2860 	intel_pt_handle_vmx(1);
2861 
2862 	r = kvm_cpu_vmxon(phys_addr);
2863 	if (r) {
2864 		intel_pt_handle_vmx(0);
2865 		return r;
2866 	}
2867 
2868 	return 0;
2869 }
2870 
vmclear_local_loaded_vmcss(void)2871 static void vmclear_local_loaded_vmcss(void)
2872 {
2873 	int cpu = raw_smp_processor_id();
2874 	struct loaded_vmcs *v, *n;
2875 
2876 	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2877 				 loaded_vmcss_on_cpu_link)
2878 		__loaded_vmcs_clear(v);
2879 }
2880 
vmx_disable_virtualization_cpu(void)2881 void vmx_disable_virtualization_cpu(void)
2882 {
2883 	vmclear_local_loaded_vmcss();
2884 
2885 	if (kvm_cpu_vmxoff())
2886 		kvm_spurious_fault();
2887 
2888 	hv_reset_evmcs();
2889 
2890 	intel_pt_handle_vmx(0);
2891 }
2892 
alloc_vmcs_cpu(bool shadow,int cpu,gfp_t flags)2893 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
2894 {
2895 	int node = cpu_to_node(cpu);
2896 	struct page *pages;
2897 	struct vmcs *vmcs;
2898 
2899 	pages = __alloc_pages_node(node, flags, 0);
2900 	if (!pages)
2901 		return NULL;
2902 	vmcs = page_address(pages);
2903 	memset(vmcs, 0, vmx_basic_vmcs_size(vmcs_config.basic));
2904 
2905 	/* KVM supports Enlightened VMCS v1 only */
2906 	if (kvm_is_using_evmcs())
2907 		vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
2908 	else
2909 		vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic);
2910 
2911 	if (shadow)
2912 		vmcs->hdr.shadow_vmcs = 1;
2913 	return vmcs;
2914 }
2915 
free_vmcs(struct vmcs * vmcs)2916 void free_vmcs(struct vmcs *vmcs)
2917 {
2918 	free_page((unsigned long)vmcs);
2919 }
2920 
2921 /*
2922  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2923  */
free_loaded_vmcs(struct loaded_vmcs * loaded_vmcs)2924 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2925 {
2926 	if (!loaded_vmcs->vmcs)
2927 		return;
2928 	loaded_vmcs_clear(loaded_vmcs);
2929 	free_vmcs(loaded_vmcs->vmcs);
2930 	loaded_vmcs->vmcs = NULL;
2931 	if (loaded_vmcs->msr_bitmap)
2932 		free_page((unsigned long)loaded_vmcs->msr_bitmap);
2933 	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
2934 }
2935 
alloc_loaded_vmcs(struct loaded_vmcs * loaded_vmcs)2936 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2937 {
2938 	loaded_vmcs->vmcs = alloc_vmcs(false);
2939 	if (!loaded_vmcs->vmcs)
2940 		return -ENOMEM;
2941 
2942 	vmcs_clear(loaded_vmcs->vmcs);
2943 
2944 	loaded_vmcs->shadow_vmcs = NULL;
2945 	loaded_vmcs->hv_timer_soft_disabled = false;
2946 	loaded_vmcs->cpu = -1;
2947 	loaded_vmcs->launched = 0;
2948 
2949 	if (cpu_has_vmx_msr_bitmap()) {
2950 		loaded_vmcs->msr_bitmap = (unsigned long *)
2951 				__get_free_page(GFP_KERNEL_ACCOUNT);
2952 		if (!loaded_vmcs->msr_bitmap)
2953 			goto out_vmcs;
2954 		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
2955 	}
2956 
2957 	memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
2958 	memset(&loaded_vmcs->controls_shadow, 0,
2959 		sizeof(struct vmcs_controls_shadow));
2960 
2961 	return 0;
2962 
2963 out_vmcs:
2964 	free_loaded_vmcs(loaded_vmcs);
2965 	return -ENOMEM;
2966 }
2967 
free_kvm_area(void)2968 static void free_kvm_area(void)
2969 {
2970 	int cpu;
2971 
2972 	for_each_possible_cpu(cpu) {
2973 		free_vmcs(per_cpu(vmxarea, cpu));
2974 		per_cpu(vmxarea, cpu) = NULL;
2975 	}
2976 }
2977 
alloc_kvm_area(void)2978 static __init int alloc_kvm_area(void)
2979 {
2980 	int cpu;
2981 
2982 	for_each_possible_cpu(cpu) {
2983 		struct vmcs *vmcs;
2984 
2985 		vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
2986 		if (!vmcs) {
2987 			free_kvm_area();
2988 			return -ENOMEM;
2989 		}
2990 
2991 		/*
2992 		 * When eVMCS is enabled, alloc_vmcs_cpu() sets
2993 		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
2994 		 * revision_id reported by MSR_IA32_VMX_BASIC.
2995 		 *
2996 		 * However, even though not explicitly documented by
2997 		 * TLFS, VMXArea passed as VMXON argument should
2998 		 * still be marked with revision_id reported by
2999 		 * physical CPU.
3000 		 */
3001 		if (kvm_is_using_evmcs())
3002 			vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic);
3003 
3004 		per_cpu(vmxarea, cpu) = vmcs;
3005 	}
3006 	return 0;
3007 }
3008 
fix_pmode_seg(struct kvm_vcpu * vcpu,int seg,struct kvm_segment * save)3009 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3010 		struct kvm_segment *save)
3011 {
3012 	if (!emulate_invalid_guest_state) {
3013 		/*
3014 		 * CS and SS RPL should be equal during guest entry according
3015 		 * to VMX spec, but in reality it is not always so. Since vcpu
3016 		 * is in the middle of the transition from real mode to
3017 		 * protected mode it is safe to assume that RPL 0 is a good
3018 		 * default value.
3019 		 */
3020 		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3021 			save->selector &= ~SEGMENT_RPL_MASK;
3022 		save->dpl = save->selector & SEGMENT_RPL_MASK;
3023 		save->s = 1;
3024 	}
3025 	__vmx_set_segment(vcpu, save, seg);
3026 }
3027 
enter_pmode(struct kvm_vcpu * vcpu)3028 static void enter_pmode(struct kvm_vcpu *vcpu)
3029 {
3030 	unsigned long flags;
3031 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3032 
3033 	/*
3034 	 * Update real mode segment cache. It may be not up-to-date if segment
3035 	 * register was written while vcpu was in a guest mode.
3036 	 */
3037 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3038 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3039 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3040 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3041 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3042 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3043 
3044 	vmx->rmode.vm86_active = 0;
3045 
3046 	__vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3047 
3048 	flags = vmcs_readl(GUEST_RFLAGS);
3049 	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3050 	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3051 	vmcs_writel(GUEST_RFLAGS, flags);
3052 
3053 	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3054 			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3055 
3056 	vmx_update_exception_bitmap(vcpu);
3057 
3058 	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3059 	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3060 	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3061 	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3062 	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3063 	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3064 }
3065 
fix_rmode_seg(int seg,struct kvm_segment * save)3066 static void fix_rmode_seg(int seg, struct kvm_segment *save)
3067 {
3068 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3069 	struct kvm_segment var = *save;
3070 
3071 	var.dpl = 0x3;
3072 	if (seg == VCPU_SREG_CS)
3073 		var.type = 0x3;
3074 
3075 	if (!emulate_invalid_guest_state) {
3076 		var.selector = var.base >> 4;
3077 		var.base = var.base & 0xffff0;
3078 		var.limit = 0xffff;
3079 		var.g = 0;
3080 		var.db = 0;
3081 		var.present = 1;
3082 		var.s = 1;
3083 		var.l = 0;
3084 		var.unusable = 0;
3085 		var.type = 0x3;
3086 		var.avl = 0;
3087 		if (save->base & 0xf)
3088 			pr_warn_once("segment base is not paragraph aligned "
3089 				     "when entering protected mode (seg=%d)", seg);
3090 	}
3091 
3092 	vmcs_write16(sf->selector, var.selector);
3093 	vmcs_writel(sf->base, var.base);
3094 	vmcs_write32(sf->limit, var.limit);
3095 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3096 }
3097 
enter_rmode(struct kvm_vcpu * vcpu)3098 static void enter_rmode(struct kvm_vcpu *vcpu)
3099 {
3100 	unsigned long flags;
3101 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3102 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
3103 
3104 	/*
3105 	 * KVM should never use VM86 to virtualize Real Mode when L2 is active,
3106 	 * as using VM86 is unnecessary if unrestricted guest is enabled, and
3107 	 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
3108 	 * should VM-Fail and KVM should reject userspace attempts to stuff
3109 	 * CR0.PG=0 when L2 is active.
3110 	 */
3111 	WARN_ON_ONCE(is_guest_mode(vcpu));
3112 
3113 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3114 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3115 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3116 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3117 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3118 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3119 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3120 
3121 	vmx->rmode.vm86_active = 1;
3122 
3123 	vmx_segment_cache_clear(vmx);
3124 
3125 	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
3126 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3127 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3128 
3129 	flags = vmcs_readl(GUEST_RFLAGS);
3130 	vmx->rmode.save_rflags = flags;
3131 
3132 	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3133 
3134 	vmcs_writel(GUEST_RFLAGS, flags);
3135 	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3136 	vmx_update_exception_bitmap(vcpu);
3137 
3138 	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3139 	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3140 	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3141 	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3142 	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3143 	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3144 }
3145 
vmx_set_efer(struct kvm_vcpu * vcpu,u64 efer)3146 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3147 {
3148 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3149 
3150 	/* Nothing to do if hardware doesn't support EFER. */
3151 	if (!vmx_find_uret_msr(vmx, MSR_EFER))
3152 		return 0;
3153 
3154 	vcpu->arch.efer = efer;
3155 #ifdef CONFIG_X86_64
3156 	if (efer & EFER_LMA)
3157 		vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3158 	else
3159 		vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3160 #else
3161 	if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3162 		return 1;
3163 #endif
3164 
3165 	vmx_setup_uret_msrs(vmx);
3166 	return 0;
3167 }
3168 
3169 #ifdef CONFIG_X86_64
3170 
enter_lmode(struct kvm_vcpu * vcpu)3171 static void enter_lmode(struct kvm_vcpu *vcpu)
3172 {
3173 	u32 guest_tr_ar;
3174 
3175 	vmx_segment_cache_clear(to_vmx(vcpu));
3176 
3177 	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3178 	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
3179 		pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3180 				     __func__);
3181 		vmcs_write32(GUEST_TR_AR_BYTES,
3182 			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
3183 			     | VMX_AR_TYPE_BUSY_64_TSS);
3184 	}
3185 	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3186 }
3187 
exit_lmode(struct kvm_vcpu * vcpu)3188 static void exit_lmode(struct kvm_vcpu *vcpu)
3189 {
3190 	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3191 }
3192 
3193 #endif
3194 
vmx_flush_tlb_all(struct kvm_vcpu * vcpu)3195 void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
3196 {
3197 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3198 
3199 	/*
3200 	 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
3201 	 * the CPU is not required to invalidate guest-physical mappings on
3202 	 * VM-Entry, even if VPID is disabled.  Guest-physical mappings are
3203 	 * associated with the root EPT structure and not any particular VPID
3204 	 * (INVVPID also isn't required to invalidate guest-physical mappings).
3205 	 */
3206 	if (enable_ept) {
3207 		ept_sync_global();
3208 	} else if (enable_vpid) {
3209 		if (cpu_has_vmx_invvpid_global()) {
3210 			vpid_sync_vcpu_global();
3211 		} else {
3212 			vpid_sync_vcpu_single(vmx->vpid);
3213 			vpid_sync_vcpu_single(vmx->nested.vpid02);
3214 		}
3215 	}
3216 }
3217 
vmx_get_current_vpid(struct kvm_vcpu * vcpu)3218 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
3219 {
3220 	if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
3221 		return nested_get_vpid02(vcpu);
3222 	return to_vmx(vcpu)->vpid;
3223 }
3224 
vmx_flush_tlb_current(struct kvm_vcpu * vcpu)3225 void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
3226 {
3227 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3228 	u64 root_hpa = mmu->root.hpa;
3229 
3230 	/* No flush required if the current context is invalid. */
3231 	if (!VALID_PAGE(root_hpa))
3232 		return;
3233 
3234 	if (enable_ept)
3235 		ept_sync_context(construct_eptp(vcpu, root_hpa,
3236 						mmu->root_role.level));
3237 	else
3238 		vpid_sync_context(vmx_get_current_vpid(vcpu));
3239 }
3240 
vmx_flush_tlb_gva(struct kvm_vcpu * vcpu,gva_t addr)3241 void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
3242 {
3243 	/*
3244 	 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
3245 	 * vmx_flush_tlb_guest() for an explanation of why this is ok.
3246 	 */
3247 	vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
3248 }
3249 
vmx_flush_tlb_guest(struct kvm_vcpu * vcpu)3250 void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
3251 {
3252 	/*
3253 	 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
3254 	 * vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit are
3255 	 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
3256 	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
3257 	 * i.e. no explicit INVVPID is necessary.
3258 	 */
3259 	vpid_sync_context(vmx_get_current_vpid(vcpu));
3260 }
3261 
vmx_ept_load_pdptrs(struct kvm_vcpu * vcpu)3262 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
3263 {
3264 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3265 
3266 	if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3267 		return;
3268 
3269 	if (is_pae_paging(vcpu)) {
3270 		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3271 		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3272 		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3273 		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3274 	}
3275 }
3276 
ept_save_pdptrs(struct kvm_vcpu * vcpu)3277 void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3278 {
3279 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3280 
3281 	if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3282 		return;
3283 
3284 	mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3285 	mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3286 	mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3287 	mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3288 
3289 	kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
3290 }
3291 
3292 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
3293 			  CPU_BASED_CR3_STORE_EXITING)
3294 
vmx_is_valid_cr0(struct kvm_vcpu * vcpu,unsigned long cr0)3295 bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3296 {
3297 	if (is_guest_mode(vcpu))
3298 		return nested_guest_cr0_valid(vcpu, cr0);
3299 
3300 	if (to_vmx(vcpu)->nested.vmxon)
3301 		return nested_host_cr0_valid(vcpu, cr0);
3302 
3303 	return true;
3304 }
3305 
vmx_set_cr0(struct kvm_vcpu * vcpu,unsigned long cr0)3306 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3307 {
3308 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3309 	unsigned long hw_cr0, old_cr0_pg;
3310 	u32 tmp;
3311 
3312 	old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
3313 
3314 	hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
3315 	if (enable_unrestricted_guest)
3316 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3317 	else {
3318 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3319 		if (!enable_ept)
3320 			hw_cr0 |= X86_CR0_WP;
3321 
3322 		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3323 			enter_pmode(vcpu);
3324 
3325 		if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3326 			enter_rmode(vcpu);
3327 	}
3328 
3329 	vmcs_writel(CR0_READ_SHADOW, cr0);
3330 	vmcs_writel(GUEST_CR0, hw_cr0);
3331 	vcpu->arch.cr0 = cr0;
3332 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3333 
3334 #ifdef CONFIG_X86_64
3335 	if (vcpu->arch.efer & EFER_LME) {
3336 		if (!old_cr0_pg && (cr0 & X86_CR0_PG))
3337 			enter_lmode(vcpu);
3338 		else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
3339 			exit_lmode(vcpu);
3340 	}
3341 #endif
3342 
3343 	if (enable_ept && !enable_unrestricted_guest) {
3344 		/*
3345 		 * Ensure KVM has an up-to-date snapshot of the guest's CR3.  If
3346 		 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
3347 		 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
3348 		 * KVM's CR3 is installed.
3349 		 */
3350 		if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3351 			vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3352 
3353 		/*
3354 		 * When running with EPT but not unrestricted guest, KVM must
3355 		 * intercept CR3 accesses when paging is _disabled_.  This is
3356 		 * necessary because restricted guests can't actually run with
3357 		 * paging disabled, and so KVM stuffs its own CR3 in order to
3358 		 * run the guest when identity mapped page tables.
3359 		 *
3360 		 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
3361 		 * update, it may be stale with respect to CR3 interception,
3362 		 * e.g. after nested VM-Enter.
3363 		 *
3364 		 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
3365 		 * stores to forward them to L1, even if KVM does not need to
3366 		 * intercept them to preserve its identity mapped page tables.
3367 		 */
3368 		if (!(cr0 & X86_CR0_PG)) {
3369 			exec_controls_setbit(vmx, CR3_EXITING_BITS);
3370 		} else if (!is_guest_mode(vcpu)) {
3371 			exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3372 		} else {
3373 			tmp = exec_controls_get(vmx);
3374 			tmp &= ~CR3_EXITING_BITS;
3375 			tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
3376 			exec_controls_set(vmx, tmp);
3377 		}
3378 
3379 		/* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3380 		if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
3381 			vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3382 
3383 		/*
3384 		 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3385 		 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3386 		 */
3387 		if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
3388 			kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
3389 	}
3390 
3391 	/* depends on vcpu->arch.cr0 to be set to a new value */
3392 	vmx->emulation_required = vmx_emulation_required(vcpu);
3393 }
3394 
vmx_get_max_ept_level(void)3395 static int vmx_get_max_ept_level(void)
3396 {
3397 	if (cpu_has_vmx_ept_5levels())
3398 		return 5;
3399 	return 4;
3400 }
3401 
construct_eptp(struct kvm_vcpu * vcpu,hpa_t root_hpa,int root_level)3402 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3403 {
3404 	u64 eptp = VMX_EPTP_MT_WB;
3405 
3406 	eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3407 
3408 	if (enable_ept_ad_bits &&
3409 	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
3410 		eptp |= VMX_EPTP_AD_ENABLE_BIT;
3411 	eptp |= root_hpa;
3412 
3413 	return eptp;
3414 }
3415 
vmx_load_mmu_pgd(struct kvm_vcpu * vcpu,hpa_t root_hpa,int root_level)3416 void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3417 {
3418 	struct kvm *kvm = vcpu->kvm;
3419 	bool update_guest_cr3 = true;
3420 	unsigned long guest_cr3;
3421 	u64 eptp;
3422 
3423 	if (enable_ept) {
3424 		eptp = construct_eptp(vcpu, root_hpa, root_level);
3425 		vmcs_write64(EPT_POINTER, eptp);
3426 
3427 		hv_track_root_tdp(vcpu, root_hpa);
3428 
3429 		if (!enable_unrestricted_guest && !is_paging(vcpu))
3430 			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3431 		else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
3432 			guest_cr3 = vcpu->arch.cr3;
3433 		else /* vmcs.GUEST_CR3 is already up-to-date. */
3434 			update_guest_cr3 = false;
3435 		vmx_ept_load_pdptrs(vcpu);
3436 	} else {
3437 		guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
3438 			    kvm_get_active_cr3_lam_bits(vcpu);
3439 	}
3440 
3441 	if (update_guest_cr3)
3442 		vmcs_writel(GUEST_CR3, guest_cr3);
3443 }
3444 
vmx_is_valid_cr4(struct kvm_vcpu * vcpu,unsigned long cr4)3445 bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3446 {
3447 	/*
3448 	 * We operate under the default treatment of SMM, so VMX cannot be
3449 	 * enabled under SMM.  Note, whether or not VMXE is allowed at all,
3450 	 * i.e. is a reserved bit, is handled by common x86 code.
3451 	 */
3452 	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
3453 		return false;
3454 
3455 	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3456 		return false;
3457 
3458 	return true;
3459 }
3460 
vmx_set_cr4(struct kvm_vcpu * vcpu,unsigned long cr4)3461 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3462 {
3463 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
3464 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3465 	unsigned long hw_cr4;
3466 
3467 	/*
3468 	 * Pass through host's Machine Check Enable value to hw_cr4, which
3469 	 * is in force while we are in guest mode.  Do not let guests control
3470 	 * this bit, even if host CR4.MCE == 0.
3471 	 */
3472 	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3473 	if (enable_unrestricted_guest)
3474 		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
3475 	else if (vmx->rmode.vm86_active)
3476 		hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
3477 	else
3478 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
3479 
3480 	if (vmx_umip_emulated()) {
3481 		if (cr4 & X86_CR4_UMIP) {
3482 			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3483 			hw_cr4 &= ~X86_CR4_UMIP;
3484 		} else if (!is_guest_mode(vcpu) ||
3485 			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3486 			secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3487 		}
3488 	}
3489 
3490 	vcpu->arch.cr4 = cr4;
3491 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3492 
3493 	if (!enable_unrestricted_guest) {
3494 		if (enable_ept) {
3495 			if (!is_paging(vcpu)) {
3496 				hw_cr4 &= ~X86_CR4_PAE;
3497 				hw_cr4 |= X86_CR4_PSE;
3498 			} else if (!(cr4 & X86_CR4_PAE)) {
3499 				hw_cr4 &= ~X86_CR4_PAE;
3500 			}
3501 		}
3502 
3503 		/*
3504 		 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
3505 		 * hardware.  To emulate this behavior, SMEP/SMAP/PKU needs
3506 		 * to be manually disabled when guest switches to non-paging
3507 		 * mode.
3508 		 *
3509 		 * If !enable_unrestricted_guest, the CPU is always running
3510 		 * with CR0.PG=1 and CR4 needs to be modified.
3511 		 * If enable_unrestricted_guest, the CPU automatically
3512 		 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3513 		 */
3514 		if (!is_paging(vcpu))
3515 			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3516 	}
3517 
3518 	vmcs_writel(CR4_READ_SHADOW, cr4);
3519 	vmcs_writel(GUEST_CR4, hw_cr4);
3520 
3521 	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
3522 		kvm_update_cpuid_runtime(vcpu);
3523 }
3524 
vmx_get_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)3525 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3526 {
3527 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3528 	u32 ar;
3529 
3530 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3531 		*var = vmx->rmode.segs[seg];
3532 		if (seg == VCPU_SREG_TR
3533 		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3534 			return;
3535 		var->base = vmx_read_guest_seg_base(vmx, seg);
3536 		var->selector = vmx_read_guest_seg_selector(vmx, seg);
3537 		return;
3538 	}
3539 	var->base = vmx_read_guest_seg_base(vmx, seg);
3540 	var->limit = vmx_read_guest_seg_limit(vmx, seg);
3541 	var->selector = vmx_read_guest_seg_selector(vmx, seg);
3542 	ar = vmx_read_guest_seg_ar(vmx, seg);
3543 	var->unusable = (ar >> 16) & 1;
3544 	var->type = ar & 15;
3545 	var->s = (ar >> 4) & 1;
3546 	var->dpl = (ar >> 5) & 3;
3547 	/*
3548 	 * Some userspaces do not preserve unusable property. Since usable
3549 	 * segment has to be present according to VMX spec we can use present
3550 	 * property to amend userspace bug by making unusable segment always
3551 	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
3552 	 * segment as unusable.
3553 	 */
3554 	var->present = !var->unusable;
3555 	var->avl = (ar >> 12) & 1;
3556 	var->l = (ar >> 13) & 1;
3557 	var->db = (ar >> 14) & 1;
3558 	var->g = (ar >> 15) & 1;
3559 }
3560 
vmx_get_segment_base(struct kvm_vcpu * vcpu,int seg)3561 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3562 {
3563 	struct kvm_segment s;
3564 
3565 	if (to_vmx(vcpu)->rmode.vm86_active) {
3566 		vmx_get_segment(vcpu, &s, seg);
3567 		return s.base;
3568 	}
3569 	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3570 }
3571 
__vmx_get_cpl(struct kvm_vcpu * vcpu,bool no_cache)3572 static int __vmx_get_cpl(struct kvm_vcpu *vcpu, bool no_cache)
3573 {
3574 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3575 	int ar;
3576 
3577 	if (unlikely(vmx->rmode.vm86_active))
3578 		return 0;
3579 
3580 	if (no_cache)
3581 		ar = vmcs_read32(GUEST_SS_AR_BYTES);
3582 	else
3583 		ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3584 	return VMX_AR_DPL(ar);
3585 }
3586 
vmx_get_cpl(struct kvm_vcpu * vcpu)3587 int vmx_get_cpl(struct kvm_vcpu *vcpu)
3588 {
3589 	return __vmx_get_cpl(vcpu, false);
3590 }
3591 
vmx_get_cpl_no_cache(struct kvm_vcpu * vcpu)3592 int vmx_get_cpl_no_cache(struct kvm_vcpu *vcpu)
3593 {
3594 	return __vmx_get_cpl(vcpu, true);
3595 }
3596 
vmx_segment_access_rights(struct kvm_segment * var)3597 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3598 {
3599 	u32 ar;
3600 
3601 	ar = var->type & 15;
3602 	ar |= (var->s & 1) << 4;
3603 	ar |= (var->dpl & 3) << 5;
3604 	ar |= (var->present & 1) << 7;
3605 	ar |= (var->avl & 1) << 12;
3606 	ar |= (var->l & 1) << 13;
3607 	ar |= (var->db & 1) << 14;
3608 	ar |= (var->g & 1) << 15;
3609 	ar |= (var->unusable || !var->present) << 16;
3610 
3611 	return ar;
3612 }
3613 
__vmx_set_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)3614 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3615 {
3616 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3617 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3618 
3619 	vmx_segment_cache_clear(vmx);
3620 
3621 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3622 		vmx->rmode.segs[seg] = *var;
3623 		if (seg == VCPU_SREG_TR)
3624 			vmcs_write16(sf->selector, var->selector);
3625 		else if (var->s)
3626 			fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3627 		return;
3628 	}
3629 
3630 	vmcs_writel(sf->base, var->base);
3631 	vmcs_write32(sf->limit, var->limit);
3632 	vmcs_write16(sf->selector, var->selector);
3633 
3634 	/*
3635 	 *   Fix the "Accessed" bit in AR field of segment registers for older
3636 	 * qemu binaries.
3637 	 *   IA32 arch specifies that at the time of processor reset the
3638 	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
3639 	 * is setting it to 0 in the userland code. This causes invalid guest
3640 	 * state vmexit when "unrestricted guest" mode is turned on.
3641 	 *    Fix for this setup issue in cpu_reset is being pushed in the qemu
3642 	 * tree. Newer qemu binaries with that qemu fix would not need this
3643 	 * kvm hack.
3644 	 */
3645 	if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3646 		var->type |= 0x1; /* Accessed */
3647 
3648 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3649 }
3650 
vmx_set_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)3651 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3652 {
3653 	__vmx_set_segment(vcpu, var, seg);
3654 
3655 	to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
3656 }
3657 
vmx_get_cs_db_l_bits(struct kvm_vcpu * vcpu,int * db,int * l)3658 void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3659 {
3660 	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3661 
3662 	*db = (ar >> 14) & 1;
3663 	*l = (ar >> 13) & 1;
3664 }
3665 
vmx_get_idt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3666 void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3667 {
3668 	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3669 	dt->address = vmcs_readl(GUEST_IDTR_BASE);
3670 }
3671 
vmx_set_idt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3672 void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3673 {
3674 	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3675 	vmcs_writel(GUEST_IDTR_BASE, dt->address);
3676 }
3677 
vmx_get_gdt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3678 void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3679 {
3680 	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3681 	dt->address = vmcs_readl(GUEST_GDTR_BASE);
3682 }
3683 
vmx_set_gdt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3684 void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3685 {
3686 	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3687 	vmcs_writel(GUEST_GDTR_BASE, dt->address);
3688 }
3689 
rmode_segment_valid(struct kvm_vcpu * vcpu,int seg)3690 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3691 {
3692 	struct kvm_segment var;
3693 	u32 ar;
3694 
3695 	vmx_get_segment(vcpu, &var, seg);
3696 	var.dpl = 0x3;
3697 	if (seg == VCPU_SREG_CS)
3698 		var.type = 0x3;
3699 	ar = vmx_segment_access_rights(&var);
3700 
3701 	if (var.base != (var.selector << 4))
3702 		return false;
3703 	if (var.limit != 0xffff)
3704 		return false;
3705 	if (ar != 0xf3)
3706 		return false;
3707 
3708 	return true;
3709 }
3710 
code_segment_valid(struct kvm_vcpu * vcpu)3711 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3712 {
3713 	struct kvm_segment cs;
3714 	unsigned int cs_rpl;
3715 
3716 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3717 	cs_rpl = cs.selector & SEGMENT_RPL_MASK;
3718 
3719 	if (cs.unusable)
3720 		return false;
3721 	if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
3722 		return false;
3723 	if (!cs.s)
3724 		return false;
3725 	if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
3726 		if (cs.dpl > cs_rpl)
3727 			return false;
3728 	} else {
3729 		if (cs.dpl != cs_rpl)
3730 			return false;
3731 	}
3732 	if (!cs.present)
3733 		return false;
3734 
3735 	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3736 	return true;
3737 }
3738 
stack_segment_valid(struct kvm_vcpu * vcpu)3739 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3740 {
3741 	struct kvm_segment ss;
3742 	unsigned int ss_rpl;
3743 
3744 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3745 	ss_rpl = ss.selector & SEGMENT_RPL_MASK;
3746 
3747 	if (ss.unusable)
3748 		return true;
3749 	if (ss.type != 3 && ss.type != 7)
3750 		return false;
3751 	if (!ss.s)
3752 		return false;
3753 	if (ss.dpl != ss_rpl) /* DPL != RPL */
3754 		return false;
3755 	if (!ss.present)
3756 		return false;
3757 
3758 	return true;
3759 }
3760 
data_segment_valid(struct kvm_vcpu * vcpu,int seg)3761 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3762 {
3763 	struct kvm_segment var;
3764 	unsigned int rpl;
3765 
3766 	vmx_get_segment(vcpu, &var, seg);
3767 	rpl = var.selector & SEGMENT_RPL_MASK;
3768 
3769 	if (var.unusable)
3770 		return true;
3771 	if (!var.s)
3772 		return false;
3773 	if (!var.present)
3774 		return false;
3775 	if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
3776 		if (var.dpl < rpl) /* DPL < RPL */
3777 			return false;
3778 	}
3779 
3780 	/* TODO: Add other members to kvm_segment_field to allow checking for other access
3781 	 * rights flags
3782 	 */
3783 	return true;
3784 }
3785 
tr_valid(struct kvm_vcpu * vcpu)3786 static bool tr_valid(struct kvm_vcpu *vcpu)
3787 {
3788 	struct kvm_segment tr;
3789 
3790 	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3791 
3792 	if (tr.unusable)
3793 		return false;
3794 	if (tr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3795 		return false;
3796 	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3797 		return false;
3798 	if (!tr.present)
3799 		return false;
3800 
3801 	return true;
3802 }
3803 
ldtr_valid(struct kvm_vcpu * vcpu)3804 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3805 {
3806 	struct kvm_segment ldtr;
3807 
3808 	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3809 
3810 	if (ldtr.unusable)
3811 		return true;
3812 	if (ldtr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3813 		return false;
3814 	if (ldtr.type != 2)
3815 		return false;
3816 	if (!ldtr.present)
3817 		return false;
3818 
3819 	return true;
3820 }
3821 
cs_ss_rpl_check(struct kvm_vcpu * vcpu)3822 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3823 {
3824 	struct kvm_segment cs, ss;
3825 
3826 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3827 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3828 
3829 	return ((cs.selector & SEGMENT_RPL_MASK) ==
3830 		 (ss.selector & SEGMENT_RPL_MASK));
3831 }
3832 
3833 /*
3834  * Check if guest state is valid. Returns true if valid, false if
3835  * not.
3836  * We assume that registers are always usable
3837  */
__vmx_guest_state_valid(struct kvm_vcpu * vcpu)3838 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
3839 {
3840 	/* real mode guest state checks */
3841 	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3842 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3843 			return false;
3844 		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3845 			return false;
3846 		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3847 			return false;
3848 		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3849 			return false;
3850 		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3851 			return false;
3852 		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3853 			return false;
3854 	} else {
3855 	/* protected mode guest state checks */
3856 		if (!cs_ss_rpl_check(vcpu))
3857 			return false;
3858 		if (!code_segment_valid(vcpu))
3859 			return false;
3860 		if (!stack_segment_valid(vcpu))
3861 			return false;
3862 		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3863 			return false;
3864 		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3865 			return false;
3866 		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3867 			return false;
3868 		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3869 			return false;
3870 		if (!tr_valid(vcpu))
3871 			return false;
3872 		if (!ldtr_valid(vcpu))
3873 			return false;
3874 	}
3875 	/* TODO:
3876 	 * - Add checks on RIP
3877 	 * - Add checks on RFLAGS
3878 	 */
3879 
3880 	return true;
3881 }
3882 
init_rmode_tss(struct kvm * kvm,void __user * ua)3883 static int init_rmode_tss(struct kvm *kvm, void __user *ua)
3884 {
3885 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3886 	u16 data;
3887 	int i;
3888 
3889 	for (i = 0; i < 3; i++) {
3890 		if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
3891 			return -EFAULT;
3892 	}
3893 
3894 	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3895 	if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16)))
3896 		return -EFAULT;
3897 
3898 	data = ~0;
3899 	if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8)))
3900 		return -EFAULT;
3901 
3902 	return 0;
3903 }
3904 
init_rmode_identity_map(struct kvm * kvm)3905 static int init_rmode_identity_map(struct kvm *kvm)
3906 {
3907 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
3908 	int i, r = 0;
3909 	void __user *uaddr;
3910 	u32 tmp;
3911 
3912 	/* Protect kvm_vmx->ept_identity_pagetable_done. */
3913 	mutex_lock(&kvm->slots_lock);
3914 
3915 	if (likely(kvm_vmx->ept_identity_pagetable_done))
3916 		goto out;
3917 
3918 	if (!kvm_vmx->ept_identity_map_addr)
3919 		kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3920 
3921 	uaddr = __x86_set_memory_region(kvm,
3922 					IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
3923 					kvm_vmx->ept_identity_map_addr,
3924 					PAGE_SIZE);
3925 	if (IS_ERR(uaddr)) {
3926 		r = PTR_ERR(uaddr);
3927 		goto out;
3928 	}
3929 
3930 	/* Set up identity-mapping pagetable for EPT in real mode */
3931 	for (i = 0; i < (PAGE_SIZE / sizeof(tmp)); i++) {
3932 		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3933 			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3934 		if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) {
3935 			r = -EFAULT;
3936 			goto out;
3937 		}
3938 	}
3939 	kvm_vmx->ept_identity_pagetable_done = true;
3940 
3941 out:
3942 	mutex_unlock(&kvm->slots_lock);
3943 	return r;
3944 }
3945 
seg_setup(int seg)3946 static void seg_setup(int seg)
3947 {
3948 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3949 	unsigned int ar;
3950 
3951 	vmcs_write16(sf->selector, 0);
3952 	vmcs_writel(sf->base, 0);
3953 	vmcs_write32(sf->limit, 0xffff);
3954 	ar = 0x93;
3955 	if (seg == VCPU_SREG_CS)
3956 		ar |= 0x08; /* code segment */
3957 
3958 	vmcs_write32(sf->ar_bytes, ar);
3959 }
3960 
allocate_vpid(void)3961 int allocate_vpid(void)
3962 {
3963 	int vpid;
3964 
3965 	if (!enable_vpid)
3966 		return 0;
3967 	spin_lock(&vmx_vpid_lock);
3968 	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3969 	if (vpid < VMX_NR_VPIDS)
3970 		__set_bit(vpid, vmx_vpid_bitmap);
3971 	else
3972 		vpid = 0;
3973 	spin_unlock(&vmx_vpid_lock);
3974 	return vpid;
3975 }
3976 
free_vpid(int vpid)3977 void free_vpid(int vpid)
3978 {
3979 	if (!enable_vpid || vpid == 0)
3980 		return;
3981 	spin_lock(&vmx_vpid_lock);
3982 	__clear_bit(vpid, vmx_vpid_bitmap);
3983 	spin_unlock(&vmx_vpid_lock);
3984 }
3985 
vmx_msr_bitmap_l01_changed(struct vcpu_vmx * vmx)3986 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
3987 {
3988 	/*
3989 	 * When KVM is a nested hypervisor on top of Hyper-V and uses
3990 	 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
3991 	 * bitmap has changed.
3992 	 */
3993 	if (kvm_is_using_evmcs()) {
3994 		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3995 
3996 		if (evmcs->hv_enlightenments_control.msr_bitmap)
3997 			evmcs->hv_clean_fields &=
3998 				~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
3999 	}
4000 
4001 	vmx->nested.force_msr_bitmap_recalc = true;
4002 }
4003 
vmx_disable_intercept_for_msr(struct kvm_vcpu * vcpu,u32 msr,int type)4004 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
4005 {
4006 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4007 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4008 	int idx;
4009 
4010 	if (!cpu_has_vmx_msr_bitmap())
4011 		return;
4012 
4013 	vmx_msr_bitmap_l01_changed(vmx);
4014 
4015 	/*
4016 	 * Mark the desired intercept state in shadow bitmap, this is needed
4017 	 * for resync when the MSR filters change.
4018 	 */
4019 	idx = vmx_get_passthrough_msr_slot(msr);
4020 	if (idx >= 0) {
4021 		if (type & MSR_TYPE_R)
4022 			clear_bit(idx, vmx->shadow_msr_intercept.read);
4023 		if (type & MSR_TYPE_W)
4024 			clear_bit(idx, vmx->shadow_msr_intercept.write);
4025 	}
4026 
4027 	if ((type & MSR_TYPE_R) &&
4028 	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
4029 		vmx_set_msr_bitmap_read(msr_bitmap, msr);
4030 		type &= ~MSR_TYPE_R;
4031 	}
4032 
4033 	if ((type & MSR_TYPE_W) &&
4034 	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
4035 		vmx_set_msr_bitmap_write(msr_bitmap, msr);
4036 		type &= ~MSR_TYPE_W;
4037 	}
4038 
4039 	if (type & MSR_TYPE_R)
4040 		vmx_clear_msr_bitmap_read(msr_bitmap, msr);
4041 
4042 	if (type & MSR_TYPE_W)
4043 		vmx_clear_msr_bitmap_write(msr_bitmap, msr);
4044 }
4045 
vmx_enable_intercept_for_msr(struct kvm_vcpu * vcpu,u32 msr,int type)4046 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
4047 {
4048 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4049 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4050 	int idx;
4051 
4052 	if (!cpu_has_vmx_msr_bitmap())
4053 		return;
4054 
4055 	vmx_msr_bitmap_l01_changed(vmx);
4056 
4057 	/*
4058 	 * Mark the desired intercept state in shadow bitmap, this is needed
4059 	 * for resync when the MSR filter changes.
4060 	 */
4061 	idx = vmx_get_passthrough_msr_slot(msr);
4062 	if (idx >= 0) {
4063 		if (type & MSR_TYPE_R)
4064 			set_bit(idx, vmx->shadow_msr_intercept.read);
4065 		if (type & MSR_TYPE_W)
4066 			set_bit(idx, vmx->shadow_msr_intercept.write);
4067 	}
4068 
4069 	if (type & MSR_TYPE_R)
4070 		vmx_set_msr_bitmap_read(msr_bitmap, msr);
4071 
4072 	if (type & MSR_TYPE_W)
4073 		vmx_set_msr_bitmap_write(msr_bitmap, msr);
4074 }
4075 
vmx_update_msr_bitmap_x2apic(struct kvm_vcpu * vcpu)4076 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
4077 {
4078 	/*
4079 	 * x2APIC indices for 64-bit accesses into the RDMSR and WRMSR halves
4080 	 * of the MSR bitmap.  KVM emulates APIC registers up through 0x3f0,
4081 	 * i.e. MSR 0x83f, and so only needs to dynamically manipulate 64 bits.
4082 	 */
4083 	const int read_idx = APIC_BASE_MSR / BITS_PER_LONG_LONG;
4084 	const int write_idx = read_idx + (0x800 / sizeof(u64));
4085 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4086 	u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap;
4087 	u8 mode;
4088 
4089 	if (!cpu_has_vmx_msr_bitmap() || WARN_ON_ONCE(!lapic_in_kernel(vcpu)))
4090 		return;
4091 
4092 	if (cpu_has_secondary_exec_ctrls() &&
4093 	    (secondary_exec_controls_get(vmx) &
4094 	     SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
4095 		mode = MSR_BITMAP_MODE_X2APIC;
4096 		if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
4097 			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
4098 	} else {
4099 		mode = 0;
4100 	}
4101 
4102 	if (mode == vmx->x2apic_msr_bitmap_mode)
4103 		return;
4104 
4105 	vmx->x2apic_msr_bitmap_mode = mode;
4106 
4107 	/*
4108 	 * Reset the bitmap for MSRs 0x800 - 0x83f.  Leave AMD's uber-extended
4109 	 * registers (0x840 and above) intercepted, KVM doesn't support them.
4110 	 * Intercept all writes by default and poke holes as needed.  Pass
4111 	 * through reads for all valid registers by default in x2APIC+APICv
4112 	 * mode, only the current timer count needs on-demand emulation by KVM.
4113 	 */
4114 	if (mode & MSR_BITMAP_MODE_X2APIC_APICV)
4115 		msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic);
4116 	else
4117 		msr_bitmap[read_idx] = ~0ull;
4118 	msr_bitmap[write_idx] = ~0ull;
4119 
4120 	/*
4121 	 * TPR reads and writes can be virtualized even if virtual interrupt
4122 	 * delivery is not in use.
4123 	 */
4124 	vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
4125 				  !(mode & MSR_BITMAP_MODE_X2APIC));
4126 
4127 	if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
4128 		vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
4129 		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
4130 		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
4131 		if (enable_ipiv)
4132 			vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_ICR), MSR_TYPE_RW);
4133 	}
4134 }
4135 
pt_update_intercept_for_msr(struct kvm_vcpu * vcpu)4136 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
4137 {
4138 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4139 	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4140 	u32 i;
4141 
4142 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
4143 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
4144 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
4145 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
4146 	for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4147 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
4148 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
4149 	}
4150 }
4151 
vmx_msr_filter_changed(struct kvm_vcpu * vcpu)4152 void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
4153 {
4154 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4155 	u32 i;
4156 
4157 	if (!cpu_has_vmx_msr_bitmap())
4158 		return;
4159 
4160 	/*
4161 	 * Redo intercept permissions for MSRs that KVM is passing through to
4162 	 * the guest.  Disabling interception will check the new MSR filter and
4163 	 * ensure that KVM enables interception if usersepace wants to filter
4164 	 * the MSR.  MSRs that KVM is already intercepting don't need to be
4165 	 * refreshed since KVM is going to intercept them regardless of what
4166 	 * userspace wants.
4167 	 */
4168 	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
4169 		u32 msr = vmx_possible_passthrough_msrs[i];
4170 
4171 		if (!test_bit(i, vmx->shadow_msr_intercept.read))
4172 			vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
4173 
4174 		if (!test_bit(i, vmx->shadow_msr_intercept.write))
4175 			vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
4176 	}
4177 
4178 	/* PT MSRs can be passed through iff PT is exposed to the guest. */
4179 	if (vmx_pt_mode_is_host_guest())
4180 		pt_update_intercept_for_msr(vcpu);
4181 }
4182 
kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu * vcpu,int pi_vec)4183 static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
4184 						     int pi_vec)
4185 {
4186 #ifdef CONFIG_SMP
4187 	if (vcpu->mode == IN_GUEST_MODE) {
4188 		/*
4189 		 * The vector of the virtual has already been set in the PIR.
4190 		 * Send a notification event to deliver the virtual interrupt
4191 		 * unless the vCPU is the currently running vCPU, i.e. the
4192 		 * event is being sent from a fastpath VM-Exit handler, in
4193 		 * which case the PIR will be synced to the vIRR before
4194 		 * re-entering the guest.
4195 		 *
4196 		 * When the target is not the running vCPU, the following
4197 		 * possibilities emerge:
4198 		 *
4199 		 * Case 1: vCPU stays in non-root mode. Sending a notification
4200 		 * event posts the interrupt to the vCPU.
4201 		 *
4202 		 * Case 2: vCPU exits to root mode and is still runnable. The
4203 		 * PIR will be synced to the vIRR before re-entering the guest.
4204 		 * Sending a notification event is ok as the host IRQ handler
4205 		 * will ignore the spurious event.
4206 		 *
4207 		 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
4208 		 * has already synced PIR to vIRR and never blocks the vCPU if
4209 		 * the vIRR is not empty. Therefore, a blocked vCPU here does
4210 		 * not wait for any requested interrupts in PIR, and sending a
4211 		 * notification event also results in a benign, spurious event.
4212 		 */
4213 
4214 		if (vcpu != kvm_get_running_vcpu())
4215 			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
4216 		return;
4217 	}
4218 #endif
4219 	/*
4220 	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
4221 	 * otherwise do nothing as KVM will grab the highest priority pending
4222 	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
4223 	 */
4224 	kvm_vcpu_wake_up(vcpu);
4225 }
4226 
vmx_deliver_nested_posted_interrupt(struct kvm_vcpu * vcpu,int vector)4227 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4228 						int vector)
4229 {
4230 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4231 
4232 	/*
4233 	 * DO NOT query the vCPU's vmcs12, as vmcs12 is dynamically allocated
4234 	 * and freed, and must not be accessed outside of vcpu->mutex.  The
4235 	 * vCPU's cached PI NV is valid if and only if posted interrupts
4236 	 * enabled in its vmcs12, i.e. checking the vector also checks that
4237 	 * L1 has enabled posted interrupts for L2.
4238 	 */
4239 	if (is_guest_mode(vcpu) &&
4240 	    vector == vmx->nested.posted_intr_nv) {
4241 		/*
4242 		 * If a posted intr is not recognized by hardware,
4243 		 * we will accomplish it in the next vmentry.
4244 		 */
4245 		vmx->nested.pi_pending = true;
4246 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4247 
4248 		/*
4249 		 * This pairs with the smp_mb_*() after setting vcpu->mode in
4250 		 * vcpu_enter_guest() to guarantee the vCPU sees the event
4251 		 * request if triggering a posted interrupt "fails" because
4252 		 * vcpu->mode != IN_GUEST_MODE.  The extra barrier is needed as
4253 		 * the smb_wmb() in kvm_make_request() only ensures everything
4254 		 * done before making the request is visible when the request
4255 		 * is visible, it doesn't ensure ordering between the store to
4256 		 * vcpu->requests and the load from vcpu->mode.
4257 		 */
4258 		smp_mb__after_atomic();
4259 
4260 		/* the PIR and ON have been set by L1. */
4261 		kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
4262 		return 0;
4263 	}
4264 	return -1;
4265 }
4266 /*
4267  * Send interrupt to vcpu via posted interrupt way.
4268  * 1. If target vcpu is running(non-root mode), send posted interrupt
4269  * notification to vcpu and hardware will sync PIR to vIRR atomically.
4270  * 2. If target vcpu isn't running(root mode), kick it to pick up the
4271  * interrupt from PIR in next vmentry.
4272  */
vmx_deliver_posted_interrupt(struct kvm_vcpu * vcpu,int vector)4273 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4274 {
4275 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4276 	int r;
4277 
4278 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4279 	if (!r)
4280 		return 0;
4281 
4282 	/* Note, this is called iff the local APIC is in-kernel. */
4283 	if (!vcpu->arch.apic->apicv_active)
4284 		return -1;
4285 
4286 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4287 		return 0;
4288 
4289 	/* If a previous notification has sent the IPI, nothing to do.  */
4290 	if (pi_test_and_set_on(&vmx->pi_desc))
4291 		return 0;
4292 
4293 	/*
4294 	 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
4295 	 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
4296 	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
4297 	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
4298 	 */
4299 	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
4300 	return 0;
4301 }
4302 
vmx_deliver_interrupt(struct kvm_lapic * apic,int delivery_mode,int trig_mode,int vector)4303 void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
4304 			   int trig_mode, int vector)
4305 {
4306 	struct kvm_vcpu *vcpu = apic->vcpu;
4307 
4308 	if (vmx_deliver_posted_interrupt(vcpu, vector)) {
4309 		kvm_lapic_set_irr(vector, apic);
4310 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4311 		kvm_vcpu_kick(vcpu);
4312 	} else {
4313 		trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
4314 					   trig_mode, vector);
4315 	}
4316 }
4317 
4318 /*
4319  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4320  * will not change in the lifetime of the guest.
4321  * Note that host-state that does change is set elsewhere. E.g., host-state
4322  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4323  */
vmx_set_constant_host_state(struct vcpu_vmx * vmx)4324 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4325 {
4326 	u32 low32, high32;
4327 	unsigned long tmpl;
4328 	unsigned long cr0, cr3, cr4;
4329 
4330 	cr0 = read_cr0();
4331 	WARN_ON(cr0 & X86_CR0_TS);
4332 	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
4333 
4334 	/*
4335 	 * Save the most likely value for this task's CR3 in the VMCS.
4336 	 * We can't use __get_current_cr3_fast() because we're not atomic.
4337 	 */
4338 	cr3 = __read_cr3();
4339 	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
4340 	vmx->loaded_vmcs->host_state.cr3 = cr3;
4341 
4342 	/* Save the most likely value for this task's CR4 in the VMCS. */
4343 	cr4 = cr4_read_shadow();
4344 	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
4345 	vmx->loaded_vmcs->host_state.cr4 = cr4;
4346 
4347 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
4348 #ifdef CONFIG_X86_64
4349 	/*
4350 	 * Load null selectors, so we can avoid reloading them in
4351 	 * vmx_prepare_switch_to_host(), in case userspace uses
4352 	 * the null selectors too (the expected case).
4353 	 */
4354 	vmcs_write16(HOST_DS_SELECTOR, 0);
4355 	vmcs_write16(HOST_ES_SELECTOR, 0);
4356 #else
4357 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4358 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4359 #endif
4360 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4361 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
4362 
4363 	vmcs_writel(HOST_IDTR_BASE, host_idt_base);   /* 22.2.4 */
4364 
4365 	vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
4366 
4367 	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4368 	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4369 
4370 	/*
4371 	 * SYSENTER is used for 32-bit system calls on either 32-bit or
4372 	 * 64-bit kernels.  It is always zero If neither is allowed, otherwise
4373 	 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
4374 	 * have already done so!).
4375 	 */
4376 	if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
4377 		vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
4378 
4379 	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4380 	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
4381 
4382 	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4383 		rdmsr(MSR_IA32_CR_PAT, low32, high32);
4384 		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4385 	}
4386 
4387 	if (cpu_has_load_ia32_efer())
4388 		vmcs_write64(HOST_IA32_EFER, kvm_host.efer);
4389 }
4390 
set_cr4_guest_host_mask(struct vcpu_vmx * vmx)4391 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4392 {
4393 	struct kvm_vcpu *vcpu = &vmx->vcpu;
4394 
4395 	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4396 					  ~vcpu->arch.cr4_guest_rsvd_bits;
4397 	if (!enable_ept) {
4398 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4399 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4400 	}
4401 	if (is_guest_mode(&vmx->vcpu))
4402 		vcpu->arch.cr4_guest_owned_bits &=
4403 			~get_vmcs12(vcpu)->cr4_guest_host_mask;
4404 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4405 }
4406 
vmx_pin_based_exec_ctrl(struct vcpu_vmx * vmx)4407 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4408 {
4409 	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4410 
4411 	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4412 		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4413 
4414 	if (!enable_vnmi)
4415 		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
4416 
4417 	if (!enable_preemption_timer)
4418 		pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4419 
4420 	return pin_based_exec_ctrl;
4421 }
4422 
vmx_vmentry_ctrl(void)4423 static u32 vmx_vmentry_ctrl(void)
4424 {
4425 	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
4426 
4427 	if (vmx_pt_mode_is_system())
4428 		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
4429 				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
4430 	/*
4431 	 * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
4432 	 */
4433 	vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
4434 			  VM_ENTRY_LOAD_IA32_EFER |
4435 			  VM_ENTRY_IA32E_MODE);
4436 
4437 	return vmentry_ctrl;
4438 }
4439 
vmx_vmexit_ctrl(void)4440 static u32 vmx_vmexit_ctrl(void)
4441 {
4442 	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
4443 
4444 	/*
4445 	 * Not used by KVM and never set in vmcs01 or vmcs02, but emulated for
4446 	 * nested virtualization and thus allowed to be set in vmcs12.
4447 	 */
4448 	vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
4449 			 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);
4450 
4451 	if (vmx_pt_mode_is_system())
4452 		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
4453 				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
4454 	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
4455 	return vmexit_ctrl &
4456 		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
4457 }
4458 
vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu * vcpu)4459 void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4460 {
4461 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4462 
4463 	if (is_guest_mode(vcpu)) {
4464 		vmx->nested.update_vmcs01_apicv_status = true;
4465 		return;
4466 	}
4467 
4468 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4469 
4470 	if (kvm_vcpu_apicv_active(vcpu)) {
4471 		secondary_exec_controls_setbit(vmx,
4472 					       SECONDARY_EXEC_APIC_REGISTER_VIRT |
4473 					       SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4474 		if (enable_ipiv)
4475 			tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4476 	} else {
4477 		secondary_exec_controls_clearbit(vmx,
4478 						 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4479 						 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4480 		if (enable_ipiv)
4481 			tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4482 	}
4483 
4484 	vmx_update_msr_bitmap_x2apic(vcpu);
4485 }
4486 
vmx_exec_control(struct vcpu_vmx * vmx)4487 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4488 {
4489 	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4490 
4491 	/*
4492 	 * Not used by KVM, but fully supported for nesting, i.e. are allowed in
4493 	 * vmcs12 and propagated to vmcs02 when set in vmcs12.
4494 	 */
4495 	exec_control &= ~(CPU_BASED_RDTSC_EXITING |
4496 			  CPU_BASED_USE_IO_BITMAPS |
4497 			  CPU_BASED_MONITOR_TRAP_FLAG |
4498 			  CPU_BASED_PAUSE_EXITING);
4499 
4500 	/* INTR_WINDOW_EXITING and NMI_WINDOW_EXITING are toggled dynamically */
4501 	exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
4502 			  CPU_BASED_NMI_WINDOW_EXITING);
4503 
4504 	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4505 		exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4506 
4507 	if (!cpu_need_tpr_shadow(&vmx->vcpu))
4508 		exec_control &= ~CPU_BASED_TPR_SHADOW;
4509 
4510 #ifdef CONFIG_X86_64
4511 	if (exec_control & CPU_BASED_TPR_SHADOW)
4512 		exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
4513 				  CPU_BASED_CR8_STORE_EXITING);
4514 	else
4515 		exec_control |= CPU_BASED_CR8_STORE_EXITING |
4516 				CPU_BASED_CR8_LOAD_EXITING;
4517 #endif
4518 	/* No need to intercept CR3 access or INVPLG when using EPT. */
4519 	if (enable_ept)
4520 		exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4521 				  CPU_BASED_CR3_STORE_EXITING |
4522 				  CPU_BASED_INVLPG_EXITING);
4523 	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4524 		exec_control &= ~(CPU_BASED_MWAIT_EXITING |
4525 				CPU_BASED_MONITOR_EXITING);
4526 	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4527 		exec_control &= ~CPU_BASED_HLT_EXITING;
4528 	return exec_control;
4529 }
4530 
vmx_tertiary_exec_control(struct vcpu_vmx * vmx)4531 static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
4532 {
4533 	u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;
4534 
4535 	/*
4536 	 * IPI virtualization relies on APICv. Disable IPI virtualization if
4537 	 * APICv is inhibited.
4538 	 */
4539 	if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4540 		exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
4541 
4542 	return exec_control;
4543 }
4544 
4545 /*
4546  * Adjust a single secondary execution control bit to intercept/allow an
4547  * instruction in the guest.  This is usually done based on whether or not a
4548  * feature has been exposed to the guest in order to correctly emulate faults.
4549  */
4550 static inline void
vmx_adjust_secondary_exec_control(struct vcpu_vmx * vmx,u32 * exec_control,u32 control,bool enabled,bool exiting)4551 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4552 				  u32 control, bool enabled, bool exiting)
4553 {
4554 	/*
4555 	 * If the control is for an opt-in feature, clear the control if the
4556 	 * feature is not exposed to the guest, i.e. not enabled.  If the
4557 	 * control is opt-out, i.e. an exiting control, clear the control if
4558 	 * the feature _is_ exposed to the guest, i.e. exiting/interception is
4559 	 * disabled for the associated instruction.  Note, the caller is
4560 	 * responsible presetting exec_control to set all supported bits.
4561 	 */
4562 	if (enabled == exiting)
4563 		*exec_control &= ~control;
4564 
4565 	/*
4566 	 * Update the nested MSR settings so that a nested VMM can/can't set
4567 	 * controls for features that are/aren't exposed to the guest.
4568 	 */
4569 	if (nested &&
4570 	    kvm_check_has_quirk(vmx->vcpu.kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
4571 		/*
4572 		 * All features that can be added or removed to VMX MSRs must
4573 		 * be supported in the first place for nested virtualization.
4574 		 */
4575 		if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
4576 			enabled = false;
4577 
4578 		if (enabled)
4579 			vmx->nested.msrs.secondary_ctls_high |= control;
4580 		else
4581 			vmx->nested.msrs.secondary_ctls_high &= ~control;
4582 	}
4583 }
4584 
4585 /*
4586  * Wrapper macro for the common case of adjusting a secondary execution control
4587  * based on a single guest CPUID bit, with a dedicated feature bit.  This also
4588  * verifies that the control is actually supported by KVM and hardware.
4589  */
4590 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting)	\
4591 ({												\
4592 	struct kvm_vcpu *__vcpu = &(vmx)->vcpu;							\
4593 	bool __enabled;										\
4594 												\
4595 	if (cpu_has_vmx_##name()) {								\
4596 		__enabled = guest_cpu_cap_has(__vcpu, X86_FEATURE_##feat_name);			\
4597 		vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
4598 						  __enabled, exiting);				\
4599 	}											\
4600 })
4601 
4602 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4603 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4604 	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4605 
4606 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4607 	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4608 
vmx_secondary_exec_control(struct vcpu_vmx * vmx)4609 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4610 {
4611 	struct kvm_vcpu *vcpu = &vmx->vcpu;
4612 
4613 	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4614 
4615 	if (vmx_pt_mode_is_system())
4616 		exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
4617 	if (!cpu_need_virtualize_apic_accesses(vcpu))
4618 		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4619 	if (vmx->vpid == 0)
4620 		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4621 	if (!enable_ept) {
4622 		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4623 		exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
4624 		enable_unrestricted_guest = 0;
4625 	}
4626 	if (!enable_unrestricted_guest)
4627 		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4628 	if (kvm_pause_in_guest(vmx->vcpu.kvm))
4629 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4630 	if (!kvm_vcpu_apicv_active(vcpu))
4631 		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4632 				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4633 	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4634 
4635 	/*
4636 	 * KVM doesn't support VMFUNC for L1, but the control is set in KVM's
4637 	 * base configuration as KVM emulates VMFUNC[EPTP_SWITCHING] for L2.
4638 	 */
4639 	exec_control &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
4640 
4641 	/* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
4642 	 * in vmx_set_cr4.  */
4643 	exec_control &= ~SECONDARY_EXEC_DESC;
4644 
4645 	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4646 	   (handle_vmptrld).
4647 	   We can NOT enable shadow_vmcs here because we don't have yet
4648 	   a current VMCS12
4649 	*/
4650 	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4651 
4652 	/*
4653 	 * PML is enabled/disabled when dirty logging of memsmlots changes, but
4654 	 * it needs to be set here when dirty logging is already active, e.g.
4655 	 * if this vCPU was created after dirty logging was enabled.
4656 	 */
4657 	if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
4658 		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4659 
4660 	vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
4661 
4662 	/*
4663 	 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
4664 	 * feature is exposed to the guest.  This creates a virtualization hole
4665 	 * if both are supported in hardware but only one is exposed to the
4666 	 * guest, but letting the guest execute RDTSCP or RDPID when either one
4667 	 * is advertised is preferable to emulating the advertised instruction
4668 	 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
4669 	 */
4670 	if (cpu_has_vmx_rdtscp()) {
4671 		bool rdpid_or_rdtscp_enabled =
4672 			guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) ||
4673 			guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID);
4674 
4675 		vmx_adjust_secondary_exec_control(vmx, &exec_control,
4676 						  SECONDARY_EXEC_ENABLE_RDTSCP,
4677 						  rdpid_or_rdtscp_enabled, false);
4678 	}
4679 
4680 	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4681 
4682 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4683 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4684 
4685 	vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4686 				    ENABLE_USR_WAIT_PAUSE, false);
4687 
4688 	if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4689 		exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;
4690 
4691 	if (!kvm_notify_vmexit_enabled(vcpu->kvm))
4692 		exec_control &= ~SECONDARY_EXEC_NOTIFY_VM_EXITING;
4693 
4694 	return exec_control;
4695 }
4696 
vmx_get_pid_table_order(struct kvm * kvm)4697 static inline int vmx_get_pid_table_order(struct kvm *kvm)
4698 {
4699 	return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
4700 }
4701 
vmx_alloc_ipiv_pid_table(struct kvm * kvm)4702 static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
4703 {
4704 	struct page *pages;
4705 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4706 
4707 	if (!irqchip_in_kernel(kvm) || !enable_ipiv)
4708 		return 0;
4709 
4710 	if (kvm_vmx->pid_table)
4711 		return 0;
4712 
4713 	pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
4714 			    vmx_get_pid_table_order(kvm));
4715 	if (!pages)
4716 		return -ENOMEM;
4717 
4718 	kvm_vmx->pid_table = (void *)page_address(pages);
4719 	return 0;
4720 }
4721 
vmx_vcpu_precreate(struct kvm * kvm)4722 int vmx_vcpu_precreate(struct kvm *kvm)
4723 {
4724 	return vmx_alloc_ipiv_pid_table(kvm);
4725 }
4726 
4727 #define VMX_XSS_EXIT_BITMAP 0
4728 
init_vmcs(struct vcpu_vmx * vmx)4729 static void init_vmcs(struct vcpu_vmx *vmx)
4730 {
4731 	struct kvm *kvm = vmx->vcpu.kvm;
4732 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4733 
4734 	if (nested)
4735 		nested_vmx_set_vmcs_shadowing_bitmap();
4736 
4737 	if (cpu_has_vmx_msr_bitmap())
4738 		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4739 
4740 	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */
4741 
4742 	/* Control */
4743 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4744 
4745 	exec_controls_set(vmx, vmx_exec_control(vmx));
4746 
4747 	if (cpu_has_secondary_exec_ctrls()) {
4748 		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
4749 		if (vmx->ve_info)
4750 			vmcs_write64(VE_INFORMATION_ADDRESS,
4751 				     __pa(vmx->ve_info));
4752 	}
4753 
4754 	if (cpu_has_tertiary_exec_ctrls())
4755 		tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
4756 
4757 	if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4758 		vmcs_write64(EOI_EXIT_BITMAP0, 0);
4759 		vmcs_write64(EOI_EXIT_BITMAP1, 0);
4760 		vmcs_write64(EOI_EXIT_BITMAP2, 0);
4761 		vmcs_write64(EOI_EXIT_BITMAP3, 0);
4762 
4763 		vmcs_write16(GUEST_INTR_STATUS, 0);
4764 
4765 		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4766 		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4767 	}
4768 
4769 	if (vmx_can_use_ipiv(&vmx->vcpu)) {
4770 		vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
4771 		vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
4772 	}
4773 
4774 	if (!kvm_pause_in_guest(kvm)) {
4775 		vmcs_write32(PLE_GAP, ple_gap);
4776 		vmx->ple_window = ple_window;
4777 		vmx->ple_window_dirty = true;
4778 	}
4779 
4780 	if (kvm_notify_vmexit_enabled(kvm))
4781 		vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
4782 
4783 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4784 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4785 	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
4786 
4787 	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
4788 	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
4789 	vmx_set_constant_host_state(vmx);
4790 	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4791 	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4792 
4793 	if (cpu_has_vmx_vmfunc())
4794 		vmcs_write64(VM_FUNCTION_CONTROL, 0);
4795 
4796 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4797 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4798 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4799 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4800 	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4801 
4802 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
4803 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4804 
4805 	vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4806 
4807 	/* 22.2.1, 20.8.1 */
4808 	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4809 
4810 	vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4811 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4812 
4813 	set_cr4_guest_host_mask(vmx);
4814 
4815 	if (vmx->vpid != 0)
4816 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4817 
4818 	if (cpu_has_vmx_xsaves())
4819 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
4820 
4821 	if (enable_pml) {
4822 		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4823 		vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
4824 	}
4825 
4826 	vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4827 
4828 	if (vmx_pt_mode_is_host_guest()) {
4829 		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4830 		/* Bit[6~0] are forced to 1, writes are ignored. */
4831 		vmx->pt_desc.guest.output_mask = 0x7F;
4832 		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
4833 	}
4834 
4835 	vmcs_write32(GUEST_SYSENTER_CS, 0);
4836 	vmcs_writel(GUEST_SYSENTER_ESP, 0);
4837 	vmcs_writel(GUEST_SYSENTER_EIP, 0);
4838 	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4839 
4840 	if (cpu_has_vmx_tpr_shadow()) {
4841 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4842 		if (cpu_need_tpr_shadow(&vmx->vcpu))
4843 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4844 				     __pa(vmx->vcpu.arch.apic->regs));
4845 		vmcs_write32(TPR_THRESHOLD, 0);
4846 	}
4847 
4848 	vmx_setup_uret_msrs(vmx);
4849 }
4850 
__vmx_vcpu_reset(struct kvm_vcpu * vcpu)4851 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4852 {
4853 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4854 
4855 	init_vmcs(vmx);
4856 
4857 	if (nested &&
4858 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
4859 		memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
4860 
4861 	vcpu_setup_sgx_lepubkeyhash(vcpu);
4862 
4863 	vmx->nested.posted_intr_nv = -1;
4864 	vmx->nested.vmxon_ptr = INVALID_GPA;
4865 	vmx->nested.current_vmptr = INVALID_GPA;
4866 
4867 #ifdef CONFIG_KVM_HYPERV
4868 	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
4869 #endif
4870 
4871 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
4872 		vcpu->arch.microcode_version = 0x100000000ULL;
4873 	vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
4874 
4875 	/*
4876 	 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
4877 	 * or POSTED_INTR_WAKEUP_VECTOR.
4878 	 */
4879 	vmx->pi_desc.nv = POSTED_INTR_VECTOR;
4880 	__pi_set_sn(&vmx->pi_desc);
4881 }
4882 
vmx_vcpu_reset(struct kvm_vcpu * vcpu,bool init_event)4883 void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4884 {
4885 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4886 
4887 	if (!init_event)
4888 		__vmx_vcpu_reset(vcpu);
4889 
4890 	vmx->rmode.vm86_active = 0;
4891 	vmx->spec_ctrl = 0;
4892 
4893 	vmx->msr_ia32_umwait_control = 0;
4894 
4895 	vmx->hv_deadline_tsc = -1;
4896 	kvm_set_cr8(vcpu, 0);
4897 
4898 	seg_setup(VCPU_SREG_CS);
4899 	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4900 	vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
4901 
4902 	seg_setup(VCPU_SREG_DS);
4903 	seg_setup(VCPU_SREG_ES);
4904 	seg_setup(VCPU_SREG_FS);
4905 	seg_setup(VCPU_SREG_GS);
4906 	seg_setup(VCPU_SREG_SS);
4907 
4908 	vmcs_write16(GUEST_TR_SELECTOR, 0);
4909 	vmcs_writel(GUEST_TR_BASE, 0);
4910 	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4911 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4912 
4913 	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4914 	vmcs_writel(GUEST_LDTR_BASE, 0);
4915 	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4916 	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4917 
4918 	vmcs_writel(GUEST_GDTR_BASE, 0);
4919 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4920 
4921 	vmcs_writel(GUEST_IDTR_BASE, 0);
4922 	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4923 
4924 	vmx_segment_cache_clear(vmx);
4925 	kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS);
4926 
4927 	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4928 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4929 	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4930 	if (kvm_mpx_supported())
4931 		vmcs_write64(GUEST_BNDCFGS, 0);
4932 
4933 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
4934 
4935 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4936 
4937 	vpid_sync_context(vmx->vpid);
4938 
4939 	vmx_update_fb_clear_dis(vcpu, vmx);
4940 }
4941 
vmx_enable_irq_window(struct kvm_vcpu * vcpu)4942 void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
4943 {
4944 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4945 }
4946 
vmx_enable_nmi_window(struct kvm_vcpu * vcpu)4947 void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
4948 {
4949 	if (!enable_vnmi ||
4950 	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4951 		vmx_enable_irq_window(vcpu);
4952 		return;
4953 	}
4954 
4955 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4956 }
4957 
vmx_inject_irq(struct kvm_vcpu * vcpu,bool reinjected)4958 void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
4959 {
4960 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4961 	uint32_t intr;
4962 	int irq = vcpu->arch.interrupt.nr;
4963 
4964 	trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
4965 
4966 	++vcpu->stat.irq_injections;
4967 	if (vmx->rmode.vm86_active) {
4968 		int inc_eip = 0;
4969 		if (vcpu->arch.interrupt.soft)
4970 			inc_eip = vcpu->arch.event_exit_inst_len;
4971 		kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
4972 		return;
4973 	}
4974 	intr = irq | INTR_INFO_VALID_MASK;
4975 	if (vcpu->arch.interrupt.soft) {
4976 		intr |= INTR_TYPE_SOFT_INTR;
4977 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4978 			     vmx->vcpu.arch.event_exit_inst_len);
4979 	} else
4980 		intr |= INTR_TYPE_EXT_INTR;
4981 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4982 
4983 	vmx_clear_hlt(vcpu);
4984 }
4985 
vmx_inject_nmi(struct kvm_vcpu * vcpu)4986 void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4987 {
4988 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4989 
4990 	if (!enable_vnmi) {
4991 		/*
4992 		 * Tracking the NMI-blocked state in software is built upon
4993 		 * finding the next open IRQ window. This, in turn, depends on
4994 		 * well-behaving guests: They have to keep IRQs disabled at
4995 		 * least as long as the NMI handler runs. Otherwise we may
4996 		 * cause NMI nesting, maybe breaking the guest. But as this is
4997 		 * highly unlikely, we can live with the residual risk.
4998 		 */
4999 		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
5000 		vmx->loaded_vmcs->vnmi_blocked_time = 0;
5001 	}
5002 
5003 	++vcpu->stat.nmi_injections;
5004 	vmx->loaded_vmcs->nmi_known_unmasked = false;
5005 
5006 	if (vmx->rmode.vm86_active) {
5007 		kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
5008 		return;
5009 	}
5010 
5011 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
5012 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
5013 
5014 	vmx_clear_hlt(vcpu);
5015 }
5016 
vmx_get_nmi_mask(struct kvm_vcpu * vcpu)5017 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
5018 {
5019 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5020 	bool masked;
5021 
5022 	if (!enable_vnmi)
5023 		return vmx->loaded_vmcs->soft_vnmi_blocked;
5024 	if (vmx->loaded_vmcs->nmi_known_unmasked)
5025 		return false;
5026 	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5027 	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5028 	return masked;
5029 }
5030 
vmx_set_nmi_mask(struct kvm_vcpu * vcpu,bool masked)5031 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5032 {
5033 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5034 
5035 	if (!enable_vnmi) {
5036 		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
5037 			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
5038 			vmx->loaded_vmcs->vnmi_blocked_time = 0;
5039 		}
5040 	} else {
5041 		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5042 		if (masked)
5043 			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5044 				      GUEST_INTR_STATE_NMI);
5045 		else
5046 			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
5047 					GUEST_INTR_STATE_NMI);
5048 	}
5049 }
5050 
vmx_nmi_blocked(struct kvm_vcpu * vcpu)5051 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
5052 {
5053 	if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5054 		return false;
5055 
5056 	if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
5057 		return true;
5058 
5059 	return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5060 		(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5061 		 GUEST_INTR_STATE_NMI));
5062 }
5063 
vmx_nmi_allowed(struct kvm_vcpu * vcpu,bool for_injection)5064 int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5065 {
5066 	if (to_vmx(vcpu)->nested.nested_run_pending)
5067 		return -EBUSY;
5068 
5069 	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
5070 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5071 		return -EBUSY;
5072 
5073 	return !vmx_nmi_blocked(vcpu);
5074 }
5075 
__vmx_interrupt_blocked(struct kvm_vcpu * vcpu)5076 bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5077 {
5078 	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5079 	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5080 		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5081 }
5082 
vmx_interrupt_blocked(struct kvm_vcpu * vcpu)5083 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5084 {
5085 	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5086 		return false;
5087 
5088 	return __vmx_interrupt_blocked(vcpu);
5089 }
5090 
vmx_interrupt_allowed(struct kvm_vcpu * vcpu,bool for_injection)5091 int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5092 {
5093 	if (to_vmx(vcpu)->nested.nested_run_pending)
5094 		return -EBUSY;
5095 
5096 	/*
5097 	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
5098 	 * e.g. if the IRQ arrived asynchronously after checking nested events.
5099 	 */
5100 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5101 		return -EBUSY;
5102 
5103 	return !vmx_interrupt_blocked(vcpu);
5104 }
5105 
vmx_set_tss_addr(struct kvm * kvm,unsigned int addr)5106 int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
5107 {
5108 	void __user *ret;
5109 
5110 	if (enable_unrestricted_guest)
5111 		return 0;
5112 
5113 	mutex_lock(&kvm->slots_lock);
5114 	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
5115 				      PAGE_SIZE * 3);
5116 	mutex_unlock(&kvm->slots_lock);
5117 
5118 	if (IS_ERR(ret))
5119 		return PTR_ERR(ret);
5120 
5121 	to_kvm_vmx(kvm)->tss_addr = addr;
5122 
5123 	return init_rmode_tss(kvm, ret);
5124 }
5125 
vmx_set_identity_map_addr(struct kvm * kvm,u64 ident_addr)5126 int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5127 {
5128 	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
5129 	return 0;
5130 }
5131 
rmode_exception(struct kvm_vcpu * vcpu,int vec)5132 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
5133 {
5134 	switch (vec) {
5135 	case BP_VECTOR:
5136 		/*
5137 		 * Update instruction length as we may reinject the exception
5138 		 * from user space while in guest debugging mode.
5139 		 */
5140 		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5141 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5142 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5143 			return false;
5144 		fallthrough;
5145 	case DB_VECTOR:
5146 		return !(vcpu->guest_debug &
5147 			(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
5148 	case DE_VECTOR:
5149 	case OF_VECTOR:
5150 	case BR_VECTOR:
5151 	case UD_VECTOR:
5152 	case DF_VECTOR:
5153 	case SS_VECTOR:
5154 	case GP_VECTOR:
5155 	case MF_VECTOR:
5156 		return true;
5157 	}
5158 	return false;
5159 }
5160 
handle_rmode_exception(struct kvm_vcpu * vcpu,int vec,u32 err_code)5161 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
5162 				  int vec, u32 err_code)
5163 {
5164 	/*
5165 	 * Instruction with address size override prefix opcode 0x67
5166 	 * Cause the #SS fault with 0 error code in VM86 mode.
5167 	 */
5168 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
5169 		if (kvm_emulate_instruction(vcpu, 0)) {
5170 			if (vcpu->arch.halt_request) {
5171 				vcpu->arch.halt_request = 0;
5172 				return kvm_emulate_halt_noskip(vcpu);
5173 			}
5174 			return 1;
5175 		}
5176 		return 0;
5177 	}
5178 
5179 	/*
5180 	 * Forward all other exceptions that are valid in real mode.
5181 	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
5182 	 *        the required debugging infrastructure rework.
5183 	 */
5184 	kvm_queue_exception(vcpu, vec);
5185 	return 1;
5186 }
5187 
handle_machine_check(struct kvm_vcpu * vcpu)5188 static int handle_machine_check(struct kvm_vcpu *vcpu)
5189 {
5190 	/* handled by vmx_vcpu_run() */
5191 	return 1;
5192 }
5193 
5194 /*
5195  * If the host has split lock detection disabled, then #AC is
5196  * unconditionally injected into the guest, which is the pre split lock
5197  * detection behaviour.
5198  *
5199  * If the host has split lock detection enabled then #AC is
5200  * only injected into the guest when:
5201  *  - Guest CPL == 3 (user mode)
5202  *  - Guest has #AC detection enabled in CR0
5203  *  - Guest EFLAGS has AC bit set
5204  */
vmx_guest_inject_ac(struct kvm_vcpu * vcpu)5205 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
5206 {
5207 	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
5208 		return true;
5209 
5210 	return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
5211 	       (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
5212 }
5213 
handle_exception_nmi(struct kvm_vcpu * vcpu)5214 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
5215 {
5216 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5217 	struct kvm_run *kvm_run = vcpu->run;
5218 	u32 intr_info, ex_no, error_code;
5219 	unsigned long cr2, dr6;
5220 	u32 vect_info;
5221 
5222 	vect_info = vmx->idt_vectoring_info;
5223 	intr_info = vmx_get_intr_info(vcpu);
5224 
5225 	/*
5226 	 * Machine checks are handled by handle_exception_irqoff(), or by
5227 	 * vmx_vcpu_run() if a #MC occurs on VM-Entry.  NMIs are handled by
5228 	 * vmx_vcpu_enter_exit().
5229 	 */
5230 	if (is_machine_check(intr_info) || is_nmi(intr_info))
5231 		return 1;
5232 
5233 	/*
5234 	 * Queue the exception here instead of in handle_nm_fault_irqoff().
5235 	 * This ensures the nested_vmx check is not skipped so vmexit can
5236 	 * be reflected to L1 (when it intercepts #NM) before reaching this
5237 	 * point.
5238 	 */
5239 	if (is_nm_fault(intr_info)) {
5240 		kvm_queue_exception(vcpu, NM_VECTOR);
5241 		return 1;
5242 	}
5243 
5244 	if (is_invalid_opcode(intr_info))
5245 		return handle_ud(vcpu);
5246 
5247 	if (WARN_ON_ONCE(is_ve_fault(intr_info))) {
5248 		struct vmx_ve_information *ve_info = vmx->ve_info;
5249 
5250 		WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION,
5251 			  "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason);
5252 		dump_vmcs(vcpu);
5253 		kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE");
5254 		return 1;
5255 	}
5256 
5257 	error_code = 0;
5258 	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
5259 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5260 
5261 	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
5262 		WARN_ON_ONCE(!enable_vmware_backdoor);
5263 
5264 		/*
5265 		 * VMware backdoor emulation on #GP interception only handles
5266 		 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
5267 		 * error code on #GP.
5268 		 */
5269 		if (error_code) {
5270 			kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
5271 			return 1;
5272 		}
5273 		return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
5274 	}
5275 
5276 	/*
5277 	 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
5278 	 * MMIO, it is better to report an internal error.
5279 	 * See the comments in vmx_handle_exit.
5280 	 */
5281 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
5282 	    !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
5283 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5284 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5285 		vcpu->run->internal.ndata = 4;
5286 		vcpu->run->internal.data[0] = vect_info;
5287 		vcpu->run->internal.data[1] = intr_info;
5288 		vcpu->run->internal.data[2] = error_code;
5289 		vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5290 		return 0;
5291 	}
5292 
5293 	if (is_page_fault(intr_info)) {
5294 		cr2 = vmx_get_exit_qual(vcpu);
5295 		if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
5296 			/*
5297 			 * EPT will cause page fault only if we need to
5298 			 * detect illegal GPAs.
5299 			 */
5300 			WARN_ON_ONCE(!allow_smaller_maxphyaddr);
5301 			kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
5302 			return 1;
5303 		} else
5304 			return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
5305 	}
5306 
5307 	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
5308 
5309 	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5310 		return handle_rmode_exception(vcpu, ex_no, error_code);
5311 
5312 	switch (ex_no) {
5313 	case DB_VECTOR:
5314 		dr6 = vmx_get_exit_qual(vcpu);
5315 		if (!(vcpu->guest_debug &
5316 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5317 			/*
5318 			 * If the #DB was due to ICEBP, a.k.a. INT1, skip the
5319 			 * instruction.  ICEBP generates a trap-like #DB, but
5320 			 * despite its interception control being tied to #DB,
5321 			 * is an instruction intercept, i.e. the VM-Exit occurs
5322 			 * on the ICEBP itself.  Use the inner "skip" helper to
5323 			 * avoid single-step #DB and MTF updates, as ICEBP is
5324 			 * higher priority.  Note, skipping ICEBP still clears
5325 			 * STI and MOVSS blocking.
5326 			 *
5327 			 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
5328 			 * if single-step is enabled in RFLAGS and STI or MOVSS
5329 			 * blocking is active, as the CPU doesn't set the bit
5330 			 * on VM-Exit due to #DB interception.  VM-Entry has a
5331 			 * consistency check that a single-step #DB is pending
5332 			 * in this scenario as the previous instruction cannot
5333 			 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
5334 			 * don't modify RFLAGS), therefore the one instruction
5335 			 * delay when activating single-step breakpoints must
5336 			 * have already expired.  Note, the CPU sets/clears BS
5337 			 * as appropriate for all other VM-Exits types.
5338 			 */
5339 			if (is_icebp(intr_info))
5340 				WARN_ON(!skip_emulated_instruction(vcpu));
5341 			else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
5342 				 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5343 				  (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
5344 				vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
5345 					    vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
5346 
5347 			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
5348 			return 1;
5349 		}
5350 		kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
5351 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5352 		fallthrough;
5353 	case BP_VECTOR:
5354 		/*
5355 		 * Update instruction length as we may reinject #BP from
5356 		 * user space while in guest debugging mode. Reading it for
5357 		 * #DB as well causes no harm, it is not used in that case.
5358 		 */
5359 		vmx->vcpu.arch.event_exit_inst_len =
5360 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5361 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
5362 		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5363 		kvm_run->debug.arch.exception = ex_no;
5364 		break;
5365 	case AC_VECTOR:
5366 		if (vmx_guest_inject_ac(vcpu)) {
5367 			kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
5368 			return 1;
5369 		}
5370 
5371 		/*
5372 		 * Handle split lock. Depending on detection mode this will
5373 		 * either warn and disable split lock detection for this
5374 		 * task or force SIGBUS on it.
5375 		 */
5376 		if (handle_guest_split_lock(kvm_rip_read(vcpu)))
5377 			return 1;
5378 		fallthrough;
5379 	default:
5380 		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
5381 		kvm_run->ex.exception = ex_no;
5382 		kvm_run->ex.error_code = error_code;
5383 		break;
5384 	}
5385 	return 0;
5386 }
5387 
handle_external_interrupt(struct kvm_vcpu * vcpu)5388 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
5389 {
5390 	++vcpu->stat.irq_exits;
5391 	return 1;
5392 }
5393 
handle_triple_fault(struct kvm_vcpu * vcpu)5394 static int handle_triple_fault(struct kvm_vcpu *vcpu)
5395 {
5396 	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5397 	vcpu->mmio_needed = 0;
5398 	return 0;
5399 }
5400 
handle_io(struct kvm_vcpu * vcpu)5401 static int handle_io(struct kvm_vcpu *vcpu)
5402 {
5403 	unsigned long exit_qualification;
5404 	int size, in, string;
5405 	unsigned port;
5406 
5407 	exit_qualification = vmx_get_exit_qual(vcpu);
5408 	string = (exit_qualification & 16) != 0;
5409 
5410 	++vcpu->stat.io_exits;
5411 
5412 	if (string)
5413 		return kvm_emulate_instruction(vcpu, 0);
5414 
5415 	port = exit_qualification >> 16;
5416 	size = (exit_qualification & 7) + 1;
5417 	in = (exit_qualification & 8) != 0;
5418 
5419 	return kvm_fast_pio(vcpu, size, port, in);
5420 }
5421 
vmx_patch_hypercall(struct kvm_vcpu * vcpu,unsigned char * hypercall)5422 void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5423 {
5424 	/*
5425 	 * Patch in the VMCALL instruction:
5426 	 */
5427 	hypercall[0] = 0x0f;
5428 	hypercall[1] = 0x01;
5429 	hypercall[2] = 0xc1;
5430 }
5431 
5432 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
handle_set_cr0(struct kvm_vcpu * vcpu,unsigned long val)5433 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5434 {
5435 	if (is_guest_mode(vcpu)) {
5436 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5437 		unsigned long orig_val = val;
5438 
5439 		/*
5440 		 * We get here when L2 changed cr0 in a way that did not change
5441 		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5442 		 * but did change L0 shadowed bits. So we first calculate the
5443 		 * effective cr0 value that L1 would like to write into the
5444 		 * hardware. It consists of the L2-owned bits from the new
5445 		 * value combined with the L1-owned bits from L1's guest_cr0.
5446 		 */
5447 		val = (val & ~vmcs12->cr0_guest_host_mask) |
5448 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5449 
5450 		if (kvm_set_cr0(vcpu, val))
5451 			return 1;
5452 		vmcs_writel(CR0_READ_SHADOW, orig_val);
5453 		return 0;
5454 	} else {
5455 		return kvm_set_cr0(vcpu, val);
5456 	}
5457 }
5458 
handle_set_cr4(struct kvm_vcpu * vcpu,unsigned long val)5459 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5460 {
5461 	if (is_guest_mode(vcpu)) {
5462 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5463 		unsigned long orig_val = val;
5464 
5465 		/* analogously to handle_set_cr0 */
5466 		val = (val & ~vmcs12->cr4_guest_host_mask) |
5467 			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5468 		if (kvm_set_cr4(vcpu, val))
5469 			return 1;
5470 		vmcs_writel(CR4_READ_SHADOW, orig_val);
5471 		return 0;
5472 	} else
5473 		return kvm_set_cr4(vcpu, val);
5474 }
5475 
handle_desc(struct kvm_vcpu * vcpu)5476 static int handle_desc(struct kvm_vcpu *vcpu)
5477 {
5478 	/*
5479 	 * UMIP emulation relies on intercepting writes to CR4.UMIP, i.e. this
5480 	 * and other code needs to be updated if UMIP can be guest owned.
5481 	 */
5482 	BUILD_BUG_ON(KVM_POSSIBLE_CR4_GUEST_BITS & X86_CR4_UMIP);
5483 
5484 	WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu, X86_CR4_UMIP));
5485 	return kvm_emulate_instruction(vcpu, 0);
5486 }
5487 
handle_cr(struct kvm_vcpu * vcpu)5488 static int handle_cr(struct kvm_vcpu *vcpu)
5489 {
5490 	unsigned long exit_qualification, val;
5491 	int cr;
5492 	int reg;
5493 	int err;
5494 	int ret;
5495 
5496 	exit_qualification = vmx_get_exit_qual(vcpu);
5497 	cr = exit_qualification & 15;
5498 	reg = (exit_qualification >> 8) & 15;
5499 	switch ((exit_qualification >> 4) & 3) {
5500 	case 0: /* mov to cr */
5501 		val = kvm_register_read(vcpu, reg);
5502 		trace_kvm_cr_write(cr, val);
5503 		switch (cr) {
5504 		case 0:
5505 			err = handle_set_cr0(vcpu, val);
5506 			return kvm_complete_insn_gp(vcpu, err);
5507 		case 3:
5508 			WARN_ON_ONCE(enable_unrestricted_guest);
5509 
5510 			err = kvm_set_cr3(vcpu, val);
5511 			return kvm_complete_insn_gp(vcpu, err);
5512 		case 4:
5513 			err = handle_set_cr4(vcpu, val);
5514 			return kvm_complete_insn_gp(vcpu, err);
5515 		case 8: {
5516 				u8 cr8_prev = kvm_get_cr8(vcpu);
5517 				u8 cr8 = (u8)val;
5518 				err = kvm_set_cr8(vcpu, cr8);
5519 				ret = kvm_complete_insn_gp(vcpu, err);
5520 				if (lapic_in_kernel(vcpu))
5521 					return ret;
5522 				if (cr8_prev <= cr8)
5523 					return ret;
5524 				/*
5525 				 * TODO: we might be squashing a
5526 				 * KVM_GUESTDBG_SINGLESTEP-triggered
5527 				 * KVM_EXIT_DEBUG here.
5528 				 */
5529 				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5530 				return 0;
5531 			}
5532 		}
5533 		break;
5534 	case 2: /* clts */
5535 		KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
5536 		return -EIO;
5537 	case 1: /*mov from cr*/
5538 		switch (cr) {
5539 		case 3:
5540 			WARN_ON_ONCE(enable_unrestricted_guest);
5541 
5542 			val = kvm_read_cr3(vcpu);
5543 			kvm_register_write(vcpu, reg, val);
5544 			trace_kvm_cr_read(cr, val);
5545 			return kvm_skip_emulated_instruction(vcpu);
5546 		case 8:
5547 			val = kvm_get_cr8(vcpu);
5548 			kvm_register_write(vcpu, reg, val);
5549 			trace_kvm_cr_read(cr, val);
5550 			return kvm_skip_emulated_instruction(vcpu);
5551 		}
5552 		break;
5553 	case 3: /* lmsw */
5554 		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5555 		trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
5556 		kvm_lmsw(vcpu, val);
5557 
5558 		return kvm_skip_emulated_instruction(vcpu);
5559 	default:
5560 		break;
5561 	}
5562 	vcpu->run->exit_reason = 0;
5563 	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5564 	       (int)(exit_qualification >> 4) & 3, cr);
5565 	return 0;
5566 }
5567 
handle_dr(struct kvm_vcpu * vcpu)5568 static int handle_dr(struct kvm_vcpu *vcpu)
5569 {
5570 	unsigned long exit_qualification;
5571 	int dr, dr7, reg;
5572 	int err = 1;
5573 
5574 	exit_qualification = vmx_get_exit_qual(vcpu);
5575 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5576 
5577 	/* First, if DR does not exist, trigger UD */
5578 	if (!kvm_require_dr(vcpu, dr))
5579 		return 1;
5580 
5581 	if (vmx_get_cpl(vcpu) > 0)
5582 		goto out;
5583 
5584 	dr7 = vmcs_readl(GUEST_DR7);
5585 	if (dr7 & DR7_GD) {
5586 		/*
5587 		 * As the vm-exit takes precedence over the debug trap, we
5588 		 * need to emulate the latter, either for the host or the
5589 		 * guest debugging itself.
5590 		 */
5591 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5592 			vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5593 			vcpu->run->debug.arch.dr7 = dr7;
5594 			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5595 			vcpu->run->debug.arch.exception = DB_VECTOR;
5596 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5597 			return 0;
5598 		} else {
5599 			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
5600 			return 1;
5601 		}
5602 	}
5603 
5604 	if (vcpu->guest_debug == 0) {
5605 		exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5606 
5607 		/*
5608 		 * No more DR vmexits; force a reload of the debug registers
5609 		 * and reenter on this instruction.  The next vmexit will
5610 		 * retrieve the full state of the debug registers.
5611 		 */
5612 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5613 		return 1;
5614 	}
5615 
5616 	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5617 	if (exit_qualification & TYPE_MOV_FROM_DR) {
5618 		kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
5619 		err = 0;
5620 	} else {
5621 		err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
5622 	}
5623 
5624 out:
5625 	return kvm_complete_insn_gp(vcpu, err);
5626 }
5627 
vmx_sync_dirty_debug_regs(struct kvm_vcpu * vcpu)5628 void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5629 {
5630 	get_debugreg(vcpu->arch.db[0], 0);
5631 	get_debugreg(vcpu->arch.db[1], 1);
5632 	get_debugreg(vcpu->arch.db[2], 2);
5633 	get_debugreg(vcpu->arch.db[3], 3);
5634 	get_debugreg(vcpu->arch.dr6, 6);
5635 	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5636 
5637 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5638 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5639 
5640 	/*
5641 	 * exc_debug expects dr6 to be cleared after it runs, avoid that it sees
5642 	 * a stale dr6 from the guest.
5643 	 */
5644 	set_debugreg(DR6_RESERVED, 6);
5645 }
5646 
vmx_set_dr6(struct kvm_vcpu * vcpu,unsigned long val)5647 void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
5648 {
5649 	lockdep_assert_irqs_disabled();
5650 	set_debugreg(vcpu->arch.dr6, 6);
5651 }
5652 
vmx_set_dr7(struct kvm_vcpu * vcpu,unsigned long val)5653 void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5654 {
5655 	vmcs_writel(GUEST_DR7, val);
5656 }
5657 
handle_tpr_below_threshold(struct kvm_vcpu * vcpu)5658 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5659 {
5660 	kvm_apic_update_ppr(vcpu);
5661 	return 1;
5662 }
5663 
handle_interrupt_window(struct kvm_vcpu * vcpu)5664 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5665 {
5666 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5667 
5668 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5669 
5670 	++vcpu->stat.irq_window_exits;
5671 	return 1;
5672 }
5673 
handle_invlpg(struct kvm_vcpu * vcpu)5674 static int handle_invlpg(struct kvm_vcpu *vcpu)
5675 {
5676 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5677 
5678 	kvm_mmu_invlpg(vcpu, exit_qualification);
5679 	return kvm_skip_emulated_instruction(vcpu);
5680 }
5681 
handle_apic_access(struct kvm_vcpu * vcpu)5682 static int handle_apic_access(struct kvm_vcpu *vcpu)
5683 {
5684 	if (likely(fasteoi)) {
5685 		unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5686 		int access_type, offset;
5687 
5688 		access_type = exit_qualification & APIC_ACCESS_TYPE;
5689 		offset = exit_qualification & APIC_ACCESS_OFFSET;
5690 		/*
5691 		 * Sane guest uses MOV to write EOI, with written value
5692 		 * not cared. So make a short-circuit here by avoiding
5693 		 * heavy instruction emulation.
5694 		 */
5695 		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5696 		    (offset == APIC_EOI)) {
5697 			kvm_lapic_set_eoi(vcpu);
5698 			return kvm_skip_emulated_instruction(vcpu);
5699 		}
5700 	}
5701 	return kvm_emulate_instruction(vcpu, 0);
5702 }
5703 
handle_apic_eoi_induced(struct kvm_vcpu * vcpu)5704 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5705 {
5706 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5707 	int vector = exit_qualification & 0xff;
5708 
5709 	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5710 	kvm_apic_set_eoi_accelerated(vcpu, vector);
5711 	return 1;
5712 }
5713 
handle_apic_write(struct kvm_vcpu * vcpu)5714 static int handle_apic_write(struct kvm_vcpu *vcpu)
5715 {
5716 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5717 
5718 	/*
5719 	 * APIC-write VM-Exit is trap-like, KVM doesn't need to advance RIP and
5720 	 * hardware has done any necessary aliasing, offset adjustments, etc...
5721 	 * for the access.  I.e. the correct value has already been  written to
5722 	 * the vAPIC page for the correct 16-byte chunk.  KVM needs only to
5723 	 * retrieve the register value and emulate the access.
5724 	 */
5725 	u32 offset = exit_qualification & 0xff0;
5726 
5727 	kvm_apic_write_nodecode(vcpu, offset);
5728 	return 1;
5729 }
5730 
handle_task_switch(struct kvm_vcpu * vcpu)5731 static int handle_task_switch(struct kvm_vcpu *vcpu)
5732 {
5733 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5734 	unsigned long exit_qualification;
5735 	bool has_error_code = false;
5736 	u32 error_code = 0;
5737 	u16 tss_selector;
5738 	int reason, type, idt_v, idt_index;
5739 
5740 	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5741 	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5742 	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5743 
5744 	exit_qualification = vmx_get_exit_qual(vcpu);
5745 
5746 	reason = (u32)exit_qualification >> 30;
5747 	if (reason == TASK_SWITCH_GATE && idt_v) {
5748 		switch (type) {
5749 		case INTR_TYPE_NMI_INTR:
5750 			vcpu->arch.nmi_injected = false;
5751 			vmx_set_nmi_mask(vcpu, true);
5752 			break;
5753 		case INTR_TYPE_EXT_INTR:
5754 		case INTR_TYPE_SOFT_INTR:
5755 			kvm_clear_interrupt_queue(vcpu);
5756 			break;
5757 		case INTR_TYPE_HARD_EXCEPTION:
5758 			if (vmx->idt_vectoring_info &
5759 			    VECTORING_INFO_DELIVER_CODE_MASK) {
5760 				has_error_code = true;
5761 				error_code =
5762 					vmcs_read32(IDT_VECTORING_ERROR_CODE);
5763 			}
5764 			fallthrough;
5765 		case INTR_TYPE_SOFT_EXCEPTION:
5766 			kvm_clear_exception_queue(vcpu);
5767 			break;
5768 		default:
5769 			break;
5770 		}
5771 	}
5772 	tss_selector = exit_qualification;
5773 
5774 	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5775 		       type != INTR_TYPE_EXT_INTR &&
5776 		       type != INTR_TYPE_NMI_INTR))
5777 		WARN_ON(!skip_emulated_instruction(vcpu));
5778 
5779 	/*
5780 	 * TODO: What about debug traps on tss switch?
5781 	 *       Are we supposed to inject them and update dr6?
5782 	 */
5783 	return kvm_task_switch(vcpu, tss_selector,
5784 			       type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
5785 			       reason, has_error_code, error_code);
5786 }
5787 
handle_ept_violation(struct kvm_vcpu * vcpu)5788 static int handle_ept_violation(struct kvm_vcpu *vcpu)
5789 {
5790 	unsigned long exit_qualification;
5791 	gpa_t gpa;
5792 	u64 error_code;
5793 
5794 	exit_qualification = vmx_get_exit_qual(vcpu);
5795 
5796 	/*
5797 	 * EPT violation happened while executing iret from NMI,
5798 	 * "blocked by NMI" bit has to be set before next VM entry.
5799 	 * There are errata that may cause this bit to not be set:
5800 	 * AAK134, BY25.
5801 	 */
5802 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5803 			enable_vnmi &&
5804 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
5805 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5806 
5807 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5808 	trace_kvm_page_fault(vcpu, gpa, exit_qualification);
5809 
5810 	/* Is it a read fault? */
5811 	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
5812 		     ? PFERR_USER_MASK : 0;
5813 	/* Is it a write fault? */
5814 	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
5815 		      ? PFERR_WRITE_MASK : 0;
5816 	/* Is it a fetch fault? */
5817 	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
5818 		      ? PFERR_FETCH_MASK : 0;
5819 	/* ept page table entry is present? */
5820 	error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
5821 		      ? PFERR_PRESENT_MASK : 0;
5822 
5823 	if (error_code & EPT_VIOLATION_GVA_IS_VALID)
5824 		error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
5825 			      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
5826 
5827 	/*
5828 	 * Check that the GPA doesn't exceed physical memory limits, as that is
5829 	 * a guest page fault.  We have to emulate the instruction here, because
5830 	 * if the illegal address is that of a paging structure, then
5831 	 * EPT_VIOLATION_ACC_WRITE bit is set.  Alternatively, if supported we
5832 	 * would also use advanced VM-exit information for EPT violations to
5833 	 * reconstruct the page fault error code.
5834 	 */
5835 	if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
5836 		return kvm_emulate_instruction(vcpu, 0);
5837 
5838 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5839 }
5840 
handle_ept_misconfig(struct kvm_vcpu * vcpu)5841 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5842 {
5843 	gpa_t gpa;
5844 
5845 	if (vmx_check_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
5846 		return 1;
5847 
5848 	/*
5849 	 * A nested guest cannot optimize MMIO vmexits, because we have an
5850 	 * nGPA here instead of the required GPA.
5851 	 */
5852 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5853 	if (!is_guest_mode(vcpu) &&
5854 	    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5855 		trace_kvm_fast_mmio(gpa);
5856 		return kvm_skip_emulated_instruction(vcpu);
5857 	}
5858 
5859 	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
5860 }
5861 
handle_nmi_window(struct kvm_vcpu * vcpu)5862 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5863 {
5864 	if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
5865 		return -EIO;
5866 
5867 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5868 	++vcpu->stat.nmi_window_exits;
5869 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5870 
5871 	return 1;
5872 }
5873 
vmx_emulation_required_with_pending_exception(struct kvm_vcpu * vcpu)5874 static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
5875 {
5876 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5877 
5878 	return vmx->emulation_required && !vmx->rmode.vm86_active &&
5879 	       (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
5880 }
5881 
handle_invalid_guest_state(struct kvm_vcpu * vcpu)5882 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5883 {
5884 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5885 	bool intr_window_requested;
5886 	unsigned count = 130;
5887 
5888 	intr_window_requested = exec_controls_get(vmx) &
5889 				CPU_BASED_INTR_WINDOW_EXITING;
5890 
5891 	while (vmx->emulation_required && count-- != 0) {
5892 		if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
5893 			return handle_interrupt_window(&vmx->vcpu);
5894 
5895 		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
5896 			return 1;
5897 
5898 		if (!kvm_emulate_instruction(vcpu, 0))
5899 			return 0;
5900 
5901 		if (vmx_emulation_required_with_pending_exception(vcpu)) {
5902 			kvm_prepare_emulation_failure_exit(vcpu);
5903 			return 0;
5904 		}
5905 
5906 		if (vcpu->arch.halt_request) {
5907 			vcpu->arch.halt_request = 0;
5908 			return kvm_emulate_halt_noskip(vcpu);
5909 		}
5910 
5911 		/*
5912 		 * Note, return 1 and not 0, vcpu_run() will invoke
5913 		 * xfer_to_guest_mode() which will create a proper return
5914 		 * code.
5915 		 */
5916 		if (__xfer_to_guest_mode_work_pending())
5917 			return 1;
5918 	}
5919 
5920 	return 1;
5921 }
5922 
vmx_vcpu_pre_run(struct kvm_vcpu * vcpu)5923 int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
5924 {
5925 	if (vmx_emulation_required_with_pending_exception(vcpu)) {
5926 		kvm_prepare_emulation_failure_exit(vcpu);
5927 		return 0;
5928 	}
5929 
5930 	return 1;
5931 }
5932 
5933 /*
5934  * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
5935  * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
5936  */
handle_pause(struct kvm_vcpu * vcpu)5937 static int handle_pause(struct kvm_vcpu *vcpu)
5938 {
5939 	if (!kvm_pause_in_guest(vcpu->kvm))
5940 		grow_ple_window(vcpu);
5941 
5942 	/*
5943 	 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
5944 	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
5945 	 * never set PAUSE_EXITING and just set PLE if supported,
5946 	 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
5947 	 */
5948 	kvm_vcpu_on_spin(vcpu, true);
5949 	return kvm_skip_emulated_instruction(vcpu);
5950 }
5951 
handle_monitor_trap(struct kvm_vcpu * vcpu)5952 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
5953 {
5954 	return 1;
5955 }
5956 
handle_invpcid(struct kvm_vcpu * vcpu)5957 static int handle_invpcid(struct kvm_vcpu *vcpu)
5958 {
5959 	u32 vmx_instruction_info;
5960 	unsigned long type;
5961 	gva_t gva;
5962 	struct {
5963 		u64 pcid;
5964 		u64 gla;
5965 	} operand;
5966 	int gpr_index;
5967 
5968 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) {
5969 		kvm_queue_exception(vcpu, UD_VECTOR);
5970 		return 1;
5971 	}
5972 
5973 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5974 	gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5975 	type = kvm_register_read(vcpu, gpr_index);
5976 
5977 	/* According to the Intel instruction reference, the memory operand
5978 	 * is read even if it isn't needed (e.g., for type==all)
5979 	 */
5980 	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5981 				vmx_instruction_info, false,
5982 				sizeof(operand), &gva))
5983 		return 1;
5984 
5985 	return kvm_handle_invpcid(vcpu, type, gva);
5986 }
5987 
handle_pml_full(struct kvm_vcpu * vcpu)5988 static int handle_pml_full(struct kvm_vcpu *vcpu)
5989 {
5990 	unsigned long exit_qualification;
5991 
5992 	trace_kvm_pml_full(vcpu->vcpu_id);
5993 
5994 	exit_qualification = vmx_get_exit_qual(vcpu);
5995 
5996 	/*
5997 	 * PML buffer FULL happened while executing iret from NMI,
5998 	 * "blocked by NMI" bit has to be set before next VM entry.
5999 	 */
6000 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
6001 			enable_vnmi &&
6002 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
6003 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6004 				GUEST_INTR_STATE_NMI);
6005 
6006 	/*
6007 	 * PML buffer already flushed at beginning of VMEXIT. Nothing to do
6008 	 * here.., and there's no userspace involvement needed for PML.
6009 	 */
6010 	return 1;
6011 }
6012 
handle_fastpath_preemption_timer(struct kvm_vcpu * vcpu,bool force_immediate_exit)6013 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu,
6014 						   bool force_immediate_exit)
6015 {
6016 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6017 
6018 	/*
6019 	 * In the *extremely* unlikely scenario that this is a spurious VM-Exit
6020 	 * due to the timer expiring while it was "soft" disabled, just eat the
6021 	 * exit and re-enter the guest.
6022 	 */
6023 	if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled))
6024 		return EXIT_FASTPATH_REENTER_GUEST;
6025 
6026 	/*
6027 	 * If the timer expired because KVM used it to force an immediate exit,
6028 	 * then mission accomplished.
6029 	 */
6030 	if (force_immediate_exit)
6031 		return EXIT_FASTPATH_EXIT_HANDLED;
6032 
6033 	/*
6034 	 * If L2 is active, go down the slow path as emulating the guest timer
6035 	 * expiration likely requires synthesizing a nested VM-Exit.
6036 	 */
6037 	if (is_guest_mode(vcpu))
6038 		return EXIT_FASTPATH_NONE;
6039 
6040 	kvm_lapic_expired_hv_timer(vcpu);
6041 	return EXIT_FASTPATH_REENTER_GUEST;
6042 }
6043 
handle_preemption_timer(struct kvm_vcpu * vcpu)6044 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
6045 {
6046 	/*
6047 	 * This non-fastpath handler is reached if and only if the preemption
6048 	 * timer was being used to emulate a guest timer while L2 is active.
6049 	 * All other scenarios are supposed to be handled in the fastpath.
6050 	 */
6051 	WARN_ON_ONCE(!is_guest_mode(vcpu));
6052 	kvm_lapic_expired_hv_timer(vcpu);
6053 	return 1;
6054 }
6055 
6056 /*
6057  * When nested=0, all VMX instruction VM Exits filter here.  The handlers
6058  * are overwritten by nested_vmx_hardware_setup() when nested=1.
6059  */
handle_vmx_instruction(struct kvm_vcpu * vcpu)6060 static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
6061 {
6062 	kvm_queue_exception(vcpu, UD_VECTOR);
6063 	return 1;
6064 }
6065 
6066 #ifndef CONFIG_X86_SGX_KVM
handle_encls(struct kvm_vcpu * vcpu)6067 static int handle_encls(struct kvm_vcpu *vcpu)
6068 {
6069 	/*
6070 	 * SGX virtualization is disabled.  There is no software enable bit for
6071 	 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
6072 	 * the guest from executing ENCLS (when SGX is supported by hardware).
6073 	 */
6074 	kvm_queue_exception(vcpu, UD_VECTOR);
6075 	return 1;
6076 }
6077 #endif /* CONFIG_X86_SGX_KVM */
6078 
handle_bus_lock_vmexit(struct kvm_vcpu * vcpu)6079 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
6080 {
6081 	/*
6082 	 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
6083 	 * VM-Exits. Unconditionally set the flag here and leave the handling to
6084 	 * vmx_handle_exit().
6085 	 */
6086 	to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
6087 	return 1;
6088 }
6089 
handle_notify(struct kvm_vcpu * vcpu)6090 static int handle_notify(struct kvm_vcpu *vcpu)
6091 {
6092 	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
6093 	bool context_invalid = exit_qual & NOTIFY_VM_CONTEXT_INVALID;
6094 
6095 	++vcpu->stat.notify_window_exits;
6096 
6097 	/*
6098 	 * Notify VM exit happened while executing iret from NMI,
6099 	 * "blocked by NMI" bit has to be set before next VM entry.
6100 	 */
6101 	if (enable_vnmi && (exit_qual & INTR_INFO_UNBLOCK_NMI))
6102 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6103 			      GUEST_INTR_STATE_NMI);
6104 
6105 	if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6106 	    context_invalid) {
6107 		vcpu->run->exit_reason = KVM_EXIT_NOTIFY;
6108 		vcpu->run->notify.flags = context_invalid ?
6109 					  KVM_NOTIFY_CONTEXT_INVALID : 0;
6110 		return 0;
6111 	}
6112 
6113 	return 1;
6114 }
6115 
6116 /*
6117  * The exit handlers return 1 if the exit was handled fully and guest execution
6118  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
6119  * to be done to userspace and return 0.
6120  */
6121 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6122 	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception_nmi,
6123 	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
6124 	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
6125 	[EXIT_REASON_NMI_WINDOW]	      = handle_nmi_window,
6126 	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
6127 	[EXIT_REASON_CR_ACCESS]               = handle_cr,
6128 	[EXIT_REASON_DR_ACCESS]               = handle_dr,
6129 	[EXIT_REASON_CPUID]                   = kvm_emulate_cpuid,
6130 	[EXIT_REASON_MSR_READ]                = kvm_emulate_rdmsr,
6131 	[EXIT_REASON_MSR_WRITE]               = kvm_emulate_wrmsr,
6132 	[EXIT_REASON_INTERRUPT_WINDOW]        = handle_interrupt_window,
6133 	[EXIT_REASON_HLT]                     = kvm_emulate_halt,
6134 	[EXIT_REASON_INVD]		      = kvm_emulate_invd,
6135 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
6136 	[EXIT_REASON_RDPMC]                   = kvm_emulate_rdpmc,
6137 	[EXIT_REASON_VMCALL]                  = kvm_emulate_hypercall,
6138 	[EXIT_REASON_VMCLEAR]		      = handle_vmx_instruction,
6139 	[EXIT_REASON_VMLAUNCH]		      = handle_vmx_instruction,
6140 	[EXIT_REASON_VMPTRLD]		      = handle_vmx_instruction,
6141 	[EXIT_REASON_VMPTRST]		      = handle_vmx_instruction,
6142 	[EXIT_REASON_VMREAD]		      = handle_vmx_instruction,
6143 	[EXIT_REASON_VMRESUME]		      = handle_vmx_instruction,
6144 	[EXIT_REASON_VMWRITE]		      = handle_vmx_instruction,
6145 	[EXIT_REASON_VMOFF]		      = handle_vmx_instruction,
6146 	[EXIT_REASON_VMON]		      = handle_vmx_instruction,
6147 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
6148 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
6149 	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
6150 	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
6151 	[EXIT_REASON_WBINVD]                  = kvm_emulate_wbinvd,
6152 	[EXIT_REASON_XSETBV]                  = kvm_emulate_xsetbv,
6153 	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
6154 	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
6155 	[EXIT_REASON_GDTR_IDTR]		      = handle_desc,
6156 	[EXIT_REASON_LDTR_TR]		      = handle_desc,
6157 	[EXIT_REASON_EPT_VIOLATION]	      = handle_ept_violation,
6158 	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
6159 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
6160 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = kvm_emulate_mwait,
6161 	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
6162 	[EXIT_REASON_MONITOR_INSTRUCTION]     = kvm_emulate_monitor,
6163 	[EXIT_REASON_INVEPT]                  = handle_vmx_instruction,
6164 	[EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
6165 	[EXIT_REASON_RDRAND]                  = kvm_handle_invalid_op,
6166 	[EXIT_REASON_RDSEED]                  = kvm_handle_invalid_op,
6167 	[EXIT_REASON_PML_FULL]		      = handle_pml_full,
6168 	[EXIT_REASON_INVPCID]                 = handle_invpcid,
6169 	[EXIT_REASON_VMFUNC]		      = handle_vmx_instruction,
6170 	[EXIT_REASON_PREEMPTION_TIMER]	      = handle_preemption_timer,
6171 	[EXIT_REASON_ENCLS]		      = handle_encls,
6172 	[EXIT_REASON_BUS_LOCK]                = handle_bus_lock_vmexit,
6173 	[EXIT_REASON_NOTIFY]		      = handle_notify,
6174 };
6175 
6176 static const int kvm_vmx_max_exit_handlers =
6177 	ARRAY_SIZE(kvm_vmx_exit_handlers);
6178 
vmx_get_exit_info(struct kvm_vcpu * vcpu,u32 * reason,u64 * info1,u64 * info2,u32 * intr_info,u32 * error_code)6179 void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
6180 		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
6181 {
6182 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6183 
6184 	*reason = vmx->exit_reason.full;
6185 	*info1 = vmx_get_exit_qual(vcpu);
6186 	if (!(vmx->exit_reason.failed_vmentry)) {
6187 		*info2 = vmx->idt_vectoring_info;
6188 		*intr_info = vmx_get_intr_info(vcpu);
6189 		if (is_exception_with_error_code(*intr_info))
6190 			*error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6191 		else
6192 			*error_code = 0;
6193 	} else {
6194 		*info2 = 0;
6195 		*intr_info = 0;
6196 		*error_code = 0;
6197 	}
6198 }
6199 
vmx_get_entry_info(struct kvm_vcpu * vcpu,u32 * intr_info,u32 * error_code)6200 void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
6201 {
6202 	*intr_info = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
6203 	if (is_exception_with_error_code(*intr_info))
6204 		*error_code = vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE);
6205 	else
6206 		*error_code = 0;
6207 }
6208 
vmx_destroy_pml_buffer(struct vcpu_vmx * vmx)6209 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
6210 {
6211 	if (vmx->pml_pg) {
6212 		__free_page(vmx->pml_pg);
6213 		vmx->pml_pg = NULL;
6214 	}
6215 }
6216 
vmx_flush_pml_buffer(struct kvm_vcpu * vcpu)6217 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
6218 {
6219 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6220 	u16 pml_idx, pml_tail_index;
6221 	u64 *pml_buf;
6222 	int i;
6223 
6224 	pml_idx = vmcs_read16(GUEST_PML_INDEX);
6225 
6226 	/* Do nothing if PML buffer is empty */
6227 	if (pml_idx == PML_HEAD_INDEX)
6228 		return;
6229 	/*
6230 	 * PML index always points to the next available PML buffer entity
6231 	 * unless PML log has just overflowed.
6232 	 */
6233 	pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;
6234 
6235 	/*
6236 	 * PML log is written backwards: the CPU first writes the entry 511
6237 	 * then the entry 510, and so on.
6238 	 *
6239 	 * Read the entries in the same order they were written, to ensure that
6240 	 * the dirty ring is filled in the same order the CPU wrote them.
6241 	 */
6242 	pml_buf = page_address(vmx->pml_pg);
6243 
6244 	for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
6245 		u64 gpa;
6246 
6247 		gpa = pml_buf[i];
6248 		WARN_ON(gpa & (PAGE_SIZE - 1));
6249 		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
6250 	}
6251 
6252 	/* reset PML index */
6253 	vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
6254 }
6255 
vmx_dump_sel(char * name,uint32_t sel)6256 static void vmx_dump_sel(char *name, uint32_t sel)
6257 {
6258 	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
6259 	       name, vmcs_read16(sel),
6260 	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
6261 	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
6262 	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
6263 }
6264 
vmx_dump_dtsel(char * name,uint32_t limit)6265 static void vmx_dump_dtsel(char *name, uint32_t limit)
6266 {
6267 	pr_err("%s                           limit=0x%08x, base=0x%016lx\n",
6268 	       name, vmcs_read32(limit),
6269 	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
6270 }
6271 
vmx_dump_msrs(char * name,struct vmx_msrs * m)6272 static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
6273 {
6274 	unsigned int i;
6275 	struct vmx_msr_entry *e;
6276 
6277 	pr_err("MSR %s:\n", name);
6278 	for (i = 0, e = m->val; i < m->nr; ++i, ++e)
6279 		pr_err("  %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value);
6280 }
6281 
dump_vmcs(struct kvm_vcpu * vcpu)6282 void dump_vmcs(struct kvm_vcpu *vcpu)
6283 {
6284 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6285 	u32 vmentry_ctl, vmexit_ctl;
6286 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
6287 	u64 tertiary_exec_control;
6288 	unsigned long cr4;
6289 	int efer_slot;
6290 
6291 	if (!dump_invalid_vmcs) {
6292 		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6293 		return;
6294 	}
6295 
6296 	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
6297 	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
6298 	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6299 	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
6300 	cr4 = vmcs_readl(GUEST_CR4);
6301 
6302 	if (cpu_has_secondary_exec_ctrls())
6303 		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6304 	else
6305 		secondary_exec_control = 0;
6306 
6307 	if (cpu_has_tertiary_exec_ctrls())
6308 		tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
6309 	else
6310 		tertiary_exec_control = 0;
6311 
6312 	pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6313 	       vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6314 	pr_err("*** Guest State ***\n");
6315 	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6316 	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
6317 	       vmcs_readl(CR0_GUEST_HOST_MASK));
6318 	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6319 	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
6320 	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
6321 	if (cpu_has_vmx_ept()) {
6322 		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
6323 		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
6324 		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
6325 		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
6326 	}
6327 	pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
6328 	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
6329 	pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
6330 	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
6331 	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6332 	       vmcs_readl(GUEST_SYSENTER_ESP),
6333 	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
6334 	vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
6335 	vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
6336 	vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
6337 	vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
6338 	vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
6339 	vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
6340 	vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
6341 	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
6342 	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
6343 	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
6344 	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6345 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
6346 		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
6347 	else if (efer_slot >= 0)
6348 		pr_err("EFER= 0x%016llx (autoload)\n",
6349 		       vmx->msr_autoload.guest.val[efer_slot].value);
6350 	else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
6351 		pr_err("EFER= 0x%016llx (effective)\n",
6352 		       vcpu->arch.efer | (EFER_LMA | EFER_LME));
6353 	else
6354 		pr_err("EFER= 0x%016llx (effective)\n",
6355 		       vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6356 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
6357 		pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
6358 	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
6359 	       vmcs_read64(GUEST_IA32_DEBUGCTL),
6360 	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
6361 	if (cpu_has_load_perf_global_ctrl() &&
6362 	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
6363 		pr_err("PerfGlobCtl = 0x%016llx\n",
6364 		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
6365 	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
6366 		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
6367 	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
6368 	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
6369 	       vmcs_read32(GUEST_ACTIVITY_STATE));
6370 	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
6371 		pr_err("InterruptStatus = %04x\n",
6372 		       vmcs_read16(GUEST_INTR_STATUS));
6373 	if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
6374 		vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6375 	if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
6376 		vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
6377 
6378 	pr_err("*** Host State ***\n");
6379 	pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
6380 	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
6381 	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6382 	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
6383 	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
6384 	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
6385 	       vmcs_read16(HOST_TR_SELECTOR));
6386 	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6387 	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
6388 	       vmcs_readl(HOST_TR_BASE));
6389 	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6390 	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
6391 	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6392 	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
6393 	       vmcs_readl(HOST_CR4));
6394 	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6395 	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
6396 	       vmcs_read32(HOST_IA32_SYSENTER_CS),
6397 	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
6398 	if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
6399 		pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
6400 	if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
6401 		pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
6402 	if (cpu_has_load_perf_global_ctrl() &&
6403 	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
6404 		pr_err("PerfGlobCtl = 0x%016llx\n",
6405 		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
6406 	if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
6407 		vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6408 
6409 	pr_err("*** Control State ***\n");
6410 	pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6411 	       cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
6412 	pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6413 	       pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
6414 	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6415 	       vmcs_read32(EXCEPTION_BITMAP),
6416 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
6417 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
6418 	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6419 	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6420 	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
6421 	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
6422 	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6423 	       vmcs_read32(VM_EXIT_INTR_INFO),
6424 	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
6425 	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
6426 	pr_err("        reason=%08x qualification=%016lx\n",
6427 	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
6428 	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6429 	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
6430 	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
6431 	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
6432 	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
6433 		pr_err("TSC Multiplier = 0x%016llx\n",
6434 		       vmcs_read64(TSC_MULTIPLIER));
6435 	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
6436 		if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
6437 			u16 status = vmcs_read16(GUEST_INTR_STATUS);
6438 			pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
6439 		}
6440 		pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
6441 		if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
6442 			pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
6443 		pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
6444 	}
6445 	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6446 		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6447 	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6448 		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6449 	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6450 		pr_err("PLE Gap=%08x Window=%08x\n",
6451 		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
6452 	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6453 		pr_err("Virtual processor ID = 0x%04x\n",
6454 		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
6455 	if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
6456 		struct vmx_ve_information *ve_info = vmx->ve_info;
6457 		u64 ve_info_pa = vmcs_read64(VE_INFORMATION_ADDRESS);
6458 
6459 		/*
6460 		 * If KVM is dumping the VMCS, then something has gone wrong
6461 		 * already.  Derefencing an address from the VMCS, which could
6462 		 * very well be corrupted, is a terrible idea.  The virtual
6463 		 * address is known so use it.
6464 		 */
6465 		pr_err("VE info address = 0x%016llx%s\n", ve_info_pa,
6466 		       ve_info_pa == __pa(ve_info) ? "" : "(corrupted!)");
6467 		pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
6468 		       ve_info->exit_reason, ve_info->delivery,
6469 		       ve_info->exit_qualification,
6470 		       ve_info->guest_linear_address,
6471 		       ve_info->guest_physical_address, ve_info->eptp_index);
6472 	}
6473 }
6474 
6475 /*
6476  * The guest has exited.  See if we can fix it or if we need userspace
6477  * assistance.
6478  */
__vmx_handle_exit(struct kvm_vcpu * vcpu,fastpath_t exit_fastpath)6479 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6480 {
6481 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6482 	union vmx_exit_reason exit_reason = vmx->exit_reason;
6483 	u32 vectoring_info = vmx->idt_vectoring_info;
6484 	u16 exit_handler_index;
6485 
6486 	/*
6487 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
6488 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
6489 	 * querying dirty_bitmap, we only need to kick all vcpus out of guest
6490 	 * mode as if vcpus is in root mode, the PML buffer must has been
6491 	 * flushed already.  Note, PML is never enabled in hardware while
6492 	 * running L2.
6493 	 */
6494 	if (enable_pml && !is_guest_mode(vcpu))
6495 		vmx_flush_pml_buffer(vcpu);
6496 
6497 	/*
6498 	 * KVM should never reach this point with a pending nested VM-Enter.
6499 	 * More specifically, short-circuiting VM-Entry to emulate L2 due to
6500 	 * invalid guest state should never happen as that means KVM knowingly
6501 	 * allowed a nested VM-Enter with an invalid vmcs12.  More below.
6502 	 */
6503 	if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
6504 		return -EIO;
6505 
6506 	if (is_guest_mode(vcpu)) {
6507 		/*
6508 		 * PML is never enabled when running L2, bail immediately if a
6509 		 * PML full exit occurs as something is horribly wrong.
6510 		 */
6511 		if (exit_reason.basic == EXIT_REASON_PML_FULL)
6512 			goto unexpected_vmexit;
6513 
6514 		/*
6515 		 * The host physical addresses of some pages of guest memory
6516 		 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
6517 		 * Page). The CPU may write to these pages via their host
6518 		 * physical address while L2 is running, bypassing any
6519 		 * address-translation-based dirty tracking (e.g. EPT write
6520 		 * protection).
6521 		 *
6522 		 * Mark them dirty on every exit from L2 to prevent them from
6523 		 * getting out of sync with dirty tracking.
6524 		 */
6525 		nested_mark_vmcs12_pages_dirty(vcpu);
6526 
6527 		/*
6528 		 * Synthesize a triple fault if L2 state is invalid.  In normal
6529 		 * operation, nested VM-Enter rejects any attempt to enter L2
6530 		 * with invalid state.  However, those checks are skipped if
6531 		 * state is being stuffed via RSM or KVM_SET_NESTED_STATE.  If
6532 		 * L2 state is invalid, it means either L1 modified SMRAM state
6533 		 * or userspace provided bad state.  Synthesize TRIPLE_FAULT as
6534 		 * doing so is architecturally allowed in the RSM case, and is
6535 		 * the least awful solution for the userspace case without
6536 		 * risking false positives.
6537 		 */
6538 		if (vmx->emulation_required) {
6539 			nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
6540 			return 1;
6541 		}
6542 
6543 		if (nested_vmx_reflect_vmexit(vcpu))
6544 			return 1;
6545 	}
6546 
6547 	/* If guest state is invalid, start emulating.  L2 is handled above. */
6548 	if (vmx->emulation_required)
6549 		return handle_invalid_guest_state(vcpu);
6550 
6551 	if (exit_reason.failed_vmentry) {
6552 		dump_vmcs(vcpu);
6553 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6554 		vcpu->run->fail_entry.hardware_entry_failure_reason
6555 			= exit_reason.full;
6556 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6557 		return 0;
6558 	}
6559 
6560 	if (unlikely(vmx->fail)) {
6561 		dump_vmcs(vcpu);
6562 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6563 		vcpu->run->fail_entry.hardware_entry_failure_reason
6564 			= vmcs_read32(VM_INSTRUCTION_ERROR);
6565 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6566 		return 0;
6567 	}
6568 
6569 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6570 	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
6571 	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
6572 	     exit_reason.basic != EXIT_REASON_PML_FULL &&
6573 	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
6574 	     exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
6575 	     exit_reason.basic != EXIT_REASON_NOTIFY &&
6576 	     exit_reason.basic != EXIT_REASON_EPT_MISCONFIG)) {
6577 		kvm_prepare_event_vectoring_exit(vcpu, INVALID_GPA);
6578 		return 0;
6579 	}
6580 
6581 	if (unlikely(!enable_vnmi &&
6582 		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
6583 		if (!vmx_interrupt_blocked(vcpu)) {
6584 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6585 		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6586 			   vcpu->arch.nmi_pending) {
6587 			/*
6588 			 * This CPU don't support us in finding the end of an
6589 			 * NMI-blocked window if the guest runs with IRQs
6590 			 * disabled. So we pull the trigger after 1 s of
6591 			 * futile waiting, but inform the user about this.
6592 			 */
6593 			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6594 			       "state on VCPU %d after 1 s timeout\n",
6595 			       __func__, vcpu->vcpu_id);
6596 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6597 		}
6598 	}
6599 
6600 	if (exit_fastpath != EXIT_FASTPATH_NONE)
6601 		return 1;
6602 
6603 	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
6604 		goto unexpected_vmexit;
6605 #ifdef CONFIG_MITIGATION_RETPOLINE
6606 	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6607 		return kvm_emulate_wrmsr(vcpu);
6608 	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
6609 		return handle_preemption_timer(vcpu);
6610 	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
6611 		return handle_interrupt_window(vcpu);
6612 	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6613 		return handle_external_interrupt(vcpu);
6614 	else if (exit_reason.basic == EXIT_REASON_HLT)
6615 		return kvm_emulate_halt(vcpu);
6616 	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
6617 		return handle_ept_misconfig(vcpu);
6618 #endif
6619 
6620 	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
6621 						kvm_vmx_max_exit_handlers);
6622 	if (!kvm_vmx_exit_handlers[exit_handler_index])
6623 		goto unexpected_vmexit;
6624 
6625 	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
6626 
6627 unexpected_vmexit:
6628 	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6629 		    exit_reason.full);
6630 	dump_vmcs(vcpu);
6631 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6632 	vcpu->run->internal.suberror =
6633 			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
6634 	vcpu->run->internal.ndata = 2;
6635 	vcpu->run->internal.data[0] = exit_reason.full;
6636 	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6637 	return 0;
6638 }
6639 
vmx_handle_exit(struct kvm_vcpu * vcpu,fastpath_t exit_fastpath)6640 int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6641 {
6642 	int ret = __vmx_handle_exit(vcpu, exit_fastpath);
6643 
6644 	/*
6645 	 * Exit to user space when bus lock detected to inform that there is
6646 	 * a bus lock in guest.
6647 	 */
6648 	if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
6649 		if (ret > 0)
6650 			vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
6651 
6652 		vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
6653 		return 0;
6654 	}
6655 	return ret;
6656 }
6657 
6658 /*
6659  * Software based L1D cache flush which is used when microcode providing
6660  * the cache control MSR is not loaded.
6661  *
6662  * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
6663  * flush it is required to read in 64 KiB because the replacement algorithm
6664  * is not exactly LRU. This could be sized at runtime via topology
6665  * information but as all relevant affected CPUs have 32KiB L1D cache size
6666  * there is no point in doing so.
6667  */
vmx_l1d_flush(struct kvm_vcpu * vcpu)6668 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
6669 {
6670 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
6671 
6672 	/*
6673 	 * This code is only executed when the flush mode is 'cond' or
6674 	 * 'always'
6675 	 */
6676 	if (static_branch_likely(&vmx_l1d_flush_cond)) {
6677 		bool flush_l1d;
6678 
6679 		/*
6680 		 * Clear the per-vcpu flush bit, it gets set again if the vCPU
6681 		 * is reloaded, i.e. if the vCPU is scheduled out or if KVM
6682 		 * exits to userspace, or if KVM reaches one of the unsafe
6683 		 * VMEXIT handlers, e.g. if KVM calls into the emulator.
6684 		 */
6685 		flush_l1d = vcpu->arch.l1tf_flush_l1d;
6686 		vcpu->arch.l1tf_flush_l1d = false;
6687 
6688 		/*
6689 		 * Clear the per-cpu flush bit, it gets set again from
6690 		 * the interrupt handlers.
6691 		 */
6692 		flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
6693 		kvm_clear_cpu_l1tf_flush_l1d();
6694 
6695 		if (!flush_l1d)
6696 			return;
6697 	}
6698 
6699 	vcpu->stat.l1d_flush++;
6700 
6701 	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
6702 		native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
6703 		return;
6704 	}
6705 
6706 	asm volatile(
6707 		/* First ensure the pages are in the TLB */
6708 		"xorl	%%eax, %%eax\n"
6709 		".Lpopulate_tlb:\n\t"
6710 		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6711 		"addl	$4096, %%eax\n\t"
6712 		"cmpl	%%eax, %[size]\n\t"
6713 		"jne	.Lpopulate_tlb\n\t"
6714 		"xorl	%%eax, %%eax\n\t"
6715 		"cpuid\n\t"
6716 		/* Now fill the cache */
6717 		"xorl	%%eax, %%eax\n"
6718 		".Lfill_cache:\n"
6719 		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6720 		"addl	$64, %%eax\n\t"
6721 		"cmpl	%%eax, %[size]\n\t"
6722 		"jne	.Lfill_cache\n\t"
6723 		"lfence\n"
6724 		:: [flush_pages] "r" (vmx_l1d_flush_pages),
6725 		    [size] "r" (size)
6726 		: "eax", "ebx", "ecx", "edx");
6727 }
6728 
vmx_update_cr8_intercept(struct kvm_vcpu * vcpu,int tpr,int irr)6729 void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6730 {
6731 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6732 	int tpr_threshold;
6733 
6734 	if (is_guest_mode(vcpu) &&
6735 		nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
6736 		return;
6737 
6738 	tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
6739 	if (is_guest_mode(vcpu))
6740 		to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
6741 	else
6742 		vmcs_write32(TPR_THRESHOLD, tpr_threshold);
6743 }
6744 
vmx_set_virtual_apic_mode(struct kvm_vcpu * vcpu)6745 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
6746 {
6747 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6748 	u32 sec_exec_control;
6749 
6750 	if (!lapic_in_kernel(vcpu))
6751 		return;
6752 
6753 	if (!flexpriority_enabled &&
6754 	    !cpu_has_vmx_virtualize_x2apic_mode())
6755 		return;
6756 
6757 	/* Postpone execution until vmcs01 is the current VMCS. */
6758 	if (is_guest_mode(vcpu)) {
6759 		vmx->nested.change_vmcs01_virtual_apic_mode = true;
6760 		return;
6761 	}
6762 
6763 	sec_exec_control = secondary_exec_controls_get(vmx);
6764 	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6765 			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6766 
6767 	switch (kvm_get_apic_mode(vcpu)) {
6768 	case LAPIC_MODE_INVALID:
6769 		WARN_ONCE(true, "Invalid local APIC state");
6770 		break;
6771 	case LAPIC_MODE_DISABLED:
6772 		break;
6773 	case LAPIC_MODE_XAPIC:
6774 		if (flexpriority_enabled) {
6775 			sec_exec_control |=
6776 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6777 			kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6778 
6779 			/*
6780 			 * Flush the TLB, reloading the APIC access page will
6781 			 * only do so if its physical address has changed, but
6782 			 * the guest may have inserted a non-APIC mapping into
6783 			 * the TLB while the APIC access page was disabled.
6784 			 */
6785 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6786 		}
6787 		break;
6788 	case LAPIC_MODE_X2APIC:
6789 		if (cpu_has_vmx_virtualize_x2apic_mode())
6790 			sec_exec_control |=
6791 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6792 		break;
6793 	}
6794 	secondary_exec_controls_set(vmx, sec_exec_control);
6795 
6796 	vmx_update_msr_bitmap_x2apic(vcpu);
6797 }
6798 
vmx_set_apic_access_page_addr(struct kvm_vcpu * vcpu)6799 void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
6800 {
6801 	const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
6802 	struct kvm *kvm = vcpu->kvm;
6803 	struct kvm_memslots *slots = kvm_memslots(kvm);
6804 	struct kvm_memory_slot *slot;
6805 	struct page *refcounted_page;
6806 	unsigned long mmu_seq;
6807 	kvm_pfn_t pfn;
6808 	bool writable;
6809 
6810 	/* Defer reload until vmcs01 is the current VMCS. */
6811 	if (is_guest_mode(vcpu)) {
6812 		to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
6813 		return;
6814 	}
6815 
6816 	if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6817 	    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
6818 		return;
6819 
6820 	/*
6821 	 * Explicitly grab the memslot using KVM's internal slot ID to ensure
6822 	 * KVM doesn't unintentionally grab a userspace memslot.  It _should_
6823 	 * be impossible for userspace to create a memslot for the APIC when
6824 	 * APICv is enabled, but paranoia won't hurt in this case.
6825 	 */
6826 	slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
6827 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
6828 		return;
6829 
6830 	/*
6831 	 * Ensure that the mmu_notifier sequence count is read before KVM
6832 	 * retrieves the pfn from the primary MMU.  Note, the memslot is
6833 	 * protected by SRCU, not the mmu_notifier.  Pairs with the smp_wmb()
6834 	 * in kvm_mmu_invalidate_end().
6835 	 */
6836 	mmu_seq = kvm->mmu_invalidate_seq;
6837 	smp_rmb();
6838 
6839 	/*
6840 	 * No need to retry if the memslot does not exist or is invalid.  KVM
6841 	 * controls the APIC-access page memslot, and only deletes the memslot
6842 	 * if APICv is permanently inhibited, i.e. the memslot won't reappear.
6843 	 */
6844 	pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &refcounted_page);
6845 	if (is_error_noslot_pfn(pfn))
6846 		return;
6847 
6848 	read_lock(&vcpu->kvm->mmu_lock);
6849 	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
6850 		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6851 	else
6852 		vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
6853 
6854 	/*
6855 	 * Do not pin the APIC access page in memory so that it can be freely
6856 	 * migrated, the MMU notifier will call us again if it is migrated or
6857 	 * swapped out.  KVM backs the memslot with anonymous memory, the pfn
6858 	 * should always point at a refcounted page (if the pfn is valid).
6859 	 */
6860 	if (!WARN_ON_ONCE(!refcounted_page))
6861 		kvm_release_page_clean(refcounted_page);
6862 
6863 	/*
6864 	 * No need for a manual TLB flush at this point, KVM has already done a
6865 	 * flush if there were SPTEs pointing at the previous page.
6866 	 */
6867 	read_unlock(&vcpu->kvm->mmu_lock);
6868 }
6869 
vmx_hwapic_isr_update(struct kvm_vcpu * vcpu,int max_isr)6870 void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
6871 {
6872 	u16 status;
6873 	u8 old;
6874 
6875 	/*
6876 	 * If L2 is active, defer the SVI update until vmcs01 is loaded, as SVI
6877 	 * is only relevant for if and only if Virtual Interrupt Delivery is
6878 	 * enabled in vmcs12, and if VID is enabled then L2 EOIs affect L2's
6879 	 * vAPIC, not L1's vAPIC.  KVM must update vmcs01 on the next nested
6880 	 * VM-Exit, otherwise L1 with run with a stale SVI.
6881 	 */
6882 	if (is_guest_mode(vcpu)) {
6883 		/*
6884 		 * KVM is supposed to forward intercepted L2 EOIs to L1 if VID
6885 		 * is enabled in vmcs12; as above, the EOIs affect L2's vAPIC.
6886 		 * Note, userspace can stuff state while L2 is active; assert
6887 		 * that VID is disabled if and only if the vCPU is in KVM_RUN
6888 		 * to avoid false positives if userspace is setting APIC state.
6889 		 */
6890 		WARN_ON_ONCE(vcpu->wants_to_run &&
6891 			     nested_cpu_has_vid(get_vmcs12(vcpu)));
6892 		to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true;
6893 		return;
6894 	}
6895 
6896 	if (max_isr == -1)
6897 		max_isr = 0;
6898 
6899 	status = vmcs_read16(GUEST_INTR_STATUS);
6900 	old = status >> 8;
6901 	if (max_isr != old) {
6902 		status &= 0xff;
6903 		status |= max_isr << 8;
6904 		vmcs_write16(GUEST_INTR_STATUS, status);
6905 	}
6906 }
6907 
vmx_set_rvi(int vector)6908 static void vmx_set_rvi(int vector)
6909 {
6910 	u16 status;
6911 	u8 old;
6912 
6913 	if (vector == -1)
6914 		vector = 0;
6915 
6916 	status = vmcs_read16(GUEST_INTR_STATUS);
6917 	old = (u8)status & 0xff;
6918 	if ((u8)vector != old) {
6919 		status &= ~0xff;
6920 		status |= (u8)vector;
6921 		vmcs_write16(GUEST_INTR_STATUS, status);
6922 	}
6923 }
6924 
vmx_sync_pir_to_irr(struct kvm_vcpu * vcpu)6925 int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
6926 {
6927 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6928 	int max_irr;
6929 	bool got_posted_interrupt;
6930 
6931 	if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
6932 		return -EIO;
6933 
6934 	if (pi_test_on(&vmx->pi_desc)) {
6935 		pi_clear_on(&vmx->pi_desc);
6936 		/*
6937 		 * IOMMU can write to PID.ON, so the barrier matters even on UP.
6938 		 * But on x86 this is just a compiler barrier anyway.
6939 		 */
6940 		smp_mb__after_atomic();
6941 		got_posted_interrupt =
6942 			kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6943 	} else {
6944 		max_irr = kvm_lapic_find_highest_irr(vcpu);
6945 		got_posted_interrupt = false;
6946 	}
6947 
6948 	/*
6949 	 * Newly recognized interrupts are injected via either virtual interrupt
6950 	 * delivery (RVI) or KVM_REQ_EVENT.  Virtual interrupt delivery is
6951 	 * disabled in two cases:
6952 	 *
6953 	 * 1) If L2 is running and the vCPU has a new pending interrupt.  If L1
6954 	 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
6955 	 * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
6956 	 * into L2, but KVM doesn't use virtual interrupt delivery to inject
6957 	 * interrupts into L2, and so KVM_REQ_EVENT is again needed.
6958 	 *
6959 	 * 2) If APICv is disabled for this vCPU, assigned devices may still
6960 	 * attempt to post interrupts.  The posted interrupt vector will cause
6961 	 * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
6962 	 */
6963 	if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
6964 		vmx_set_rvi(max_irr);
6965 	else if (got_posted_interrupt)
6966 		kvm_make_request(KVM_REQ_EVENT, vcpu);
6967 
6968 	return max_irr;
6969 }
6970 
vmx_load_eoi_exitmap(struct kvm_vcpu * vcpu,u64 * eoi_exit_bitmap)6971 void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6972 {
6973 	if (!kvm_vcpu_apicv_active(vcpu))
6974 		return;
6975 
6976 	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6977 	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6978 	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
6979 	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
6980 }
6981 
vmx_apicv_pre_state_restore(struct kvm_vcpu * vcpu)6982 void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
6983 {
6984 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6985 
6986 	pi_clear_on(&vmx->pi_desc);
6987 	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
6988 }
6989 
6990 void vmx_do_interrupt_irqoff(unsigned long entry);
6991 void vmx_do_nmi_irqoff(void);
6992 
handle_nm_fault_irqoff(struct kvm_vcpu * vcpu)6993 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
6994 {
6995 	/*
6996 	 * Save xfd_err to guest_fpu before interrupt is enabled, so the
6997 	 * MSR value is not clobbered by the host activity before the guest
6998 	 * has chance to consume it.
6999 	 *
7000 	 * Do not blindly read xfd_err here, since this exception might
7001 	 * be caused by L1 interception on a platform which doesn't
7002 	 * support xfd at all.
7003 	 *
7004 	 * Do it conditionally upon guest_fpu::xfd. xfd_err matters
7005 	 * only when xfd contains a non-zero value.
7006 	 *
7007 	 * Queuing exception is done in vmx_handle_exit. See comment there.
7008 	 */
7009 	if (vcpu->arch.guest_fpu.fpstate->xfd)
7010 		rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
7011 }
7012 
handle_exception_irqoff(struct kvm_vcpu * vcpu,u32 intr_info)7013 static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
7014 {
7015 	/* if exit due to PF check for async PF */
7016 	if (is_page_fault(intr_info))
7017 		vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
7018 	/* if exit due to NM, handle before interrupts are enabled */
7019 	else if (is_nm_fault(intr_info))
7020 		handle_nm_fault_irqoff(vcpu);
7021 	/* Handle machine checks before interrupts are enabled */
7022 	else if (is_machine_check(intr_info))
7023 		kvm_machine_check();
7024 }
7025 
handle_external_interrupt_irqoff(struct kvm_vcpu * vcpu,u32 intr_info)7026 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
7027 					     u32 intr_info)
7028 {
7029 	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
7030 
7031 	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
7032 	    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
7033 		return;
7034 
7035 	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
7036 	if (cpu_feature_enabled(X86_FEATURE_FRED))
7037 		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
7038 	else
7039 		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
7040 	kvm_after_interrupt(vcpu);
7041 
7042 	vcpu->arch.at_instruction_boundary = true;
7043 }
7044 
vmx_handle_exit_irqoff(struct kvm_vcpu * vcpu)7045 void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
7046 {
7047 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7048 
7049 	if (vmx->emulation_required)
7050 		return;
7051 
7052 	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
7053 		handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
7054 	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
7055 		handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
7056 }
7057 
7058 /*
7059  * The kvm parameter can be NULL (module initialization, or invocation before
7060  * VM creation). Be sure to check the kvm parameter before using it.
7061  */
vmx_has_emulated_msr(struct kvm * kvm,u32 index)7062 bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
7063 {
7064 	switch (index) {
7065 	case MSR_IA32_SMBASE:
7066 		if (!IS_ENABLED(CONFIG_KVM_SMM))
7067 			return false;
7068 		/*
7069 		 * We cannot do SMM unless we can run the guest in big
7070 		 * real mode.
7071 		 */
7072 		return enable_unrestricted_guest || emulate_invalid_guest_state;
7073 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
7074 		return nested;
7075 	case MSR_AMD64_VIRT_SPEC_CTRL:
7076 	case MSR_AMD64_TSC_RATIO:
7077 		/* This is AMD only.  */
7078 		return false;
7079 	default:
7080 		return true;
7081 	}
7082 }
7083 
vmx_recover_nmi_blocking(struct vcpu_vmx * vmx)7084 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
7085 {
7086 	u32 exit_intr_info;
7087 	bool unblock_nmi;
7088 	u8 vector;
7089 	bool idtv_info_valid;
7090 
7091 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7092 
7093 	if (enable_vnmi) {
7094 		if (vmx->loaded_vmcs->nmi_known_unmasked)
7095 			return;
7096 
7097 		exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
7098 		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
7099 		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
7100 		/*
7101 		 * SDM 3: 27.7.1.2 (September 2008)
7102 		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
7103 		 * a guest IRET fault.
7104 		 * SDM 3: 23.2.2 (September 2008)
7105 		 * Bit 12 is undefined in any of the following cases:
7106 		 *  If the VM exit sets the valid bit in the IDT-vectoring
7107 		 *   information field.
7108 		 *  If the VM exit is due to a double fault.
7109 		 */
7110 		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
7111 		    vector != DF_VECTOR && !idtv_info_valid)
7112 			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7113 				      GUEST_INTR_STATE_NMI);
7114 		else
7115 			vmx->loaded_vmcs->nmi_known_unmasked =
7116 				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
7117 				  & GUEST_INTR_STATE_NMI);
7118 	} else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
7119 		vmx->loaded_vmcs->vnmi_blocked_time +=
7120 			ktime_to_ns(ktime_sub(ktime_get(),
7121 					      vmx->loaded_vmcs->entry_time));
7122 }
7123 
__vmx_complete_interrupts(struct kvm_vcpu * vcpu,u32 idt_vectoring_info,int instr_len_field,int error_code_field)7124 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7125 				      u32 idt_vectoring_info,
7126 				      int instr_len_field,
7127 				      int error_code_field)
7128 {
7129 	u8 vector;
7130 	int type;
7131 	bool idtv_info_valid;
7132 
7133 	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7134 
7135 	vcpu->arch.nmi_injected = false;
7136 	kvm_clear_exception_queue(vcpu);
7137 	kvm_clear_interrupt_queue(vcpu);
7138 
7139 	if (!idtv_info_valid)
7140 		return;
7141 
7142 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7143 
7144 	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
7145 	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
7146 
7147 	switch (type) {
7148 	case INTR_TYPE_NMI_INTR:
7149 		vcpu->arch.nmi_injected = true;
7150 		/*
7151 		 * SDM 3: 27.7.1.2 (September 2008)
7152 		 * Clear bit "block by NMI" before VM entry if a NMI
7153 		 * delivery faulted.
7154 		 */
7155 		vmx_set_nmi_mask(vcpu, false);
7156 		break;
7157 	case INTR_TYPE_SOFT_EXCEPTION:
7158 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7159 		fallthrough;
7160 	case INTR_TYPE_HARD_EXCEPTION:
7161 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
7162 			u32 err = vmcs_read32(error_code_field);
7163 			kvm_requeue_exception_e(vcpu, vector, err);
7164 		} else
7165 			kvm_requeue_exception(vcpu, vector);
7166 		break;
7167 	case INTR_TYPE_SOFT_INTR:
7168 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7169 		fallthrough;
7170 	case INTR_TYPE_EXT_INTR:
7171 		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7172 		break;
7173 	default:
7174 		break;
7175 	}
7176 }
7177 
vmx_complete_interrupts(struct vcpu_vmx * vmx)7178 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7179 {
7180 	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7181 				  VM_EXIT_INSTRUCTION_LEN,
7182 				  IDT_VECTORING_ERROR_CODE);
7183 }
7184 
vmx_cancel_injection(struct kvm_vcpu * vcpu)7185 void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7186 {
7187 	__vmx_complete_interrupts(vcpu,
7188 				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7189 				  VM_ENTRY_INSTRUCTION_LEN,
7190 				  VM_ENTRY_EXCEPTION_ERROR_CODE);
7191 
7192 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
7193 }
7194 
atomic_switch_perf_msrs(struct vcpu_vmx * vmx)7195 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7196 {
7197 	int i, nr_msrs;
7198 	struct perf_guest_switch_msr *msrs;
7199 	struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7200 
7201 	pmu->host_cross_mapped_mask = 0;
7202 	if (pmu->pebs_enable & pmu->global_ctrl)
7203 		intel_pmu_cross_mapped_check(pmu);
7204 
7205 	/* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
7206 	msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
7207 	if (!msrs)
7208 		return;
7209 
7210 	for (i = 0; i < nr_msrs; i++)
7211 		if (msrs[i].host == msrs[i].guest)
7212 			clear_atomic_switch_msr(vmx, msrs[i].msr);
7213 		else
7214 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7215 					msrs[i].host, false);
7216 }
7217 
vmx_update_hv_timer(struct kvm_vcpu * vcpu,bool force_immediate_exit)7218 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7219 {
7220 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7221 	u64 tscl;
7222 	u32 delta_tsc;
7223 
7224 	if (force_immediate_exit) {
7225 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
7226 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7227 	} else if (vmx->hv_deadline_tsc != -1) {
7228 		tscl = rdtsc();
7229 		if (vmx->hv_deadline_tsc > tscl)
7230 			/* set_hv_timer ensures the delta fits in 32-bits */
7231 			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
7232 				cpu_preemption_timer_multi);
7233 		else
7234 			delta_tsc = 0;
7235 
7236 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
7237 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7238 	} else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
7239 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
7240 		vmx->loaded_vmcs->hv_timer_soft_disabled = true;
7241 	}
7242 }
7243 
vmx_update_host_rsp(struct vcpu_vmx * vmx,unsigned long host_rsp)7244 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
7245 {
7246 	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7247 		vmx->loaded_vmcs->host_state.rsp = host_rsp;
7248 		vmcs_writel(HOST_RSP, host_rsp);
7249 	}
7250 }
7251 
vmx_spec_ctrl_restore_host(struct vcpu_vmx * vmx,unsigned int flags)7252 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
7253 					unsigned int flags)
7254 {
7255 	u64 hostval = this_cpu_read(x86_spec_ctrl_current);
7256 
7257 	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
7258 		return;
7259 
7260 	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
7261 		vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
7262 
7263 	/*
7264 	 * If the guest/host SPEC_CTRL values differ, restore the host value.
7265 	 *
7266 	 * For legacy IBRS, the IBRS bit always needs to be written after
7267 	 * transitioning from a less privileged predictor mode, regardless of
7268 	 * whether the guest/host values differ.
7269 	 */
7270 	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
7271 	    vmx->spec_ctrl != hostval)
7272 		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
7273 
7274 	barrier_nospec();
7275 }
7276 
vmx_exit_handlers_fastpath(struct kvm_vcpu * vcpu,bool force_immediate_exit)7277 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
7278 					     bool force_immediate_exit)
7279 {
7280 	/*
7281 	 * If L2 is active, some VMX preemption timer exits can be handled in
7282 	 * the fastpath even, all other exits must use the slow path.
7283 	 */
7284 	if (is_guest_mode(vcpu) &&
7285 	    to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER)
7286 		return EXIT_FASTPATH_NONE;
7287 
7288 	switch (to_vmx(vcpu)->exit_reason.basic) {
7289 	case EXIT_REASON_MSR_WRITE:
7290 		return handle_fastpath_set_msr_irqoff(vcpu);
7291 	case EXIT_REASON_PREEMPTION_TIMER:
7292 		return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
7293 	case EXIT_REASON_HLT:
7294 		return handle_fastpath_hlt(vcpu);
7295 	default:
7296 		return EXIT_FASTPATH_NONE;
7297 	}
7298 }
7299 
vmx_vcpu_enter_exit(struct kvm_vcpu * vcpu,unsigned int flags)7300 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
7301 					unsigned int flags)
7302 {
7303 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7304 
7305 	guest_state_enter_irqoff();
7306 
7307 	/*
7308 	 * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
7309 	 * mitigation for MDS is done late in VMentry and is still
7310 	 * executed in spite of L1D Flush. This is because an extra VERW
7311 	 * should not matter much after the big hammer L1D Flush.
7312 	 */
7313 	if (static_branch_unlikely(&vmx_l1d_should_flush))
7314 		vmx_l1d_flush(vcpu);
7315 	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
7316 		 kvm_arch_has_assigned_device(vcpu->kvm))
7317 		mds_clear_cpu_buffers();
7318 
7319 	vmx_disable_fb_clear(vmx);
7320 
7321 	if (vcpu->arch.cr2 != native_read_cr2())
7322 		native_write_cr2(vcpu->arch.cr2);
7323 
7324 	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7325 				   flags);
7326 
7327 	vcpu->arch.cr2 = native_read_cr2();
7328 	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
7329 
7330 	vmx->idt_vectoring_info = 0;
7331 
7332 	vmx_enable_fb_clear(vmx);
7333 
7334 	if (unlikely(vmx->fail)) {
7335 		vmx->exit_reason.full = 0xdead;
7336 		goto out;
7337 	}
7338 
7339 	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
7340 	if (likely(!vmx->exit_reason.failed_vmentry))
7341 		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7342 
7343 	if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
7344 	    is_nmi(vmx_get_intr_info(vcpu))) {
7345 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
7346 		if (cpu_feature_enabled(X86_FEATURE_FRED))
7347 			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
7348 		else
7349 			vmx_do_nmi_irqoff();
7350 		kvm_after_interrupt(vcpu);
7351 	}
7352 
7353 out:
7354 	guest_state_exit_irqoff();
7355 }
7356 
vmx_vcpu_run(struct kvm_vcpu * vcpu,bool force_immediate_exit)7357 fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7358 {
7359 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7360 	unsigned long cr3, cr4;
7361 
7362 	/* Record the guest's net vcpu time for enforced NMI injections. */
7363 	if (unlikely(!enable_vnmi &&
7364 		     vmx->loaded_vmcs->soft_vnmi_blocked))
7365 		vmx->loaded_vmcs->entry_time = ktime_get();
7366 
7367 	/*
7368 	 * Don't enter VMX if guest state is invalid, let the exit handler
7369 	 * start emulation until we arrive back to a valid state.  Synthesize a
7370 	 * consistency check VM-Exit due to invalid guest state and bail.
7371 	 */
7372 	if (unlikely(vmx->emulation_required)) {
7373 		vmx->fail = 0;
7374 
7375 		vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
7376 		vmx->exit_reason.failed_vmentry = 1;
7377 		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
7378 		vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
7379 		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
7380 		vmx->exit_intr_info = 0;
7381 		return EXIT_FASTPATH_NONE;
7382 	}
7383 
7384 	trace_kvm_entry(vcpu, force_immediate_exit);
7385 
7386 	if (vmx->ple_window_dirty) {
7387 		vmx->ple_window_dirty = false;
7388 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
7389 	}
7390 
7391 	/*
7392 	 * We did this in prepare_switch_to_guest, because it needs to
7393 	 * be within srcu_read_lock.
7394 	 */
7395 	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
7396 
7397 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
7398 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7399 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
7400 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7401 	vcpu->arch.regs_dirty = 0;
7402 
7403 	/*
7404 	 * Refresh vmcs.HOST_CR3 if necessary.  This must be done immediately
7405 	 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
7406 	 * it switches back to the current->mm, which can occur in KVM context
7407 	 * when switching to a temporary mm to patch kernel code, e.g. if KVM
7408 	 * toggles a static key while handling a VM-Exit.
7409 	 */
7410 	cr3 = __get_current_cr3_fast();
7411 	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
7412 		vmcs_writel(HOST_CR3, cr3);
7413 		vmx->loaded_vmcs->host_state.cr3 = cr3;
7414 	}
7415 
7416 	cr4 = cr4_read_shadow();
7417 	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
7418 		vmcs_writel(HOST_CR4, cr4);
7419 		vmx->loaded_vmcs->host_state.cr4 = cr4;
7420 	}
7421 
7422 	/* When single-stepping over STI and MOV SS, we must clear the
7423 	 * corresponding interruptibility bits in the guest state. Otherwise
7424 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
7425 	 * exceptions being set, but that's not correct for the guest debugging
7426 	 * case. */
7427 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7428 		vmx_set_interrupt_shadow(vcpu, 0);
7429 
7430 	kvm_load_guest_xsave_state(vcpu);
7431 
7432 	pt_guest_enter(vmx);
7433 
7434 	atomic_switch_perf_msrs(vmx);
7435 	if (intel_pmu_lbr_is_enabled(vcpu))
7436 		vmx_passthrough_lbr_msrs(vcpu);
7437 
7438 	if (enable_preemption_timer)
7439 		vmx_update_hv_timer(vcpu, force_immediate_exit);
7440 	else if (force_immediate_exit)
7441 		smp_send_reschedule(vcpu->cpu);
7442 
7443 	kvm_wait_lapic_expire(vcpu);
7444 
7445 	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
7446 	vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7447 
7448 	/* All fields are clean at this point */
7449 	if (kvm_is_using_evmcs()) {
7450 		current_evmcs->hv_clean_fields |=
7451 			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
7452 
7453 		current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
7454 	}
7455 
7456 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7457 	if (vcpu->arch.host_debugctl)
7458 		update_debugctlmsr(vcpu->arch.host_debugctl);
7459 
7460 #ifndef CONFIG_X86_64
7461 	/*
7462 	 * The sysexit path does not restore ds/es, so we must set them to
7463 	 * a reasonable value ourselves.
7464 	 *
7465 	 * We can't defer this to vmx_prepare_switch_to_host() since that
7466 	 * function may be executed in interrupt context, which saves and
7467 	 * restore segments around it, nullifying its effect.
7468 	 */
7469 	loadsegment(ds, __USER_DS);
7470 	loadsegment(es, __USER_DS);
7471 #endif
7472 
7473 	pt_guest_exit(vmx);
7474 
7475 	kvm_load_host_xsave_state(vcpu);
7476 
7477 	if (is_guest_mode(vcpu)) {
7478 		/*
7479 		 * Track VMLAUNCH/VMRESUME that have made past guest state
7480 		 * checking.
7481 		 */
7482 		if (vmx->nested.nested_run_pending &&
7483 		    !vmx->exit_reason.failed_vmentry)
7484 			++vcpu->stat.nested_run;
7485 
7486 		vmx->nested.nested_run_pending = 0;
7487 	}
7488 
7489 	if (unlikely(vmx->fail))
7490 		return EXIT_FASTPATH_NONE;
7491 
7492 	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
7493 		kvm_machine_check();
7494 
7495 	trace_kvm_exit(vcpu, KVM_ISA_VMX);
7496 
7497 	if (unlikely(vmx->exit_reason.failed_vmentry))
7498 		return EXIT_FASTPATH_NONE;
7499 
7500 	vmx->loaded_vmcs->launched = 1;
7501 
7502 	vmx_recover_nmi_blocking(vmx);
7503 	vmx_complete_interrupts(vmx);
7504 
7505 	return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
7506 }
7507 
vmx_vcpu_free(struct kvm_vcpu * vcpu)7508 void vmx_vcpu_free(struct kvm_vcpu *vcpu)
7509 {
7510 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7511 
7512 	if (enable_pml)
7513 		vmx_destroy_pml_buffer(vmx);
7514 	free_vpid(vmx->vpid);
7515 	nested_vmx_free_vcpu(vcpu);
7516 	free_loaded_vmcs(vmx->loaded_vmcs);
7517 	free_page((unsigned long)vmx->ve_info);
7518 }
7519 
vmx_vcpu_create(struct kvm_vcpu * vcpu)7520 int vmx_vcpu_create(struct kvm_vcpu *vcpu)
7521 {
7522 	struct vmx_uret_msr *tsx_ctrl;
7523 	struct vcpu_vmx *vmx;
7524 	int i, err;
7525 
7526 	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
7527 	vmx = to_vmx(vcpu);
7528 
7529 	INIT_LIST_HEAD(&vmx->pi_wakeup_list);
7530 
7531 	err = -ENOMEM;
7532 
7533 	vmx->vpid = allocate_vpid();
7534 
7535 	/*
7536 	 * If PML is turned on, failure on enabling PML just results in failure
7537 	 * of creating the vcpu, therefore we can simplify PML logic (by
7538 	 * avoiding dealing with cases, such as enabling PML partially on vcpus
7539 	 * for the guest), etc.
7540 	 */
7541 	if (enable_pml) {
7542 		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7543 		if (!vmx->pml_pg)
7544 			goto free_vpid;
7545 	}
7546 
7547 	for (i = 0; i < kvm_nr_uret_msrs; ++i)
7548 		vmx->guest_uret_msrs[i].mask = -1ull;
7549 	if (boot_cpu_has(X86_FEATURE_RTM)) {
7550 		/*
7551 		 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
7552 		 * Keep the host value unchanged to avoid changing CPUID bits
7553 		 * under the host kernel's feet.
7554 		 */
7555 		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7556 		if (tsx_ctrl)
7557 			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
7558 	}
7559 
7560 	err = alloc_loaded_vmcs(&vmx->vmcs01);
7561 	if (err < 0)
7562 		goto free_pml;
7563 
7564 	/*
7565 	 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
7566 	 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
7567 	 * feature only for vmcs01, KVM currently isn't equipped to realize any
7568 	 * performance benefits from enabling it for vmcs02.
7569 	 */
7570 	if (kvm_is_using_evmcs() &&
7571 	    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
7572 		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7573 
7574 		evmcs->hv_enlightenments_control.msr_bitmap = 1;
7575 	}
7576 
7577 	/* The MSR bitmap starts with all ones */
7578 	bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7579 	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7580 
7581 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
7582 #ifdef CONFIG_X86_64
7583 	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
7584 	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
7585 	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
7586 #endif
7587 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
7588 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
7589 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
7590 	if (kvm_cstate_in_guest(vcpu->kvm)) {
7591 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
7592 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
7593 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
7594 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
7595 	}
7596 
7597 	vmx->loaded_vmcs = &vmx->vmcs01;
7598 
7599 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
7600 		err = kvm_alloc_apic_access_page(vcpu->kvm);
7601 		if (err)
7602 			goto free_vmcs;
7603 	}
7604 
7605 	if (enable_ept && !enable_unrestricted_guest) {
7606 		err = init_rmode_identity_map(vcpu->kvm);
7607 		if (err)
7608 			goto free_vmcs;
7609 	}
7610 
7611 	err = -ENOMEM;
7612 	if (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_EPT_VIOLATION_VE) {
7613 		struct page *page;
7614 
7615 		BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
7616 
7617 		/* ve_info must be page aligned. */
7618 		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7619 		if (!page)
7620 			goto free_vmcs;
7621 
7622 		vmx->ve_info = page_to_virt(page);
7623 	}
7624 
7625 	if (vmx_can_use_ipiv(vcpu))
7626 		WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
7627 			   __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
7628 
7629 	return 0;
7630 
7631 free_vmcs:
7632 	free_loaded_vmcs(vmx->loaded_vmcs);
7633 free_pml:
7634 	vmx_destroy_pml_buffer(vmx);
7635 free_vpid:
7636 	free_vpid(vmx->vpid);
7637 	return err;
7638 }
7639 
7640 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7641 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7642 
vmx_vm_init(struct kvm * kvm)7643 int vmx_vm_init(struct kvm *kvm)
7644 {
7645 	if (!ple_gap)
7646 		kvm->arch.pause_in_guest = true;
7647 
7648 	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
7649 		switch (l1tf_mitigation) {
7650 		case L1TF_MITIGATION_OFF:
7651 		case L1TF_MITIGATION_FLUSH_NOWARN:
7652 			/* 'I explicitly don't care' is set */
7653 			break;
7654 		case L1TF_MITIGATION_FLUSH:
7655 		case L1TF_MITIGATION_FLUSH_NOSMT:
7656 		case L1TF_MITIGATION_FULL:
7657 			/*
7658 			 * Warn upon starting the first VM in a potentially
7659 			 * insecure environment.
7660 			 */
7661 			if (sched_smt_active())
7662 				pr_warn_once(L1TF_MSG_SMT);
7663 			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
7664 				pr_warn_once(L1TF_MSG_L1D);
7665 			break;
7666 		case L1TF_MITIGATION_FULL_FORCE:
7667 			/* Flush is enforced */
7668 			break;
7669 		}
7670 	}
7671 	return 0;
7672 }
7673 
vmx_get_mt_mask(struct kvm_vcpu * vcpu,gfn_t gfn,bool is_mmio)7674 u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7675 {
7676 	/*
7677 	 * Force UC for host MMIO regions, as allowing the guest to access MMIO
7678 	 * with cacheable accesses will result in Machine Checks.
7679 	 */
7680 	if (is_mmio)
7681 		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7682 
7683 	/*
7684 	 * Force WB and ignore guest PAT if the VM does NOT have a non-coherent
7685 	 * device attached.  Letting the guest control memory types on Intel
7686 	 * CPUs may result in unexpected behavior, and so KVM's ABI is to trust
7687 	 * the guest to behave only as a last resort.
7688 	 */
7689 	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
7690 		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7691 
7692 	return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT);
7693 }
7694 
vmcs_set_secondary_exec_control(struct vcpu_vmx * vmx,u32 new_ctl)7695 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
7696 {
7697 	/*
7698 	 * These bits in the secondary execution controls field
7699 	 * are dynamic, the others are mostly based on the hypervisor
7700 	 * architecture and the guest's CPUID.  Do not touch the
7701 	 * dynamic bits.
7702 	 */
7703 	u32 mask =
7704 		SECONDARY_EXEC_SHADOW_VMCS |
7705 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
7706 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7707 		SECONDARY_EXEC_DESC;
7708 
7709 	u32 cur_ctl = secondary_exec_controls_get(vmx);
7710 
7711 	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7712 }
7713 
7714 /*
7715  * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
7716  * (indicating "allowed-1") if they are supported in the guest's CPUID.
7717  */
nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu * vcpu)7718 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
7719 {
7720 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7721 	struct kvm_cpuid_entry2 *entry;
7722 
7723 	vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7724 	vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7725 
7726 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {		\
7727 	if (entry && (entry->_reg & (_cpuid_mask)))			\
7728 		vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);	\
7729 } while (0)
7730 
7731 	entry = kvm_find_cpuid_entry(vcpu, 0x1);
7732 	cr4_fixed1_update(X86_CR4_VME,        edx, feature_bit(VME));
7733 	cr4_fixed1_update(X86_CR4_PVI,        edx, feature_bit(VME));
7734 	cr4_fixed1_update(X86_CR4_TSD,        edx, feature_bit(TSC));
7735 	cr4_fixed1_update(X86_CR4_DE,         edx, feature_bit(DE));
7736 	cr4_fixed1_update(X86_CR4_PSE,        edx, feature_bit(PSE));
7737 	cr4_fixed1_update(X86_CR4_PAE,        edx, feature_bit(PAE));
7738 	cr4_fixed1_update(X86_CR4_MCE,        edx, feature_bit(MCE));
7739 	cr4_fixed1_update(X86_CR4_PGE,        edx, feature_bit(PGE));
7740 	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, feature_bit(FXSR));
7741 	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
7742 	cr4_fixed1_update(X86_CR4_VMXE,       ecx, feature_bit(VMX));
7743 	cr4_fixed1_update(X86_CR4_SMXE,       ecx, feature_bit(SMX));
7744 	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, feature_bit(PCID));
7745 	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, feature_bit(XSAVE));
7746 
7747 	entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 0);
7748 	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, feature_bit(FSGSBASE));
7749 	cr4_fixed1_update(X86_CR4_SMEP,       ebx, feature_bit(SMEP));
7750 	cr4_fixed1_update(X86_CR4_SMAP,       ebx, feature_bit(SMAP));
7751 	cr4_fixed1_update(X86_CR4_PKE,        ecx, feature_bit(PKU));
7752 	cr4_fixed1_update(X86_CR4_UMIP,       ecx, feature_bit(UMIP));
7753 	cr4_fixed1_update(X86_CR4_LA57,       ecx, feature_bit(LA57));
7754 
7755 	entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
7756 	cr4_fixed1_update(X86_CR4_LAM_SUP,    eax, feature_bit(LAM));
7757 
7758 #undef cr4_fixed1_update
7759 }
7760 
update_intel_pt_cfg(struct kvm_vcpu * vcpu)7761 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7762 {
7763 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7764 	struct kvm_cpuid_entry2 *best = NULL;
7765 	int i;
7766 
7767 	for (i = 0; i < PT_CPUID_LEAVES; i++) {
7768 		best = kvm_find_cpuid_entry_index(vcpu, 0x14, i);
7769 		if (!best)
7770 			return;
7771 		vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7772 		vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7773 		vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7774 		vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7775 	}
7776 
7777 	/* Get the number of configurable Address Ranges for filtering */
7778 	vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps,
7779 						PT_CAP_num_address_ranges);
7780 
7781 	/* Initialize and clear the no dependency bits */
7782 	vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7783 			RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC |
7784 			RTIT_CTL_BRANCH_EN);
7785 
7786 	/*
7787 	 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise
7788 	 * will inject an #GP
7789 	 */
7790 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7791 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7792 
7793 	/*
7794 	 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
7795 	 * PSBFreq can be set
7796 	 */
7797 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7798 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7799 				RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
7800 
7801 	/*
7802 	 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set
7803 	 */
7804 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7805 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7806 					      RTIT_CTL_MTC_RANGE);
7807 
7808 	/* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
7809 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7810 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7811 							RTIT_CTL_PTW_EN);
7812 
7813 	/* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
7814 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7815 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7816 
7817 	/* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
7818 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7819 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7820 
7821 	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
7822 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7823 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7824 
7825 	/* unmask address range configure area */
7826 	for (i = 0; i < vmx->pt_desc.num_address_ranges; i++)
7827 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7828 }
7829 
vmx_vcpu_after_set_cpuid(struct kvm_vcpu * vcpu)7830 void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
7831 {
7832 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7833 
7834 	/*
7835 	 * XSAVES is effectively enabled if and only if XSAVE is also exposed
7836 	 * to the guest.  XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
7837 	 * set if and only if XSAVE is supported.
7838 	 */
7839 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE))
7840 		guest_cpu_cap_clear(vcpu, X86_FEATURE_XSAVES);
7841 
7842 	vmx_setup_uret_msrs(vmx);
7843 
7844 	if (cpu_has_secondary_exec_ctrls())
7845 		vmcs_set_secondary_exec_control(vmx,
7846 						vmx_secondary_exec_control(vmx));
7847 
7848 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
7849 		vmx->msr_ia32_feature_control_valid_bits |=
7850 			FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7851 			FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
7852 	else
7853 		vmx->msr_ia32_feature_control_valid_bits &=
7854 			~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7855 			  FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
7856 
7857 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
7858 		nested_vmx_cr_fixed1_bits_update(vcpu);
7859 
7860 	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
7861 			guest_cpu_cap_has(vcpu, X86_FEATURE_INTEL_PT))
7862 		update_intel_pt_cfg(vcpu);
7863 
7864 	if (boot_cpu_has(X86_FEATURE_RTM)) {
7865 		struct vmx_uret_msr *msr;
7866 		msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7867 		if (msr) {
7868 			bool enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_RTM);
7869 			vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7870 		}
7871 	}
7872 
7873 	if (kvm_cpu_cap_has(X86_FEATURE_XFD))
7874 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
7875 					  !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD));
7876 
7877 	if (boot_cpu_has(X86_FEATURE_IBPB))
7878 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
7879 					  !guest_has_pred_cmd_msr(vcpu));
7880 
7881 	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
7882 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
7883 					  !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
7884 
7885 	set_cr4_guest_host_mask(vmx);
7886 
7887 	vmx_write_encls_bitmap(vcpu, NULL);
7888 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX))
7889 		vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
7890 	else
7891 		vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
7892 
7893 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC))
7894 		vmx->msr_ia32_feature_control_valid_bits |=
7895 			FEAT_CTL_SGX_LC_ENABLED;
7896 	else
7897 		vmx->msr_ia32_feature_control_valid_bits &=
7898 			~FEAT_CTL_SGX_LC_ENABLED;
7899 
7900 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
7901 	vmx_update_exception_bitmap(vcpu);
7902 }
7903 
vmx_get_perf_capabilities(void)7904 static __init u64 vmx_get_perf_capabilities(void)
7905 {
7906 	u64 perf_cap = PMU_CAP_FW_WRITES;
7907 	u64 host_perf_cap = 0;
7908 
7909 	if (!enable_pmu)
7910 		return 0;
7911 
7912 	if (boot_cpu_has(X86_FEATURE_PDCM))
7913 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
7914 
7915 	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
7916 		x86_perf_get_lbr(&vmx_lbr_caps);
7917 
7918 		/*
7919 		 * KVM requires LBR callstack support, as the overhead due to
7920 		 * context switching LBRs without said support is too high.
7921 		 * See intel_pmu_create_guest_lbr_event() for more info.
7922 		 */
7923 		if (!vmx_lbr_caps.has_callstack)
7924 			memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
7925 		else if (vmx_lbr_caps.nr)
7926 			perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
7927 	}
7928 
7929 	if (vmx_pebs_supported()) {
7930 		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
7931 
7932 		/*
7933 		 * Disallow adaptive PEBS as it is functionally broken, can be
7934 		 * used by the guest to read *host* LBRs, and can be used to
7935 		 * bypass userspace event filters.  To correctly and safely
7936 		 * support adaptive PEBS, KVM needs to:
7937 		 *
7938 		 * 1. Account for the ADAPTIVE flag when (re)programming fixed
7939 		 *    counters.
7940 		 *
7941 		 * 2. Gain support from perf (or take direct control of counter
7942 		 *    programming) to support events without adaptive PEBS
7943 		 *    enabled for the hardware counter.
7944 		 *
7945 		 * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
7946 		 *    adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
7947 		 *
7948 		 * 4. Document which PMU events are effectively exposed to the
7949 		 *    guest via adaptive PEBS, and make adaptive PEBS mutually
7950 		 *    exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
7951 		 */
7952 		perf_cap &= ~PERF_CAP_PEBS_BASELINE;
7953 	}
7954 
7955 	return perf_cap;
7956 }
7957 
vmx_set_cpu_caps(void)7958 static __init void vmx_set_cpu_caps(void)
7959 {
7960 	kvm_set_cpu_caps();
7961 
7962 	/* CPUID 0x1 */
7963 	if (nested)
7964 		kvm_cpu_cap_set(X86_FEATURE_VMX);
7965 
7966 	/* CPUID 0x7 */
7967 	if (kvm_mpx_supported())
7968 		kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
7969 	if (!cpu_has_vmx_invpcid())
7970 		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
7971 	if (vmx_pt_mode_is_host_guest())
7972 		kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
7973 	if (vmx_pebs_supported()) {
7974 		kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
7975 		kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
7976 	}
7977 
7978 	if (!enable_pmu)
7979 		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
7980 	kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
7981 
7982 	if (!enable_sgx) {
7983 		kvm_cpu_cap_clear(X86_FEATURE_SGX);
7984 		kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
7985 		kvm_cpu_cap_clear(X86_FEATURE_SGX1);
7986 		kvm_cpu_cap_clear(X86_FEATURE_SGX2);
7987 		kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA);
7988 	}
7989 
7990 	if (vmx_umip_emulated())
7991 		kvm_cpu_cap_set(X86_FEATURE_UMIP);
7992 
7993 	/* CPUID 0xD.1 */
7994 	kvm_caps.supported_xss = 0;
7995 	if (!cpu_has_vmx_xsaves())
7996 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
7997 
7998 	/* CPUID 0x80000001 and 0x7 (RDPID) */
7999 	if (!cpu_has_vmx_rdtscp()) {
8000 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
8001 		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
8002 	}
8003 
8004 	if (cpu_has_vmx_waitpkg())
8005 		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
8006 }
8007 
vmx_check_intercept_io(struct kvm_vcpu * vcpu,struct x86_instruction_info * info)8008 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
8009 				  struct x86_instruction_info *info)
8010 {
8011 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8012 	unsigned short port;
8013 	bool intercept;
8014 	int size;
8015 
8016 	if (info->intercept == x86_intercept_in ||
8017 	    info->intercept == x86_intercept_ins) {
8018 		port = info->src_val;
8019 		size = info->dst_bytes;
8020 	} else {
8021 		port = info->dst_val;
8022 		size = info->src_bytes;
8023 	}
8024 
8025 	/*
8026 	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
8027 	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
8028 	 * control.
8029 	 *
8030 	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
8031 	 */
8032 	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
8033 		intercept = nested_cpu_has(vmcs12,
8034 					   CPU_BASED_UNCOND_IO_EXITING);
8035 	else
8036 		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
8037 
8038 	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
8039 	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
8040 }
8041 
vmx_check_intercept(struct kvm_vcpu * vcpu,struct x86_instruction_info * info,enum x86_intercept_stage stage,struct x86_exception * exception)8042 int vmx_check_intercept(struct kvm_vcpu *vcpu,
8043 			struct x86_instruction_info *info,
8044 			enum x86_intercept_stage stage,
8045 			struct x86_exception *exception)
8046 {
8047 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8048 
8049 	switch (info->intercept) {
8050 	/*
8051 	 * RDPID causes #UD if disabled through secondary execution controls.
8052 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
8053 	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
8054 	 */
8055 	case x86_intercept_rdpid:
8056 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
8057 			exception->vector = UD_VECTOR;
8058 			exception->error_code_valid = false;
8059 			return X86EMUL_PROPAGATE_FAULT;
8060 		}
8061 		break;
8062 
8063 	case x86_intercept_in:
8064 	case x86_intercept_ins:
8065 	case x86_intercept_out:
8066 	case x86_intercept_outs:
8067 		return vmx_check_intercept_io(vcpu, info);
8068 
8069 	case x86_intercept_lgdt:
8070 	case x86_intercept_lidt:
8071 	case x86_intercept_lldt:
8072 	case x86_intercept_ltr:
8073 	case x86_intercept_sgdt:
8074 	case x86_intercept_sidt:
8075 	case x86_intercept_sldt:
8076 	case x86_intercept_str:
8077 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
8078 			return X86EMUL_CONTINUE;
8079 
8080 		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
8081 		break;
8082 
8083 	case x86_intercept_pause:
8084 		/*
8085 		 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
8086 		 * with vanilla NOPs in the emulator.  Apply the interception
8087 		 * check only to actual PAUSE instructions.  Don't check
8088 		 * PAUSE-loop-exiting, software can't expect a given PAUSE to
8089 		 * exit, i.e. KVM is within its rights to allow L2 to execute
8090 		 * the PAUSE.
8091 		 */
8092 		if ((info->rep_prefix != REPE_PREFIX) ||
8093 		    !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
8094 			return X86EMUL_CONTINUE;
8095 
8096 		break;
8097 
8098 	/* TODO: check more intercepts... */
8099 	default:
8100 		break;
8101 	}
8102 
8103 	return X86EMUL_UNHANDLEABLE;
8104 }
8105 
8106 #ifdef CONFIG_X86_64
8107 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */
u64_shl_div_u64(u64 a,unsigned int shift,u64 divisor,u64 * result)8108 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
8109 				  u64 divisor, u64 *result)
8110 {
8111 	u64 low = a << shift, high = a >> (64 - shift);
8112 
8113 	/* To avoid the overflow on divq */
8114 	if (high >= divisor)
8115 		return 1;
8116 
8117 	/* Low hold the result, high hold rem which is discarded */
8118 	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
8119 	    "rm" (divisor), "0" (low), "1" (high));
8120 	*result = low;
8121 
8122 	return 0;
8123 }
8124 
vmx_set_hv_timer(struct kvm_vcpu * vcpu,u64 guest_deadline_tsc,bool * expired)8125 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
8126 		     bool *expired)
8127 {
8128 	struct vcpu_vmx *vmx;
8129 	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
8130 	struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
8131 
8132 	vmx = to_vmx(vcpu);
8133 	tscl = rdtsc();
8134 	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
8135 	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
8136 	lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
8137 						    ktimer->timer_advance_ns);
8138 
8139 	if (delta_tsc > lapic_timer_advance_cycles)
8140 		delta_tsc -= lapic_timer_advance_cycles;
8141 	else
8142 		delta_tsc = 0;
8143 
8144 	/* Convert to host delta tsc if tsc scaling is enabled */
8145 	if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
8146 	    delta_tsc && u64_shl_div_u64(delta_tsc,
8147 				kvm_caps.tsc_scaling_ratio_frac_bits,
8148 				vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
8149 		return -ERANGE;
8150 
8151 	/*
8152 	 * If the delta tsc can't fit in the 32 bit after the multi shift,
8153 	 * we can't use the preemption timer.
8154 	 * It's possible that it fits on later vmentries, but checking
8155 	 * on every vmentry is costly so we just use an hrtimer.
8156 	 */
8157 	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
8158 		return -ERANGE;
8159 
8160 	vmx->hv_deadline_tsc = tscl + delta_tsc;
8161 	*expired = !delta_tsc;
8162 	return 0;
8163 }
8164 
vmx_cancel_hv_timer(struct kvm_vcpu * vcpu)8165 void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
8166 {
8167 	to_vmx(vcpu)->hv_deadline_tsc = -1;
8168 }
8169 #endif
8170 
vmx_update_cpu_dirty_logging(struct kvm_vcpu * vcpu)8171 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
8172 {
8173 	struct vcpu_vmx *vmx = to_vmx(vcpu);
8174 
8175 	if (WARN_ON_ONCE(!enable_pml))
8176 		return;
8177 
8178 	if (is_guest_mode(vcpu)) {
8179 		vmx->nested.update_vmcs01_cpu_dirty_logging = true;
8180 		return;
8181 	}
8182 
8183 	/*
8184 	 * Note, nr_memslots_dirty_logging can be changed concurrent with this
8185 	 * code, but in that case another update request will be made and so
8186 	 * the guest will never run with a stale PML value.
8187 	 */
8188 	if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8189 		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8190 	else
8191 		secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8192 }
8193 
vmx_setup_mce(struct kvm_vcpu * vcpu)8194 void vmx_setup_mce(struct kvm_vcpu *vcpu)
8195 {
8196 	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8197 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
8198 			FEAT_CTL_LMCE_ENABLED;
8199 	else
8200 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
8201 			~FEAT_CTL_LMCE_ENABLED;
8202 }
8203 
8204 #ifdef CONFIG_KVM_SMM
vmx_smi_allowed(struct kvm_vcpu * vcpu,bool for_injection)8205 int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
8206 {
8207 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
8208 	if (to_vmx(vcpu)->nested.nested_run_pending)
8209 		return -EBUSY;
8210 	return !is_smm(vcpu);
8211 }
8212 
vmx_enter_smm(struct kvm_vcpu * vcpu,union kvm_smram * smram)8213 int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
8214 {
8215 	struct vcpu_vmx *vmx = to_vmx(vcpu);
8216 
8217 	/*
8218 	 * TODO: Implement custom flows for forcing the vCPU out/in of L2 on
8219 	 * SMI and RSM.  Using the common VM-Exit + VM-Enter routines is wrong
8220 	 * SMI and RSM only modify state that is saved and restored via SMRAM.
8221 	 * E.g. most MSRs are left untouched, but many are modified by VM-Exit
8222 	 * and VM-Enter, and thus L2's values may be corrupted on SMI+RSM.
8223 	 */
8224 	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8225 	if (vmx->nested.smm.guest_mode)
8226 		nested_vmx_vmexit(vcpu, -1, 0, 0);
8227 
8228 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
8229 	vmx->nested.vmxon = false;
8230 	vmx_clear_hlt(vcpu);
8231 	return 0;
8232 }
8233 
vmx_leave_smm(struct kvm_vcpu * vcpu,const union kvm_smram * smram)8234 int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
8235 {
8236 	struct vcpu_vmx *vmx = to_vmx(vcpu);
8237 	int ret;
8238 
8239 	if (vmx->nested.smm.vmxon) {
8240 		vmx->nested.vmxon = true;
8241 		vmx->nested.smm.vmxon = false;
8242 	}
8243 
8244 	if (vmx->nested.smm.guest_mode) {
8245 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
8246 		if (ret)
8247 			return ret;
8248 
8249 		vmx->nested.nested_run_pending = 1;
8250 		vmx->nested.smm.guest_mode = false;
8251 	}
8252 	return 0;
8253 }
8254 
vmx_enable_smi_window(struct kvm_vcpu * vcpu)8255 void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
8256 {
8257 	/* RSM will cause a vmexit anyway.  */
8258 }
8259 #endif
8260 
vmx_apic_init_signal_blocked(struct kvm_vcpu * vcpu)8261 bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
8262 {
8263 	return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
8264 }
8265 
vmx_migrate_timers(struct kvm_vcpu * vcpu)8266 void vmx_migrate_timers(struct kvm_vcpu *vcpu)
8267 {
8268 	if (is_guest_mode(vcpu)) {
8269 		struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
8270 
8271 		if (hrtimer_try_to_cancel(timer) == 1)
8272 			hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
8273 	}
8274 }
8275 
vmx_hardware_unsetup(void)8276 void vmx_hardware_unsetup(void)
8277 {
8278 	kvm_set_posted_intr_wakeup_handler(NULL);
8279 
8280 	if (nested)
8281 		nested_vmx_hardware_unsetup();
8282 
8283 	free_kvm_area();
8284 }
8285 
vmx_vm_destroy(struct kvm * kvm)8286 void vmx_vm_destroy(struct kvm *kvm)
8287 {
8288 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
8289 
8290 	free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
8291 }
8292 
8293 /*
8294  * Note, the SDM states that the linear address is masked *after* the modified
8295  * canonicality check, whereas KVM masks (untags) the address and then performs
8296  * a "normal" canonicality check.  Functionally, the two methods are identical,
8297  * and when the masking occurs relative to the canonicality check isn't visible
8298  * to software, i.e. KVM's behavior doesn't violate the SDM.
8299  */
vmx_get_untagged_addr(struct kvm_vcpu * vcpu,gva_t gva,unsigned int flags)8300 gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
8301 {
8302 	int lam_bit;
8303 	unsigned long cr3_bits;
8304 
8305 	if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
8306 		return gva;
8307 
8308 	if (!is_64_bit_mode(vcpu))
8309 		return gva;
8310 
8311 	/*
8312 	 * Bit 63 determines if the address should be treated as user address
8313 	 * or a supervisor address.
8314 	 */
8315 	if (!(gva & BIT_ULL(63))) {
8316 		cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
8317 		if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
8318 			return gva;
8319 
8320 		/* LAM_U48 is ignored if LAM_U57 is set. */
8321 		lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
8322 	} else {
8323 		if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
8324 			return gva;
8325 
8326 		lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
8327 	}
8328 
8329 	/*
8330 	 * Untag the address by sign-extending the lam_bit, but NOT to bit 63.
8331 	 * Bit 63 is retained from the raw virtual address so that untagging
8332 	 * doesn't change a user access to a supervisor access, and vice versa.
8333 	 */
8334 	return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
8335 }
8336 
vmx_handle_intel_pt_intr(void)8337 static unsigned int vmx_handle_intel_pt_intr(void)
8338 {
8339 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
8340 
8341 	/* '0' on failure so that the !PT case can use a RET0 static call. */
8342 	if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
8343 		return 0;
8344 
8345 	kvm_make_request(KVM_REQ_PMI, vcpu);
8346 	__set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
8347 		  (unsigned long *)&vcpu->arch.pmu.global_status);
8348 	return 1;
8349 }
8350 
vmx_setup_user_return_msrs(void)8351 static __init void vmx_setup_user_return_msrs(void)
8352 {
8353 
8354 	/*
8355 	 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
8356 	 * will emulate SYSCALL in legacy mode if the vendor string in guest
8357 	 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
8358 	 * support this emulation, MSR_STAR is included in the list for i386,
8359 	 * but is never loaded into hardware.  MSR_CSTAR is also never loaded
8360 	 * into hardware and is here purely for emulation purposes.
8361 	 */
8362 	const u32 vmx_uret_msrs_list[] = {
8363 	#ifdef CONFIG_X86_64
8364 		MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
8365 	#endif
8366 		MSR_EFER, MSR_TSC_AUX, MSR_STAR,
8367 		MSR_IA32_TSX_CTRL,
8368 	};
8369 	int i;
8370 
8371 	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
8372 
8373 	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
8374 		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
8375 }
8376 
vmx_setup_me_spte_mask(void)8377 static void __init vmx_setup_me_spte_mask(void)
8378 {
8379 	u64 me_mask = 0;
8380 
8381 	/*
8382 	 * On pre-MKTME system, boot_cpu_data.x86_phys_bits equals to
8383 	 * kvm_host.maxphyaddr.  On MKTME and/or TDX capable systems,
8384 	 * boot_cpu_data.x86_phys_bits holds the actual physical address
8385 	 * w/o the KeyID bits, and kvm_host.maxphyaddr equals to
8386 	 * MAXPHYADDR reported by CPUID.  Those bits between are KeyID bits.
8387 	 */
8388 	if (boot_cpu_data.x86_phys_bits != kvm_host.maxphyaddr)
8389 		me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
8390 				    kvm_host.maxphyaddr - 1);
8391 
8392 	/*
8393 	 * Unlike SME, host kernel doesn't support setting up any
8394 	 * MKTME KeyID on Intel platforms.  No memory encryption
8395 	 * bits should be included into the SPTE.
8396 	 */
8397 	kvm_mmu_set_me_spte_mask(0, me_mask);
8398 }
8399 
vmx_hardware_setup(void)8400 __init int vmx_hardware_setup(void)
8401 {
8402 	unsigned long host_bndcfgs;
8403 	struct desc_ptr dt;
8404 	int r;
8405 
8406 	store_idt(&dt);
8407 	host_idt_base = dt.address;
8408 
8409 	vmx_setup_user_return_msrs();
8410 
8411 	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
8412 		return -EIO;
8413 
8414 	if (boot_cpu_has(X86_FEATURE_NX))
8415 		kvm_enable_efer_bits(EFER_NX);
8416 
8417 	if (boot_cpu_has(X86_FEATURE_MPX)) {
8418 		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
8419 		WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
8420 	}
8421 
8422 	if (!cpu_has_vmx_mpx())
8423 		kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
8424 					     XFEATURE_MASK_BNDCSR);
8425 
8426 	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
8427 	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
8428 		enable_vpid = 0;
8429 
8430 	if (!cpu_has_vmx_ept() ||
8431 	    !cpu_has_vmx_ept_4levels() ||
8432 	    !cpu_has_vmx_ept_mt_wb() ||
8433 	    !cpu_has_vmx_invept_global())
8434 		enable_ept = 0;
8435 
8436 	/* NX support is required for shadow paging. */
8437 	if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
8438 		pr_err_ratelimited("NX (Execute Disable) not supported\n");
8439 		return -EOPNOTSUPP;
8440 	}
8441 
8442 	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
8443 		enable_ept_ad_bits = 0;
8444 
8445 	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
8446 		enable_unrestricted_guest = 0;
8447 
8448 	if (!cpu_has_vmx_flexpriority())
8449 		flexpriority_enabled = 0;
8450 
8451 	if (!cpu_has_virtual_nmis())
8452 		enable_vnmi = 0;
8453 
8454 #ifdef CONFIG_X86_SGX_KVM
8455 	if (!cpu_has_vmx_encls_vmexit())
8456 		enable_sgx = false;
8457 #endif
8458 
8459 	/*
8460 	 * set_apic_access_page_addr() is used to reload apic access
8461 	 * page upon invalidation.  No need to do anything if not
8462 	 * using the APIC_ACCESS_ADDR VMCS field.
8463 	 */
8464 	if (!flexpriority_enabled)
8465 		vt_x86_ops.set_apic_access_page_addr = NULL;
8466 
8467 	if (!cpu_has_vmx_tpr_shadow())
8468 		vt_x86_ops.update_cr8_intercept = NULL;
8469 
8470 #if IS_ENABLED(CONFIG_HYPERV)
8471 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
8472 	    && enable_ept) {
8473 		vt_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
8474 		vt_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
8475 	}
8476 #endif
8477 
8478 	if (!cpu_has_vmx_ple()) {
8479 		ple_gap = 0;
8480 		ple_window = 0;
8481 		ple_window_grow = 0;
8482 		ple_window_max = 0;
8483 		ple_window_shrink = 0;
8484 	}
8485 
8486 	if (!cpu_has_vmx_apicv())
8487 		enable_apicv = 0;
8488 	if (!enable_apicv)
8489 		vt_x86_ops.sync_pir_to_irr = NULL;
8490 
8491 	if (!enable_apicv || !cpu_has_vmx_ipiv())
8492 		enable_ipiv = false;
8493 
8494 	if (cpu_has_vmx_tsc_scaling())
8495 		kvm_caps.has_tsc_control = true;
8496 
8497 	kvm_caps.max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
8498 	kvm_caps.tsc_scaling_ratio_frac_bits = 48;
8499 	kvm_caps.has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
8500 	kvm_caps.has_notify_vmexit = cpu_has_notify_vmexit();
8501 
8502 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
8503 
8504 	if (enable_ept)
8505 		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
8506 				      cpu_has_vmx_ept_execute_only());
8507 
8508 	/*
8509 	 * Setup shadow_me_value/shadow_me_mask to include MKTME KeyID
8510 	 * bits to shadow_zero_check.
8511 	 */
8512 	vmx_setup_me_spte_mask();
8513 
8514 	kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
8515 			  ept_caps_to_lpage_level(vmx_capability.ept));
8516 
8517 	/*
8518 	 * Only enable PML when hardware supports PML feature, and both EPT
8519 	 * and EPT A/D bit features are enabled -- PML depends on them to work.
8520 	 */
8521 	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
8522 		enable_pml = 0;
8523 
8524 	if (!enable_pml)
8525 		vt_x86_ops.cpu_dirty_log_size = 0;
8526 
8527 	if (!cpu_has_vmx_preemption_timer())
8528 		enable_preemption_timer = false;
8529 
8530 	if (enable_preemption_timer) {
8531 		u64 use_timer_freq = 5000ULL * 1000 * 1000;
8532 
8533 		cpu_preemption_timer_multi =
8534 			vmx_misc_preemption_timer_rate(vmcs_config.misc);
8535 
8536 		if (tsc_khz)
8537 			use_timer_freq = (u64)tsc_khz * 1000;
8538 		use_timer_freq >>= cpu_preemption_timer_multi;
8539 
8540 		/*
8541 		 * KVM "disables" the preemption timer by setting it to its max
8542 		 * value.  Don't use the timer if it might cause spurious exits
8543 		 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
8544 		 */
8545 		if (use_timer_freq > 0xffffffffu / 10)
8546 			enable_preemption_timer = false;
8547 	}
8548 
8549 	if (!enable_preemption_timer) {
8550 		vt_x86_ops.set_hv_timer = NULL;
8551 		vt_x86_ops.cancel_hv_timer = NULL;
8552 	}
8553 
8554 	kvm_caps.supported_mce_cap |= MCG_LMCE_P;
8555 	kvm_caps.supported_mce_cap |= MCG_CMCI_P;
8556 
8557 	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
8558 		return -EINVAL;
8559 	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
8560 		pt_mode = PT_MODE_SYSTEM;
8561 	if (pt_mode == PT_MODE_HOST_GUEST)
8562 		vt_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
8563 	else
8564 		vt_init_ops.handle_intel_pt_intr = NULL;
8565 
8566 	setup_default_sgx_lepubkeyhash();
8567 
8568 	if (nested) {
8569 		nested_vmx_setup_ctls_msrs(&vmcs_config, vmx_capability.ept);
8570 
8571 		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
8572 		if (r)
8573 			return r;
8574 	}
8575 
8576 	vmx_set_cpu_caps();
8577 
8578 	r = alloc_kvm_area();
8579 	if (r && nested)
8580 		nested_vmx_hardware_unsetup();
8581 
8582 	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
8583 
8584 	return r;
8585 }
8586 
vmx_cleanup_l1d_flush(void)8587 static void vmx_cleanup_l1d_flush(void)
8588 {
8589 	if (vmx_l1d_flush_pages) {
8590 		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
8591 		vmx_l1d_flush_pages = NULL;
8592 	}
8593 	/* Restore state so sysfs ignores VMX */
8594 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
8595 }
8596 
__vmx_exit(void)8597 static void __vmx_exit(void)
8598 {
8599 	allow_smaller_maxphyaddr = false;
8600 
8601 	vmx_cleanup_l1d_flush();
8602 }
8603 
vmx_exit(void)8604 static void __exit vmx_exit(void)
8605 {
8606 	kvm_exit();
8607 	__vmx_exit();
8608 	kvm_x86_vendor_exit();
8609 
8610 }
8611 module_exit(vmx_exit);
8612 
vmx_init(void)8613 static int __init vmx_init(void)
8614 {
8615 	int r, cpu;
8616 
8617 	if (!kvm_is_vmx_supported())
8618 		return -EOPNOTSUPP;
8619 
8620 	/*
8621 	 * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
8622 	 * to unwind if a later step fails.
8623 	 */
8624 	hv_init_evmcs();
8625 
8626 	r = kvm_x86_vendor_init(&vt_init_ops);
8627 	if (r)
8628 		return r;
8629 
8630 	/*
8631 	 * Must be called after common x86 init so enable_ept is properly set
8632 	 * up. Hand the parameter mitigation value in which was stored in
8633 	 * the pre module init parser. If no parameter was given, it will
8634 	 * contain 'auto' which will be turned into the default 'cond'
8635 	 * mitigation mode.
8636 	 */
8637 	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
8638 	if (r)
8639 		goto err_l1d_flush;
8640 
8641 	for_each_possible_cpu(cpu) {
8642 		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
8643 
8644 		pi_init_cpu(cpu);
8645 	}
8646 
8647 	vmx_check_vmcs12_offsets();
8648 
8649 	/*
8650 	 * Shadow paging doesn't have a (further) performance penalty
8651 	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
8652 	 * by default
8653 	 */
8654 	if (!enable_ept)
8655 		allow_smaller_maxphyaddr = true;
8656 
8657 	/*
8658 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
8659 	 * exposed to userspace!
8660 	 */
8661 	r = kvm_init(sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx),
8662 		     THIS_MODULE);
8663 	if (r)
8664 		goto err_kvm_init;
8665 
8666 	return 0;
8667 
8668 err_kvm_init:
8669 	__vmx_exit();
8670 err_l1d_flush:
8671 	kvm_x86_vendor_exit();
8672 	return r;
8673 }
8674 module_init(vmx_init);
8675