xref: /linux/arch/x86/kvm/vmx/vmx.c (revision e1914add2799225a87502051415fc5c32aeb02ae)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * Copyright (C) 2006 Qumranet, Inc.
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  */
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/highmem.h>
18 #include <linux/hrtimer.h>
19 #include <linux/kernel.h>
20 #include <linux/kvm_host.h>
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/mod_devicetable.h>
24 #include <linux/mm.h>
25 #include <linux/objtool.h>
26 #include <linux/sched.h>
27 #include <linux/sched/smt.h>
28 #include <linux/slab.h>
29 #include <linux/tboot.h>
30 #include <linux/trace_events.h>
31 
32 #include <asm/apic.h>
33 #include <asm/asm.h>
34 #include <asm/cpu.h>
35 #include <asm/cpu_device_id.h>
36 #include <asm/debugreg.h>
37 #include <asm/desc.h>
38 #include <asm/fpu/api.h>
39 #include <asm/fpu/xstate.h>
40 #include <asm/fred.h>
41 #include <asm/idtentry.h>
42 #include <asm/io.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/reboot.h>
45 #include <asm/perf_event.h>
46 #include <asm/mmu_context.h>
47 #include <asm/mshyperv.h>
48 #include <asm/msr.h>
49 #include <asm/mwait.h>
50 #include <asm/spec-ctrl.h>
51 #include <asm/virt.h>
52 #include <asm/vmx.h>
53 
54 #include <trace/events/ipi.h>
55 
56 #include "capabilities.h"
57 #include "common.h"
58 #include "cpuid.h"
59 #include "hyperv.h"
60 #include "kvm_onhyperv.h"
61 #include "irq.h"
62 #include "kvm_cache_regs.h"
63 #include "lapic.h"
64 #include "mmu.h"
65 #include "nested.h"
66 #include "pmu.h"
67 #include "sgx.h"
68 #include "trace.h"
69 #include "vmcs.h"
70 #include "vmcs12.h"
71 #include "vmx.h"
72 #include "x86.h"
73 #include "x86_ops.h"
74 #include "smm.h"
75 #include "vmx_onhyperv.h"
76 #include "posted_intr.h"
77 
78 #include "mmu/spte.h"
79 
80 MODULE_AUTHOR("Qumranet");
81 MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
82 MODULE_LICENSE("GPL");
83 
84 #ifdef MODULE
85 static const struct x86_cpu_id vmx_cpu_id[] = {
86 	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
87 	{}
88 };
89 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
90 #endif
91 
92 bool __read_mostly enable_vpid = 1;
93 module_param_named(vpid, enable_vpid, bool, 0444);
94 
95 static bool __read_mostly enable_vnmi = 1;
96 module_param_named(vnmi, enable_vnmi, bool, 0444);
97 
98 bool __read_mostly flexpriority_enabled = 1;
99 module_param_named(flexpriority, flexpriority_enabled, bool, 0444);
100 
101 bool __read_mostly enable_ept = 1;
102 module_param_named(ept, enable_ept, bool, 0444);
103 
104 bool __read_mostly enable_unrestricted_guest = 1;
105 module_param_named(unrestricted_guest,
106 			enable_unrestricted_guest, bool, 0444);
107 
108 bool __read_mostly enable_ept_ad_bits = 1;
109 module_param_named(eptad, enable_ept_ad_bits, bool, 0444);
110 
111 bool __read_mostly enable_cet = 1;
112 module_param_named(cet, enable_cet, bool, 0444);
113 
114 static bool __read_mostly emulate_invalid_guest_state = true;
115 module_param(emulate_invalid_guest_state, bool, 0444);
116 
117 static bool __read_mostly fasteoi = 1;
118 module_param(fasteoi, bool, 0444);
119 
120 module_param(enable_apicv, bool, 0444);
121 module_param(enable_ipiv, bool, 0444);
122 
123 module_param(enable_device_posted_irqs, bool, 0444);
124 
125 /*
126  * If nested=1, nested virtualization is supported, i.e., guests may use
127  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
128  * use VMX instructions.
129  */
130 static bool __read_mostly nested = 1;
131 module_param(nested, bool, 0444);
132 
133 bool __read_mostly enable_pml = 1;
134 module_param_named(pml, enable_pml, bool, 0444);
135 
136 static bool __read_mostly error_on_inconsistent_vmcs_config = true;
137 module_param(error_on_inconsistent_vmcs_config, bool, 0444);
138 
139 static bool __read_mostly dump_invalid_vmcs = 0;
140 module_param(dump_invalid_vmcs, bool, 0644);
141 
142 #define MSR_BITMAP_MODE_X2APIC		1
143 #define MSR_BITMAP_MODE_X2APIC_APICV	2
144 
145 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
146 
147 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
148 static int __read_mostly cpu_preemption_timer_multi;
149 static bool __read_mostly enable_preemption_timer = 1;
150 #ifdef CONFIG_X86_64
151 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
152 #endif
153 
154 extern bool __read_mostly allow_smaller_maxphyaddr;
155 module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
156 
157 module_param(enable_mediated_pmu, bool, 0444);
158 
159 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
160 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
161 #define KVM_VM_CR0_ALWAYS_ON				\
162 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
163 
164 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
165 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
166 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
167 
168 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
169 
170 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
171 	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
172 	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
173 	RTIT_STATUS_BYTECNT))
174 
175 /*
176  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
177  * ple_gap:    upper bound on the amount of time between two successive
178  *             executions of PAUSE in a loop. Also indicate if ple enabled.
179  *             According to test, this time is usually smaller than 128 cycles.
180  * ple_window: upper bound on the amount of time a guest is allowed to execute
181  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
182  *             less than 2^12 cycles
183  * Time is measured based on a counter that runs at the same rate as the TSC,
184  * refer SDM volume 3b section 21.6.13 & 22.1.3.
185  */
186 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
187 module_param(ple_gap, uint, 0444);
188 
189 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
190 module_param(ple_window, uint, 0444);
191 
192 /* Default doubles per-vcpu window every exit. */
193 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
194 module_param(ple_window_grow, uint, 0444);
195 
196 /* Default resets per-vcpu window every exit to ple_window. */
197 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
198 module_param(ple_window_shrink, uint, 0444);
199 
200 /* Default is to compute the maximum so we can never overflow. */
201 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
202 module_param(ple_window_max, uint, 0444);
203 
204 /* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
205 int __read_mostly pt_mode = PT_MODE_SYSTEM;
206 #ifdef CONFIG_BROKEN
207 module_param(pt_mode, int, S_IRUGO);
208 #endif
209 
210 struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
211 
212 #ifdef CONFIG_CPU_MITIGATIONS
213 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
214 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
215 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
216 
217 /* Storage for pre module init parameter parsing */
218 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
219 
220 static const struct {
221 	const char *option;
222 	bool for_parse;
223 } vmentry_l1d_param[] = {
224 	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
225 	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
226 	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
227 	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
228 	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
229 	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
230 };
231 
232 #define L1D_CACHE_ORDER 4
233 static void *vmx_l1d_flush_pages;
234 
__vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)235 static int __vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
236 {
237 	struct page *page;
238 	unsigned int i;
239 
240 	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
241 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
242 		return 0;
243 	}
244 
245 	if (!enable_ept) {
246 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
247 		return 0;
248 	}
249 
250 	if (kvm_host.arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
251 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
252 		return 0;
253 	}
254 
255 	/* If set to auto use the default l1tf mitigation method */
256 	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
257 		switch (l1tf_mitigation) {
258 		case L1TF_MITIGATION_OFF:
259 			l1tf = VMENTER_L1D_FLUSH_NEVER;
260 			break;
261 		case L1TF_MITIGATION_AUTO:
262 		case L1TF_MITIGATION_FLUSH_NOWARN:
263 		case L1TF_MITIGATION_FLUSH:
264 		case L1TF_MITIGATION_FLUSH_NOSMT:
265 			l1tf = VMENTER_L1D_FLUSH_COND;
266 			break;
267 		case L1TF_MITIGATION_FULL:
268 		case L1TF_MITIGATION_FULL_FORCE:
269 			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
270 			break;
271 		}
272 	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
273 		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
274 	}
275 
276 	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
277 	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
278 		/*
279 		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
280 		 * lifetime and so should not be charged to a memcg.
281 		 */
282 		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
283 		if (!page)
284 			return -ENOMEM;
285 		vmx_l1d_flush_pages = page_address(page);
286 
287 		/*
288 		 * Initialize each page with a different pattern in
289 		 * order to protect against KSM in the nested
290 		 * virtualization case.
291 		 */
292 		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
293 			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
294 			       PAGE_SIZE);
295 		}
296 	}
297 
298 	l1tf_vmx_mitigation = l1tf;
299 
300 	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
301 		static_branch_enable(&vmx_l1d_should_flush);
302 	else
303 		static_branch_disable(&vmx_l1d_should_flush);
304 
305 	if (l1tf == VMENTER_L1D_FLUSH_COND)
306 		static_branch_enable(&vmx_l1d_flush_cond);
307 	else
308 		static_branch_disable(&vmx_l1d_flush_cond);
309 	return 0;
310 }
311 
vmx_setup_l1d_flush(void)312 static int vmx_setup_l1d_flush(void)
313 {
314 	/*
315 	 * Hand the parameter mitigation value in which was stored in the pre
316 	 * module init parser. If no parameter was given, it will contain
317 	 * 'auto' which will be turned into the default 'cond' mitigation mode.
318 	 */
319 	return __vmx_setup_l1d_flush(vmentry_l1d_flush_param);
320 }
321 
vmx_cleanup_l1d_flush(void)322 static void vmx_cleanup_l1d_flush(void)
323 {
324 	if (vmx_l1d_flush_pages) {
325 		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
326 		vmx_l1d_flush_pages = NULL;
327 	}
328 	/* Restore state so sysfs ignores VMX */
329 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
330 }
331 
vmentry_l1d_flush_parse(const char * s)332 static int vmentry_l1d_flush_parse(const char *s)
333 {
334 	unsigned int i;
335 
336 	if (s) {
337 		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
338 			if (vmentry_l1d_param[i].for_parse &&
339 			    sysfs_streq(s, vmentry_l1d_param[i].option))
340 				return i;
341 		}
342 	}
343 	return -EINVAL;
344 }
345 
vmentry_l1d_flush_set(const char * s,const struct kernel_param * kp)346 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
347 {
348 	int l1tf, ret;
349 
350 	l1tf = vmentry_l1d_flush_parse(s);
351 	if (l1tf < 0)
352 		return l1tf;
353 
354 	if (!boot_cpu_has(X86_BUG_L1TF))
355 		return 0;
356 
357 	/*
358 	 * Has vmx_init() run already? If not then this is the pre init
359 	 * parameter parsing. In that case just store the value and let
360 	 * vmx_init() do the proper setup after enable_ept has been
361 	 * established.
362 	 */
363 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
364 		vmentry_l1d_flush_param = l1tf;
365 		return 0;
366 	}
367 
368 	mutex_lock(&vmx_l1d_flush_mutex);
369 	ret = __vmx_setup_l1d_flush(l1tf);
370 	mutex_unlock(&vmx_l1d_flush_mutex);
371 	return ret;
372 }
373 
vmentry_l1d_flush_get(char * s,const struct kernel_param * kp)374 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
375 {
376 	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
377 		return sysfs_emit(s, "???\n");
378 
379 	return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
380 }
381 
382 /*
383  * Software based L1D cache flush which is used when microcode providing
384  * the cache control MSR is not loaded.
385  *
386  * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
387  * flush it is required to read in 64 KiB because the replacement algorithm
388  * is not exactly LRU. This could be sized at runtime via topology
389  * information but as all relevant affected CPUs have 32KiB L1D cache size
390  * there is no point in doing so.
391  */
vmx_l1d_flush(struct kvm_vcpu * vcpu)392 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
393 {
394 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
395 
396 	if (!static_branch_unlikely(&vmx_l1d_should_flush))
397 		return;
398 
399 	/*
400 	 * This code is only executed when the flush mode is 'cond' or
401 	 * 'always'
402 	 */
403 	if (static_branch_likely(&vmx_l1d_flush_cond)) {
404 		/*
405 		 * Clear the per-cpu flush bit, it gets set again if the vCPU
406 		 * is reloaded, i.e. if the vCPU is scheduled out or if KVM
407 		 * exits to userspace, or if KVM reaches one of the unsafe
408 		 * VMEXIT handlers, e.g. if KVM calls into the emulator,
409 		 * or from the interrupt handlers.
410 		 */
411 		if (!kvm_get_cpu_l1tf_flush_l1d())
412 			return;
413 		kvm_clear_cpu_l1tf_flush_l1d();
414 	}
415 
416 	vcpu->stat.l1d_flush++;
417 
418 	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
419 		native_wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
420 		return;
421 	}
422 
423 	asm volatile(
424 		/* First ensure the pages are in the TLB */
425 		"xorl	%%eax, %%eax\n"
426 		".Lpopulate_tlb:\n\t"
427 		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
428 		"addl	$4096, %%eax\n\t"
429 		"cmpl	%%eax, %[size]\n\t"
430 		"jne	.Lpopulate_tlb\n\t"
431 		"xorl	%%eax, %%eax\n\t"
432 		"cpuid\n\t"
433 		/* Now fill the cache */
434 		"xorl	%%eax, %%eax\n"
435 		".Lfill_cache:\n"
436 		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
437 		"addl	$64, %%eax\n\t"
438 		"cmpl	%%eax, %[size]\n\t"
439 		"jne	.Lfill_cache\n\t"
440 		"lfence\n"
441 		:: [flush_pages] "r" (vmx_l1d_flush_pages),
442 		    [size] "r" (size)
443 		: "eax", "ebx", "ecx", "edx");
444 }
445 
446 #else /* CONFIG_CPU_MITIGATIONS*/
vmx_setup_l1d_flush(void)447 static int vmx_setup_l1d_flush(void)
448 {
449 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NEVER;
450 	return 0;
451 }
vmx_cleanup_l1d_flush(void)452 static void vmx_cleanup_l1d_flush(void)
453 {
454 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
455 }
vmx_l1d_flush(struct kvm_vcpu * vcpu)456 static __always_inline void vmx_l1d_flush(struct kvm_vcpu *vcpu)
457 {
458 
459 }
vmentry_l1d_flush_set(const char * s,const struct kernel_param * kp)460 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
461 {
462 	pr_warn_once("Kernel compiled without mitigations, ignoring vmentry_l1d_flush\n");
463 	return 0;
464 }
vmentry_l1d_flush_get(char * s,const struct kernel_param * kp)465 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
466 {
467 	return sysfs_emit(s, "never\n");
468 }
469 #endif
470 
471 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
472 	.set = vmentry_l1d_flush_set,
473 	.get = vmentry_l1d_flush_get,
474 };
475 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
476 
vmx_disable_fb_clear(struct vcpu_vmx * vmx)477 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
478 {
479 	u64 msr;
480 
481 	if (!vmx->disable_fb_clear)
482 		return;
483 
484 	msr = native_rdmsrq(MSR_IA32_MCU_OPT_CTRL);
485 	msr |= FB_CLEAR_DIS;
486 	native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, msr);
487 	/* Cache the MSR value to avoid reading it later */
488 	vmx->msr_ia32_mcu_opt_ctrl = msr;
489 }
490 
vmx_enable_fb_clear(struct vcpu_vmx * vmx)491 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
492 {
493 	if (!vmx->disable_fb_clear)
494 		return;
495 
496 	vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
497 	native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
498 }
499 
vmx_update_fb_clear_dis(struct kvm_vcpu * vcpu,struct vcpu_vmx * vmx)500 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
501 {
502 	/*
503 	 * Disable VERW's behavior of clearing CPU buffers for the guest if the
504 	 * CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
505 	 * the mitigation. Disabling the clearing behavior provides a
506 	 * performance boost for guests that aren't aware that manually clearing
507 	 * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
508 	 * and VM-Exit.
509 	 */
510 	vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
511 				(kvm_host.arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
512 				!boot_cpu_has_bug(X86_BUG_MDS) &&
513 				!boot_cpu_has_bug(X86_BUG_TAA);
514 
515 	/*
516 	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
517 	 * at VMEntry. Skip the MSR read/write when a guest has no use case to
518 	 * execute VERW.
519 	 */
520 	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
521 	   ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
522 	    (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
523 	    (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
524 	    (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
525 	    (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
526 		vmx->disable_fb_clear = false;
527 }
528 
529 static u32 vmx_segment_access_rights(struct kvm_segment *var);
530 
531 void vmx_vmexit(void);
532 
533 #define vmx_insn_failed(fmt...)		\
534 do {					\
535 	WARN_ONCE(1, fmt);		\
536 	pr_warn_ratelimited(fmt);	\
537 } while (0)
538 
vmread_error(unsigned long field)539 noinline void vmread_error(unsigned long field)
540 {
541 	vmx_insn_failed("vmread failed: field=%lx\n", field);
542 }
543 
544 #ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
vmread_error_trampoline2(unsigned long field,bool fault)545 noinstr void vmread_error_trampoline2(unsigned long field, bool fault)
546 {
547 	if (fault) {
548 		kvm_spurious_fault();
549 	} else {
550 		instrumentation_begin();
551 		vmread_error(field);
552 		instrumentation_end();
553 	}
554 }
555 #endif
556 
vmwrite_error(unsigned long field,unsigned long value)557 noinline void vmwrite_error(unsigned long field, unsigned long value)
558 {
559 	vmx_insn_failed("vmwrite failed: field=%lx val=%lx err=%u\n",
560 			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
561 }
562 
vmclear_error(struct vmcs * vmcs,u64 phys_addr)563 noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
564 {
565 	vmx_insn_failed("vmclear failed: %p/%llx err=%u\n",
566 			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
567 }
568 
vmptrld_error(struct vmcs * vmcs,u64 phys_addr)569 noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
570 {
571 	vmx_insn_failed("vmptrld failed: %p/%llx err=%u\n",
572 			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
573 }
574 
invvpid_error(unsigned long ext,u16 vpid,gva_t gva)575 noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
576 {
577 	vmx_insn_failed("invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
578 			ext, vpid, gva);
579 }
580 
invept_error(unsigned long ext,u64 eptp)581 noinline void invept_error(unsigned long ext, u64 eptp)
582 {
583 	vmx_insn_failed("invept failed: ext=0x%lx eptp=%llx\n", ext, eptp);
584 }
585 
586 DEFINE_PER_CPU(struct vmcs *, current_vmcs);
587 /*
588  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
589  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
590  */
591 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
592 
593 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
594 static DEFINE_SPINLOCK(vmx_vpid_lock);
595 
596 struct vmcs_config vmcs_config __ro_after_init;
597 struct vmx_capability vmx_capability __ro_after_init;
598 
599 #define VMX_SEGMENT_FIELD(seg)					\
600 	[VCPU_SREG_##seg] = {                                   \
601 		.selector = GUEST_##seg##_SELECTOR,		\
602 		.base = GUEST_##seg##_BASE,		   	\
603 		.limit = GUEST_##seg##_LIMIT,		   	\
604 		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
605 	}
606 
607 static const struct kvm_vmx_segment_field {
608 	unsigned selector;
609 	unsigned base;
610 	unsigned limit;
611 	unsigned ar_bytes;
612 } kvm_vmx_segment_fields[] = {
613 	VMX_SEGMENT_FIELD(CS),
614 	VMX_SEGMENT_FIELD(DS),
615 	VMX_SEGMENT_FIELD(ES),
616 	VMX_SEGMENT_FIELD(FS),
617 	VMX_SEGMENT_FIELD(GS),
618 	VMX_SEGMENT_FIELD(SS),
619 	VMX_SEGMENT_FIELD(TR),
620 	VMX_SEGMENT_FIELD(LDTR),
621 };
622 
623 
624 static unsigned long host_idt_base;
625 
626 #if IS_ENABLED(CONFIG_HYPERV)
627 static bool __read_mostly enlightened_vmcs = true;
628 module_param(enlightened_vmcs, bool, 0444);
629 
hv_enable_l2_tlb_flush(struct kvm_vcpu * vcpu)630 static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
631 {
632 	struct hv_enlightened_vmcs *evmcs;
633 	hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
634 
635 	if (partition_assist_page == INVALID_PAGE)
636 		return -ENOMEM;
637 
638 	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
639 
640 	evmcs->partition_assist_page = partition_assist_page;
641 	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
642 	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
643 
644 	return 0;
645 }
646 
hv_init_evmcs(void)647 static __init void hv_init_evmcs(void)
648 {
649 	int cpu;
650 
651 	if (!enlightened_vmcs)
652 		return;
653 
654 	/*
655 	 * Enlightened VMCS usage should be recommended and the host needs
656 	 * to support eVMCS v1 or above.
657 	 */
658 	if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
659 	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
660 	     KVM_EVMCS_VERSION) {
661 
662 		/* Check that we have assist pages on all online CPUs */
663 		for_each_online_cpu(cpu) {
664 			if (!hv_get_vp_assist_page(cpu)) {
665 				enlightened_vmcs = false;
666 				break;
667 			}
668 		}
669 
670 		if (enlightened_vmcs) {
671 			pr_info("Using Hyper-V Enlightened VMCS\n");
672 			static_branch_enable(&__kvm_is_using_evmcs);
673 		}
674 
675 		if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
676 			vt_x86_ops.enable_l2_tlb_flush
677 				= hv_enable_l2_tlb_flush;
678 	} else {
679 		enlightened_vmcs = false;
680 	}
681 }
682 
hv_reset_evmcs(void)683 static void hv_reset_evmcs(void)
684 {
685 	struct hv_vp_assist_page *vp_ap;
686 
687 	if (!kvm_is_using_evmcs())
688 		return;
689 
690 	/*
691 	 * KVM should enable eVMCS if and only if all CPUs have a VP assist
692 	 * page, and should reject CPU onlining if eVMCS is enabled the CPU
693 	 * doesn't have a VP assist page allocated.
694 	 */
695 	vp_ap = hv_get_vp_assist_page(smp_processor_id());
696 	if (WARN_ON_ONCE(!vp_ap))
697 		return;
698 
699 	/*
700 	 * Reset everything to support using non-enlightened VMCS access later
701 	 * (e.g. when we reload the module with enlightened_vmcs=0)
702 	 */
703 	vp_ap->nested_control.features.directhypercall = 0;
704 	vp_ap->current_nested_vmcs = 0;
705 	vp_ap->enlighten_vmentry = 0;
706 }
707 
708 #else /* IS_ENABLED(CONFIG_HYPERV) */
hv_init_evmcs(void)709 static void hv_init_evmcs(void) {}
hv_reset_evmcs(void)710 static void hv_reset_evmcs(void) {}
711 #endif /* IS_ENABLED(CONFIG_HYPERV) */
712 
713 /*
714  * Comment's format: document - errata name - stepping - processor name.
715  * Refer from
716  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
717  */
718 static u32 vmx_preemption_cpu_tfms[] = {
719 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
720 0x000206E6,
721 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
722 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
723 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
724 0x00020652,
725 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
726 0x00020655,
727 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
728 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
729 /*
730  * 320767.pdf - AAP86  - B1 -
731  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
732  */
733 0x000106E5,
734 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
735 0x000106A0,
736 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
737 0x000106A1,
738 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
739 0x000106A4,
740  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
741  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
742  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
743 0x000106A5,
744  /* Xeon E3-1220 V2 */
745 0x000306A8,
746 };
747 
cpu_has_broken_vmx_preemption_timer(void)748 static inline bool cpu_has_broken_vmx_preemption_timer(void)
749 {
750 	u32 eax = cpuid_eax(0x00000001), i;
751 
752 	/* Clear the reserved bits */
753 	eax &= ~(0x3U << 14 | 0xfU << 28);
754 	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
755 		if (eax == vmx_preemption_cpu_tfms[i])
756 			return true;
757 
758 	return false;
759 }
760 
cpu_need_virtualize_apic_accesses(struct kvm_vcpu * vcpu)761 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
762 {
763 	return flexpriority_enabled && lapic_in_kernel(vcpu);
764 }
765 
vmx_find_uret_msr(struct vcpu_vmx * vmx,u32 msr)766 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
767 {
768 	int i;
769 
770 	i = kvm_find_user_return_msr(msr);
771 	if (i >= 0)
772 		return &vmx->guest_uret_msrs[i];
773 	return NULL;
774 }
775 
vmx_set_guest_uret_msr(struct vcpu_vmx * vmx,struct vmx_uret_msr * msr,u64 data)776 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
777 				  struct vmx_uret_msr *msr, u64 data)
778 {
779 	unsigned int slot = msr - vmx->guest_uret_msrs;
780 	int ret = 0;
781 
782 	if (msr->load_into_hardware) {
783 		preempt_disable();
784 		ret = kvm_set_user_return_msr(slot, data, msr->mask);
785 		preempt_enable();
786 	}
787 	if (!ret)
788 		msr->data = data;
789 	return ret;
790 }
791 
vmx_emergency_disable_virtualization_cpu(void)792 void vmx_emergency_disable_virtualization_cpu(void)
793 {
794 	int cpu = raw_smp_processor_id();
795 	struct loaded_vmcs *v;
796 
797 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
798 			    loaded_vmcss_on_cpu_link) {
799 		vmcs_clear(v->vmcs);
800 		if (v->shadow_vmcs)
801 			vmcs_clear(v->shadow_vmcs);
802 	}
803 }
804 
__loaded_vmcs_clear(void * arg)805 static void __loaded_vmcs_clear(void *arg)
806 {
807 	struct loaded_vmcs *loaded_vmcs = arg;
808 	int cpu = raw_smp_processor_id();
809 
810 	if (loaded_vmcs->cpu != cpu)
811 		return; /* vcpu migration can race with cpu offline */
812 	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
813 		per_cpu(current_vmcs, cpu) = NULL;
814 
815 	vmcs_clear(loaded_vmcs->vmcs);
816 	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
817 		vmcs_clear(loaded_vmcs->shadow_vmcs);
818 
819 	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
820 
821 	/*
822 	 * Ensure all writes to loaded_vmcs, including deleting it from its
823 	 * current percpu list, complete before setting loaded_vmcs->cpu to
824 	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
825 	 * and add loaded_vmcs to its percpu list before it's deleted from this
826 	 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
827 	 */
828 	smp_wmb();
829 
830 	loaded_vmcs->cpu = -1;
831 	loaded_vmcs->launched = 0;
832 }
833 
loaded_vmcs_clear(struct loaded_vmcs * loaded_vmcs)834 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
835 {
836 	int cpu = loaded_vmcs->cpu;
837 
838 	if (cpu != -1)
839 		smp_call_function_single(cpu,
840 			 __loaded_vmcs_clear, loaded_vmcs, 1);
841 }
842 
vmx_segment_cache_test_set(struct vcpu_vmx * vmx,unsigned seg,unsigned field)843 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
844 				       unsigned field)
845 {
846 	bool ret;
847 	u32 mask = 1 << (seg * SEG_FIELD_NR + field);
848 
849 	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
850 		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
851 		vmx->segment_cache.bitmask = 0;
852 	}
853 	ret = vmx->segment_cache.bitmask & mask;
854 	vmx->segment_cache.bitmask |= mask;
855 	return ret;
856 }
857 
vmx_read_guest_seg_selector(struct vcpu_vmx * vmx,unsigned seg)858 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
859 {
860 	u16 *p = &vmx->segment_cache.seg[seg].selector;
861 
862 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
863 		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
864 	return *p;
865 }
866 
vmx_read_guest_seg_base(struct vcpu_vmx * vmx,unsigned seg)867 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
868 {
869 	ulong *p = &vmx->segment_cache.seg[seg].base;
870 
871 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
872 		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
873 	return *p;
874 }
875 
vmx_read_guest_seg_limit(struct vcpu_vmx * vmx,unsigned seg)876 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
877 {
878 	u32 *p = &vmx->segment_cache.seg[seg].limit;
879 
880 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
881 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
882 	return *p;
883 }
884 
vmx_read_guest_seg_ar(struct vcpu_vmx * vmx,unsigned seg)885 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
886 {
887 	u32 *p = &vmx->segment_cache.seg[seg].ar;
888 
889 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
890 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
891 	return *p;
892 }
893 
vmx_update_exception_bitmap(struct kvm_vcpu * vcpu)894 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
895 {
896 	u32 eb;
897 
898 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
899 	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
900 	/*
901 	 * #VE isn't used for VMX.  To test against unexpected changes
902 	 * related to #VE for VMX, intercept unexpected #VE and warn on it.
903 	 */
904 	if (IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
905 		eb |= 1u << VE_VECTOR;
906 	/*
907 	 * Guest access to VMware backdoor ports could legitimately
908 	 * trigger #GP because of TSS I/O permission bitmap.
909 	 * We intercept those #GP and allow access to them anyway
910 	 * as VMware does.
911 	 */
912 	if (enable_vmware_backdoor)
913 		eb |= (1u << GP_VECTOR);
914 	if ((vcpu->guest_debug &
915 	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
916 	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
917 		eb |= 1u << BP_VECTOR;
918 	if (to_vmx(vcpu)->rmode.vm86_active)
919 		eb = ~0;
920 	if (!vmx_need_pf_intercept(vcpu))
921 		eb &= ~(1u << PF_VECTOR);
922 
923 	/* When we are running a nested L2 guest and L1 specified for it a
924 	 * certain exception bitmap, we must trap the same exceptions and pass
925 	 * them to L1. When running L2, we will only handle the exceptions
926 	 * specified above if L1 did not want them.
927 	 */
928 	if (is_guest_mode(vcpu))
929 		eb |= get_vmcs12(vcpu)->exception_bitmap;
930 	else {
931 		int mask = 0, match = 0;
932 
933 		if (enable_ept && (eb & (1u << PF_VECTOR))) {
934 			/*
935 			 * If EPT is enabled, #PF is currently only intercepted
936 			 * if MAXPHYADDR is smaller on the guest than on the
937 			 * host.  In that case we only care about present,
938 			 * non-reserved faults.  For vmcs02, however, PFEC_MASK
939 			 * and PFEC_MATCH are set in prepare_vmcs02_rare.
940 			 */
941 			mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
942 			match = PFERR_PRESENT_MASK;
943 		}
944 		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
945 		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
946 	}
947 
948 	/*
949 	 * Disabling xfd interception indicates that dynamic xfeatures
950 	 * might be used in the guest. Always trap #NM in this case
951 	 * to save guest xfd_err timely.
952 	 */
953 	if (vcpu->arch.xfd_no_write_intercept)
954 		eb |= (1u << NM_VECTOR);
955 
956 	vmcs_write32(EXCEPTION_BITMAP, eb);
957 }
958 
959 /*
960  * Check if MSR is intercepted for currently loaded MSR bitmap.
961  */
msr_write_intercepted(struct vcpu_vmx * vmx,u32 msr)962 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
963 {
964 	if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
965 		return true;
966 
967 	return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
968 }
969 
__vmx_vcpu_run_flags(struct vcpu_vmx * vmx)970 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
971 {
972 	unsigned int flags = 0;
973 
974 	if (vmx->loaded_vmcs->launched)
975 		flags |= VMX_RUN_VMRESUME;
976 
977 	/*
978 	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
979 	 * to change it directly without causing a vmexit.  In that case read
980 	 * it after vmexit and store it in vmx->spec_ctrl.
981 	 */
982 	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
983 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
984 
985 	if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) &&
986 	    kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
987 		flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
988 
989 	return flags;
990 }
991 
clear_atomic_switch_msr_special(struct vcpu_vmx * vmx,unsigned long entry,unsigned long exit)992 static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
993 		unsigned long entry, unsigned long exit)
994 {
995 	vm_entry_controls_clearbit(vmx, entry);
996 	vm_exit_controls_clearbit(vmx, exit);
997 }
998 
vmx_find_loadstore_msr_slot(struct vmx_msrs * m,u32 msr)999 static int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
1000 {
1001 	unsigned int i;
1002 
1003 	for (i = 0; i < m->nr; ++i) {
1004 		if (m->val[i].index == msr)
1005 			return i;
1006 	}
1007 	return -ENOENT;
1008 }
1009 
vmx_remove_auto_msr(struct vmx_msrs * m,u32 msr,unsigned long vmcs_count_field)1010 static void vmx_remove_auto_msr(struct vmx_msrs *m, u32 msr,
1011 				unsigned long vmcs_count_field)
1012 {
1013 	int i;
1014 
1015 	i = vmx_find_loadstore_msr_slot(m, msr);
1016 	if (i < 0)
1017 		return;
1018 
1019 	--m->nr;
1020 	m->val[i] = m->val[m->nr];
1021 	vmcs_write32(vmcs_count_field, m->nr);
1022 }
1023 
clear_atomic_switch_msr(struct vcpu_vmx * vmx,unsigned msr)1024 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1025 {
1026 	struct msr_autoload *m = &vmx->msr_autoload;
1027 
1028 	switch (msr) {
1029 	case MSR_EFER:
1030 		if (cpu_has_load_ia32_efer()) {
1031 			clear_atomic_switch_msr_special(vmx,
1032 					VM_ENTRY_LOAD_IA32_EFER,
1033 					VM_EXIT_LOAD_IA32_EFER);
1034 			return;
1035 		}
1036 		break;
1037 	case MSR_CORE_PERF_GLOBAL_CTRL:
1038 		if (cpu_has_load_perf_global_ctrl()) {
1039 			clear_atomic_switch_msr_special(vmx,
1040 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1041 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1042 			return;
1043 		}
1044 		break;
1045 	}
1046 
1047 	vmx_remove_auto_msr(&m->guest, msr, VM_ENTRY_MSR_LOAD_COUNT);
1048 	vmx_remove_auto_msr(&m->host, msr, VM_EXIT_MSR_LOAD_COUNT);
1049 }
1050 
add_atomic_switch_msr_special(struct vcpu_vmx * vmx,unsigned long entry,unsigned long exit,unsigned long guest_val_vmcs,unsigned long host_val_vmcs,u64 guest_val,u64 host_val)1051 static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1052 		unsigned long entry, unsigned long exit,
1053 		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1054 		u64 guest_val, u64 host_val)
1055 {
1056 	vmcs_write64(guest_val_vmcs, guest_val);
1057 	if (host_val_vmcs != HOST_IA32_EFER)
1058 		vmcs_write64(host_val_vmcs, host_val);
1059 	vm_entry_controls_setbit(vmx, entry);
1060 	vm_exit_controls_setbit(vmx, exit);
1061 }
1062 
vmx_add_auto_msr(struct vmx_msrs * m,u32 msr,u64 value,unsigned long vmcs_count_field,struct kvm * kvm)1063 static void vmx_add_auto_msr(struct vmx_msrs *m, u32 msr, u64 value,
1064 			     unsigned long vmcs_count_field, struct kvm *kvm)
1065 {
1066 	int i;
1067 
1068 	i = vmx_find_loadstore_msr_slot(m, msr);
1069 	if (i < 0) {
1070 		if (KVM_BUG_ON(m->nr == MAX_NR_LOADSTORE_MSRS, kvm))
1071 			return;
1072 
1073 		i = m->nr++;
1074 		m->val[i].index = msr;
1075 		vmcs_write32(vmcs_count_field, m->nr);
1076 	}
1077 	m->val[i].value = value;
1078 }
1079 
add_atomic_switch_msr(struct vcpu_vmx * vmx,unsigned msr,u64 guest_val,u64 host_val)1080 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1081 				  u64 guest_val, u64 host_val)
1082 {
1083 	struct msr_autoload *m = &vmx->msr_autoload;
1084 	struct kvm *kvm = vmx->vcpu.kvm;
1085 
1086 	switch (msr) {
1087 	case MSR_EFER:
1088 		if (cpu_has_load_ia32_efer()) {
1089 			add_atomic_switch_msr_special(vmx,
1090 					VM_ENTRY_LOAD_IA32_EFER,
1091 					VM_EXIT_LOAD_IA32_EFER,
1092 					GUEST_IA32_EFER,
1093 					HOST_IA32_EFER,
1094 					guest_val, host_val);
1095 			return;
1096 		}
1097 		break;
1098 	case MSR_CORE_PERF_GLOBAL_CTRL:
1099 		if (cpu_has_load_perf_global_ctrl()) {
1100 			add_atomic_switch_msr_special(vmx,
1101 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1102 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1103 					GUEST_IA32_PERF_GLOBAL_CTRL,
1104 					HOST_IA32_PERF_GLOBAL_CTRL,
1105 					guest_val, host_val);
1106 			return;
1107 		}
1108 		break;
1109 	case MSR_IA32_PEBS_ENABLE:
1110 		/* PEBS needs a quiescent period after being disabled (to write
1111 		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
1112 		 * provide that period, so a CPU could write host's record into
1113 		 * guest's memory.
1114 		 */
1115 		wrmsrq(MSR_IA32_PEBS_ENABLE, 0);
1116 	}
1117 
1118 	vmx_add_auto_msr(&m->guest, msr, guest_val, VM_ENTRY_MSR_LOAD_COUNT, kvm);
1119 	vmx_add_auto_msr(&m->host, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm);
1120 }
1121 
update_transition_efer(struct vcpu_vmx * vmx)1122 static bool update_transition_efer(struct vcpu_vmx *vmx)
1123 {
1124 	u64 guest_efer = vmx->vcpu.arch.efer;
1125 	u64 ignore_bits = 0;
1126 	int i;
1127 
1128 	/* Shadow paging assumes NX to be available.  */
1129 	if (!enable_ept)
1130 		guest_efer |= EFER_NX;
1131 
1132 	/*
1133 	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
1134 	 */
1135 	ignore_bits |= EFER_SCE;
1136 #ifdef CONFIG_X86_64
1137 	ignore_bits |= EFER_LMA | EFER_LME;
1138 	/* SCE is meaningful only in long mode on Intel */
1139 	if (guest_efer & EFER_LMA)
1140 		ignore_bits &= ~(u64)EFER_SCE;
1141 #endif
1142 
1143 	/*
1144 	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
1145 	 * On CPUs that support "load IA32_EFER", always switch EFER
1146 	 * atomically, since it's faster than switching it manually.
1147 	 */
1148 	if (cpu_has_load_ia32_efer() ||
1149 	    (enable_ept && ((vmx->vcpu.arch.efer ^ kvm_host.efer) & EFER_NX))) {
1150 		if (!(guest_efer & EFER_LMA))
1151 			guest_efer &= ~EFER_LME;
1152 		if (guest_efer != kvm_host.efer)
1153 			add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, kvm_host.efer);
1154 		else
1155 			clear_atomic_switch_msr(vmx, MSR_EFER);
1156 		return false;
1157 	}
1158 
1159 	i = kvm_find_user_return_msr(MSR_EFER);
1160 	if (i < 0)
1161 		return false;
1162 
1163 	clear_atomic_switch_msr(vmx, MSR_EFER);
1164 
1165 	guest_efer &= ~ignore_bits;
1166 	guest_efer |= kvm_host.efer & ignore_bits;
1167 
1168 	vmx->guest_uret_msrs[i].data = guest_efer;
1169 	vmx->guest_uret_msrs[i].mask = ~ignore_bits;
1170 
1171 	return true;
1172 }
1173 
vmx_add_autostore_msr(struct vcpu_vmx * vmx,u32 msr)1174 static void vmx_add_autostore_msr(struct vcpu_vmx *vmx, u32 msr)
1175 {
1176 	vmx_add_auto_msr(&vmx->msr_autostore, msr, 0, VM_EXIT_MSR_STORE_COUNT,
1177 			 vmx->vcpu.kvm);
1178 }
1179 
vmx_remove_autostore_msr(struct vcpu_vmx * vmx,u32 msr)1180 static void vmx_remove_autostore_msr(struct vcpu_vmx *vmx, u32 msr)
1181 {
1182 	vmx_remove_auto_msr(&vmx->msr_autostore, msr, VM_EXIT_MSR_STORE_COUNT);
1183 }
1184 
1185 #ifdef CONFIG_X86_32
1186 /*
1187  * On 32-bit kernels, VM exits still load the FS and GS bases from the
1188  * VMCS rather than the segment table.  KVM uses this helper to figure
1189  * out the current bases to poke them into the VMCS before entry.
1190  */
segment_base(u16 selector)1191 static unsigned long segment_base(u16 selector)
1192 {
1193 	struct desc_struct *table;
1194 	unsigned long v;
1195 
1196 	if (!(selector & ~SEGMENT_RPL_MASK))
1197 		return 0;
1198 
1199 	table = get_current_gdt_ro();
1200 
1201 	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1202 		u16 ldt_selector = kvm_read_ldt();
1203 
1204 		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
1205 			return 0;
1206 
1207 		table = (struct desc_struct *)segment_base(ldt_selector);
1208 	}
1209 	v = get_desc_base(&table[selector >> 3]);
1210 	return v;
1211 }
1212 #endif
1213 
pt_can_write_msr(struct vcpu_vmx * vmx)1214 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1215 {
1216 	return vmx_pt_mode_is_host_guest() &&
1217 	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1218 }
1219 
pt_output_base_valid(struct kvm_vcpu * vcpu,u64 base)1220 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
1221 {
1222 	/* The base must be 128-byte aligned and a legal physical address. */
1223 	return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
1224 }
1225 
pt_load_msr(struct pt_ctx * ctx,u32 addr_range)1226 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
1227 {
1228 	u32 i;
1229 
1230 	wrmsrq(MSR_IA32_RTIT_STATUS, ctx->status);
1231 	wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1232 	wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1233 	wrmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1234 	for (i = 0; i < addr_range; i++) {
1235 		wrmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1236 		wrmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1237 	}
1238 }
1239 
pt_save_msr(struct pt_ctx * ctx,u32 addr_range)1240 static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
1241 {
1242 	u32 i;
1243 
1244 	rdmsrq(MSR_IA32_RTIT_STATUS, ctx->status);
1245 	rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1246 	rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1247 	rdmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1248 	for (i = 0; i < addr_range; i++) {
1249 		rdmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1250 		rdmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1251 	}
1252 }
1253 
pt_guest_enter(struct vcpu_vmx * vmx)1254 static void pt_guest_enter(struct vcpu_vmx *vmx)
1255 {
1256 	if (vmx_pt_mode_is_system())
1257 		return;
1258 
1259 	/*
1260 	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
1261 	 * Save host state before VM entry.
1262 	 */
1263 	rdmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1264 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1265 		wrmsrq(MSR_IA32_RTIT_CTL, 0);
1266 		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1267 		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1268 	}
1269 }
1270 
pt_guest_exit(struct vcpu_vmx * vmx)1271 static void pt_guest_exit(struct vcpu_vmx *vmx)
1272 {
1273 	if (vmx_pt_mode_is_system())
1274 		return;
1275 
1276 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1277 		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1278 		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1279 	}
1280 
1281 	/*
1282 	 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
1283 	 * i.e. RTIT_CTL is always cleared on VM-Exit.  Restore it if necessary.
1284 	 */
1285 	if (vmx->pt_desc.host.ctl)
1286 		wrmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1287 }
1288 
vmx_set_host_fs_gs(struct vmcs_host_state * host,u16 fs_sel,u16 gs_sel,unsigned long fs_base,unsigned long gs_base)1289 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
1290 			unsigned long fs_base, unsigned long gs_base)
1291 {
1292 	if (unlikely(fs_sel != host->fs_sel)) {
1293 		if (!(fs_sel & 7))
1294 			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1295 		else
1296 			vmcs_write16(HOST_FS_SELECTOR, 0);
1297 		host->fs_sel = fs_sel;
1298 	}
1299 	if (unlikely(gs_sel != host->gs_sel)) {
1300 		if (!(gs_sel & 7))
1301 			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1302 		else
1303 			vmcs_write16(HOST_GS_SELECTOR, 0);
1304 		host->gs_sel = gs_sel;
1305 	}
1306 	if (unlikely(fs_base != host->fs_base)) {
1307 		vmcs_writel(HOST_FS_BASE, fs_base);
1308 		host->fs_base = fs_base;
1309 	}
1310 	if (unlikely(gs_base != host->gs_base)) {
1311 		vmcs_writel(HOST_GS_BASE, gs_base);
1312 		host->gs_base = gs_base;
1313 	}
1314 }
1315 
vmx_prepare_switch_to_guest(struct kvm_vcpu * vcpu)1316 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1317 {
1318 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1319 	struct vcpu_vt *vt = to_vt(vcpu);
1320 	struct vmcs_host_state *host_state;
1321 #ifdef CONFIG_X86_64
1322 	int cpu = raw_smp_processor_id();
1323 #endif
1324 	unsigned long fs_base, gs_base;
1325 	u16 fs_sel, gs_sel;
1326 	int i;
1327 
1328 	/*
1329 	 * Note that guest MSRs to be saved/restored can also be changed
1330 	 * when guest state is loaded. This happens when guest transitions
1331 	 * to/from long-mode by setting MSR_EFER.LMA.
1332 	 */
1333 	if (!vmx->guest_uret_msrs_loaded) {
1334 		vmx->guest_uret_msrs_loaded = true;
1335 		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
1336 			if (!vmx->guest_uret_msrs[i].load_into_hardware)
1337 				continue;
1338 
1339 			kvm_set_user_return_msr(i,
1340 						vmx->guest_uret_msrs[i].data,
1341 						vmx->guest_uret_msrs[i].mask);
1342 		}
1343 	}
1344 
1345 	if (vmx->nested.need_vmcs12_to_shadow_sync)
1346 		nested_sync_vmcs12_to_shadow(vcpu);
1347 
1348 	if (vt->guest_state_loaded)
1349 		return;
1350 
1351 	host_state = &vmx->loaded_vmcs->host_state;
1352 
1353 	/*
1354 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
1355 	 * allow segment selectors with cpl > 0 or ti == 1.
1356 	 */
1357 	host_state->ldt_sel = kvm_read_ldt();
1358 
1359 #ifdef CONFIG_X86_64
1360 	savesegment(ds, host_state->ds_sel);
1361 	savesegment(es, host_state->es_sel);
1362 
1363 	gs_base = cpu_kernelmode_gs_base(cpu);
1364 	if (likely(is_64bit_mm(current->mm))) {
1365 		current_save_fsgs();
1366 		fs_sel = current->thread.fsindex;
1367 		gs_sel = current->thread.gsindex;
1368 		fs_base = current->thread.fsbase;
1369 		vt->msr_host_kernel_gs_base = current->thread.gsbase;
1370 	} else {
1371 		savesegment(fs, fs_sel);
1372 		savesegment(gs, gs_sel);
1373 		fs_base = read_msr(MSR_FS_BASE);
1374 		vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1375 	}
1376 
1377 	wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1378 #else
1379 	savesegment(fs, fs_sel);
1380 	savesegment(gs, gs_sel);
1381 	fs_base = segment_base(fs_sel);
1382 	gs_base = segment_base(gs_sel);
1383 #endif
1384 
1385 	vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
1386 	vt->guest_state_loaded = true;
1387 }
1388 
vmx_prepare_switch_to_host(struct vcpu_vmx * vmx)1389 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1390 {
1391 	struct vmcs_host_state *host_state;
1392 
1393 	if (!vmx->vt.guest_state_loaded)
1394 		return;
1395 
1396 	host_state = &vmx->loaded_vmcs->host_state;
1397 
1398 	++vmx->vcpu.stat.host_state_reload;
1399 
1400 #ifdef CONFIG_X86_64
1401 	rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1402 #endif
1403 	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
1404 		kvm_load_ldt(host_state->ldt_sel);
1405 #ifdef CONFIG_X86_64
1406 		load_gs_index(host_state->gs_sel);
1407 #else
1408 		loadsegment(gs, host_state->gs_sel);
1409 #endif
1410 	}
1411 	if (host_state->fs_sel & 7)
1412 		loadsegment(fs, host_state->fs_sel);
1413 #ifdef CONFIG_X86_64
1414 	if (unlikely(host_state->ds_sel | host_state->es_sel)) {
1415 		loadsegment(ds, host_state->ds_sel);
1416 		loadsegment(es, host_state->es_sel);
1417 	}
1418 #endif
1419 	invalidate_tss_limit();
1420 #ifdef CONFIG_X86_64
1421 	wrmsrq(MSR_KERNEL_GS_BASE, vmx->vt.msr_host_kernel_gs_base);
1422 #endif
1423 	load_fixmap_gdt(raw_smp_processor_id());
1424 	vmx->vt.guest_state_loaded = false;
1425 	vmx->guest_uret_msrs_loaded = false;
1426 }
1427 
1428 #ifdef CONFIG_X86_64
vmx_read_guest_host_msr(struct vcpu_vmx * vmx,u32 msr,u64 * cache)1429 static u64 vmx_read_guest_host_msr(struct vcpu_vmx *vmx, u32 msr, u64 *cache)
1430 {
1431 	preempt_disable();
1432 	if (vmx->vt.guest_state_loaded)
1433 		*cache = read_msr(msr);
1434 	preempt_enable();
1435 	return *cache;
1436 }
1437 
vmx_write_guest_host_msr(struct vcpu_vmx * vmx,u32 msr,u64 data,u64 * cache)1438 static void vmx_write_guest_host_msr(struct vcpu_vmx *vmx, u32 msr, u64 data,
1439 				     u64 *cache)
1440 {
1441 	preempt_disable();
1442 	if (vmx->vt.guest_state_loaded)
1443 		wrmsrns(msr, data);
1444 	preempt_enable();
1445 	*cache = data;
1446 }
1447 
vmx_read_guest_kernel_gs_base(struct vcpu_vmx * vmx)1448 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1449 {
1450 	return vmx_read_guest_host_msr(vmx, MSR_KERNEL_GS_BASE,
1451 				       &vmx->msr_guest_kernel_gs_base);
1452 }
1453 
vmx_write_guest_kernel_gs_base(struct vcpu_vmx * vmx,u64 data)1454 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1455 {
1456 	vmx_write_guest_host_msr(vmx, MSR_KERNEL_GS_BASE, data,
1457 				 &vmx->msr_guest_kernel_gs_base);
1458 }
1459 #endif
1460 
grow_ple_window(struct kvm_vcpu * vcpu)1461 static void grow_ple_window(struct kvm_vcpu *vcpu)
1462 {
1463 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1464 	unsigned int old = vmx->ple_window;
1465 
1466 	vmx->ple_window = __grow_ple_window(old, ple_window,
1467 					    ple_window_grow,
1468 					    ple_window_max);
1469 
1470 	if (vmx->ple_window != old) {
1471 		vmx->ple_window_dirty = true;
1472 		trace_kvm_ple_window_update(vcpu->vcpu_id,
1473 					    vmx->ple_window, old);
1474 	}
1475 }
1476 
shrink_ple_window(struct kvm_vcpu * vcpu)1477 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1478 {
1479 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1480 	unsigned int old = vmx->ple_window;
1481 
1482 	vmx->ple_window = __shrink_ple_window(old, ple_window,
1483 					      ple_window_shrink,
1484 					      ple_window);
1485 
1486 	if (vmx->ple_window != old) {
1487 		vmx->ple_window_dirty = true;
1488 		trace_kvm_ple_window_update(vcpu->vcpu_id,
1489 					    vmx->ple_window, old);
1490 	}
1491 }
1492 
vmx_vcpu_load_vmcs(struct kvm_vcpu * vcpu,int cpu)1493 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
1494 {
1495 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1496 	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1497 	struct vmcs *prev;
1498 
1499 	if (!already_loaded) {
1500 		loaded_vmcs_clear(vmx->loaded_vmcs);
1501 		local_irq_disable();
1502 
1503 		/*
1504 		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1505 		 * this cpu's percpu list, otherwise it may not yet be deleted
1506 		 * from its previous cpu's percpu list.  Pairs with the
1507 		 * smb_wmb() in __loaded_vmcs_clear().
1508 		 */
1509 		smp_rmb();
1510 
1511 		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1512 			 &per_cpu(loaded_vmcss_on_cpu, cpu));
1513 		local_irq_enable();
1514 	}
1515 
1516 	prev = per_cpu(current_vmcs, cpu);
1517 	if (prev != vmx->loaded_vmcs->vmcs) {
1518 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1519 		vmcs_load(vmx->loaded_vmcs->vmcs);
1520 	}
1521 
1522 	if (!already_loaded) {
1523 		void *gdt = get_current_gdt_ro();
1524 
1525 		/*
1526 		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
1527 		 * TLB entries from its previous association with the vCPU.
1528 		 */
1529 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1530 
1531 		/*
1532 		 * Linux uses per-cpu TSS and GDT, so set these when switching
1533 		 * processors.  See 22.2.4.
1534 		 */
1535 		vmcs_writel(HOST_TR_BASE,
1536 			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1537 		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
1538 
1539 		if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
1540 			/* 22.2.3 */
1541 			vmcs_writel(HOST_IA32_SYSENTER_ESP,
1542 				    (unsigned long)(cpu_entry_stack(cpu) + 1));
1543 		}
1544 
1545 		vmx->loaded_vmcs->cpu = cpu;
1546 	}
1547 }
1548 
1549 /*
1550  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1551  * vcpu mutex is already taken.
1552  */
vmx_vcpu_load(struct kvm_vcpu * vcpu,int cpu)1553 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1554 {
1555 	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
1556 		shrink_ple_window(vcpu);
1557 
1558 	vmx_vcpu_load_vmcs(vcpu, cpu);
1559 
1560 	vmx_vcpu_pi_load(vcpu, cpu);
1561 }
1562 
vmx_vcpu_put(struct kvm_vcpu * vcpu)1563 void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1564 {
1565 	vmx_vcpu_pi_put(vcpu);
1566 
1567 	vmx_prepare_switch_to_host(to_vmx(vcpu));
1568 }
1569 
vmx_switch_loaded_vmcs(struct kvm_vcpu * vcpu,struct loaded_vmcs * vmcs)1570 static void vmx_switch_loaded_vmcs(struct kvm_vcpu *vcpu,
1571 				   struct loaded_vmcs *vmcs)
1572 {
1573 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1574 	int cpu;
1575 
1576 	cpu = get_cpu();
1577 	vmx->loaded_vmcs = vmcs;
1578 	vmx_vcpu_load_vmcs(vcpu, cpu);
1579 	put_cpu();
1580 }
1581 
vmx_load_vmcs01(struct kvm_vcpu * vcpu)1582 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
1583 {
1584 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1585 
1586 	if (!is_guest_mode(vcpu)) {
1587 		WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
1588 		return;
1589 	}
1590 
1591 	WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->nested.vmcs02);
1592 	vmx_switch_loaded_vmcs(vcpu, &vmx->vmcs01);
1593 }
1594 
vmx_put_vmcs01(struct kvm_vcpu * vcpu)1595 static void vmx_put_vmcs01(struct kvm_vcpu *vcpu)
1596 {
1597 	if (!is_guest_mode(vcpu))
1598 		return;
1599 
1600 	vmx_switch_loaded_vmcs(vcpu, &to_vmx(vcpu)->nested.vmcs02);
1601 }
DEFINE_GUARD(vmx_vmcs01,struct kvm_vcpu *,vmx_load_vmcs01 (_T),vmx_put_vmcs01 (_T))1602 DEFINE_GUARD(vmx_vmcs01, struct kvm_vcpu *,
1603 	     vmx_load_vmcs01(_T), vmx_put_vmcs01(_T))
1604 
1605 bool vmx_emulation_required(struct kvm_vcpu *vcpu)
1606 {
1607 	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1608 }
1609 
vmx_get_rflags(struct kvm_vcpu * vcpu)1610 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1611 {
1612 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1613 	unsigned long rflags, save_rflags;
1614 
1615 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1616 		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1617 		rflags = vmcs_readl(GUEST_RFLAGS);
1618 		if (vmx->rmode.vm86_active) {
1619 			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1620 			save_rflags = vmx->rmode.save_rflags;
1621 			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1622 		}
1623 		vmx->rflags = rflags;
1624 	}
1625 	return vmx->rflags;
1626 }
1627 
vmx_set_rflags(struct kvm_vcpu * vcpu,unsigned long rflags)1628 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1629 {
1630 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1631 	unsigned long old_rflags;
1632 
1633 	/*
1634 	 * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
1635 	 * is an unrestricted guest in order to mark L2 as needing emulation
1636 	 * if L1 runs L2 as a restricted guest.
1637 	 */
1638 	if (is_unrestricted_guest(vcpu)) {
1639 		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1640 		vmx->rflags = rflags;
1641 		vmcs_writel(GUEST_RFLAGS, rflags);
1642 		return;
1643 	}
1644 
1645 	old_rflags = vmx_get_rflags(vcpu);
1646 	vmx->rflags = rflags;
1647 	if (vmx->rmode.vm86_active) {
1648 		vmx->rmode.save_rflags = rflags;
1649 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1650 	}
1651 	vmcs_writel(GUEST_RFLAGS, rflags);
1652 
1653 	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1654 		vmx->vt.emulation_required = vmx_emulation_required(vcpu);
1655 }
1656 
vmx_get_if_flag(struct kvm_vcpu * vcpu)1657 bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
1658 {
1659 	return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
1660 }
1661 
vmx_get_interrupt_shadow(struct kvm_vcpu * vcpu)1662 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1663 {
1664 	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1665 	int ret = 0;
1666 
1667 	if (interruptibility & GUEST_INTR_STATE_STI)
1668 		ret |= KVM_X86_SHADOW_INT_STI;
1669 	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1670 		ret |= KVM_X86_SHADOW_INT_MOV_SS;
1671 
1672 	return ret;
1673 }
1674 
vmx_set_interrupt_shadow(struct kvm_vcpu * vcpu,int mask)1675 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1676 {
1677 	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1678 	u32 interruptibility = interruptibility_old;
1679 
1680 	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1681 
1682 	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1683 		interruptibility |= GUEST_INTR_STATE_MOV_SS;
1684 	else if (mask & KVM_X86_SHADOW_INT_STI)
1685 		interruptibility |= GUEST_INTR_STATE_STI;
1686 
1687 	if ((interruptibility != interruptibility_old))
1688 		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1689 }
1690 
vmx_rtit_ctl_check(struct kvm_vcpu * vcpu,u64 data)1691 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1692 {
1693 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1694 	unsigned long value;
1695 
1696 	/*
1697 	 * Any MSR write that attempts to change bits marked reserved will
1698 	 * case a #GP fault.
1699 	 */
1700 	if (data & vmx->pt_desc.ctl_bitmask)
1701 		return 1;
1702 
1703 	/*
1704 	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
1705 	 * result in a #GP unless the same write also clears TraceEn.
1706 	 */
1707 	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1708 	    (data & RTIT_CTL_TRACEEN) &&
1709 	    data != vmx->pt_desc.guest.ctl)
1710 		return 1;
1711 
1712 	/*
1713 	 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit
1714 	 * and FabricEn would cause #GP, if
1715 	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
1716 	 */
1717 	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
1718 		!(data & RTIT_CTL_FABRIC_EN) &&
1719 		!intel_pt_validate_cap(vmx->pt_desc.caps,
1720 					PT_CAP_single_range_output))
1721 		return 1;
1722 
1723 	/*
1724 	 * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that
1725 	 * utilize encodings marked reserved will cause a #GP fault.
1726 	 */
1727 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1728 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1729 			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
1730 			RTIT_CTL_MTC_RANGE_OFFSET, &value))
1731 		return 1;
1732 	value = intel_pt_validate_cap(vmx->pt_desc.caps,
1733 						PT_CAP_cycle_thresholds);
1734 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1735 			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
1736 			RTIT_CTL_CYC_THRESH_OFFSET, &value))
1737 		return 1;
1738 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1739 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1740 			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
1741 			RTIT_CTL_PSB_FREQ_OFFSET, &value))
1742 		return 1;
1743 
1744 	/*
1745 	 * If ADDRx_CFG is reserved or the encodings is >2 will
1746 	 * cause a #GP fault.
1747 	 */
1748 	value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1749 	if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
1750 		return 1;
1751 	value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1752 	if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
1753 		return 1;
1754 	value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1755 	if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
1756 		return 1;
1757 	value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
1758 	if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
1759 		return 1;
1760 
1761 	return 0;
1762 }
1763 
vmx_check_emulate_instruction(struct kvm_vcpu * vcpu,int emul_type,void * insn,int insn_len)1764 int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1765 				  void *insn, int insn_len)
1766 {
1767 	/*
1768 	 * Emulation of instructions in SGX enclaves is impossible as RIP does
1769 	 * not point at the failing instruction, and even if it did, the code
1770 	 * stream is inaccessible.  Inject #UD instead of exiting to userspace
1771 	 * so that guest userspace can't DoS the guest simply by triggering
1772 	 * emulation (enclaves are CPL3 only).
1773 	 */
1774 	if (vmx_get_exit_reason(vcpu).enclave_mode) {
1775 		kvm_queue_exception(vcpu, UD_VECTOR);
1776 		return X86EMUL_PROPAGATE_FAULT;
1777 	}
1778 
1779 	/* Check that emulation is possible during event vectoring */
1780 	if ((to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
1781 	    !kvm_can_emulate_event_vectoring(emul_type))
1782 		return X86EMUL_UNHANDLEABLE_VECTORING;
1783 
1784 	return X86EMUL_CONTINUE;
1785 }
1786 
skip_emulated_instruction(struct kvm_vcpu * vcpu)1787 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
1788 {
1789 	union vmx_exit_reason exit_reason = vmx_get_exit_reason(vcpu);
1790 	unsigned long rip, orig_rip;
1791 	u32 instr_len;
1792 
1793 	/*
1794 	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
1795 	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
1796 	 * set when EPT misconfig occurs.  In practice, real hardware updates
1797 	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
1798 	 * (namely Hyper-V) don't set it due to it being undefined behavior,
1799 	 * i.e. we end up advancing IP with some random value.
1800 	 */
1801 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
1802 	    exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1803 		instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1804 
1805 		/*
1806 		 * Emulating an enclave's instructions isn't supported as KVM
1807 		 * cannot access the enclave's memory or its true RIP, e.g. the
1808 		 * vmcs.GUEST_RIP points at the exit point of the enclave, not
1809 		 * the RIP that actually triggered the VM-Exit.  But, because
1810 		 * most instructions that cause VM-Exit will #UD in an enclave,
1811 		 * most instruction-based VM-Exits simply do not occur.
1812 		 *
1813 		 * There are a few exceptions, notably the debug instructions
1814 		 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
1815 		 * and generate #DB/#BP as expected, which KVM might intercept.
1816 		 * But again, the CPU does the dirty work and saves an instr
1817 		 * length of zero so VMMs don't shoot themselves in the foot.
1818 		 * WARN if KVM tries to skip a non-zero length instruction on
1819 		 * a VM-Exit from an enclave.
1820 		 */
1821 		if (!instr_len)
1822 			goto rip_updated;
1823 
1824 		WARN_ONCE(exit_reason.enclave_mode,
1825 			  "skipping instruction after SGX enclave VM-Exit");
1826 
1827 		orig_rip = kvm_rip_read(vcpu);
1828 		rip = orig_rip + instr_len;
1829 #ifdef CONFIG_X86_64
1830 		/*
1831 		 * We need to mask out the high 32 bits of RIP if not in 64-bit
1832 		 * mode, but just finding out that we are in 64-bit mode is
1833 		 * quite expensive.  Only do it if there was a carry.
1834 		 */
1835 		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1836 			rip = (u32)rip;
1837 #endif
1838 		kvm_rip_write(vcpu, rip);
1839 	} else {
1840 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
1841 			return 0;
1842 	}
1843 
1844 rip_updated:
1845 	/* skipping an emulated instruction also counts */
1846 	vmx_set_interrupt_shadow(vcpu, 0);
1847 
1848 	return 1;
1849 }
1850 
1851 /*
1852  * Recognizes a pending MTF VM-exit and records the nested state for later
1853  * delivery.
1854  */
vmx_update_emulated_instruction(struct kvm_vcpu * vcpu)1855 void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
1856 {
1857 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1858 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1859 
1860 	if (!is_guest_mode(vcpu))
1861 		return;
1862 
1863 	/*
1864 	 * Per the SDM, MTF takes priority over debug-trap exceptions besides
1865 	 * TSS T-bit traps and ICEBP (INT1).  KVM doesn't emulate T-bit traps
1866 	 * or ICEBP (in the emulator proper), and skipping of ICEBP after an
1867 	 * intercepted #DB deliberately avoids single-step #DB and MTF updates
1868 	 * as ICEBP is higher priority than both.  As instruction emulation is
1869 	 * completed at this point (i.e. KVM is at the instruction boundary),
1870 	 * any #DB exception pending delivery must be a debug-trap of lower
1871 	 * priority than MTF.  Record the pending MTF state to be delivered in
1872 	 * vmx_check_nested_events().
1873 	 */
1874 	if (nested_cpu_has_mtf(vmcs12) &&
1875 	    (!vcpu->arch.exception.pending ||
1876 	     vcpu->arch.exception.vector == DB_VECTOR) &&
1877 	    (!vcpu->arch.exception_vmexit.pending ||
1878 	     vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1879 		vmx->nested.mtf_pending = true;
1880 		kvm_make_request(KVM_REQ_EVENT, vcpu);
1881 	} else {
1882 		vmx->nested.mtf_pending = false;
1883 	}
1884 }
1885 
vmx_skip_emulated_instruction(struct kvm_vcpu * vcpu)1886 int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
1887 {
1888 	vmx_update_emulated_instruction(vcpu);
1889 	return skip_emulated_instruction(vcpu);
1890 }
1891 
vmx_clear_hlt(struct kvm_vcpu * vcpu)1892 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
1893 {
1894 	/*
1895 	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
1896 	 * explicitly skip the instruction because if the HLT state is set,
1897 	 * then the instruction is already executing and RIP has already been
1898 	 * advanced.
1899 	 */
1900 	if (kvm_hlt_in_guest(vcpu->kvm) &&
1901 			vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1902 		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1903 }
1904 
vmx_inject_exception(struct kvm_vcpu * vcpu)1905 void vmx_inject_exception(struct kvm_vcpu *vcpu)
1906 {
1907 	struct kvm_queued_exception *ex = &vcpu->arch.exception;
1908 	u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
1909 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1910 
1911 	kvm_deliver_exception_payload(vcpu, ex);
1912 
1913 	if (ex->has_error_code) {
1914 		/*
1915 		 * Despite the error code being architecturally defined as 32
1916 		 * bits, and the VMCS field being 32 bits, Intel CPUs and thus
1917 		 * VMX don't actually supporting setting bits 31:16.  Hardware
1918 		 * will (should) never provide a bogus error code, but AMD CPUs
1919 		 * do generate error codes with bits 31:16 set, and so KVM's
1920 		 * ABI lets userspace shove in arbitrary 32-bit values.  Drop
1921 		 * the upper bits to avoid VM-Fail, losing information that
1922 		 * doesn't really exist is preferable to killing the VM.
1923 		 */
1924 		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
1925 		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1926 	}
1927 
1928 	if (vmx->rmode.vm86_active) {
1929 		int inc_eip = 0;
1930 		if (kvm_exception_is_soft(ex->vector))
1931 			inc_eip = vcpu->arch.event_exit_inst_len;
1932 		kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip);
1933 		return;
1934 	}
1935 
1936 	WARN_ON_ONCE(vmx->vt.emulation_required);
1937 
1938 	if (kvm_exception_is_soft(ex->vector)) {
1939 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1940 			     vmx->vcpu.arch.event_exit_inst_len);
1941 		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1942 	} else
1943 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
1944 
1945 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1946 
1947 	vmx_clear_hlt(vcpu);
1948 }
1949 
vmx_setup_uret_msr(struct vcpu_vmx * vmx,unsigned int msr,bool load_into_hardware)1950 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
1951 			       bool load_into_hardware)
1952 {
1953 	struct vmx_uret_msr *uret_msr;
1954 
1955 	uret_msr = vmx_find_uret_msr(vmx, msr);
1956 	if (!uret_msr)
1957 		return;
1958 
1959 	uret_msr->load_into_hardware = load_into_hardware;
1960 }
1961 
1962 /*
1963  * Configuring user return MSRs to automatically save, load, and restore MSRs
1964  * that need to be shoved into hardware when running the guest.  Note, omitting
1965  * an MSR here does _NOT_ mean it's not emulated, only that it will not be
1966  * loaded into hardware when running the guest.
1967  */
vmx_setup_uret_msrs(struct vcpu_vmx * vmx)1968 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
1969 {
1970 #ifdef CONFIG_X86_64
1971 	bool load_syscall_msrs;
1972 
1973 	/*
1974 	 * The SYSCALL MSRs are only needed on long mode guests, and only
1975 	 * when EFER.SCE is set.
1976 	 */
1977 	load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1978 			    (vmx->vcpu.arch.efer & EFER_SCE);
1979 
1980 	vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
1981 	vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
1982 	vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
1983 #endif
1984 	vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
1985 
1986 	vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
1987 			   guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1988 			   guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDPID));
1989 
1990 	/*
1991 	 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
1992 	 * kernel and old userspace.  If those guests run on a tsx=off host, do
1993 	 * allow guests to use TSX_CTRL, but don't change the value in hardware
1994 	 * so that TSX remains always disabled.
1995 	 */
1996 	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
1997 
1998 	/*
1999 	 * The set of MSRs to load may have changed, reload MSRs before the
2000 	 * next VM-Enter.
2001 	 */
2002 	vmx->guest_uret_msrs_loaded = false;
2003 }
2004 
vmx_get_l2_tsc_offset(struct kvm_vcpu * vcpu)2005 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
2006 {
2007 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2008 
2009 	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
2010 		return vmcs12->tsc_offset;
2011 
2012 	return 0;
2013 }
2014 
vmx_get_l2_tsc_multiplier(struct kvm_vcpu * vcpu)2015 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
2016 {
2017 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2018 
2019 	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
2020 	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
2021 		return vmcs12->tsc_multiplier;
2022 
2023 	return kvm_caps.default_tsc_scaling_ratio;
2024 }
2025 
vmx_write_tsc_offset(struct kvm_vcpu * vcpu)2026 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
2027 {
2028 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2029 }
2030 
vmx_write_tsc_multiplier(struct kvm_vcpu * vcpu)2031 void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
2032 {
2033 	vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2034 }
2035 
2036 /*
2037  * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of
2038  * guest CPUID.  Note, KVM allows userspace to set "VMX in SMX" to maintain
2039  * backwards compatibility even though KVM doesn't support emulating SMX.  And
2040  * because userspace set "VMX in SMX", the guest must also be allowed to set it,
2041  * e.g. if the MSR is left unlocked and the guest does a RMW operation.
2042  */
2043 #define KVM_SUPPORTED_FEATURE_CONTROL  (FEAT_CTL_LOCKED			 | \
2044 					FEAT_CTL_VMX_ENABLED_INSIDE_SMX	 | \
2045 					FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \
2046 					FEAT_CTL_SGX_LC_ENABLED		 | \
2047 					FEAT_CTL_SGX_ENABLED		 | \
2048 					FEAT_CTL_LMCE_ENABLED)
2049 
is_vmx_feature_control_msr_valid(struct vcpu_vmx * vmx,struct msr_data * msr)2050 static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
2051 						    struct msr_data *msr)
2052 {
2053 	uint64_t valid_bits;
2054 
2055 	/*
2056 	 * Ensure KVM_SUPPORTED_FEATURE_CONTROL is updated when new bits are
2057 	 * exposed to the guest.
2058 	 */
2059 	WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits &
2060 		     ~KVM_SUPPORTED_FEATURE_CONTROL);
2061 
2062 	if (!msr->host_initiated &&
2063 	    (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED))
2064 		return false;
2065 
2066 	if (msr->host_initiated)
2067 		valid_bits = KVM_SUPPORTED_FEATURE_CONTROL;
2068 	else
2069 		valid_bits = vmx->msr_ia32_feature_control_valid_bits;
2070 
2071 	return !(msr->data & ~valid_bits);
2072 }
2073 
vmx_get_feature_msr(u32 msr,u64 * data)2074 int vmx_get_feature_msr(u32 msr, u64 *data)
2075 {
2076 	switch (msr) {
2077 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2078 		if (!nested)
2079 			return 1;
2080 		return vmx_get_vmx_msr(&vmcs_config.nested, msr, data);
2081 	default:
2082 		return KVM_MSR_RET_UNSUPPORTED;
2083 	}
2084 }
2085 
2086 /*
2087  * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
2088  * Returns 0 on success, non-0 otherwise.
2089  * Assumes vcpu_load() was already called.
2090  */
vmx_get_msr(struct kvm_vcpu * vcpu,struct msr_data * msr_info)2091 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2092 {
2093 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2094 	struct vmx_uret_msr *msr;
2095 	u32 index;
2096 
2097 	switch (msr_info->index) {
2098 #ifdef CONFIG_X86_64
2099 	case MSR_FS_BASE:
2100 		msr_info->data = vmcs_readl(GUEST_FS_BASE);
2101 		break;
2102 	case MSR_GS_BASE:
2103 		msr_info->data = vmcs_readl(GUEST_GS_BASE);
2104 		break;
2105 	case MSR_KERNEL_GS_BASE:
2106 		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
2107 		break;
2108 #endif
2109 	case MSR_EFER:
2110 		return kvm_get_msr_common(vcpu, msr_info);
2111 	case MSR_IA32_TSX_CTRL:
2112 		if (!msr_info->host_initiated &&
2113 		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2114 			return 1;
2115 		goto find_uret_msr;
2116 	case MSR_IA32_UMWAIT_CONTROL:
2117 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2118 			return 1;
2119 
2120 		msr_info->data = vmx->msr_ia32_umwait_control;
2121 		break;
2122 	case MSR_IA32_SPEC_CTRL:
2123 		if (!msr_info->host_initiated &&
2124 		    !guest_has_spec_ctrl_msr(vcpu))
2125 			return 1;
2126 
2127 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
2128 		break;
2129 	case MSR_IA32_SYSENTER_CS:
2130 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
2131 		break;
2132 	case MSR_IA32_SYSENTER_EIP:
2133 		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
2134 		break;
2135 	case MSR_IA32_SYSENTER_ESP:
2136 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2137 		break;
2138 	case MSR_IA32_BNDCFGS:
2139 		if (!kvm_mpx_supported() ||
2140 		    (!msr_info->host_initiated &&
2141 		     !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX)))
2142 			return 1;
2143 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2144 		break;
2145 	case MSR_IA32_MCG_EXT_CTL:
2146 		if (!msr_info->host_initiated &&
2147 		    !(vmx->msr_ia32_feature_control &
2148 		      FEAT_CTL_LMCE_ENABLED))
2149 			return 1;
2150 		msr_info->data = vcpu->arch.mcg_ext_ctl;
2151 		break;
2152 	case MSR_IA32_FEAT_CTL:
2153 		msr_info->data = vmx->msr_ia32_feature_control;
2154 		break;
2155 	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2156 		if (!msr_info->host_initiated &&
2157 		    !guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC))
2158 			return 1;
2159 		msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
2160 			[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
2161 		break;
2162 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2163 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
2164 			return 1;
2165 		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
2166 				    &msr_info->data))
2167 			return 1;
2168 #ifdef CONFIG_KVM_HYPERV
2169 		/*
2170 		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
2171 		 * instead of just ignoring the features, different Hyper-V
2172 		 * versions are either trying to use them and fail or do some
2173 		 * sanity checking and refuse to boot. Filter all unsupported
2174 		 * features out.
2175 		 */
2176 		if (!msr_info->host_initiated && guest_cpu_cap_has_evmcs(vcpu))
2177 			nested_evmcs_filter_control_msr(vcpu, msr_info->index,
2178 							&msr_info->data);
2179 #endif
2180 		break;
2181 	case MSR_IA32_RTIT_CTL:
2182 		if (!vmx_pt_mode_is_host_guest())
2183 			return 1;
2184 		msr_info->data = vmx->pt_desc.guest.ctl;
2185 		break;
2186 	case MSR_IA32_RTIT_STATUS:
2187 		if (!vmx_pt_mode_is_host_guest())
2188 			return 1;
2189 		msr_info->data = vmx->pt_desc.guest.status;
2190 		break;
2191 	case MSR_IA32_RTIT_CR3_MATCH:
2192 		if (!vmx_pt_mode_is_host_guest() ||
2193 			!intel_pt_validate_cap(vmx->pt_desc.caps,
2194 						PT_CAP_cr3_filtering))
2195 			return 1;
2196 		msr_info->data = vmx->pt_desc.guest.cr3_match;
2197 		break;
2198 	case MSR_IA32_RTIT_OUTPUT_BASE:
2199 		if (!vmx_pt_mode_is_host_guest() ||
2200 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
2201 					PT_CAP_topa_output) &&
2202 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
2203 					PT_CAP_single_range_output)))
2204 			return 1;
2205 		msr_info->data = vmx->pt_desc.guest.output_base;
2206 		break;
2207 	case MSR_IA32_RTIT_OUTPUT_MASK:
2208 		if (!vmx_pt_mode_is_host_guest() ||
2209 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
2210 					PT_CAP_topa_output) &&
2211 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
2212 					PT_CAP_single_range_output)))
2213 			return 1;
2214 		msr_info->data = vmx->pt_desc.guest.output_mask;
2215 		break;
2216 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2217 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2218 		if (!vmx_pt_mode_is_host_guest() ||
2219 		    (index >= 2 * vmx->pt_desc.num_address_ranges))
2220 			return 1;
2221 		if (index % 2)
2222 			msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2223 		else
2224 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2225 		break;
2226 	case MSR_IA32_S_CET:
2227 		msr_info->data = vmcs_readl(GUEST_S_CET);
2228 		break;
2229 	case MSR_KVM_INTERNAL_GUEST_SSP:
2230 		msr_info->data = vmcs_readl(GUEST_SSP);
2231 		break;
2232 	case MSR_IA32_INT_SSP_TAB:
2233 		msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
2234 		break;
2235 	case MSR_IA32_DEBUGCTLMSR:
2236 		msr_info->data = vmx_guest_debugctl_read();
2237 		break;
2238 	default:
2239 	find_uret_msr:
2240 		msr = vmx_find_uret_msr(vmx, msr_info->index);
2241 		if (msr) {
2242 			msr_info->data = msr->data;
2243 			break;
2244 		}
2245 		return kvm_get_msr_common(vcpu, msr_info);
2246 	}
2247 
2248 	return 0;
2249 }
2250 
nested_vmx_truncate_sysenter_addr(struct kvm_vcpu * vcpu,u64 data)2251 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
2252 						    u64 data)
2253 {
2254 #ifdef CONFIG_X86_64
2255 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
2256 		return (u32)data;
2257 #endif
2258 	return (unsigned long)data;
2259 }
2260 
vmx_get_supported_debugctl(struct kvm_vcpu * vcpu,bool host_initiated)2261 u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
2262 {
2263 	u64 debugctl = 0;
2264 
2265 	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
2266 	    (host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2267 		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
2268 
2269 	if ((kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT) &&
2270 	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
2271 		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
2272 
2273 	if (boot_cpu_has(X86_FEATURE_RTM) &&
2274 	    (host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_RTM)))
2275 		debugctl |= DEBUGCTLMSR_RTM_DEBUG;
2276 
2277 	return debugctl;
2278 }
2279 
vmx_is_valid_debugctl(struct kvm_vcpu * vcpu,u64 data,bool host_initiated)2280 bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated)
2281 {
2282 	u64 invalid;
2283 
2284 	invalid = data & ~vmx_get_supported_debugctl(vcpu, host_initiated);
2285 	if (invalid & (DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR)) {
2286 		kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
2287 		invalid &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR);
2288 	}
2289 	return !invalid;
2290 }
2291 
2292 /*
2293  * Writes msr value into the appropriate "register".
2294  * Returns 0 on success, non-0 otherwise.
2295  * Assumes vcpu_load() was already called.
2296  */
vmx_set_msr(struct kvm_vcpu * vcpu,struct msr_data * msr_info)2297 int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2298 {
2299 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2300 	struct vmx_uret_msr *msr;
2301 	int ret = 0;
2302 	u32 msr_index = msr_info->index;
2303 	u64 data = msr_info->data;
2304 	u32 index;
2305 
2306 	switch (msr_index) {
2307 	case MSR_EFER:
2308 		ret = kvm_set_msr_common(vcpu, msr_info);
2309 		break;
2310 #ifdef CONFIG_X86_64
2311 	case MSR_FS_BASE:
2312 		vmx_segment_cache_clear(vmx);
2313 		vmcs_writel(GUEST_FS_BASE, data);
2314 		break;
2315 	case MSR_GS_BASE:
2316 		vmx_segment_cache_clear(vmx);
2317 		vmcs_writel(GUEST_GS_BASE, data);
2318 		break;
2319 	case MSR_KERNEL_GS_BASE:
2320 		vmx_write_guest_kernel_gs_base(vmx, data);
2321 		break;
2322 	case MSR_IA32_XFD:
2323 		ret = kvm_set_msr_common(vcpu, msr_info);
2324 		/*
2325 		 * Always intercepting WRMSR could incur non-negligible
2326 		 * overhead given xfd might be changed frequently in
2327 		 * guest context switch. Disable write interception
2328 		 * upon the first write with a non-zero value (indicating
2329 		 * potential usage on dynamic xfeatures). Also update
2330 		 * exception bitmap to trap #NM for proper virtualization
2331 		 * of guest xfd_err.
2332 		 */
2333 		if (!ret && data) {
2334 			vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD,
2335 						      MSR_TYPE_RW);
2336 			vcpu->arch.xfd_no_write_intercept = true;
2337 			vmx_update_exception_bitmap(vcpu);
2338 		}
2339 		break;
2340 #endif
2341 	case MSR_IA32_SYSENTER_CS:
2342 		if (is_guest_mode(vcpu))
2343 			get_vmcs12(vcpu)->guest_sysenter_cs = data;
2344 		vmcs_write32(GUEST_SYSENTER_CS, data);
2345 		break;
2346 	case MSR_IA32_SYSENTER_EIP:
2347 		if (is_guest_mode(vcpu)) {
2348 			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2349 			get_vmcs12(vcpu)->guest_sysenter_eip = data;
2350 		}
2351 		vmcs_writel(GUEST_SYSENTER_EIP, data);
2352 		break;
2353 	case MSR_IA32_SYSENTER_ESP:
2354 		if (is_guest_mode(vcpu)) {
2355 			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2356 			get_vmcs12(vcpu)->guest_sysenter_esp = data;
2357 		}
2358 		vmcs_writel(GUEST_SYSENTER_ESP, data);
2359 		break;
2360 	case MSR_IA32_DEBUGCTLMSR:
2361 		if (!vmx_is_valid_debugctl(vcpu, data, msr_info->host_initiated))
2362 			return 1;
2363 
2364 		data &= vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
2365 
2366 		if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
2367 						VM_EXIT_SAVE_DEBUG_CONTROLS)
2368 			get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2369 
2370 		vmx_guest_debugctl_write(vcpu, data);
2371 
2372 		if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
2373 		    (data & DEBUGCTLMSR_LBR))
2374 			intel_pmu_create_guest_lbr_event(vcpu);
2375 		return 0;
2376 	case MSR_IA32_BNDCFGS:
2377 		if (!kvm_mpx_supported() ||
2378 		    (!msr_info->host_initiated &&
2379 		     !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX)))
2380 			return 1;
2381 		if (is_noncanonical_msr_address(data & PAGE_MASK, vcpu) ||
2382 		    (data & MSR_IA32_BNDCFGS_RSVD))
2383 			return 1;
2384 
2385 		if (is_guest_mode(vcpu) &&
2386 		    ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||
2387 		     (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))
2388 			get_vmcs12(vcpu)->guest_bndcfgs = data;
2389 
2390 		vmcs_write64(GUEST_BNDCFGS, data);
2391 		break;
2392 	case MSR_IA32_UMWAIT_CONTROL:
2393 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2394 			return 1;
2395 
2396 		/* The reserved bit 1 and non-32 bit [63:32] should be zero */
2397 		if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2398 			return 1;
2399 
2400 		vmx->msr_ia32_umwait_control = data;
2401 		break;
2402 	case MSR_IA32_SPEC_CTRL:
2403 		if (!msr_info->host_initiated &&
2404 		    !guest_has_spec_ctrl_msr(vcpu))
2405 			return 1;
2406 
2407 		if (kvm_spec_ctrl_test_value(data))
2408 			return 1;
2409 
2410 		vmx->spec_ctrl = data;
2411 		if (!data)
2412 			break;
2413 
2414 		/*
2415 		 * For non-nested:
2416 		 * When it's written (to non-zero) for the first time, pass
2417 		 * it through.
2418 		 *
2419 		 * For nested:
2420 		 * The handling of the MSR bitmap for L2 guests is done in
2421 		 * nested_vmx_prepare_msr_bitmap. We should not touch the
2422 		 * vmcs02.msr_bitmap here since it gets completely overwritten
2423 		 * in the merging. We update the vmcs01 here for L1 as well
2424 		 * since it will end up touching the MSR anyway now.
2425 		 */
2426 		vmx_disable_intercept_for_msr(vcpu,
2427 					      MSR_IA32_SPEC_CTRL,
2428 					      MSR_TYPE_RW);
2429 		break;
2430 	case MSR_IA32_TSX_CTRL:
2431 		if (!msr_info->host_initiated &&
2432 		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2433 			return 1;
2434 		if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2435 			return 1;
2436 		goto find_uret_msr;
2437 	case MSR_IA32_CR_PAT:
2438 		ret = kvm_set_msr_common(vcpu, msr_info);
2439 		if (ret)
2440 			break;
2441 
2442 		if (is_guest_mode(vcpu) &&
2443 		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2444 			get_vmcs12(vcpu)->guest_ia32_pat = data;
2445 
2446 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
2447 			vmcs_write64(GUEST_IA32_PAT, data);
2448 		break;
2449 	case MSR_IA32_MCG_EXT_CTL:
2450 		if ((!msr_info->host_initiated &&
2451 		     !(to_vmx(vcpu)->msr_ia32_feature_control &
2452 		       FEAT_CTL_LMCE_ENABLED)) ||
2453 		    (data & ~MCG_EXT_CTL_LMCE_EN))
2454 			return 1;
2455 		vcpu->arch.mcg_ext_ctl = data;
2456 		break;
2457 	case MSR_IA32_FEAT_CTL:
2458 		if (!is_vmx_feature_control_msr_valid(vmx, msr_info))
2459 			return 1;
2460 
2461 		vmx->msr_ia32_feature_control = data;
2462 		if (msr_info->host_initiated && data == 0)
2463 			vmx_leave_nested(vcpu);
2464 
2465 		/* SGX may be enabled/disabled by guest's firmware */
2466 		vmx_write_encls_bitmap(vcpu, NULL);
2467 		break;
2468 	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2469 		/*
2470 		 * On real hardware, the LE hash MSRs are writable before
2471 		 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
2472 		 * at which point SGX related bits in IA32_FEATURE_CONTROL
2473 		 * become writable.
2474 		 *
2475 		 * KVM does not emulate SGX activation for simplicity, so
2476 		 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
2477 		 * is unlocked.  This is technically not architectural
2478 		 * behavior, but it's close enough.
2479 		 */
2480 		if (!msr_info->host_initiated &&
2481 		    (!guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC) ||
2482 		    ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
2483 		    !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
2484 			return 1;
2485 		vmx->msr_ia32_sgxlepubkeyhash
2486 			[msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
2487 		break;
2488 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2489 		if (!msr_info->host_initiated)
2490 			return 1; /* they are read-only */
2491 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
2492 			return 1;
2493 		return vmx_set_vmx_msr(vcpu, msr_index, data);
2494 	case MSR_IA32_RTIT_CTL:
2495 		if (!vmx_pt_mode_is_host_guest() ||
2496 			vmx_rtit_ctl_check(vcpu, data) ||
2497 			vmx->nested.vmxon)
2498 			return 1;
2499 		vmcs_write64(GUEST_IA32_RTIT_CTL, data);
2500 		vmx->pt_desc.guest.ctl = data;
2501 		pt_update_intercept_for_msr(vcpu);
2502 		break;
2503 	case MSR_IA32_RTIT_STATUS:
2504 		if (!pt_can_write_msr(vmx))
2505 			return 1;
2506 		if (data & MSR_IA32_RTIT_STATUS_MASK)
2507 			return 1;
2508 		vmx->pt_desc.guest.status = data;
2509 		break;
2510 	case MSR_IA32_RTIT_CR3_MATCH:
2511 		if (!pt_can_write_msr(vmx))
2512 			return 1;
2513 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2514 					   PT_CAP_cr3_filtering))
2515 			return 1;
2516 		vmx->pt_desc.guest.cr3_match = data;
2517 		break;
2518 	case MSR_IA32_RTIT_OUTPUT_BASE:
2519 		if (!pt_can_write_msr(vmx))
2520 			return 1;
2521 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2522 					   PT_CAP_topa_output) &&
2523 		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2524 					   PT_CAP_single_range_output))
2525 			return 1;
2526 		if (!pt_output_base_valid(vcpu, data))
2527 			return 1;
2528 		vmx->pt_desc.guest.output_base = data;
2529 		break;
2530 	case MSR_IA32_RTIT_OUTPUT_MASK:
2531 		if (!pt_can_write_msr(vmx))
2532 			return 1;
2533 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2534 					   PT_CAP_topa_output) &&
2535 		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2536 					   PT_CAP_single_range_output))
2537 			return 1;
2538 		vmx->pt_desc.guest.output_mask = data;
2539 		break;
2540 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2541 		if (!pt_can_write_msr(vmx))
2542 			return 1;
2543 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2544 		if (index >= 2 * vmx->pt_desc.num_address_ranges)
2545 			return 1;
2546 		if (is_noncanonical_msr_address(data, vcpu))
2547 			return 1;
2548 		if (index % 2)
2549 			vmx->pt_desc.guest.addr_b[index / 2] = data;
2550 		else
2551 			vmx->pt_desc.guest.addr_a[index / 2] = data;
2552 		break;
2553 	case MSR_IA32_S_CET:
2554 		vmcs_writel(GUEST_S_CET, data);
2555 		break;
2556 	case MSR_KVM_INTERNAL_GUEST_SSP:
2557 		vmcs_writel(GUEST_SSP, data);
2558 		break;
2559 	case MSR_IA32_INT_SSP_TAB:
2560 		vmcs_writel(GUEST_INTR_SSP_TABLE, data);
2561 		break;
2562 	case MSR_IA32_PERF_CAPABILITIES:
2563 		if (data & PERF_CAP_LBR_FMT) {
2564 			if ((data & PERF_CAP_LBR_FMT) !=
2565 			    (kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT))
2566 				return 1;
2567 			if (!cpuid_model_is_consistent(vcpu))
2568 				return 1;
2569 		}
2570 		if (data & PERF_CAP_PEBS_FORMAT) {
2571 			if ((data & PERF_CAP_PEBS_MASK) !=
2572 			    (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
2573 				return 1;
2574 			if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DS))
2575 				return 1;
2576 			if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DTES64))
2577 				return 1;
2578 			if (!cpuid_model_is_consistent(vcpu))
2579 				return 1;
2580 		}
2581 		ret = kvm_set_msr_common(vcpu, msr_info);
2582 		break;
2583 
2584 	default:
2585 	find_uret_msr:
2586 		msr = vmx_find_uret_msr(vmx, msr_index);
2587 		if (msr)
2588 			ret = vmx_set_guest_uret_msr(vmx, msr, data);
2589 		else
2590 			ret = kvm_set_msr_common(vcpu, msr_info);
2591 	}
2592 
2593 	/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
2594 	if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
2595 		vmx_update_fb_clear_dis(vcpu, vmx);
2596 
2597 	return ret;
2598 }
2599 
vmx_cache_reg(struct kvm_vcpu * vcpu,enum kvm_reg reg)2600 void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2601 {
2602 	unsigned long guest_owned_bits;
2603 
2604 	kvm_register_mark_available(vcpu, reg);
2605 
2606 	switch (reg) {
2607 	case VCPU_REGS_RSP:
2608 		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2609 		break;
2610 	case VCPU_REGS_RIP:
2611 		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2612 		break;
2613 	case VCPU_EXREG_PDPTR:
2614 		if (enable_ept)
2615 			ept_save_pdptrs(vcpu);
2616 		break;
2617 	case VCPU_EXREG_CR0:
2618 		guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2619 
2620 		vcpu->arch.cr0 &= ~guest_owned_bits;
2621 		vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2622 		break;
2623 	case VCPU_EXREG_CR3:
2624 		/*
2625 		 * When intercepting CR3 loads, e.g. for shadowing paging, KVM's
2626 		 * CR3 is loaded into hardware, not the guest's CR3.
2627 		 */
2628 		if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
2629 			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2630 		break;
2631 	case VCPU_EXREG_CR4:
2632 		guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2633 
2634 		vcpu->arch.cr4 &= ~guest_owned_bits;
2635 		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2636 		break;
2637 	default:
2638 		KVM_BUG_ON(1, vcpu->kvm);
2639 		break;
2640 	}
2641 }
2642 
2643 /*
2644  * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
2645  * directly instead of going through cpu_has(), to ensure KVM is trapping
2646  * ENCLS whenever it's supported in hardware.  It does not matter whether
2647  * the host OS supports or has enabled SGX.
2648  */
cpu_has_sgx(void)2649 static bool cpu_has_sgx(void)
2650 {
2651 	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
2652 }
2653 
adjust_vmx_controls(u32 ctl_min,u32 ctl_opt,u32 msr,u32 * result)2654 static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
2655 {
2656 	u32 vmx_msr_low, vmx_msr_high;
2657 	u32 ctl = ctl_min | ctl_opt;
2658 
2659 	rdmsr(msr, vmx_msr_low, vmx_msr_high);
2660 
2661 	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2662 	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2663 
2664 	/* Ensure minimum (required) set of control bits are supported. */
2665 	if (ctl_min & ~ctl)
2666 		return -EIO;
2667 
2668 	*result = ctl;
2669 	return 0;
2670 }
2671 
adjust_vmx_controls64(u64 ctl_opt,u32 msr)2672 static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
2673 {
2674 	u64 allowed;
2675 
2676 	rdmsrq(msr, allowed);
2677 
2678 	return  ctl_opt & allowed;
2679 }
2680 
2681 #define vmx_check_entry_exit_pairs(pairs, entry_controls, exit_controls)	\
2682 ({										\
2683 	int i, r = 0;								\
2684 										\
2685 	BUILD_BUG_ON(sizeof(pairs[0].entry_control) != sizeof(entry_controls));	\
2686 	BUILD_BUG_ON(sizeof(pairs[0].exit_control)  != sizeof(exit_controls));	\
2687 										\
2688 	for (i = 0; i < ARRAY_SIZE(pairs); i++) {				\
2689 		typeof(entry_controls) n_ctrl = pairs[i].entry_control;		\
2690 		typeof(exit_controls) x_ctrl = pairs[i].exit_control;		\
2691 										\
2692 		if (!(entry_controls & n_ctrl) == !(exit_controls & x_ctrl))	\
2693 			continue;						\
2694 										\
2695 		pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, "		\
2696 			     "entry = %llx (%llx), exit = %llx (%llx)\n",	\
2697 			     (u64)(entry_controls & n_ctrl), (u64)n_ctrl,	\
2698 			     (u64)(exit_controls & x_ctrl), (u64)x_ctrl);	\
2699 										\
2700 		if (error_on_inconsistent_vmcs_config)				\
2701 			r = -EIO;						\
2702 										\
2703 		entry_controls &= ~n_ctrl;					\
2704 		exit_controls &= ~x_ctrl;					\
2705 	}									\
2706 	r;									\
2707 })
2708 
setup_vmcs_config(struct vmcs_config * vmcs_conf,struct vmx_capability * vmx_cap)2709 static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2710 			     struct vmx_capability *vmx_cap)
2711 {
2712 	u32 _pin_based_exec_control = 0;
2713 	u32 _cpu_based_exec_control = 0;
2714 	u32 _cpu_based_2nd_exec_control = 0;
2715 	u64 _cpu_based_3rd_exec_control = 0;
2716 	u32 _vmexit_control = 0;
2717 	u32 _vmentry_control = 0;
2718 	u64 basic_msr;
2719 	u64 misc_msr;
2720 
2721 	/*
2722 	 * LOAD/SAVE_DEBUG_CONTROLS are absent because both are mandatory.
2723 	 * SAVE_IA32_PAT and SAVE_IA32_EFER are absent because KVM always
2724 	 * intercepts writes to PAT and EFER, i.e. never enables those controls.
2725 	 */
2726 	struct {
2727 		u32 entry_control;
2728 		u32 exit_control;
2729 	} const vmcs_entry_exit_pairs[] = {
2730 		{ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,	VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL },
2731 		{ VM_ENTRY_LOAD_IA32_PAT,		VM_EXIT_LOAD_IA32_PAT },
2732 		{ VM_ENTRY_LOAD_IA32_EFER,		VM_EXIT_LOAD_IA32_EFER },
2733 		{ VM_ENTRY_LOAD_BNDCFGS,		VM_EXIT_CLEAR_BNDCFGS },
2734 		{ VM_ENTRY_LOAD_IA32_RTIT_CTL,		VM_EXIT_CLEAR_IA32_RTIT_CTL },
2735 		{ VM_ENTRY_LOAD_CET_STATE,		VM_EXIT_LOAD_CET_STATE },
2736 	};
2737 
2738 	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
2739 
2740 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL,
2741 				KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL,
2742 				MSR_IA32_VMX_PROCBASED_CTLS,
2743 				&_cpu_based_exec_control))
2744 		return -EIO;
2745 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2746 		if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
2747 					KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
2748 					MSR_IA32_VMX_PROCBASED_CTLS2,
2749 					&_cpu_based_2nd_exec_control))
2750 			return -EIO;
2751 	}
2752 	if (!IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
2753 		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
2754 
2755 #ifndef CONFIG_X86_64
2756 	if (!(_cpu_based_2nd_exec_control &
2757 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2758 		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2759 #endif
2760 
2761 	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2762 		_cpu_based_2nd_exec_control &= ~(
2763 				SECONDARY_EXEC_APIC_REGISTER_VIRT |
2764 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2765 				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2766 
2767 	rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
2768 		&vmx_cap->ept, &vmx_cap->vpid);
2769 
2770 	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
2771 	    vmx_cap->ept) {
2772 		pr_warn_once("EPT CAP should not exist if not support "
2773 				"1-setting enable EPT VM-execution control\n");
2774 
2775 		if (error_on_inconsistent_vmcs_config)
2776 			return -EIO;
2777 
2778 		vmx_cap->ept = 0;
2779 		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
2780 	}
2781 	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
2782 	    vmx_cap->vpid) {
2783 		pr_warn_once("VPID CAP should not exist if not support "
2784 				"1-setting enable VPID VM-execution control\n");
2785 
2786 		if (error_on_inconsistent_vmcs_config)
2787 			return -EIO;
2788 
2789 		vmx_cap->vpid = 0;
2790 	}
2791 
2792 	if (!cpu_has_sgx())
2793 		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
2794 
2795 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
2796 		_cpu_based_3rd_exec_control =
2797 			adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL,
2798 					      MSR_IA32_VMX_PROCBASED_CTLS3);
2799 
2800 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS,
2801 				KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS,
2802 				MSR_IA32_VMX_EXIT_CTLS,
2803 				&_vmexit_control))
2804 		return -EIO;
2805 
2806 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL,
2807 				KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL,
2808 				MSR_IA32_VMX_PINBASED_CTLS,
2809 				&_pin_based_exec_control))
2810 		return -EIO;
2811 
2812 	if (cpu_has_broken_vmx_preemption_timer())
2813 		_pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2814 	if (!(_cpu_based_2nd_exec_control &
2815 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
2816 		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2817 
2818 	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS,
2819 				KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS,
2820 				MSR_IA32_VMX_ENTRY_CTLS,
2821 				&_vmentry_control))
2822 		return -EIO;
2823 
2824 	if (vmx_check_entry_exit_pairs(vmcs_entry_exit_pairs,
2825 				       _vmentry_control, _vmexit_control))
2826 		return -EIO;
2827 
2828 	/*
2829 	 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
2830 	 * can't be used due to an errata where VM Exit may incorrectly clear
2831 	 * IA32_PERF_GLOBAL_CTRL[34:32].  Workaround the errata by using the
2832 	 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2833 	 */
2834 	switch (boot_cpu_data.x86_vfm) {
2835 	case INTEL_NEHALEM_EP:	/* AAK155 */
2836 	case INTEL_NEHALEM:	/* AAP115 */
2837 	case INTEL_WESTMERE:	/* AAT100 */
2838 	case INTEL_WESTMERE_EP:	/* BC86,AAY89,BD102 */
2839 	case INTEL_NEHALEM_EX:	/* BA97 */
2840 		_vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
2841 		_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
2842 		pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2843 			     "does not work properly. Using workaround\n");
2844 		break;
2845 	default:
2846 		break;
2847 	}
2848 
2849 	rdmsrq(MSR_IA32_VMX_BASIC, basic_msr);
2850 
2851 	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2852 	if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE)
2853 		return -EIO;
2854 
2855 #ifdef CONFIG_X86_64
2856 	/*
2857 	 * KVM expects to be able to shove all legal physical addresses into
2858 	 * VMCS fields for 64-bit kernels, and per the SDM, "This bit is always
2859 	 * 0 for processors that support Intel 64 architecture".
2860 	 */
2861 	if (basic_msr & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
2862 		return -EIO;
2863 #endif
2864 
2865 	/* Require Write-Back (WB) memory type for VMCS accesses. */
2866 	if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB)
2867 		return -EIO;
2868 
2869 	rdmsrq(MSR_IA32_VMX_MISC, misc_msr);
2870 
2871 	vmcs_conf->basic = basic_msr;
2872 	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2873 	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2874 	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2875 	vmcs_conf->cpu_based_3rd_exec_ctrl = _cpu_based_3rd_exec_control;
2876 	vmcs_conf->vmexit_ctrl         = _vmexit_control;
2877 	vmcs_conf->vmentry_ctrl        = _vmentry_control;
2878 	vmcs_conf->misc	= misc_msr;
2879 
2880 #if IS_ENABLED(CONFIG_HYPERV)
2881 	if (enlightened_vmcs)
2882 		evmcs_sanitize_exec_ctrls(vmcs_conf);
2883 #endif
2884 
2885 	return 0;
2886 }
2887 
__kvm_is_vmx_supported(void)2888 static bool __kvm_is_vmx_supported(void)
2889 {
2890 	int cpu = smp_processor_id();
2891 
2892 	if (!(cpuid_ecx(1) & feature_bit(VMX))) {
2893 		pr_err("VMX not supported by CPU %d\n", cpu);
2894 		return false;
2895 	}
2896 
2897 	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL)) {
2898 		pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
2899 		return false;
2900 	}
2901 
2902 	if (!this_cpu_has(X86_FEATURE_VMX)) {
2903 		pr_err("VMX not fully enabled on CPU %d.  Check kernel logs and/or BIOS\n", cpu);
2904 		return false;
2905 	}
2906 
2907 	return true;
2908 }
2909 
kvm_is_vmx_supported(void)2910 static bool kvm_is_vmx_supported(void)
2911 {
2912 	bool supported;
2913 
2914 	migrate_disable();
2915 	supported = __kvm_is_vmx_supported();
2916 	migrate_enable();
2917 
2918 	return supported;
2919 }
2920 
vmx_check_processor_compat(void)2921 int vmx_check_processor_compat(void)
2922 {
2923 	int cpu = raw_smp_processor_id();
2924 	struct vmcs_config vmcs_conf;
2925 	struct vmx_capability vmx_cap;
2926 
2927 	if (!__kvm_is_vmx_supported())
2928 		return -EIO;
2929 
2930 	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
2931 		pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
2932 		return -EIO;
2933 	}
2934 	if (nested)
2935 		nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept);
2936 
2937 	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config))) {
2938 		u32 *gold = (void *)&vmcs_config;
2939 		u32 *mine = (void *)&vmcs_conf;
2940 		int i;
2941 
2942 		BUILD_BUG_ON(sizeof(struct vmcs_config) % sizeof(u32));
2943 
2944 		pr_err("VMCS config on CPU %d doesn't match reference config:", cpu);
2945 		for (i = 0; i < sizeof(struct vmcs_config) / sizeof(u32); i++) {
2946 			if (gold[i] == mine[i])
2947 				continue;
2948 
2949 			pr_cont("\n  Offset %u REF = 0x%08x, CPU%u = 0x%08x, mismatch = 0x%08x",
2950 				i * (int)sizeof(u32), gold[i], cpu, mine[i], gold[i] ^ mine[i]);
2951 		}
2952 		pr_cont("\n");
2953 		return -EIO;
2954 	}
2955 	return 0;
2956 }
2957 
vmx_enable_virtualization_cpu(void)2958 int vmx_enable_virtualization_cpu(void)
2959 {
2960 	int cpu = raw_smp_processor_id();
2961 
2962 	/*
2963 	 * This can happen if we hot-added a CPU but failed to allocate
2964 	 * VP assist page for it.
2965 	 */
2966 	if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
2967 		return -EFAULT;
2968 
2969 	return x86_virt_get_ref(X86_FEATURE_VMX);
2970 }
2971 
vmclear_local_loaded_vmcss(void)2972 static void vmclear_local_loaded_vmcss(void)
2973 {
2974 	int cpu = raw_smp_processor_id();
2975 	struct loaded_vmcs *v, *n;
2976 
2977 	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2978 				 loaded_vmcss_on_cpu_link)
2979 		__loaded_vmcs_clear(v);
2980 }
2981 
vmx_disable_virtualization_cpu(void)2982 void vmx_disable_virtualization_cpu(void)
2983 {
2984 	vmclear_local_loaded_vmcss();
2985 
2986 	x86_virt_put_ref(X86_FEATURE_VMX);
2987 
2988 	hv_reset_evmcs();
2989 }
2990 
alloc_vmcs_cpu(bool shadow,int cpu,gfp_t flags)2991 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
2992 {
2993 	int node = cpu_to_node(cpu);
2994 	struct page *pages;
2995 	struct vmcs *vmcs;
2996 
2997 	pages = __alloc_pages_node(node, flags, 0);
2998 	if (!pages)
2999 		return NULL;
3000 	vmcs = page_address(pages);
3001 	memset(vmcs, 0, vmx_basic_vmcs_size(vmcs_config.basic));
3002 
3003 	/* KVM supports Enlightened VMCS v1 only */
3004 	if (kvm_is_using_evmcs())
3005 		vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
3006 	else
3007 		vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic);
3008 
3009 	if (shadow)
3010 		vmcs->hdr.shadow_vmcs = 1;
3011 	return vmcs;
3012 }
3013 
free_vmcs(struct vmcs * vmcs)3014 void free_vmcs(struct vmcs *vmcs)
3015 {
3016 	free_page((unsigned long)vmcs);
3017 }
3018 
3019 /*
3020  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
3021  */
free_loaded_vmcs(struct loaded_vmcs * loaded_vmcs)3022 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3023 {
3024 	if (!loaded_vmcs->vmcs)
3025 		return;
3026 	loaded_vmcs_clear(loaded_vmcs);
3027 	free_vmcs(loaded_vmcs->vmcs);
3028 	loaded_vmcs->vmcs = NULL;
3029 	if (loaded_vmcs->msr_bitmap)
3030 		free_page((unsigned long)loaded_vmcs->msr_bitmap);
3031 	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
3032 }
3033 
alloc_loaded_vmcs(struct loaded_vmcs * loaded_vmcs)3034 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3035 {
3036 	loaded_vmcs->vmcs = alloc_vmcs(false);
3037 	if (!loaded_vmcs->vmcs)
3038 		return -ENOMEM;
3039 
3040 	vmcs_clear(loaded_vmcs->vmcs);
3041 
3042 	loaded_vmcs->shadow_vmcs = NULL;
3043 	loaded_vmcs->hv_timer_soft_disabled = false;
3044 	loaded_vmcs->cpu = -1;
3045 	loaded_vmcs->launched = 0;
3046 
3047 	if (cpu_has_vmx_msr_bitmap()) {
3048 		loaded_vmcs->msr_bitmap = (unsigned long *)
3049 				__get_free_page(GFP_KERNEL_ACCOUNT);
3050 		if (!loaded_vmcs->msr_bitmap)
3051 			goto out_vmcs;
3052 		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
3053 	}
3054 
3055 	memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
3056 	memset(&loaded_vmcs->controls_shadow, 0,
3057 		sizeof(struct vmcs_controls_shadow));
3058 
3059 	return 0;
3060 
3061 out_vmcs:
3062 	free_loaded_vmcs(loaded_vmcs);
3063 	return -ENOMEM;
3064 }
3065 
fix_pmode_seg(struct kvm_vcpu * vcpu,int seg,struct kvm_segment * save)3066 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3067 		struct kvm_segment *save)
3068 {
3069 	if (!emulate_invalid_guest_state) {
3070 		/*
3071 		 * CS and SS RPL should be equal during guest entry according
3072 		 * to VMX spec, but in reality it is not always so. Since vcpu
3073 		 * is in the middle of the transition from real mode to
3074 		 * protected mode it is safe to assume that RPL 0 is a good
3075 		 * default value.
3076 		 */
3077 		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3078 			save->selector &= ~SEGMENT_RPL_MASK;
3079 		save->dpl = save->selector & SEGMENT_RPL_MASK;
3080 		save->s = 1;
3081 	}
3082 	__vmx_set_segment(vcpu, save, seg);
3083 }
3084 
enter_pmode(struct kvm_vcpu * vcpu)3085 static void enter_pmode(struct kvm_vcpu *vcpu)
3086 {
3087 	unsigned long flags;
3088 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3089 
3090 	/*
3091 	 * Update real mode segment cache. It may be not up-to-date if segment
3092 	 * register was written while vcpu was in a guest mode.
3093 	 */
3094 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3095 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3096 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3097 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3098 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3099 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3100 
3101 	vmx->rmode.vm86_active = 0;
3102 
3103 	__vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3104 
3105 	flags = vmcs_readl(GUEST_RFLAGS);
3106 	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3107 	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3108 	vmcs_writel(GUEST_RFLAGS, flags);
3109 
3110 	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3111 			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3112 
3113 	vmx_update_exception_bitmap(vcpu);
3114 
3115 	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3116 	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3117 	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3118 	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3119 	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3120 	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3121 }
3122 
fix_rmode_seg(int seg,struct kvm_segment * save)3123 static void fix_rmode_seg(int seg, struct kvm_segment *save)
3124 {
3125 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3126 	struct kvm_segment var = *save;
3127 
3128 	var.dpl = 0x3;
3129 	if (seg == VCPU_SREG_CS)
3130 		var.type = 0x3;
3131 
3132 	if (!emulate_invalid_guest_state) {
3133 		var.selector = var.base >> 4;
3134 		var.base = var.base & 0xffff0;
3135 		var.limit = 0xffff;
3136 		var.g = 0;
3137 		var.db = 0;
3138 		var.present = 1;
3139 		var.s = 1;
3140 		var.l = 0;
3141 		var.unusable = 0;
3142 		var.type = 0x3;
3143 		var.avl = 0;
3144 		if (save->base & 0xf)
3145 			pr_warn_once("segment base is not paragraph aligned "
3146 				     "when entering protected mode (seg=%d)", seg);
3147 	}
3148 
3149 	vmcs_write16(sf->selector, var.selector);
3150 	vmcs_writel(sf->base, var.base);
3151 	vmcs_write32(sf->limit, var.limit);
3152 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3153 }
3154 
enter_rmode(struct kvm_vcpu * vcpu)3155 static void enter_rmode(struct kvm_vcpu *vcpu)
3156 {
3157 	unsigned long flags;
3158 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3159 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
3160 
3161 	/*
3162 	 * KVM should never use VM86 to virtualize Real Mode when L2 is active,
3163 	 * as using VM86 is unnecessary if unrestricted guest is enabled, and
3164 	 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
3165 	 * should VM-Fail and KVM should reject userspace attempts to stuff
3166 	 * CR0.PG=0 when L2 is active.
3167 	 */
3168 	WARN_ON_ONCE(is_guest_mode(vcpu));
3169 
3170 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3171 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3172 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3173 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3174 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3175 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3176 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3177 
3178 	vmx->rmode.vm86_active = 1;
3179 
3180 	vmx_segment_cache_clear(vmx);
3181 
3182 	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
3183 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3184 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3185 
3186 	flags = vmcs_readl(GUEST_RFLAGS);
3187 	vmx->rmode.save_rflags = flags;
3188 
3189 	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3190 
3191 	vmcs_writel(GUEST_RFLAGS, flags);
3192 	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3193 	vmx_update_exception_bitmap(vcpu);
3194 
3195 	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3196 	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3197 	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3198 	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3199 	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3200 	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3201 }
3202 
vmx_set_efer(struct kvm_vcpu * vcpu,u64 efer)3203 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3204 {
3205 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3206 
3207 	/* Nothing to do if hardware doesn't support EFER. */
3208 	if (!vmx_find_uret_msr(vmx, MSR_EFER))
3209 		return 0;
3210 
3211 	vcpu->arch.efer = efer;
3212 #ifdef CONFIG_X86_64
3213 	if (efer & EFER_LMA)
3214 		vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3215 	else
3216 		vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3217 #else
3218 	if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3219 		return 1;
3220 #endif
3221 
3222 	vmx_setup_uret_msrs(vmx);
3223 	return 0;
3224 }
3225 
3226 #ifdef CONFIG_X86_64
3227 
enter_lmode(struct kvm_vcpu * vcpu)3228 static void enter_lmode(struct kvm_vcpu *vcpu)
3229 {
3230 	u32 guest_tr_ar;
3231 
3232 	vmx_segment_cache_clear(to_vmx(vcpu));
3233 
3234 	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3235 	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
3236 		pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3237 				     __func__);
3238 		vmcs_write32(GUEST_TR_AR_BYTES,
3239 			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
3240 			     | VMX_AR_TYPE_BUSY_64_TSS);
3241 	}
3242 	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3243 }
3244 
exit_lmode(struct kvm_vcpu * vcpu)3245 static void exit_lmode(struct kvm_vcpu *vcpu)
3246 {
3247 	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3248 }
3249 
3250 #endif
3251 
vmx_flush_tlb_all(struct kvm_vcpu * vcpu)3252 void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
3253 {
3254 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3255 
3256 	/*
3257 	 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
3258 	 * the CPU is not required to invalidate guest-physical mappings on
3259 	 * VM-Entry, even if VPID is disabled.  Guest-physical mappings are
3260 	 * associated with the root EPT structure and not any particular VPID
3261 	 * (INVVPID also isn't required to invalidate guest-physical mappings).
3262 	 */
3263 	if (enable_ept) {
3264 		ept_sync_global();
3265 	} else if (enable_vpid) {
3266 		if (cpu_has_vmx_invvpid_global()) {
3267 			vpid_sync_vcpu_global();
3268 		} else {
3269 			vpid_sync_vcpu_single(vmx->vpid);
3270 			vpid_sync_vcpu_single(vmx->nested.vpid02);
3271 		}
3272 	}
3273 }
3274 
vmx_get_current_vpid(struct kvm_vcpu * vcpu)3275 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
3276 {
3277 	if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
3278 		return nested_get_vpid02(vcpu);
3279 	return to_vmx(vcpu)->vpid;
3280 }
3281 
construct_eptp(hpa_t root_hpa)3282 static u64 construct_eptp(hpa_t root_hpa)
3283 {
3284 	u64 eptp = root_hpa | VMX_EPTP_MT_WB;
3285 	struct kvm_mmu_page *root;
3286 
3287 	if (kvm_mmu_is_dummy_root(root_hpa))
3288 		return eptp | VMX_EPTP_PWL_4;
3289 
3290 	/*
3291 	 * EPT roots should always have an associated MMU page.  Return a "bad"
3292 	 * EPTP to induce VM-Fail instead of continuing on in a unknown state.
3293 	 */
3294 	root = root_to_sp(root_hpa);
3295 	if (WARN_ON_ONCE(!root))
3296 		return INVALID_PAGE;
3297 
3298 	eptp |= (root->role.level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3299 
3300 	if (enable_ept_ad_bits && !root->role.ad_disabled)
3301 		eptp |= VMX_EPTP_AD_ENABLE_BIT;
3302 
3303 	return eptp;
3304 }
3305 
vmx_flush_tlb_ept_root(hpa_t root_hpa)3306 static void vmx_flush_tlb_ept_root(hpa_t root_hpa)
3307 {
3308 	u64 eptp = construct_eptp(root_hpa);
3309 
3310 	if (VALID_PAGE(eptp))
3311 		ept_sync_context(eptp);
3312 	else
3313 		ept_sync_global();
3314 }
3315 
vmx_flush_tlb_current(struct kvm_vcpu * vcpu)3316 void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
3317 {
3318 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3319 	u64 root_hpa = mmu->root.hpa;
3320 
3321 	/* No flush required if the current context is invalid. */
3322 	if (!VALID_PAGE(root_hpa))
3323 		return;
3324 
3325 	if (enable_ept)
3326 		vmx_flush_tlb_ept_root(root_hpa);
3327 	else
3328 		vpid_sync_context(vmx_get_current_vpid(vcpu));
3329 }
3330 
vmx_flush_tlb_gva(struct kvm_vcpu * vcpu,gva_t addr)3331 void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
3332 {
3333 	/*
3334 	 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
3335 	 * vmx_flush_tlb_guest() for an explanation of why this is ok.
3336 	 */
3337 	vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
3338 }
3339 
vmx_flush_tlb_guest(struct kvm_vcpu * vcpu)3340 void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
3341 {
3342 	/*
3343 	 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
3344 	 * vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit are
3345 	 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
3346 	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
3347 	 * i.e. no explicit INVVPID is necessary.
3348 	 */
3349 	vpid_sync_context(vmx_get_current_vpid(vcpu));
3350 }
3351 
vmx_ept_load_pdptrs(struct kvm_vcpu * vcpu)3352 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
3353 {
3354 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3355 
3356 	if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3357 		return;
3358 
3359 	if (is_pae_paging(vcpu)) {
3360 		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3361 		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3362 		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3363 		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3364 	}
3365 }
3366 
ept_save_pdptrs(struct kvm_vcpu * vcpu)3367 void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3368 {
3369 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3370 
3371 	if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3372 		return;
3373 
3374 	mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3375 	mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3376 	mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3377 	mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3378 
3379 	kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
3380 }
3381 
3382 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
3383 			  CPU_BASED_CR3_STORE_EXITING)
3384 
vmx_is_valid_cr0(struct kvm_vcpu * vcpu,unsigned long cr0)3385 bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3386 {
3387 	if (is_guest_mode(vcpu))
3388 		return nested_guest_cr0_valid(vcpu, cr0);
3389 
3390 	if (to_vmx(vcpu)->nested.vmxon)
3391 		return nested_host_cr0_valid(vcpu, cr0);
3392 
3393 	return true;
3394 }
3395 
vmx_set_cr0(struct kvm_vcpu * vcpu,unsigned long cr0)3396 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3397 {
3398 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3399 	unsigned long hw_cr0, old_cr0_pg;
3400 	u32 tmp;
3401 
3402 	old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
3403 
3404 	hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
3405 	if (enable_unrestricted_guest)
3406 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3407 	else {
3408 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3409 		if (!enable_ept)
3410 			hw_cr0 |= X86_CR0_WP;
3411 
3412 		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3413 			enter_pmode(vcpu);
3414 
3415 		if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3416 			enter_rmode(vcpu);
3417 	}
3418 
3419 	vmcs_writel(CR0_READ_SHADOW, cr0);
3420 	vmcs_writel(GUEST_CR0, hw_cr0);
3421 	vcpu->arch.cr0 = cr0;
3422 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3423 
3424 #ifdef CONFIG_X86_64
3425 	if (vcpu->arch.efer & EFER_LME) {
3426 		if (!old_cr0_pg && (cr0 & X86_CR0_PG))
3427 			enter_lmode(vcpu);
3428 		else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
3429 			exit_lmode(vcpu);
3430 	}
3431 #endif
3432 
3433 	if (enable_ept && !enable_unrestricted_guest) {
3434 		/*
3435 		 * Ensure KVM has an up-to-date snapshot of the guest's CR3.  If
3436 		 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
3437 		 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
3438 		 * KVM's CR3 is installed.
3439 		 */
3440 		if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3441 			vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3442 
3443 		/*
3444 		 * When running with EPT but not unrestricted guest, KVM must
3445 		 * intercept CR3 accesses when paging is _disabled_.  This is
3446 		 * necessary because restricted guests can't actually run with
3447 		 * paging disabled, and so KVM stuffs its own CR3 in order to
3448 		 * run the guest when identity mapped page tables.
3449 		 *
3450 		 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
3451 		 * update, it may be stale with respect to CR3 interception,
3452 		 * e.g. after nested VM-Enter.
3453 		 *
3454 		 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
3455 		 * stores to forward them to L1, even if KVM does not need to
3456 		 * intercept them to preserve its identity mapped page tables.
3457 		 */
3458 		if (!(cr0 & X86_CR0_PG)) {
3459 			exec_controls_setbit(vmx, CR3_EXITING_BITS);
3460 		} else if (!is_guest_mode(vcpu)) {
3461 			exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3462 		} else {
3463 			tmp = exec_controls_get(vmx);
3464 			tmp &= ~CR3_EXITING_BITS;
3465 			tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
3466 			exec_controls_set(vmx, tmp);
3467 		}
3468 
3469 		/* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3470 		if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
3471 			vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3472 
3473 		/*
3474 		 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3475 		 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3476 		 */
3477 		if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
3478 			kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
3479 	}
3480 
3481 	/* depends on vcpu->arch.cr0 to be set to a new value */
3482 	vmx->vt.emulation_required = vmx_emulation_required(vcpu);
3483 }
3484 
vmx_get_max_ept_level(void)3485 static int vmx_get_max_ept_level(void)
3486 {
3487 	if (cpu_has_vmx_ept_5levels())
3488 		return 5;
3489 	return 4;
3490 }
3491 
vmx_load_mmu_pgd(struct kvm_vcpu * vcpu,hpa_t root_hpa,int root_level)3492 void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3493 {
3494 	struct kvm *kvm = vcpu->kvm;
3495 	bool update_guest_cr3 = true;
3496 	unsigned long guest_cr3;
3497 
3498 	if (enable_ept) {
3499 		KVM_MMU_WARN_ON(root_to_sp(root_hpa) &&
3500 				root_level != root_to_sp(root_hpa)->role.level);
3501 		vmcs_write64(EPT_POINTER, construct_eptp(root_hpa));
3502 
3503 		hv_track_root_tdp(vcpu, root_hpa);
3504 
3505 		if (!enable_unrestricted_guest && !is_paging(vcpu))
3506 			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3507 		else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
3508 			guest_cr3 = vcpu->arch.cr3;
3509 		else /* vmcs.GUEST_CR3 is already up-to-date. */
3510 			update_guest_cr3 = false;
3511 		vmx_ept_load_pdptrs(vcpu);
3512 	} else {
3513 		guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
3514 			    kvm_get_active_cr3_lam_bits(vcpu);
3515 	}
3516 
3517 	if (update_guest_cr3)
3518 		vmcs_writel(GUEST_CR3, guest_cr3);
3519 }
3520 
vmx_is_valid_cr4(struct kvm_vcpu * vcpu,unsigned long cr4)3521 bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3522 {
3523 	/*
3524 	 * We operate under the default treatment of SMM, so VMX cannot be
3525 	 * enabled under SMM.  Note, whether or not VMXE is allowed at all,
3526 	 * i.e. is a reserved bit, is handled by common x86 code.
3527 	 */
3528 	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
3529 		return false;
3530 
3531 	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3532 		return false;
3533 
3534 	return true;
3535 }
3536 
vmx_set_cr4(struct kvm_vcpu * vcpu,unsigned long cr4)3537 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3538 {
3539 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
3540 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3541 	unsigned long hw_cr4;
3542 
3543 	/*
3544 	 * Pass through host's Machine Check Enable value to hw_cr4, which
3545 	 * is in force while we are in guest mode.  Do not let guests control
3546 	 * this bit, even if host CR4.MCE == 0.
3547 	 */
3548 	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3549 	if (enable_unrestricted_guest)
3550 		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
3551 	else if (vmx->rmode.vm86_active)
3552 		hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
3553 	else
3554 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
3555 
3556 	if (vmx_umip_emulated()) {
3557 		if (cr4 & X86_CR4_UMIP) {
3558 			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3559 			hw_cr4 &= ~X86_CR4_UMIP;
3560 		} else if (!is_guest_mode(vcpu) ||
3561 			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3562 			secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3563 		}
3564 	}
3565 
3566 	vcpu->arch.cr4 = cr4;
3567 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3568 
3569 	if (!enable_unrestricted_guest) {
3570 		if (enable_ept) {
3571 			if (!is_paging(vcpu)) {
3572 				hw_cr4 &= ~X86_CR4_PAE;
3573 				hw_cr4 |= X86_CR4_PSE;
3574 			} else if (!(cr4 & X86_CR4_PAE)) {
3575 				hw_cr4 &= ~X86_CR4_PAE;
3576 			}
3577 		}
3578 
3579 		/*
3580 		 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
3581 		 * hardware.  To emulate this behavior, SMEP/SMAP/PKU needs
3582 		 * to be manually disabled when guest switches to non-paging
3583 		 * mode.
3584 		 *
3585 		 * If !enable_unrestricted_guest, the CPU is always running
3586 		 * with CR0.PG=1 and CR4 needs to be modified.
3587 		 * If enable_unrestricted_guest, the CPU automatically
3588 		 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3589 		 */
3590 		if (!is_paging(vcpu))
3591 			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3592 	}
3593 
3594 	vmcs_writel(CR4_READ_SHADOW, cr4);
3595 	vmcs_writel(GUEST_CR4, hw_cr4);
3596 
3597 	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
3598 		vcpu->arch.cpuid_dynamic_bits_dirty = true;
3599 }
3600 
vmx_get_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)3601 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3602 {
3603 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3604 	u32 ar;
3605 
3606 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3607 		*var = vmx->rmode.segs[seg];
3608 		if (seg == VCPU_SREG_TR
3609 		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3610 			return;
3611 		var->base = vmx_read_guest_seg_base(vmx, seg);
3612 		var->selector = vmx_read_guest_seg_selector(vmx, seg);
3613 		return;
3614 	}
3615 	var->base = vmx_read_guest_seg_base(vmx, seg);
3616 	var->limit = vmx_read_guest_seg_limit(vmx, seg);
3617 	var->selector = vmx_read_guest_seg_selector(vmx, seg);
3618 	ar = vmx_read_guest_seg_ar(vmx, seg);
3619 	var->unusable = (ar >> 16) & 1;
3620 	var->type = ar & 15;
3621 	var->s = (ar >> 4) & 1;
3622 	var->dpl = (ar >> 5) & 3;
3623 	/*
3624 	 * Some userspaces do not preserve unusable property. Since usable
3625 	 * segment has to be present according to VMX spec we can use present
3626 	 * property to amend userspace bug by making unusable segment always
3627 	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
3628 	 * segment as unusable.
3629 	 */
3630 	var->present = !var->unusable;
3631 	var->avl = (ar >> 12) & 1;
3632 	var->l = (ar >> 13) & 1;
3633 	var->db = (ar >> 14) & 1;
3634 	var->g = (ar >> 15) & 1;
3635 }
3636 
vmx_get_segment_base(struct kvm_vcpu * vcpu,int seg)3637 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3638 {
3639 	struct kvm_segment s;
3640 
3641 	if (to_vmx(vcpu)->rmode.vm86_active) {
3642 		vmx_get_segment(vcpu, &s, seg);
3643 		return s.base;
3644 	}
3645 	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3646 }
3647 
__vmx_get_cpl(struct kvm_vcpu * vcpu,bool no_cache)3648 static int __vmx_get_cpl(struct kvm_vcpu *vcpu, bool no_cache)
3649 {
3650 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3651 	int ar;
3652 
3653 	if (unlikely(vmx->rmode.vm86_active))
3654 		return 0;
3655 
3656 	if (no_cache)
3657 		ar = vmcs_read32(GUEST_SS_AR_BYTES);
3658 	else
3659 		ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3660 	return VMX_AR_DPL(ar);
3661 }
3662 
vmx_get_cpl(struct kvm_vcpu * vcpu)3663 int vmx_get_cpl(struct kvm_vcpu *vcpu)
3664 {
3665 	return __vmx_get_cpl(vcpu, false);
3666 }
3667 
vmx_get_cpl_no_cache(struct kvm_vcpu * vcpu)3668 int vmx_get_cpl_no_cache(struct kvm_vcpu *vcpu)
3669 {
3670 	return __vmx_get_cpl(vcpu, true);
3671 }
3672 
vmx_segment_access_rights(struct kvm_segment * var)3673 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3674 {
3675 	u32 ar;
3676 
3677 	ar = var->type & 15;
3678 	ar |= (var->s & 1) << 4;
3679 	ar |= (var->dpl & 3) << 5;
3680 	ar |= (var->present & 1) << 7;
3681 	ar |= (var->avl & 1) << 12;
3682 	ar |= (var->l & 1) << 13;
3683 	ar |= (var->db & 1) << 14;
3684 	ar |= (var->g & 1) << 15;
3685 	ar |= (var->unusable || !var->present) << 16;
3686 
3687 	return ar;
3688 }
3689 
__vmx_set_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)3690 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3691 {
3692 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3693 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3694 
3695 	vmx_segment_cache_clear(vmx);
3696 
3697 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3698 		vmx->rmode.segs[seg] = *var;
3699 		if (seg == VCPU_SREG_TR)
3700 			vmcs_write16(sf->selector, var->selector);
3701 		else if (var->s)
3702 			fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3703 		return;
3704 	}
3705 
3706 	vmcs_writel(sf->base, var->base);
3707 	vmcs_write32(sf->limit, var->limit);
3708 	vmcs_write16(sf->selector, var->selector);
3709 
3710 	/*
3711 	 *   Fix the "Accessed" bit in AR field of segment registers for older
3712 	 * qemu binaries.
3713 	 *   IA32 arch specifies that at the time of processor reset the
3714 	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
3715 	 * is setting it to 0 in the userland code. This causes invalid guest
3716 	 * state vmexit when "unrestricted guest" mode is turned on.
3717 	 *    Fix for this setup issue in cpu_reset is being pushed in the qemu
3718 	 * tree. Newer qemu binaries with that qemu fix would not need this
3719 	 * kvm hack.
3720 	 */
3721 	if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3722 		var->type |= 0x1; /* Accessed */
3723 
3724 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3725 }
3726 
vmx_set_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)3727 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3728 {
3729 	__vmx_set_segment(vcpu, var, seg);
3730 
3731 	to_vmx(vcpu)->vt.emulation_required = vmx_emulation_required(vcpu);
3732 }
3733 
vmx_get_cs_db_l_bits(struct kvm_vcpu * vcpu,int * db,int * l)3734 void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3735 {
3736 	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3737 
3738 	*db = (ar >> 14) & 1;
3739 	*l = (ar >> 13) & 1;
3740 }
3741 
vmx_get_idt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3742 void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3743 {
3744 	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3745 	dt->address = vmcs_readl(GUEST_IDTR_BASE);
3746 }
3747 
vmx_set_idt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3748 void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3749 {
3750 	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3751 	vmcs_writel(GUEST_IDTR_BASE, dt->address);
3752 }
3753 
vmx_get_gdt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3754 void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3755 {
3756 	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3757 	dt->address = vmcs_readl(GUEST_GDTR_BASE);
3758 }
3759 
vmx_set_gdt(struct kvm_vcpu * vcpu,struct desc_ptr * dt)3760 void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3761 {
3762 	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3763 	vmcs_writel(GUEST_GDTR_BASE, dt->address);
3764 }
3765 
rmode_segment_valid(struct kvm_vcpu * vcpu,int seg)3766 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3767 {
3768 	struct kvm_segment var;
3769 	u32 ar;
3770 
3771 	vmx_get_segment(vcpu, &var, seg);
3772 	var.dpl = 0x3;
3773 	if (seg == VCPU_SREG_CS)
3774 		var.type = 0x3;
3775 	ar = vmx_segment_access_rights(&var);
3776 
3777 	if (var.base != (var.selector << 4))
3778 		return false;
3779 	if (var.limit != 0xffff)
3780 		return false;
3781 	if (ar != 0xf3)
3782 		return false;
3783 
3784 	return true;
3785 }
3786 
code_segment_valid(struct kvm_vcpu * vcpu)3787 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3788 {
3789 	struct kvm_segment cs;
3790 	unsigned int cs_rpl;
3791 
3792 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3793 	cs_rpl = cs.selector & SEGMENT_RPL_MASK;
3794 
3795 	if (cs.unusable)
3796 		return false;
3797 	if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
3798 		return false;
3799 	if (!cs.s)
3800 		return false;
3801 	if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
3802 		if (cs.dpl > cs_rpl)
3803 			return false;
3804 	} else {
3805 		if (cs.dpl != cs_rpl)
3806 			return false;
3807 	}
3808 	if (!cs.present)
3809 		return false;
3810 
3811 	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3812 	return true;
3813 }
3814 
stack_segment_valid(struct kvm_vcpu * vcpu)3815 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3816 {
3817 	struct kvm_segment ss;
3818 	unsigned int ss_rpl;
3819 
3820 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3821 	ss_rpl = ss.selector & SEGMENT_RPL_MASK;
3822 
3823 	if (ss.unusable)
3824 		return true;
3825 	if (ss.type != 3 && ss.type != 7)
3826 		return false;
3827 	if (!ss.s)
3828 		return false;
3829 	if (ss.dpl != ss_rpl) /* DPL != RPL */
3830 		return false;
3831 	if (!ss.present)
3832 		return false;
3833 
3834 	return true;
3835 }
3836 
data_segment_valid(struct kvm_vcpu * vcpu,int seg)3837 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3838 {
3839 	struct kvm_segment var;
3840 	unsigned int rpl;
3841 
3842 	vmx_get_segment(vcpu, &var, seg);
3843 	rpl = var.selector & SEGMENT_RPL_MASK;
3844 
3845 	if (var.unusable)
3846 		return true;
3847 	if (!var.s)
3848 		return false;
3849 	if (!var.present)
3850 		return false;
3851 	if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
3852 		if (var.dpl < rpl) /* DPL < RPL */
3853 			return false;
3854 	}
3855 
3856 	/* TODO: Add other members to kvm_segment_field to allow checking for other access
3857 	 * rights flags
3858 	 */
3859 	return true;
3860 }
3861 
tr_valid(struct kvm_vcpu * vcpu)3862 static bool tr_valid(struct kvm_vcpu *vcpu)
3863 {
3864 	struct kvm_segment tr;
3865 
3866 	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3867 
3868 	if (tr.unusable)
3869 		return false;
3870 	if (tr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3871 		return false;
3872 	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3873 		return false;
3874 	if (!tr.present)
3875 		return false;
3876 
3877 	return true;
3878 }
3879 
ldtr_valid(struct kvm_vcpu * vcpu)3880 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3881 {
3882 	struct kvm_segment ldtr;
3883 
3884 	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3885 
3886 	if (ldtr.unusable)
3887 		return true;
3888 	if (ldtr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3889 		return false;
3890 	if (ldtr.type != 2)
3891 		return false;
3892 	if (!ldtr.present)
3893 		return false;
3894 
3895 	return true;
3896 }
3897 
cs_ss_rpl_check(struct kvm_vcpu * vcpu)3898 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3899 {
3900 	struct kvm_segment cs, ss;
3901 
3902 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3903 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3904 
3905 	return ((cs.selector & SEGMENT_RPL_MASK) ==
3906 		 (ss.selector & SEGMENT_RPL_MASK));
3907 }
3908 
3909 /*
3910  * Check if guest state is valid. Returns true if valid, false if
3911  * not.
3912  * We assume that registers are always usable
3913  */
__vmx_guest_state_valid(struct kvm_vcpu * vcpu)3914 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
3915 {
3916 	/* real mode guest state checks */
3917 	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3918 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3919 			return false;
3920 		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3921 			return false;
3922 		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3923 			return false;
3924 		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3925 			return false;
3926 		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3927 			return false;
3928 		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3929 			return false;
3930 	} else {
3931 	/* protected mode guest state checks */
3932 		if (!cs_ss_rpl_check(vcpu))
3933 			return false;
3934 		if (!code_segment_valid(vcpu))
3935 			return false;
3936 		if (!stack_segment_valid(vcpu))
3937 			return false;
3938 		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3939 			return false;
3940 		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3941 			return false;
3942 		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3943 			return false;
3944 		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3945 			return false;
3946 		if (!tr_valid(vcpu))
3947 			return false;
3948 		if (!ldtr_valid(vcpu))
3949 			return false;
3950 	}
3951 	/* TODO:
3952 	 * - Add checks on RIP
3953 	 * - Add checks on RFLAGS
3954 	 */
3955 
3956 	return true;
3957 }
3958 
init_rmode_tss(struct kvm * kvm,void __user * ua)3959 static int init_rmode_tss(struct kvm *kvm, void __user *ua)
3960 {
3961 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3962 	u16 data;
3963 	int i;
3964 
3965 	for (i = 0; i < 3; i++) {
3966 		if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
3967 			return -EFAULT;
3968 	}
3969 
3970 	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3971 	if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16)))
3972 		return -EFAULT;
3973 
3974 	data = ~0;
3975 	if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8)))
3976 		return -EFAULT;
3977 
3978 	return 0;
3979 }
3980 
init_rmode_identity_map(struct kvm * kvm)3981 static int init_rmode_identity_map(struct kvm *kvm)
3982 {
3983 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
3984 	int i, r = 0;
3985 	void __user *uaddr;
3986 	u32 tmp;
3987 
3988 	/* Protect kvm_vmx->ept_identity_pagetable_done. */
3989 	mutex_lock(&kvm->slots_lock);
3990 
3991 	if (likely(kvm_vmx->ept_identity_pagetable_done))
3992 		goto out;
3993 
3994 	if (!kvm_vmx->ept_identity_map_addr)
3995 		kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3996 
3997 	uaddr = __x86_set_memory_region(kvm,
3998 					IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
3999 					kvm_vmx->ept_identity_map_addr,
4000 					PAGE_SIZE);
4001 	if (IS_ERR(uaddr)) {
4002 		r = PTR_ERR(uaddr);
4003 		goto out;
4004 	}
4005 
4006 	/* Set up identity-mapping pagetable for EPT in real mode */
4007 	for (i = 0; i < (PAGE_SIZE / sizeof(tmp)); i++) {
4008 		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
4009 			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
4010 		if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) {
4011 			r = -EFAULT;
4012 			goto out;
4013 		}
4014 	}
4015 	kvm_vmx->ept_identity_pagetable_done = true;
4016 
4017 out:
4018 	mutex_unlock(&kvm->slots_lock);
4019 	return r;
4020 }
4021 
seg_setup(int seg)4022 static void seg_setup(int seg)
4023 {
4024 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
4025 	unsigned int ar;
4026 
4027 	vmcs_write16(sf->selector, 0);
4028 	vmcs_writel(sf->base, 0);
4029 	vmcs_write32(sf->limit, 0xffff);
4030 	ar = 0x93;
4031 	if (seg == VCPU_SREG_CS)
4032 		ar |= 0x08; /* code segment */
4033 
4034 	vmcs_write32(sf->ar_bytes, ar);
4035 }
4036 
allocate_vpid(void)4037 int allocate_vpid(void)
4038 {
4039 	int vpid;
4040 
4041 	if (!enable_vpid)
4042 		return 0;
4043 	spin_lock(&vmx_vpid_lock);
4044 	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
4045 	if (vpid < VMX_NR_VPIDS)
4046 		__set_bit(vpid, vmx_vpid_bitmap);
4047 	else
4048 		vpid = 0;
4049 	spin_unlock(&vmx_vpid_lock);
4050 	return vpid;
4051 }
4052 
free_vpid(int vpid)4053 void free_vpid(int vpid)
4054 {
4055 	if (!enable_vpid || vpid == 0)
4056 		return;
4057 	spin_lock(&vmx_vpid_lock);
4058 	__clear_bit(vpid, vmx_vpid_bitmap);
4059 	spin_unlock(&vmx_vpid_lock);
4060 }
4061 
vmx_msr_bitmap_l01_changed(struct vcpu_vmx * vmx)4062 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
4063 {
4064 	/*
4065 	 * When KVM is a nested hypervisor on top of Hyper-V and uses
4066 	 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
4067 	 * bitmap has changed.
4068 	 */
4069 	if (kvm_is_using_evmcs()) {
4070 		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
4071 
4072 		if (evmcs->hv_enlightenments_control.msr_bitmap)
4073 			evmcs->hv_clean_fields &=
4074 				~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
4075 	}
4076 
4077 	vmx->nested.force_msr_bitmap_recalc = true;
4078 }
4079 
vmx_set_intercept_for_msr(struct kvm_vcpu * vcpu,u32 msr,int type,bool set)4080 void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
4081 {
4082 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4083 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4084 
4085 	if (!cpu_has_vmx_msr_bitmap())
4086 		return;
4087 
4088 	vmx_msr_bitmap_l01_changed(vmx);
4089 
4090 	if (type & MSR_TYPE_R) {
4091 		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
4092 			vmx_clear_msr_bitmap_read(msr_bitmap, msr);
4093 		else
4094 			vmx_set_msr_bitmap_read(msr_bitmap, msr);
4095 	}
4096 
4097 	if (type & MSR_TYPE_W) {
4098 		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
4099 			vmx_clear_msr_bitmap_write(msr_bitmap, msr);
4100 		else
4101 			vmx_set_msr_bitmap_write(msr_bitmap, msr);
4102 	}
4103 }
4104 
vmx_update_msr_bitmap_x2apic(struct kvm_vcpu * vcpu)4105 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
4106 {
4107 	/*
4108 	 * x2APIC indices for 64-bit accesses into the RDMSR and WRMSR halves
4109 	 * of the MSR bitmap.  KVM emulates APIC registers up through 0x3f0,
4110 	 * i.e. MSR 0x83f, and so only needs to dynamically manipulate 64 bits.
4111 	 */
4112 	const int read_idx = APIC_BASE_MSR / BITS_PER_LONG_LONG;
4113 	const int write_idx = read_idx + (0x800 / sizeof(u64));
4114 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4115 	u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap;
4116 	u8 mode;
4117 
4118 	if (!cpu_has_vmx_msr_bitmap() || WARN_ON_ONCE(!lapic_in_kernel(vcpu)))
4119 		return;
4120 
4121 	if (cpu_has_secondary_exec_ctrls() &&
4122 	    (secondary_exec_controls_get(vmx) &
4123 	     SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
4124 		mode = MSR_BITMAP_MODE_X2APIC;
4125 		if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
4126 			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
4127 	} else {
4128 		mode = 0;
4129 	}
4130 
4131 	if (mode == vmx->x2apic_msr_bitmap_mode)
4132 		return;
4133 
4134 	vmx->x2apic_msr_bitmap_mode = mode;
4135 
4136 	/*
4137 	 * Reset the bitmap for MSRs 0x800 - 0x83f.  Leave AMD's uber-extended
4138 	 * registers (0x840 and above) intercepted, KVM doesn't support them.
4139 	 * Intercept all writes by default and poke holes as needed.  Pass
4140 	 * through reads for all valid registers by default in x2APIC+APICv
4141 	 * mode, only the current timer count needs on-demand emulation by KVM.
4142 	 */
4143 	if (mode & MSR_BITMAP_MODE_X2APIC_APICV)
4144 		msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic);
4145 	else
4146 		msr_bitmap[read_idx] = ~0ull;
4147 	msr_bitmap[write_idx] = ~0ull;
4148 
4149 	/*
4150 	 * TPR reads and writes can be virtualized even if virtual interrupt
4151 	 * delivery is not in use.
4152 	 */
4153 	vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
4154 				  !(mode & MSR_BITMAP_MODE_X2APIC));
4155 
4156 	if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
4157 		vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
4158 		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
4159 		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
4160 		if (enable_ipiv)
4161 			vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_ICR), MSR_TYPE_RW);
4162 	}
4163 }
4164 
pt_update_intercept_for_msr(struct kvm_vcpu * vcpu)4165 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
4166 {
4167 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4168 	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4169 	u32 i;
4170 
4171 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
4172 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
4173 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
4174 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
4175 	for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4176 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
4177 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
4178 	}
4179 }
4180 
vmx_recalc_pmu_msr_intercepts(struct kvm_vcpu * vcpu)4181 static void vmx_recalc_pmu_msr_intercepts(struct kvm_vcpu *vcpu)
4182 {
4183 	u64 vm_exit_controls_bits = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
4184 				    VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL;
4185 	bool has_mediated_pmu = kvm_vcpu_has_mediated_pmu(vcpu);
4186 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
4187 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4188 	bool intercept = !has_mediated_pmu;
4189 	int i;
4190 
4191 	if (!enable_mediated_pmu)
4192 		return;
4193 
4194 	if (!cpu_has_save_perf_global_ctrl()) {
4195 		vm_exit_controls_bits &= ~VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL;
4196 
4197 		if (has_mediated_pmu)
4198 			vmx_add_autostore_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL);
4199 		else
4200 			vmx_remove_autostore_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL);
4201 	}
4202 
4203 	vm_entry_controls_changebit(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
4204 				    has_mediated_pmu);
4205 
4206 	vm_exit_controls_changebit(vmx, vm_exit_controls_bits, has_mediated_pmu);
4207 
4208 	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
4209 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PERFCTR0 + i,
4210 					  MSR_TYPE_RW, intercept);
4211 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PMC0 + i, MSR_TYPE_RW,
4212 					  intercept || !fw_writes_is_enabled(vcpu));
4213 	}
4214 	for ( ; i < kvm_pmu_cap.num_counters_gp; i++) {
4215 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PERFCTR0 + i,
4216 					  MSR_TYPE_RW, true);
4217 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PMC0 + i,
4218 					  MSR_TYPE_RW, true);
4219 	}
4220 
4221 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
4222 		vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_FIXED_CTR0 + i,
4223 					  MSR_TYPE_RW, intercept);
4224 	for ( ; i < kvm_pmu_cap.num_counters_fixed; i++)
4225 		vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_FIXED_CTR0 + i,
4226 					  MSR_TYPE_RW, true);
4227 
4228 	intercept = kvm_need_perf_global_ctrl_intercept(vcpu);
4229 	vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_GLOBAL_STATUS,
4230 				  MSR_TYPE_RW, intercept);
4231 	vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4232 				  MSR_TYPE_RW, intercept);
4233 	vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
4234 				  MSR_TYPE_RW, intercept);
4235 }
4236 
vmx_recalc_msr_intercepts(struct kvm_vcpu * vcpu)4237 static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
4238 {
4239 	bool intercept;
4240 
4241 	if (!cpu_has_vmx_msr_bitmap())
4242 		return;
4243 
4244 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
4245 #ifdef CONFIG_X86_64
4246 	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
4247 	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
4248 	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
4249 #endif
4250 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
4251 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
4252 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
4253 	if (kvm_cstate_in_guest(vcpu->kvm)) {
4254 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
4255 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
4256 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
4257 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
4258 	}
4259 	if (kvm_aperfmperf_in_guest(vcpu->kvm)) {
4260 		vmx_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, MSR_TYPE_R);
4261 		vmx_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
4262 	}
4263 
4264 	/* PT MSRs can be passed through iff PT is exposed to the guest. */
4265 	if (vmx_pt_mode_is_host_guest())
4266 		pt_update_intercept_for_msr(vcpu);
4267 
4268 	if (vcpu->arch.xfd_no_write_intercept)
4269 		vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD, MSR_TYPE_RW);
4270 
4271 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
4272 				  !to_vmx(vcpu)->spec_ctrl);
4273 
4274 	if (kvm_cpu_cap_has(X86_FEATURE_XFD))
4275 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
4276 					  !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD));
4277 
4278 	if (cpu_feature_enabled(X86_FEATURE_IBPB))
4279 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
4280 					  !guest_has_pred_cmd_msr(vcpu));
4281 
4282 	if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D))
4283 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
4284 					  !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
4285 
4286 	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
4287 		intercept = !guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
4288 
4289 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, intercept);
4290 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, intercept);
4291 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, intercept);
4292 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, intercept);
4293 	}
4294 
4295 	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK) || kvm_cpu_cap_has(X86_FEATURE_IBT)) {
4296 		intercept = !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) &&
4297 			    !guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
4298 
4299 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, intercept);
4300 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, intercept);
4301 	}
4302 
4303 	vmx_recalc_pmu_msr_intercepts(vcpu);
4304 
4305 	/*
4306 	 * x2APIC and LBR MSR intercepts are modified on-demand and cannot be
4307 	 * filtered by userspace.
4308 	 */
4309 }
4310 
vmx_recalc_instruction_intercepts(struct kvm_vcpu * vcpu)4311 static void vmx_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
4312 {
4313 	exec_controls_changebit(to_vmx(vcpu), CPU_BASED_RDPMC_EXITING,
4314 				kvm_need_rdpmc_intercept(vcpu));
4315 }
4316 
vmx_recalc_intercepts(struct kvm_vcpu * vcpu)4317 void vmx_recalc_intercepts(struct kvm_vcpu *vcpu)
4318 {
4319 	vmx_recalc_instruction_intercepts(vcpu);
4320 	vmx_recalc_msr_intercepts(vcpu);
4321 }
4322 
vmx_deliver_nested_posted_interrupt(struct kvm_vcpu * vcpu,int vector)4323 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4324 						int vector)
4325 {
4326 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4327 
4328 	/*
4329 	 * DO NOT query the vCPU's vmcs12, as vmcs12 is dynamically allocated
4330 	 * and freed, and must not be accessed outside of vcpu->mutex.  The
4331 	 * vCPU's cached PI NV is valid if and only if posted interrupts
4332 	 * enabled in its vmcs12, i.e. checking the vector also checks that
4333 	 * L1 has enabled posted interrupts for L2.
4334 	 */
4335 	if (is_guest_mode(vcpu) &&
4336 	    vector == vmx->nested.posted_intr_nv) {
4337 		/*
4338 		 * If a posted intr is not recognized by hardware,
4339 		 * we will accomplish it in the next vmentry.
4340 		 */
4341 		vmx->nested.pi_pending = true;
4342 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4343 
4344 		/*
4345 		 * This pairs with the smp_mb_*() after setting vcpu->mode in
4346 		 * vcpu_enter_guest() to guarantee the vCPU sees the event
4347 		 * request if triggering a posted interrupt "fails" because
4348 		 * vcpu->mode != IN_GUEST_MODE.  The extra barrier is needed as
4349 		 * the smb_wmb() in kvm_make_request() only ensures everything
4350 		 * done before making the request is visible when the request
4351 		 * is visible, it doesn't ensure ordering between the store to
4352 		 * vcpu->requests and the load from vcpu->mode.
4353 		 */
4354 		smp_mb__after_atomic();
4355 
4356 		/* the PIR and ON have been set by L1. */
4357 		kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
4358 		return 0;
4359 	}
4360 	return -1;
4361 }
4362 /*
4363  * Send interrupt to vcpu via posted interrupt way.
4364  * 1. If target vcpu is running(non-root mode), send posted interrupt
4365  * notification to vcpu and hardware will sync PIR to vIRR atomically.
4366  * 2. If target vcpu isn't running(root mode), kick it to pick up the
4367  * interrupt from PIR in next vmentry.
4368  */
vmx_deliver_posted_interrupt(struct kvm_vcpu * vcpu,int vector)4369 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4370 {
4371 	struct vcpu_vt *vt = to_vt(vcpu);
4372 	int r;
4373 
4374 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4375 	if (!r)
4376 		return 0;
4377 
4378 	/* Note, this is called iff the local APIC is in-kernel. */
4379 	if (!vcpu->arch.apic->apicv_active)
4380 		return -1;
4381 
4382 	__vmx_deliver_posted_interrupt(vcpu, &vt->pi_desc, vector);
4383 	return 0;
4384 }
4385 
vmx_deliver_interrupt(struct kvm_lapic * apic,int delivery_mode,int trig_mode,int vector)4386 void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
4387 			   int trig_mode, int vector)
4388 {
4389 	struct kvm_vcpu *vcpu = apic->vcpu;
4390 
4391 	if (vmx_deliver_posted_interrupt(vcpu, vector)) {
4392 		kvm_lapic_set_irr(vector, apic);
4393 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4394 		kvm_vcpu_kick(vcpu);
4395 	} else {
4396 		trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
4397 					   trig_mode, vector);
4398 	}
4399 }
4400 
4401 /*
4402  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4403  * will not change in the lifetime of the guest.
4404  * Note that host-state that does change is set elsewhere. E.g., host-state
4405  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4406  */
vmx_set_constant_host_state(struct vcpu_vmx * vmx)4407 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4408 {
4409 	u32 low32, high32;
4410 	unsigned long tmpl;
4411 	unsigned long cr0, cr3, cr4;
4412 
4413 	cr0 = read_cr0();
4414 	WARN_ON(cr0 & X86_CR0_TS);
4415 	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
4416 
4417 	/*
4418 	 * Save the most likely value for this task's CR3 in the VMCS.
4419 	 * We can't use __get_current_cr3_fast() because we're not atomic.
4420 	 */
4421 	cr3 = __read_cr3();
4422 	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
4423 	vmx->loaded_vmcs->host_state.cr3 = cr3;
4424 
4425 	/* Save the most likely value for this task's CR4 in the VMCS. */
4426 	cr4 = cr4_read_shadow();
4427 	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
4428 	vmx->loaded_vmcs->host_state.cr4 = cr4;
4429 
4430 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
4431 #ifdef CONFIG_X86_64
4432 	/*
4433 	 * Load null selectors, so we can avoid reloading them in
4434 	 * vmx_prepare_switch_to_host(), in case userspace uses
4435 	 * the null selectors too (the expected case).
4436 	 */
4437 	vmcs_write16(HOST_DS_SELECTOR, 0);
4438 	vmcs_write16(HOST_ES_SELECTOR, 0);
4439 #else
4440 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4441 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4442 #endif
4443 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4444 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
4445 
4446 	vmcs_writel(HOST_IDTR_BASE, host_idt_base);   /* 22.2.4 */
4447 
4448 	vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
4449 
4450 	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4451 	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4452 
4453 	/*
4454 	 * SYSENTER is used for 32-bit system calls on either 32-bit or
4455 	 * 64-bit kernels.  It is always zero If neither is allowed, otherwise
4456 	 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
4457 	 * have already done so!).
4458 	 */
4459 	if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
4460 		vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
4461 
4462 	rdmsrq(MSR_IA32_SYSENTER_EIP, tmpl);
4463 	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
4464 
4465 	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4466 		rdmsr(MSR_IA32_CR_PAT, low32, high32);
4467 		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4468 	}
4469 
4470 	if (cpu_has_load_ia32_efer())
4471 		vmcs_write64(HOST_IA32_EFER, kvm_host.efer);
4472 
4473 	/*
4474 	 * Supervisor shadow stack is not enabled on host side, i.e.,
4475 	 * host IA32_S_CET.SHSTK_EN bit is guaranteed to 0 now, per SDM
4476 	 * description(RDSSP instruction), SSP is not readable in CPL0,
4477 	 * so resetting the two registers to 0s at VM-Exit does no harm
4478 	 * to kernel execution. When execution flow exits to userspace,
4479 	 * SSP is reloaded from IA32_PL3_SSP. Check SDM Vol.2A/B Chapter
4480 	 * 3 and 4 for details.
4481 	 */
4482 	if (enable_cet) {
4483 		vmcs_writel(HOST_S_CET, kvm_host.s_cet);
4484 		vmcs_writel(HOST_SSP, 0);
4485 		vmcs_writel(HOST_INTR_SSP_TABLE, 0);
4486 	}
4487 
4488 	/*
4489 	 * When running a guest with a mediated PMU, guest state is resident in
4490 	 * hardware after VM-Exit.  Zero PERF_GLOBAL_CTRL on exit so that host
4491 	 * activity doesn't bleed into the guest counters.  When running with
4492 	 * an emulated PMU, PERF_GLOBAL_CTRL is dynamically computed on every
4493 	 * entry/exit to merge guest and host PMU usage.
4494 	 */
4495 	if (enable_mediated_pmu)
4496 		vmcs_write64(HOST_IA32_PERF_GLOBAL_CTRL, 0);
4497 }
4498 
set_cr4_guest_host_mask(struct vcpu_vmx * vmx)4499 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4500 {
4501 	struct kvm_vcpu *vcpu = &vmx->vcpu;
4502 
4503 	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4504 					  ~vcpu->arch.cr4_guest_rsvd_bits;
4505 	if (!enable_ept) {
4506 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4507 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4508 	}
4509 	if (is_guest_mode(&vmx->vcpu))
4510 		vcpu->arch.cr4_guest_owned_bits &=
4511 			~get_vmcs12(vcpu)->cr4_guest_host_mask;
4512 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4513 }
4514 
vmx_pin_based_exec_ctrl(struct vcpu_vmx * vmx)4515 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4516 {
4517 	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4518 
4519 	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4520 		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4521 
4522 	if (!enable_vnmi)
4523 		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
4524 
4525 	if (!enable_preemption_timer)
4526 		pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4527 
4528 	return pin_based_exec_ctrl;
4529 }
4530 
vmx_get_initial_vmentry_ctrl(void)4531 static u32 vmx_get_initial_vmentry_ctrl(void)
4532 {
4533 	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
4534 
4535 	if (vmx_pt_mode_is_system())
4536 		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
4537 				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
4538 
4539 	if (!enable_cet)
4540 		vmentry_ctrl &= ~VM_ENTRY_LOAD_CET_STATE;
4541 
4542 	/*
4543 	 * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
4544 	 */
4545 	vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
4546 			  VM_ENTRY_LOAD_IA32_EFER |
4547 			  VM_ENTRY_IA32E_MODE);
4548 
4549 	return vmentry_ctrl;
4550 }
4551 
vmx_get_initial_vmexit_ctrl(void)4552 static u32 vmx_get_initial_vmexit_ctrl(void)
4553 {
4554 	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
4555 
4556 	if (!enable_cet)
4557 		vmexit_ctrl &= ~VM_EXIT_LOAD_CET_STATE;
4558 
4559 	/*
4560 	 * Not used by KVM and never set in vmcs01 or vmcs02, but emulated for
4561 	 * nested virtualization and thus allowed to be set in vmcs12.
4562 	 */
4563 	vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
4564 			 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);
4565 
4566 	if (vmx_pt_mode_is_system())
4567 		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
4568 				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
4569 	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
4570 	return vmexit_ctrl &
4571 		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER |
4572 		  VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL);
4573 }
4574 
vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu * vcpu)4575 void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4576 {
4577 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4578 
4579 	guard(vmx_vmcs01)(vcpu);
4580 
4581 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4582 
4583 	secondary_exec_controls_changebit(vmx,
4584 					  SECONDARY_EXEC_APIC_REGISTER_VIRT |
4585 					  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY,
4586 					  kvm_vcpu_apicv_active(vcpu));
4587 	if (enable_ipiv)
4588 		tertiary_exec_controls_changebit(vmx, TERTIARY_EXEC_IPI_VIRT,
4589 						 kvm_vcpu_apicv_active(vcpu));
4590 
4591 	vmx_update_msr_bitmap_x2apic(vcpu);
4592 }
4593 
vmx_exec_control(struct vcpu_vmx * vmx)4594 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4595 {
4596 	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4597 
4598 	/*
4599 	 * Not used by KVM, but fully supported for nesting, i.e. are allowed in
4600 	 * vmcs12 and propagated to vmcs02 when set in vmcs12.
4601 	 */
4602 	exec_control &= ~(CPU_BASED_RDTSC_EXITING |
4603 			  CPU_BASED_USE_IO_BITMAPS |
4604 			  CPU_BASED_MONITOR_TRAP_FLAG |
4605 			  CPU_BASED_PAUSE_EXITING);
4606 
4607 	/* INTR_WINDOW_EXITING and NMI_WINDOW_EXITING are toggled dynamically */
4608 	exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
4609 			  CPU_BASED_NMI_WINDOW_EXITING);
4610 
4611 	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4612 		exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4613 
4614 	if (!cpu_need_tpr_shadow(&vmx->vcpu))
4615 		exec_control &= ~CPU_BASED_TPR_SHADOW;
4616 
4617 #ifdef CONFIG_X86_64
4618 	if (exec_control & CPU_BASED_TPR_SHADOW)
4619 		exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
4620 				  CPU_BASED_CR8_STORE_EXITING);
4621 	else
4622 		exec_control |= CPU_BASED_CR8_STORE_EXITING |
4623 				CPU_BASED_CR8_LOAD_EXITING;
4624 #endif
4625 	/* No need to intercept CR3 access or INVPLG when using EPT. */
4626 	if (enable_ept)
4627 		exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4628 				  CPU_BASED_CR3_STORE_EXITING |
4629 				  CPU_BASED_INVLPG_EXITING);
4630 	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4631 		exec_control &= ~(CPU_BASED_MWAIT_EXITING |
4632 				CPU_BASED_MONITOR_EXITING);
4633 	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4634 		exec_control &= ~CPU_BASED_HLT_EXITING;
4635 	return exec_control;
4636 }
4637 
vmx_tertiary_exec_control(struct vcpu_vmx * vmx)4638 static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
4639 {
4640 	u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;
4641 
4642 	/*
4643 	 * IPI virtualization relies on APICv. Disable IPI virtualization if
4644 	 * APICv is inhibited.
4645 	 */
4646 	if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4647 		exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
4648 
4649 	return exec_control;
4650 }
4651 
4652 /*
4653  * Adjust a single secondary execution control bit to intercept/allow an
4654  * instruction in the guest.  This is usually done based on whether or not a
4655  * feature has been exposed to the guest in order to correctly emulate faults.
4656  */
4657 static inline void
vmx_adjust_secondary_exec_control(struct vcpu_vmx * vmx,u32 * exec_control,u32 control,bool enabled,bool exiting)4658 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4659 				  u32 control, bool enabled, bool exiting)
4660 {
4661 	/*
4662 	 * If the control is for an opt-in feature, clear the control if the
4663 	 * feature is not exposed to the guest, i.e. not enabled.  If the
4664 	 * control is opt-out, i.e. an exiting control, clear the control if
4665 	 * the feature _is_ exposed to the guest, i.e. exiting/interception is
4666 	 * disabled for the associated instruction.  Note, the caller is
4667 	 * responsible presetting exec_control to set all supported bits.
4668 	 */
4669 	if (enabled == exiting)
4670 		*exec_control &= ~control;
4671 
4672 	/*
4673 	 * Update the nested MSR settings so that a nested VMM can/can't set
4674 	 * controls for features that are/aren't exposed to the guest.
4675 	 */
4676 	if (nested &&
4677 	    kvm_check_has_quirk(vmx->vcpu.kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
4678 		/*
4679 		 * All features that can be added or removed to VMX MSRs must
4680 		 * be supported in the first place for nested virtualization.
4681 		 */
4682 		if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
4683 			enabled = false;
4684 
4685 		if (enabled)
4686 			vmx->nested.msrs.secondary_ctls_high |= control;
4687 		else
4688 			vmx->nested.msrs.secondary_ctls_high &= ~control;
4689 	}
4690 }
4691 
4692 /*
4693  * Wrapper macro for the common case of adjusting a secondary execution control
4694  * based on a single guest CPUID bit, with a dedicated feature bit.  This also
4695  * verifies that the control is actually supported by KVM and hardware.
4696  */
4697 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting)	\
4698 ({												\
4699 	struct kvm_vcpu *__vcpu = &(vmx)->vcpu;							\
4700 	bool __enabled;										\
4701 												\
4702 	if (cpu_has_vmx_##name()) {								\
4703 		__enabled = guest_cpu_cap_has(__vcpu, X86_FEATURE_##feat_name);			\
4704 		vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
4705 						  __enabled, exiting);				\
4706 	}											\
4707 })
4708 
4709 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4710 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4711 	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4712 
4713 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4714 	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4715 
vmx_secondary_exec_control(struct vcpu_vmx * vmx)4716 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4717 {
4718 	struct kvm_vcpu *vcpu = &vmx->vcpu;
4719 
4720 	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4721 
4722 	if (vmx_pt_mode_is_system())
4723 		exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
4724 	if (!cpu_need_virtualize_apic_accesses(vcpu))
4725 		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4726 	if (vmx->vpid == 0)
4727 		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4728 	if (!enable_ept) {
4729 		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4730 		exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
4731 		enable_unrestricted_guest = 0;
4732 	}
4733 	if (!enable_unrestricted_guest)
4734 		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4735 	if (kvm_pause_in_guest(vmx->vcpu.kvm))
4736 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4737 	if (!kvm_vcpu_apicv_active(vcpu))
4738 		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4739 				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4740 	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4741 
4742 	/*
4743 	 * KVM doesn't support VMFUNC for L1, but the control is set in KVM's
4744 	 * base configuration as KVM emulates VMFUNC[EPTP_SWITCHING] for L2.
4745 	 */
4746 	exec_control &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
4747 
4748 	/* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
4749 	 * in vmx_set_cr4.  */
4750 	exec_control &= ~SECONDARY_EXEC_DESC;
4751 
4752 	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4753 	   (handle_vmptrld).
4754 	   We can NOT enable shadow_vmcs here because we don't have yet
4755 	   a current VMCS12
4756 	*/
4757 	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4758 
4759 	/*
4760 	 * PML is enabled/disabled when dirty logging of memsmlots changes, but
4761 	 * it needs to be set here when dirty logging is already active, e.g.
4762 	 * if this vCPU was created after dirty logging was enabled.
4763 	 */
4764 	if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
4765 		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4766 
4767 	vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
4768 
4769 	/*
4770 	 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
4771 	 * feature is exposed to the guest.  This creates a virtualization hole
4772 	 * if both are supported in hardware but only one is exposed to the
4773 	 * guest, but letting the guest execute RDTSCP or RDPID when either one
4774 	 * is advertised is preferable to emulating the advertised instruction
4775 	 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
4776 	 */
4777 	if (cpu_has_vmx_rdtscp()) {
4778 		bool rdpid_or_rdtscp_enabled =
4779 			guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) ||
4780 			guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID);
4781 
4782 		vmx_adjust_secondary_exec_control(vmx, &exec_control,
4783 						  SECONDARY_EXEC_ENABLE_RDTSCP,
4784 						  rdpid_or_rdtscp_enabled, false);
4785 	}
4786 
4787 	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4788 
4789 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4790 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4791 
4792 	vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4793 				    ENABLE_USR_WAIT_PAUSE, false);
4794 
4795 	if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4796 		exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;
4797 
4798 	if (!kvm_notify_vmexit_enabled(vcpu->kvm))
4799 		exec_control &= ~SECONDARY_EXEC_NOTIFY_VM_EXITING;
4800 
4801 	return exec_control;
4802 }
4803 
vmx_get_pid_table_order(struct kvm * kvm)4804 static inline int vmx_get_pid_table_order(struct kvm *kvm)
4805 {
4806 	return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
4807 }
4808 
vmx_alloc_ipiv_pid_table(struct kvm * kvm)4809 static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
4810 {
4811 	struct page *pages;
4812 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4813 
4814 	if (!irqchip_in_kernel(kvm) || !enable_ipiv)
4815 		return 0;
4816 
4817 	if (kvm_vmx->pid_table)
4818 		return 0;
4819 
4820 	pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
4821 			    vmx_get_pid_table_order(kvm));
4822 	if (!pages)
4823 		return -ENOMEM;
4824 
4825 	kvm_vmx->pid_table = (void *)page_address(pages);
4826 	return 0;
4827 }
4828 
vmx_vcpu_precreate(struct kvm * kvm)4829 int vmx_vcpu_precreate(struct kvm *kvm)
4830 {
4831 	return vmx_alloc_ipiv_pid_table(kvm);
4832 }
4833 
4834 #define VMX_XSS_EXIT_BITMAP 0
4835 
init_vmcs(struct vcpu_vmx * vmx)4836 static void init_vmcs(struct vcpu_vmx *vmx)
4837 {
4838 	struct kvm *kvm = vmx->vcpu.kvm;
4839 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4840 
4841 	if (nested)
4842 		nested_vmx_set_vmcs_shadowing_bitmap();
4843 
4844 	if (cpu_has_vmx_msr_bitmap())
4845 		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4846 
4847 	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */
4848 
4849 	/* Control */
4850 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4851 
4852 	exec_controls_set(vmx, vmx_exec_control(vmx));
4853 
4854 	if (cpu_has_secondary_exec_ctrls()) {
4855 		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
4856 		if (vmx->ve_info)
4857 			vmcs_write64(VE_INFORMATION_ADDRESS,
4858 				     __pa(vmx->ve_info));
4859 	}
4860 
4861 	if (cpu_has_tertiary_exec_ctrls())
4862 		tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
4863 
4864 	if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4865 		vmcs_write64(EOI_EXIT_BITMAP0, 0);
4866 		vmcs_write64(EOI_EXIT_BITMAP1, 0);
4867 		vmcs_write64(EOI_EXIT_BITMAP2, 0);
4868 		vmcs_write64(EOI_EXIT_BITMAP3, 0);
4869 
4870 		vmcs_write16(GUEST_INTR_STATUS, 0);
4871 
4872 		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4873 		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->vt.pi_desc)));
4874 	}
4875 
4876 	if (vmx_can_use_ipiv(&vmx->vcpu)) {
4877 		vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
4878 		vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
4879 	}
4880 
4881 	if (!kvm_pause_in_guest(kvm)) {
4882 		vmcs_write32(PLE_GAP, ple_gap);
4883 		vmx->ple_window = ple_window;
4884 		vmx->ple_window_dirty = true;
4885 	}
4886 
4887 	if (kvm_notify_vmexit_enabled(kvm))
4888 		vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
4889 
4890 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4891 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4892 	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
4893 
4894 	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
4895 	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
4896 	vmx_set_constant_host_state(vmx);
4897 	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4898 	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4899 
4900 	if (cpu_has_vmx_vmfunc())
4901 		vmcs_write64(VM_FUNCTION_CONTROL, 0);
4902 
4903 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4904 	vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.val));
4905 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4906 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4907 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4908 	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4909 
4910 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
4911 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4912 
4913 	vm_exit_controls_set(vmx, vmx_get_initial_vmexit_ctrl());
4914 
4915 	/* 22.2.1, 20.8.1 */
4916 	vm_entry_controls_set(vmx, vmx_get_initial_vmentry_ctrl());
4917 
4918 	vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4919 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4920 
4921 	set_cr4_guest_host_mask(vmx);
4922 
4923 	if (vmx->vpid != 0)
4924 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4925 
4926 	if (cpu_has_vmx_xsaves())
4927 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
4928 
4929 	if (enable_pml) {
4930 		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4931 		vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
4932 	}
4933 
4934 	vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4935 
4936 	if (vmx_pt_mode_is_host_guest()) {
4937 		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4938 		/* Bit[6~0] are forced to 1, writes are ignored. */
4939 		vmx->pt_desc.guest.output_mask = 0x7F;
4940 		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
4941 	}
4942 
4943 	vmcs_write32(GUEST_SYSENTER_CS, 0);
4944 	vmcs_writel(GUEST_SYSENTER_ESP, 0);
4945 	vmcs_writel(GUEST_SYSENTER_EIP, 0);
4946 
4947 	vmx_guest_debugctl_write(&vmx->vcpu, 0);
4948 
4949 	if (cpu_has_vmx_tpr_shadow()) {
4950 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4951 		if (cpu_need_tpr_shadow(&vmx->vcpu))
4952 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4953 				     __pa(vmx->vcpu.arch.apic->regs));
4954 		vmcs_write32(TPR_THRESHOLD, 0);
4955 	}
4956 
4957 	vmx_setup_uret_msrs(vmx);
4958 }
4959 
__vmx_vcpu_reset(struct kvm_vcpu * vcpu)4960 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4961 {
4962 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4963 
4964 	init_vmcs(vmx);
4965 
4966 	if (nested &&
4967 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
4968 		memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
4969 
4970 	vcpu_setup_sgx_lepubkeyhash(vcpu);
4971 
4972 	vmx->nested.posted_intr_nv = -1;
4973 	vmx->nested.vmxon_ptr = INVALID_GPA;
4974 	vmx->nested.current_vmptr = INVALID_GPA;
4975 
4976 #ifdef CONFIG_KVM_HYPERV
4977 	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
4978 #endif
4979 
4980 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
4981 		vcpu->arch.microcode_version = 0x100000000ULL;
4982 	vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
4983 
4984 	/*
4985 	 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
4986 	 * or POSTED_INTR_WAKEUP_VECTOR.
4987 	 */
4988 	vmx->vt.pi_desc.nv = POSTED_INTR_VECTOR;
4989 	__pi_set_sn(&vmx->vt.pi_desc);
4990 }
4991 
vmx_vcpu_reset(struct kvm_vcpu * vcpu,bool init_event)4992 void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4993 {
4994 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4995 
4996 	if (!init_event)
4997 		__vmx_vcpu_reset(vcpu);
4998 
4999 	vmx->rmode.vm86_active = 0;
5000 	vmx->spec_ctrl = 0;
5001 
5002 	vmx->msr_ia32_umwait_control = 0;
5003 
5004 	vmx->hv_deadline_tsc = -1;
5005 	kvm_set_cr8(vcpu, 0);
5006 
5007 	seg_setup(VCPU_SREG_CS);
5008 	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
5009 	vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
5010 
5011 	seg_setup(VCPU_SREG_DS);
5012 	seg_setup(VCPU_SREG_ES);
5013 	seg_setup(VCPU_SREG_FS);
5014 	seg_setup(VCPU_SREG_GS);
5015 	seg_setup(VCPU_SREG_SS);
5016 
5017 	vmcs_write16(GUEST_TR_SELECTOR, 0);
5018 	vmcs_writel(GUEST_TR_BASE, 0);
5019 	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
5020 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
5021 
5022 	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
5023 	vmcs_writel(GUEST_LDTR_BASE, 0);
5024 	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
5025 	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
5026 
5027 	vmcs_writel(GUEST_GDTR_BASE, 0);
5028 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
5029 
5030 	vmcs_writel(GUEST_IDTR_BASE, 0);
5031 	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
5032 
5033 	vmx_segment_cache_clear(vmx);
5034 	kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS);
5035 
5036 	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
5037 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
5038 	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
5039 	if (kvm_mpx_supported())
5040 		vmcs_write64(GUEST_BNDCFGS, 0);
5041 
5042 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
5043 
5044 	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
5045 		vmcs_writel(GUEST_SSP, 0);
5046 		vmcs_writel(GUEST_INTR_SSP_TABLE, 0);
5047 	}
5048 	if (kvm_cpu_cap_has(X86_FEATURE_IBT) ||
5049 	    kvm_cpu_cap_has(X86_FEATURE_SHSTK))
5050 		vmcs_writel(GUEST_S_CET, 0);
5051 
5052 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
5053 
5054 	vpid_sync_context(vmx->vpid);
5055 
5056 	vmx_update_fb_clear_dis(vcpu, vmx);
5057 }
5058 
vmx_enable_irq_window(struct kvm_vcpu * vcpu)5059 void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
5060 {
5061 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5062 }
5063 
vmx_enable_nmi_window(struct kvm_vcpu * vcpu)5064 void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
5065 {
5066 	if (!enable_vnmi ||
5067 	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
5068 		vmx_enable_irq_window(vcpu);
5069 		return;
5070 	}
5071 
5072 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5073 }
5074 
vmx_inject_irq(struct kvm_vcpu * vcpu,bool reinjected)5075 void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
5076 {
5077 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5078 	uint32_t intr;
5079 	int irq = vcpu->arch.interrupt.nr;
5080 
5081 	trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
5082 
5083 	++vcpu->stat.irq_injections;
5084 	if (vmx->rmode.vm86_active) {
5085 		int inc_eip = 0;
5086 		if (vcpu->arch.interrupt.soft)
5087 			inc_eip = vcpu->arch.event_exit_inst_len;
5088 		kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
5089 		return;
5090 	}
5091 	intr = irq | INTR_INFO_VALID_MASK;
5092 	if (vcpu->arch.interrupt.soft) {
5093 		intr |= INTR_TYPE_SOFT_INTR;
5094 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
5095 			     vmx->vcpu.arch.event_exit_inst_len);
5096 	} else
5097 		intr |= INTR_TYPE_EXT_INTR;
5098 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
5099 
5100 	vmx_clear_hlt(vcpu);
5101 }
5102 
vmx_inject_nmi(struct kvm_vcpu * vcpu)5103 void vmx_inject_nmi(struct kvm_vcpu *vcpu)
5104 {
5105 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5106 
5107 	if (!enable_vnmi) {
5108 		/*
5109 		 * Tracking the NMI-blocked state in software is built upon
5110 		 * finding the next open IRQ window. This, in turn, depends on
5111 		 * well-behaving guests: They have to keep IRQs disabled at
5112 		 * least as long as the NMI handler runs. Otherwise we may
5113 		 * cause NMI nesting, maybe breaking the guest. But as this is
5114 		 * highly unlikely, we can live with the residual risk.
5115 		 */
5116 		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
5117 		vmx->loaded_vmcs->vnmi_blocked_time = 0;
5118 	}
5119 
5120 	++vcpu->stat.nmi_injections;
5121 	vmx->loaded_vmcs->nmi_known_unmasked = false;
5122 
5123 	if (vmx->rmode.vm86_active) {
5124 		kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
5125 		return;
5126 	}
5127 
5128 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
5129 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
5130 
5131 	vmx_clear_hlt(vcpu);
5132 }
5133 
vmx_get_nmi_mask(struct kvm_vcpu * vcpu)5134 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
5135 {
5136 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5137 	bool masked;
5138 
5139 	if (!enable_vnmi)
5140 		return vmx->loaded_vmcs->soft_vnmi_blocked;
5141 	if (vmx->loaded_vmcs->nmi_known_unmasked)
5142 		return false;
5143 	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5144 	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5145 	return masked;
5146 }
5147 
vmx_set_nmi_mask(struct kvm_vcpu * vcpu,bool masked)5148 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5149 {
5150 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5151 
5152 	if (!enable_vnmi) {
5153 		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
5154 			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
5155 			vmx->loaded_vmcs->vnmi_blocked_time = 0;
5156 		}
5157 	} else {
5158 		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5159 		if (masked)
5160 			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5161 				      GUEST_INTR_STATE_NMI);
5162 		else
5163 			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
5164 					GUEST_INTR_STATE_NMI);
5165 	}
5166 }
5167 
vmx_nmi_blocked(struct kvm_vcpu * vcpu)5168 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
5169 {
5170 	if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5171 		return false;
5172 
5173 	if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
5174 		return true;
5175 
5176 	return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5177 		(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5178 		 GUEST_INTR_STATE_NMI));
5179 }
5180 
vmx_nmi_allowed(struct kvm_vcpu * vcpu,bool for_injection)5181 int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5182 {
5183 	if (vcpu->arch.nested_run_pending)
5184 		return -EBUSY;
5185 
5186 	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
5187 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5188 		return -EBUSY;
5189 
5190 	return !vmx_nmi_blocked(vcpu);
5191 }
5192 
__vmx_interrupt_blocked(struct kvm_vcpu * vcpu)5193 bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5194 {
5195 	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5196 	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5197 		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5198 }
5199 
vmx_interrupt_blocked(struct kvm_vcpu * vcpu)5200 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5201 {
5202 	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5203 		return false;
5204 
5205 	return __vmx_interrupt_blocked(vcpu);
5206 }
5207 
vmx_interrupt_allowed(struct kvm_vcpu * vcpu,bool for_injection)5208 int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5209 {
5210 	if (vcpu->arch.nested_run_pending)
5211 		return -EBUSY;
5212 
5213 	/*
5214 	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
5215 	 * e.g. if the IRQ arrived asynchronously after checking nested events.
5216 	 */
5217 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5218 		return -EBUSY;
5219 
5220 	return !vmx_interrupt_blocked(vcpu);
5221 }
5222 
vmx_set_tss_addr(struct kvm * kvm,unsigned int addr)5223 int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
5224 {
5225 	void __user *ret;
5226 
5227 	if (enable_unrestricted_guest)
5228 		return 0;
5229 
5230 	mutex_lock(&kvm->slots_lock);
5231 	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
5232 				      PAGE_SIZE * 3);
5233 	mutex_unlock(&kvm->slots_lock);
5234 
5235 	if (IS_ERR(ret))
5236 		return PTR_ERR(ret);
5237 
5238 	to_kvm_vmx(kvm)->tss_addr = addr;
5239 
5240 	return init_rmode_tss(kvm, ret);
5241 }
5242 
vmx_set_identity_map_addr(struct kvm * kvm,u64 ident_addr)5243 int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5244 {
5245 	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
5246 	return 0;
5247 }
5248 
rmode_exception(struct kvm_vcpu * vcpu,int vec)5249 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
5250 {
5251 	switch (vec) {
5252 	case BP_VECTOR:
5253 		/*
5254 		 * Update instruction length as we may reinject the exception
5255 		 * from user space while in guest debugging mode.
5256 		 */
5257 		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5258 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5259 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5260 			return false;
5261 		fallthrough;
5262 	case DB_VECTOR:
5263 		return !(vcpu->guest_debug &
5264 			(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
5265 	case DE_VECTOR:
5266 	case OF_VECTOR:
5267 	case BR_VECTOR:
5268 	case UD_VECTOR:
5269 	case DF_VECTOR:
5270 	case SS_VECTOR:
5271 	case GP_VECTOR:
5272 	case MF_VECTOR:
5273 		return true;
5274 	}
5275 	return false;
5276 }
5277 
handle_rmode_exception(struct kvm_vcpu * vcpu,int vec,u32 err_code)5278 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
5279 				  int vec, u32 err_code)
5280 {
5281 	/*
5282 	 * Instruction with address size override prefix opcode 0x67
5283 	 * Cause the #SS fault with 0 error code in VM86 mode.
5284 	 */
5285 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
5286 		if (kvm_emulate_instruction(vcpu, 0)) {
5287 			if (vcpu->arch.halt_request) {
5288 				vcpu->arch.halt_request = 0;
5289 				return kvm_emulate_halt_noskip(vcpu);
5290 			}
5291 			return 1;
5292 		}
5293 		return 0;
5294 	}
5295 
5296 	/*
5297 	 * Forward all other exceptions that are valid in real mode.
5298 	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
5299 	 *        the required debugging infrastructure rework.
5300 	 */
5301 	kvm_queue_exception(vcpu, vec);
5302 	return 1;
5303 }
5304 
handle_machine_check(struct kvm_vcpu * vcpu)5305 static int handle_machine_check(struct kvm_vcpu *vcpu)
5306 {
5307 	/* handled by vmx_vcpu_run() */
5308 	return 1;
5309 }
5310 
5311 /*
5312  * If the host has split lock detection disabled, then #AC is
5313  * unconditionally injected into the guest, which is the pre split lock
5314  * detection behaviour.
5315  *
5316  * If the host has split lock detection enabled then #AC is
5317  * only injected into the guest when:
5318  *  - Guest CPL == 3 (user mode)
5319  *  - Guest has #AC detection enabled in CR0
5320  *  - Guest EFLAGS has AC bit set
5321  */
vmx_guest_inject_ac(struct kvm_vcpu * vcpu)5322 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
5323 {
5324 	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
5325 		return true;
5326 
5327 	return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
5328 	       (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
5329 }
5330 
is_xfd_nm_fault(struct kvm_vcpu * vcpu)5331 static bool is_xfd_nm_fault(struct kvm_vcpu *vcpu)
5332 {
5333 	return vcpu->arch.guest_fpu.fpstate->xfd &&
5334 	       !kvm_is_cr0_bit_set(vcpu, X86_CR0_TS);
5335 }
5336 
vmx_handle_page_fault(struct kvm_vcpu * vcpu,u32 error_code)5337 static int vmx_handle_page_fault(struct kvm_vcpu *vcpu, u32 error_code)
5338 {
5339 	unsigned long cr2 = vmx_get_exit_qual(vcpu);
5340 
5341 	if (vcpu->arch.apf.host_apf_flags)
5342 		goto handle_pf;
5343 
5344 	/* When using EPT, KVM intercepts #PF only to detect illegal GPAs. */
5345 	WARN_ON_ONCE(enable_ept && !allow_smaller_maxphyaddr);
5346 
5347 	/*
5348 	 * On SGX2 hardware, EPCM violations are delivered as #PF with the SGX
5349 	 * flag set in the error code (SGX1 hardware generates #GP(0)).  EPCM
5350 	 * violations have nothing to do with shadow paging and can never be
5351 	 * resolved by KVM; always reflect them into the guest.
5352 	 */
5353 	if (error_code & PFERR_SGX_MASK) {
5354 		WARN_ON_ONCE(!IS_ENABLED(CONFIG_X86_SGX_KVM) ||
5355 			     !cpu_feature_enabled(X86_FEATURE_SGX2));
5356 
5357 		if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX2))
5358 			kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
5359 		else
5360 			kvm_inject_gp(vcpu, 0);
5361 		return 1;
5362 	}
5363 
5364 	/*
5365 	 * If EPT is enabled, fixup and inject the #PF.  KVM intercepts #PFs
5366 	 * only to set PFERR_RSVD as appropriate (hardware won't set RSVD due
5367 	 * to the GPA being legal with respect to host.MAXPHYADDR).
5368 	 */
5369 	if (enable_ept) {
5370 		kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
5371 		return 1;
5372 	}
5373 
5374 handle_pf:
5375 	return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
5376 }
5377 
handle_exception_nmi(struct kvm_vcpu * vcpu)5378 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
5379 {
5380 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5381 	struct kvm_run *kvm_run = vcpu->run;
5382 	u32 intr_info, ex_no, error_code;
5383 	unsigned long dr6;
5384 	u32 vect_info;
5385 
5386 	vect_info = vmx->idt_vectoring_info;
5387 	intr_info = vmx_get_intr_info(vcpu);
5388 
5389 	/*
5390 	 * Machine checks are handled by handle_exception_irqoff(), or by
5391 	 * vmx_vcpu_run() if a #MC occurs on VM-Entry.  NMIs are handled by
5392 	 * vmx_vcpu_enter_exit().
5393 	 */
5394 	if (is_machine_check(intr_info) || is_nmi(intr_info))
5395 		return 1;
5396 
5397 	/*
5398 	 * Queue the exception here instead of in handle_nm_fault_irqoff().
5399 	 * This ensures the nested_vmx check is not skipped so vmexit can
5400 	 * be reflected to L1 (when it intercepts #NM) before reaching this
5401 	 * point.
5402 	 */
5403 	if (is_nm_fault(intr_info)) {
5404 		kvm_queue_exception_p(vcpu, NM_VECTOR,
5405 				      is_xfd_nm_fault(vcpu) ? vcpu->arch.guest_fpu.xfd_err : 0);
5406 		return 1;
5407 	}
5408 
5409 	if (is_invalid_opcode(intr_info))
5410 		return handle_ud(vcpu);
5411 
5412 	if (WARN_ON_ONCE(is_ve_fault(intr_info))) {
5413 		struct vmx_ve_information *ve_info = vmx->ve_info;
5414 
5415 		WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION,
5416 			  "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason);
5417 		dump_vmcs(vcpu);
5418 		kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE");
5419 		return 1;
5420 	}
5421 
5422 	error_code = 0;
5423 	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
5424 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5425 
5426 	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
5427 		WARN_ON_ONCE(!enable_vmware_backdoor);
5428 
5429 		/*
5430 		 * VMware backdoor emulation on #GP interception only handles
5431 		 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
5432 		 * error code on #GP.
5433 		 */
5434 		if (error_code) {
5435 			kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
5436 			return 1;
5437 		}
5438 		return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
5439 	}
5440 
5441 	/*
5442 	 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
5443 	 * MMIO, it is better to report an internal error.
5444 	 * See the comments in vmx_handle_exit.
5445 	 */
5446 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
5447 	    !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
5448 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5449 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5450 		vcpu->run->internal.ndata = 4;
5451 		vcpu->run->internal.data[0] = vect_info;
5452 		vcpu->run->internal.data[1] = intr_info;
5453 		vcpu->run->internal.data[2] = error_code;
5454 		vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5455 		return 0;
5456 	}
5457 
5458 	if (is_page_fault(intr_info))
5459 		return vmx_handle_page_fault(vcpu, error_code);
5460 
5461 	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
5462 
5463 	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5464 		return handle_rmode_exception(vcpu, ex_no, error_code);
5465 
5466 	switch (ex_no) {
5467 	case DB_VECTOR:
5468 		dr6 = vmx_get_exit_qual(vcpu);
5469 		if (!(vcpu->guest_debug &
5470 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5471 			/*
5472 			 * If the #DB was due to ICEBP, a.k.a. INT1, skip the
5473 			 * instruction.  ICEBP generates a trap-like #DB, but
5474 			 * despite its interception control being tied to #DB,
5475 			 * is an instruction intercept, i.e. the VM-Exit occurs
5476 			 * on the ICEBP itself.  Use the inner "skip" helper to
5477 			 * avoid single-step #DB and MTF updates, as ICEBP is
5478 			 * higher priority.  Note, skipping ICEBP still clears
5479 			 * STI and MOVSS blocking.
5480 			 *
5481 			 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
5482 			 * if single-step is enabled in RFLAGS and STI or MOVSS
5483 			 * blocking is active, as the CPU doesn't set the bit
5484 			 * on VM-Exit due to #DB interception.  VM-Entry has a
5485 			 * consistency check that a single-step #DB is pending
5486 			 * in this scenario as the previous instruction cannot
5487 			 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
5488 			 * don't modify RFLAGS), therefore the one instruction
5489 			 * delay when activating single-step breakpoints must
5490 			 * have already expired.  Note, the CPU sets/clears BS
5491 			 * as appropriate for all other VM-Exits types.
5492 			 */
5493 			if (is_icebp(intr_info))
5494 				WARN_ON(!skip_emulated_instruction(vcpu));
5495 			else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
5496 				 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5497 				  (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
5498 				vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
5499 					    vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
5500 
5501 			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
5502 			return 1;
5503 		}
5504 		kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
5505 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5506 		fallthrough;
5507 	case BP_VECTOR:
5508 		/*
5509 		 * Update instruction length as we may reinject #BP from
5510 		 * user space while in guest debugging mode. Reading it for
5511 		 * #DB as well causes no harm, it is not used in that case.
5512 		 */
5513 		vmx->vcpu.arch.event_exit_inst_len =
5514 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5515 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
5516 		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5517 		kvm_run->debug.arch.exception = ex_no;
5518 		break;
5519 	case AC_VECTOR:
5520 		if (vmx_guest_inject_ac(vcpu)) {
5521 			kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
5522 			return 1;
5523 		}
5524 
5525 		/*
5526 		 * Handle split lock. Depending on detection mode this will
5527 		 * either warn and disable split lock detection for this
5528 		 * task or force SIGBUS on it.
5529 		 */
5530 		if (handle_guest_split_lock(kvm_rip_read(vcpu)))
5531 			return 1;
5532 		fallthrough;
5533 	default:
5534 		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
5535 		kvm_run->ex.exception = ex_no;
5536 		kvm_run->ex.error_code = error_code;
5537 		break;
5538 	}
5539 	return 0;
5540 }
5541 
handle_external_interrupt(struct kvm_vcpu * vcpu)5542 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
5543 {
5544 	++vcpu->stat.irq_exits;
5545 	return 1;
5546 }
5547 
handle_triple_fault(struct kvm_vcpu * vcpu)5548 static int handle_triple_fault(struct kvm_vcpu *vcpu)
5549 {
5550 	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5551 	vcpu->mmio_needed = 0;
5552 	return 0;
5553 }
5554 
handle_io(struct kvm_vcpu * vcpu)5555 static int handle_io(struct kvm_vcpu *vcpu)
5556 {
5557 	unsigned long exit_qualification;
5558 	int size, in, string;
5559 	unsigned port;
5560 
5561 	exit_qualification = vmx_get_exit_qual(vcpu);
5562 	string = (exit_qualification & 16) != 0;
5563 
5564 	++vcpu->stat.io_exits;
5565 
5566 	if (string)
5567 		return kvm_emulate_instruction(vcpu, 0);
5568 
5569 	port = exit_qualification >> 16;
5570 	size = (exit_qualification & 7) + 1;
5571 	in = (exit_qualification & 8) != 0;
5572 
5573 	return kvm_fast_pio(vcpu, size, port, in);
5574 }
5575 
vmx_patch_hypercall(struct kvm_vcpu * vcpu,unsigned char * hypercall)5576 void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5577 {
5578 	/*
5579 	 * Patch in the VMCALL instruction:
5580 	 */
5581 	hypercall[0] = 0x0f;
5582 	hypercall[1] = 0x01;
5583 	hypercall[2] = 0xc1;
5584 }
5585 
5586 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
handle_set_cr0(struct kvm_vcpu * vcpu,unsigned long val)5587 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5588 {
5589 	if (is_guest_mode(vcpu)) {
5590 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5591 		unsigned long orig_val = val;
5592 
5593 		/*
5594 		 * We get here when L2 changed cr0 in a way that did not change
5595 		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5596 		 * but did change L0 shadowed bits. So we first calculate the
5597 		 * effective cr0 value that L1 would like to write into the
5598 		 * hardware. It consists of the L2-owned bits from the new
5599 		 * value combined with the L1-owned bits from L1's guest_cr0.
5600 		 */
5601 		val = (val & ~vmcs12->cr0_guest_host_mask) |
5602 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5603 
5604 		if (kvm_set_cr0(vcpu, val))
5605 			return 1;
5606 		vmcs_writel(CR0_READ_SHADOW, orig_val);
5607 		return 0;
5608 	} else {
5609 		return kvm_set_cr0(vcpu, val);
5610 	}
5611 }
5612 
handle_set_cr4(struct kvm_vcpu * vcpu,unsigned long val)5613 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5614 {
5615 	if (is_guest_mode(vcpu)) {
5616 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5617 		unsigned long orig_val = val;
5618 
5619 		/* analogously to handle_set_cr0 */
5620 		val = (val & ~vmcs12->cr4_guest_host_mask) |
5621 			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5622 		if (kvm_set_cr4(vcpu, val))
5623 			return 1;
5624 		vmcs_writel(CR4_READ_SHADOW, orig_val);
5625 		return 0;
5626 	} else
5627 		return kvm_set_cr4(vcpu, val);
5628 }
5629 
handle_desc(struct kvm_vcpu * vcpu)5630 static int handle_desc(struct kvm_vcpu *vcpu)
5631 {
5632 	/*
5633 	 * UMIP emulation relies on intercepting writes to CR4.UMIP, i.e. this
5634 	 * and other code needs to be updated if UMIP can be guest owned.
5635 	 */
5636 	BUILD_BUG_ON(KVM_POSSIBLE_CR4_GUEST_BITS & X86_CR4_UMIP);
5637 
5638 	WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu, X86_CR4_UMIP));
5639 	return kvm_emulate_instruction(vcpu, 0);
5640 }
5641 
handle_cr(struct kvm_vcpu * vcpu)5642 static int handle_cr(struct kvm_vcpu *vcpu)
5643 {
5644 	unsigned long exit_qualification, val;
5645 	int cr;
5646 	int reg;
5647 	int err;
5648 	int ret;
5649 
5650 	exit_qualification = vmx_get_exit_qual(vcpu);
5651 	cr = exit_qualification & 15;
5652 	reg = (exit_qualification >> 8) & 15;
5653 	switch ((exit_qualification >> 4) & 3) {
5654 	case 0: /* mov to cr */
5655 		val = kvm_register_read(vcpu, reg);
5656 		trace_kvm_cr_write(cr, val);
5657 		switch (cr) {
5658 		case 0:
5659 			err = handle_set_cr0(vcpu, val);
5660 			return kvm_complete_insn_gp(vcpu, err);
5661 		case 3:
5662 			WARN_ON_ONCE(enable_unrestricted_guest);
5663 
5664 			err = kvm_set_cr3(vcpu, val);
5665 			return kvm_complete_insn_gp(vcpu, err);
5666 		case 4:
5667 			err = handle_set_cr4(vcpu, val);
5668 			return kvm_complete_insn_gp(vcpu, err);
5669 		case 8: {
5670 				u8 cr8_prev = kvm_get_cr8(vcpu);
5671 				u8 cr8 = (u8)val;
5672 				err = kvm_set_cr8(vcpu, cr8);
5673 				ret = kvm_complete_insn_gp(vcpu, err);
5674 				if (lapic_in_kernel(vcpu))
5675 					return ret;
5676 				if (cr8_prev <= cr8)
5677 					return ret;
5678 				/*
5679 				 * TODO: we might be squashing a
5680 				 * KVM_GUESTDBG_SINGLESTEP-triggered
5681 				 * KVM_EXIT_DEBUG here.
5682 				 */
5683 				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5684 				return 0;
5685 			}
5686 		}
5687 		break;
5688 	case 2: /* clts */
5689 		KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
5690 		return -EIO;
5691 	case 1: /*mov from cr*/
5692 		switch (cr) {
5693 		case 3:
5694 			WARN_ON_ONCE(enable_unrestricted_guest);
5695 
5696 			val = kvm_read_cr3(vcpu);
5697 			kvm_register_write(vcpu, reg, val);
5698 			trace_kvm_cr_read(cr, val);
5699 			return kvm_skip_emulated_instruction(vcpu);
5700 		case 8:
5701 			val = kvm_get_cr8(vcpu);
5702 			kvm_register_write(vcpu, reg, val);
5703 			trace_kvm_cr_read(cr, val);
5704 			return kvm_skip_emulated_instruction(vcpu);
5705 		}
5706 		break;
5707 	case 3: /* lmsw */
5708 		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5709 		trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
5710 		kvm_lmsw(vcpu, val);
5711 
5712 		return kvm_skip_emulated_instruction(vcpu);
5713 	default:
5714 		break;
5715 	}
5716 	vcpu->run->exit_reason = 0;
5717 	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5718 	       (int)(exit_qualification >> 4) & 3, cr);
5719 	return 0;
5720 }
5721 
handle_dr(struct kvm_vcpu * vcpu)5722 static int handle_dr(struct kvm_vcpu *vcpu)
5723 {
5724 	unsigned long exit_qualification;
5725 	int dr, dr7, reg;
5726 	int err = 1;
5727 
5728 	exit_qualification = vmx_get_exit_qual(vcpu);
5729 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5730 
5731 	/* First, if DR does not exist, trigger UD */
5732 	if (!kvm_require_dr(vcpu, dr))
5733 		return 1;
5734 
5735 	if (vmx_get_cpl(vcpu) > 0)
5736 		goto out;
5737 
5738 	dr7 = vmcs_readl(GUEST_DR7);
5739 	if (dr7 & DR7_GD) {
5740 		/*
5741 		 * As the vm-exit takes precedence over the debug trap, we
5742 		 * need to emulate the latter, either for the host or the
5743 		 * guest debugging itself.
5744 		 */
5745 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5746 			vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5747 			vcpu->run->debug.arch.dr7 = dr7;
5748 			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5749 			vcpu->run->debug.arch.exception = DB_VECTOR;
5750 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5751 			return 0;
5752 		} else {
5753 			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
5754 			return 1;
5755 		}
5756 	}
5757 
5758 	if (vcpu->guest_debug == 0) {
5759 		exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5760 
5761 		/*
5762 		 * No more DR vmexits; force a reload of the debug registers
5763 		 * and reenter on this instruction.  The next vmexit will
5764 		 * retrieve the full state of the debug registers.
5765 		 */
5766 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5767 		return 1;
5768 	}
5769 
5770 	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5771 	if (exit_qualification & TYPE_MOV_FROM_DR) {
5772 		kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
5773 		err = 0;
5774 	} else {
5775 		err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
5776 	}
5777 
5778 out:
5779 	return kvm_complete_insn_gp(vcpu, err);
5780 }
5781 
vmx_sync_dirty_debug_regs(struct kvm_vcpu * vcpu)5782 void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5783 {
5784 	get_debugreg(vcpu->arch.db[0], 0);
5785 	get_debugreg(vcpu->arch.db[1], 1);
5786 	get_debugreg(vcpu->arch.db[2], 2);
5787 	get_debugreg(vcpu->arch.db[3], 3);
5788 	get_debugreg(vcpu->arch.dr6, 6);
5789 	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5790 
5791 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5792 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5793 
5794 	/*
5795 	 * exc_debug expects dr6 to be cleared after it runs, avoid that it sees
5796 	 * a stale dr6 from the guest.
5797 	 */
5798 	set_debugreg(DR6_RESERVED, 6);
5799 }
5800 
vmx_set_dr7(struct kvm_vcpu * vcpu,unsigned long val)5801 void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5802 {
5803 	vmcs_writel(GUEST_DR7, val);
5804 }
5805 
handle_tpr_below_threshold(struct kvm_vcpu * vcpu)5806 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5807 {
5808 	kvm_apic_update_ppr(vcpu);
5809 	return 1;
5810 }
5811 
handle_interrupt_window(struct kvm_vcpu * vcpu)5812 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5813 {
5814 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5815 
5816 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5817 
5818 	++vcpu->stat.irq_window_exits;
5819 	return 1;
5820 }
5821 
handle_invlpg(struct kvm_vcpu * vcpu)5822 static int handle_invlpg(struct kvm_vcpu *vcpu)
5823 {
5824 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5825 
5826 	kvm_mmu_invlpg(vcpu, exit_qualification);
5827 	return kvm_skip_emulated_instruction(vcpu);
5828 }
5829 
handle_apic_access(struct kvm_vcpu * vcpu)5830 static int handle_apic_access(struct kvm_vcpu *vcpu)
5831 {
5832 	if (likely(fasteoi)) {
5833 		unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5834 		int access_type, offset;
5835 
5836 		access_type = exit_qualification & APIC_ACCESS_TYPE;
5837 		offset = exit_qualification & APIC_ACCESS_OFFSET;
5838 		/*
5839 		 * Sane guest uses MOV to write EOI, with written value
5840 		 * not cared. So make a short-circuit here by avoiding
5841 		 * heavy instruction emulation.
5842 		 */
5843 		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5844 		    (offset == APIC_EOI)) {
5845 			kvm_lapic_set_eoi(vcpu);
5846 			return kvm_skip_emulated_instruction(vcpu);
5847 		}
5848 	}
5849 	return kvm_emulate_instruction(vcpu, 0);
5850 }
5851 
handle_apic_eoi_induced(struct kvm_vcpu * vcpu)5852 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5853 {
5854 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5855 	int vector = exit_qualification & 0xff;
5856 
5857 	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5858 	kvm_apic_set_eoi_accelerated(vcpu, vector);
5859 	return 1;
5860 }
5861 
handle_apic_write(struct kvm_vcpu * vcpu)5862 static int handle_apic_write(struct kvm_vcpu *vcpu)
5863 {
5864 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5865 
5866 	/*
5867 	 * APIC-write VM-Exit is trap-like, KVM doesn't need to advance RIP and
5868 	 * hardware has done any necessary aliasing, offset adjustments, etc...
5869 	 * for the access.  I.e. the correct value has already been  written to
5870 	 * the vAPIC page for the correct 16-byte chunk.  KVM needs only to
5871 	 * retrieve the register value and emulate the access.
5872 	 */
5873 	u32 offset = exit_qualification & 0xff0;
5874 
5875 	kvm_apic_write_nodecode(vcpu, offset);
5876 	return 1;
5877 }
5878 
handle_task_switch(struct kvm_vcpu * vcpu)5879 static int handle_task_switch(struct kvm_vcpu *vcpu)
5880 {
5881 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5882 	unsigned long exit_qualification;
5883 	bool has_error_code = false;
5884 	u32 error_code = 0;
5885 	u16 tss_selector;
5886 	int reason, type, idt_v, idt_index;
5887 
5888 	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5889 	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5890 	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5891 
5892 	exit_qualification = vmx_get_exit_qual(vcpu);
5893 
5894 	reason = (u32)exit_qualification >> 30;
5895 	if (reason == TASK_SWITCH_GATE && idt_v) {
5896 		switch (type) {
5897 		case INTR_TYPE_NMI_INTR:
5898 			vcpu->arch.nmi_injected = false;
5899 			vmx_set_nmi_mask(vcpu, true);
5900 			break;
5901 		case INTR_TYPE_EXT_INTR:
5902 		case INTR_TYPE_SOFT_INTR:
5903 			kvm_clear_interrupt_queue(vcpu);
5904 			break;
5905 		case INTR_TYPE_HARD_EXCEPTION:
5906 			if (vmx->idt_vectoring_info &
5907 			    VECTORING_INFO_DELIVER_CODE_MASK) {
5908 				has_error_code = true;
5909 				error_code =
5910 					vmcs_read32(IDT_VECTORING_ERROR_CODE);
5911 			}
5912 			fallthrough;
5913 		case INTR_TYPE_SOFT_EXCEPTION:
5914 			kvm_clear_exception_queue(vcpu);
5915 			break;
5916 		default:
5917 			break;
5918 		}
5919 	}
5920 	tss_selector = exit_qualification;
5921 
5922 	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5923 		       type != INTR_TYPE_EXT_INTR &&
5924 		       type != INTR_TYPE_NMI_INTR))
5925 		WARN_ON(!skip_emulated_instruction(vcpu));
5926 
5927 	/*
5928 	 * TODO: What about debug traps on tss switch?
5929 	 *       Are we supposed to inject them and update dr6?
5930 	 */
5931 	return kvm_task_switch(vcpu, tss_selector,
5932 			       type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
5933 			       reason, has_error_code, error_code);
5934 }
5935 
handle_ept_violation(struct kvm_vcpu * vcpu)5936 static int handle_ept_violation(struct kvm_vcpu *vcpu)
5937 {
5938 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5939 	gpa_t gpa;
5940 
5941 	/*
5942 	 * EPT violation happened while executing iret from NMI,
5943 	 * "blocked by NMI" bit has to be set before next VM entry.
5944 	 * There are errata that may cause this bit to not be set:
5945 	 * AAK134, BY25.
5946 	 */
5947 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5948 			enable_vnmi &&
5949 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
5950 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5951 
5952 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5953 	trace_kvm_page_fault(vcpu, gpa, exit_qualification);
5954 
5955 	/*
5956 	 * Check that the GPA doesn't exceed physical memory limits, as that is
5957 	 * a guest page fault.  We have to emulate the instruction here, because
5958 	 * if the illegal address is that of a paging structure, then
5959 	 * EPT_VIOLATION_ACC_WRITE bit is set.  Alternatively, if supported we
5960 	 * would also use advanced VM-exit information for EPT violations to
5961 	 * reconstruct the page fault error code.
5962 	 */
5963 	if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
5964 		return kvm_emulate_instruction(vcpu, 0);
5965 
5966 	return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification);
5967 }
5968 
handle_ept_misconfig(struct kvm_vcpu * vcpu)5969 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5970 {
5971 	gpa_t gpa;
5972 
5973 	if (vmx_check_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
5974 		return 1;
5975 
5976 	/*
5977 	 * A nested guest cannot optimize MMIO vmexits, because we have an
5978 	 * nGPA here instead of the required GPA.
5979 	 */
5980 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5981 	if (!is_guest_mode(vcpu) &&
5982 	    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5983 		trace_kvm_fast_mmio(gpa);
5984 		return kvm_skip_emulated_instruction(vcpu);
5985 	}
5986 
5987 	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
5988 }
5989 
handle_nmi_window(struct kvm_vcpu * vcpu)5990 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5991 {
5992 	if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
5993 		return -EIO;
5994 
5995 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5996 	++vcpu->stat.nmi_window_exits;
5997 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5998 
5999 	return 1;
6000 }
6001 
6002 /*
6003  * Returns true if emulation is required (due to the vCPU having invalid state
6004  * with unsrestricted guest mode disabled) and KVM can't faithfully emulate the
6005  * current vCPU state.
6006  */
vmx_unhandleable_emulation_required(struct kvm_vcpu * vcpu)6007 static bool vmx_unhandleable_emulation_required(struct kvm_vcpu *vcpu)
6008 {
6009 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6010 
6011 	if (!vmx->vt.emulation_required)
6012 		return false;
6013 
6014 	/*
6015 	 * It is architecturally impossible for emulation to be required when a
6016 	 * nested VM-Enter is pending completion, as VM-Enter will VM-Fail if
6017 	 * guest state is invalid and unrestricted guest is disabled, i.e. KVM
6018 	 * should synthesize VM-Fail instead emulation L2 code.  This path is
6019 	 * only reachable if userspace modifies L2 guest state after KVM has
6020 	 * performed the nested VM-Enter consistency checks.
6021 	 */
6022 	if (vcpu->arch.nested_run_pending)
6023 		return true;
6024 
6025 	/*
6026 	 * KVM only supports emulating exceptions if the vCPU is in Real Mode.
6027 	 * If emulation is required, KVM can't perform a successful VM-Enter to
6028 	 * inject the exception.
6029 	 */
6030 	return !vmx->rmode.vm86_active &&
6031 	       (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
6032 }
6033 
handle_invalid_guest_state(struct kvm_vcpu * vcpu)6034 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
6035 {
6036 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6037 	bool intr_window_requested;
6038 	unsigned count = 130;
6039 
6040 	intr_window_requested = exec_controls_get(vmx) &
6041 				CPU_BASED_INTR_WINDOW_EXITING;
6042 
6043 	while (vmx->vt.emulation_required && count-- != 0) {
6044 		if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
6045 			return handle_interrupt_window(&vmx->vcpu);
6046 
6047 		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
6048 			return 1;
6049 
6050 		/*
6051 		 * Ensure that any updates to kvm->buses[] observed by the
6052 		 * previous instruction (emulated or otherwise) are also
6053 		 * visible to the instruction KVM is about to emulate.
6054 		 */
6055 		smp_rmb();
6056 
6057 		if (!kvm_emulate_instruction(vcpu, 0))
6058 			return 0;
6059 
6060 		if (vmx_unhandleable_emulation_required(vcpu)) {
6061 			kvm_prepare_emulation_failure_exit(vcpu);
6062 			return 0;
6063 		}
6064 
6065 		if (vcpu->arch.halt_request) {
6066 			vcpu->arch.halt_request = 0;
6067 			return kvm_emulate_halt_noskip(vcpu);
6068 		}
6069 
6070 		/*
6071 		 * Note, return 1 and not 0, vcpu_run() will invoke
6072 		 * xfer_to_guest_mode() which will create a proper return
6073 		 * code.
6074 		 */
6075 		if (__xfer_to_guest_mode_work_pending())
6076 			return 1;
6077 	}
6078 
6079 	return 1;
6080 }
6081 
vmx_vcpu_pre_run(struct kvm_vcpu * vcpu)6082 int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
6083 {
6084 	if (vmx_unhandleable_emulation_required(vcpu)) {
6085 		kvm_prepare_emulation_failure_exit(vcpu);
6086 		return 0;
6087 	}
6088 
6089 	return 1;
6090 }
6091 
6092 /*
6093  * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
6094  * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
6095  */
handle_pause(struct kvm_vcpu * vcpu)6096 static int handle_pause(struct kvm_vcpu *vcpu)
6097 {
6098 	if (!kvm_pause_in_guest(vcpu->kvm))
6099 		grow_ple_window(vcpu);
6100 
6101 	/*
6102 	 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
6103 	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
6104 	 * never set PAUSE_EXITING and just set PLE if supported,
6105 	 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
6106 	 */
6107 	kvm_vcpu_on_spin(vcpu, true);
6108 	return kvm_skip_emulated_instruction(vcpu);
6109 }
6110 
handle_monitor_trap(struct kvm_vcpu * vcpu)6111 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
6112 {
6113 	return 1;
6114 }
6115 
handle_invpcid(struct kvm_vcpu * vcpu)6116 static int handle_invpcid(struct kvm_vcpu *vcpu)
6117 {
6118 	u32 vmx_instruction_info;
6119 	unsigned long type;
6120 	gva_t gva;
6121 	struct {
6122 		u64 pcid;
6123 		u64 gla;
6124 	} operand;
6125 	int gpr_index;
6126 
6127 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) {
6128 		kvm_queue_exception(vcpu, UD_VECTOR);
6129 		return 1;
6130 	}
6131 
6132 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6133 	gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
6134 	type = kvm_register_read(vcpu, gpr_index);
6135 
6136 	/* According to the Intel instruction reference, the memory operand
6137 	 * is read even if it isn't needed (e.g., for type==all)
6138 	 */
6139 	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
6140 				vmx_instruction_info, false,
6141 				sizeof(operand), &gva))
6142 		return 1;
6143 
6144 	return kvm_handle_invpcid(vcpu, type, gva);
6145 }
6146 
handle_pml_full(struct kvm_vcpu * vcpu)6147 static int handle_pml_full(struct kvm_vcpu *vcpu)
6148 {
6149 	unsigned long exit_qualification;
6150 
6151 	trace_kvm_pml_full(vcpu->vcpu_id);
6152 
6153 	exit_qualification = vmx_get_exit_qual(vcpu);
6154 
6155 	/*
6156 	 * PML buffer FULL happened while executing iret from NMI,
6157 	 * "blocked by NMI" bit has to be set before next VM entry.
6158 	 */
6159 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
6160 			enable_vnmi &&
6161 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
6162 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6163 				GUEST_INTR_STATE_NMI);
6164 
6165 	/*
6166 	 * PML buffer already flushed at beginning of VMEXIT. Nothing to do
6167 	 * here.., and there's no userspace involvement needed for PML.
6168 	 */
6169 	return 1;
6170 }
6171 
handle_fastpath_preemption_timer(struct kvm_vcpu * vcpu,bool force_immediate_exit)6172 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu,
6173 						   bool force_immediate_exit)
6174 {
6175 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6176 
6177 	/*
6178 	 * In the *extremely* unlikely scenario that this is a spurious VM-Exit
6179 	 * due to the timer expiring while it was "soft" disabled, just eat the
6180 	 * exit and re-enter the guest.
6181 	 */
6182 	if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled))
6183 		return EXIT_FASTPATH_REENTER_GUEST;
6184 
6185 	/*
6186 	 * If the timer expired because KVM used it to force an immediate exit,
6187 	 * then mission accomplished.
6188 	 */
6189 	if (force_immediate_exit)
6190 		return EXIT_FASTPATH_EXIT_HANDLED;
6191 
6192 	/*
6193 	 * If L2 is active, go down the slow path as emulating the guest timer
6194 	 * expiration likely requires synthesizing a nested VM-Exit.
6195 	 */
6196 	if (is_guest_mode(vcpu))
6197 		return EXIT_FASTPATH_NONE;
6198 
6199 	kvm_lapic_expired_hv_timer(vcpu);
6200 	return EXIT_FASTPATH_REENTER_GUEST;
6201 }
6202 
handle_preemption_timer(struct kvm_vcpu * vcpu)6203 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
6204 {
6205 	/*
6206 	 * This non-fastpath handler is reached if and only if the preemption
6207 	 * timer was being used to emulate a guest timer while L2 is active.
6208 	 * All other scenarios are supposed to be handled in the fastpath.
6209 	 */
6210 	WARN_ON_ONCE(!is_guest_mode(vcpu));
6211 	kvm_lapic_expired_hv_timer(vcpu);
6212 	return 1;
6213 }
6214 
6215 /*
6216  * When nested=0, all VMX instruction VM Exits filter here.  The handlers
6217  * are overwritten by nested_vmx_hardware_setup() when nested=1.
6218  */
handle_vmx_instruction(struct kvm_vcpu * vcpu)6219 static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
6220 {
6221 	kvm_queue_exception(vcpu, UD_VECTOR);
6222 	return 1;
6223 }
6224 
handle_tdx_instruction(struct kvm_vcpu * vcpu)6225 static int handle_tdx_instruction(struct kvm_vcpu *vcpu)
6226 {
6227 	kvm_queue_exception(vcpu, UD_VECTOR);
6228 	return 1;
6229 }
6230 
6231 #ifndef CONFIG_X86_SGX_KVM
handle_encls(struct kvm_vcpu * vcpu)6232 static int handle_encls(struct kvm_vcpu *vcpu)
6233 {
6234 	/*
6235 	 * SGX virtualization is disabled.  There is no software enable bit for
6236 	 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
6237 	 * the guest from executing ENCLS (when SGX is supported by hardware).
6238 	 */
6239 	kvm_queue_exception(vcpu, UD_VECTOR);
6240 	return 1;
6241 }
6242 #endif /* CONFIG_X86_SGX_KVM */
6243 
handle_bus_lock_vmexit(struct kvm_vcpu * vcpu)6244 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
6245 {
6246 	/*
6247 	 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
6248 	 * VM-Exits. Unconditionally set the flag here and leave the handling to
6249 	 * vmx_handle_exit().
6250 	 */
6251 	to_vt(vcpu)->exit_reason.bus_lock_detected = true;
6252 	return 1;
6253 }
6254 
handle_notify(struct kvm_vcpu * vcpu)6255 static int handle_notify(struct kvm_vcpu *vcpu)
6256 {
6257 	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
6258 	bool context_invalid = exit_qual & NOTIFY_VM_CONTEXT_INVALID;
6259 
6260 	++vcpu->stat.notify_window_exits;
6261 
6262 	/*
6263 	 * Notify VM exit happened while executing iret from NMI,
6264 	 * "blocked by NMI" bit has to be set before next VM entry.
6265 	 */
6266 	if (enable_vnmi && (exit_qual & INTR_INFO_UNBLOCK_NMI))
6267 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6268 			      GUEST_INTR_STATE_NMI);
6269 
6270 	if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6271 	    context_invalid) {
6272 		vcpu->run->exit_reason = KVM_EXIT_NOTIFY;
6273 		vcpu->run->notify.flags = context_invalid ?
6274 					  KVM_NOTIFY_CONTEXT_INVALID : 0;
6275 		return 0;
6276 	}
6277 
6278 	return 1;
6279 }
6280 
vmx_get_msr_imm_reg(struct kvm_vcpu * vcpu)6281 static int vmx_get_msr_imm_reg(struct kvm_vcpu *vcpu)
6282 {
6283 	return vmx_get_instr_info_reg(vmcs_read32(VMX_INSTRUCTION_INFO));
6284 }
6285 
handle_rdmsr_imm(struct kvm_vcpu * vcpu)6286 static int handle_rdmsr_imm(struct kvm_vcpu *vcpu)
6287 {
6288 	return kvm_emulate_rdmsr_imm(vcpu, vmx_get_exit_qual(vcpu),
6289 				     vmx_get_msr_imm_reg(vcpu));
6290 }
6291 
handle_wrmsr_imm(struct kvm_vcpu * vcpu)6292 static int handle_wrmsr_imm(struct kvm_vcpu *vcpu)
6293 {
6294 	return kvm_emulate_wrmsr_imm(vcpu, vmx_get_exit_qual(vcpu),
6295 				     vmx_get_msr_imm_reg(vcpu));
6296 }
6297 
6298 /*
6299  * The exit handlers return 1 if the exit was handled fully and guest execution
6300  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
6301  * to be done to userspace and return 0.
6302  */
6303 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6304 	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception_nmi,
6305 	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
6306 	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
6307 	[EXIT_REASON_NMI_WINDOW]	      = handle_nmi_window,
6308 	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
6309 	[EXIT_REASON_CR_ACCESS]               = handle_cr,
6310 	[EXIT_REASON_DR_ACCESS]               = handle_dr,
6311 	[EXIT_REASON_CPUID]                   = kvm_emulate_cpuid,
6312 	[EXIT_REASON_MSR_READ]                = kvm_emulate_rdmsr,
6313 	[EXIT_REASON_MSR_WRITE]               = kvm_emulate_wrmsr,
6314 	[EXIT_REASON_INTERRUPT_WINDOW]        = handle_interrupt_window,
6315 	[EXIT_REASON_HLT]                     = kvm_emulate_halt,
6316 	[EXIT_REASON_INVD]		      = kvm_emulate_invd,
6317 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
6318 	[EXIT_REASON_RDPMC]                   = kvm_emulate_rdpmc,
6319 	[EXIT_REASON_VMCALL]                  = kvm_emulate_hypercall,
6320 	[EXIT_REASON_VMCLEAR]		      = handle_vmx_instruction,
6321 	[EXIT_REASON_VMLAUNCH]		      = handle_vmx_instruction,
6322 	[EXIT_REASON_VMPTRLD]		      = handle_vmx_instruction,
6323 	[EXIT_REASON_VMPTRST]		      = handle_vmx_instruction,
6324 	[EXIT_REASON_VMREAD]		      = handle_vmx_instruction,
6325 	[EXIT_REASON_VMRESUME]		      = handle_vmx_instruction,
6326 	[EXIT_REASON_VMWRITE]		      = handle_vmx_instruction,
6327 	[EXIT_REASON_VMOFF]		      = handle_vmx_instruction,
6328 	[EXIT_REASON_VMON]		      = handle_vmx_instruction,
6329 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
6330 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
6331 	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
6332 	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
6333 	[EXIT_REASON_WBINVD]                  = kvm_emulate_wbinvd,
6334 	[EXIT_REASON_XSETBV]                  = kvm_emulate_xsetbv,
6335 	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
6336 	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
6337 	[EXIT_REASON_GDTR_IDTR]		      = handle_desc,
6338 	[EXIT_REASON_LDTR_TR]		      = handle_desc,
6339 	[EXIT_REASON_EPT_VIOLATION]	      = handle_ept_violation,
6340 	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
6341 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
6342 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = kvm_emulate_mwait,
6343 	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
6344 	[EXIT_REASON_MONITOR_INSTRUCTION]     = kvm_emulate_monitor,
6345 	[EXIT_REASON_INVEPT]                  = handle_vmx_instruction,
6346 	[EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
6347 	[EXIT_REASON_RDRAND]                  = kvm_handle_invalid_op,
6348 	[EXIT_REASON_RDSEED]                  = kvm_handle_invalid_op,
6349 	[EXIT_REASON_PML_FULL]		      = handle_pml_full,
6350 	[EXIT_REASON_INVPCID]                 = handle_invpcid,
6351 	[EXIT_REASON_VMFUNC]		      = handle_vmx_instruction,
6352 	[EXIT_REASON_PREEMPTION_TIMER]	      = handle_preemption_timer,
6353 	[EXIT_REASON_ENCLS]		      = handle_encls,
6354 	[EXIT_REASON_BUS_LOCK]                = handle_bus_lock_vmexit,
6355 	[EXIT_REASON_NOTIFY]		      = handle_notify,
6356 	[EXIT_REASON_SEAMCALL]		      = handle_tdx_instruction,
6357 	[EXIT_REASON_TDCALL]		      = handle_tdx_instruction,
6358 	[EXIT_REASON_MSR_READ_IMM]            = handle_rdmsr_imm,
6359 	[EXIT_REASON_MSR_WRITE_IMM]           = handle_wrmsr_imm,
6360 };
6361 
6362 static const int kvm_vmx_max_exit_handlers =
6363 	ARRAY_SIZE(kvm_vmx_exit_handlers);
6364 
vmx_get_exit_info(struct kvm_vcpu * vcpu,u32 * reason,u64 * info1,u64 * info2,u32 * intr_info,u32 * error_code)6365 void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
6366 		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
6367 {
6368 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6369 
6370 	*reason = vmx->vt.exit_reason.full;
6371 	*info1 = vmx_get_exit_qual(vcpu);
6372 	if (!(vmx->vt.exit_reason.failed_vmentry)) {
6373 		*info2 = vmx->idt_vectoring_info;
6374 		*intr_info = vmx_get_intr_info(vcpu);
6375 		if (is_exception_with_error_code(*intr_info))
6376 			*error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6377 		else
6378 			*error_code = 0;
6379 	} else {
6380 		*info2 = 0;
6381 		*intr_info = 0;
6382 		*error_code = 0;
6383 	}
6384 }
6385 
vmx_get_entry_info(struct kvm_vcpu * vcpu,u32 * intr_info,u32 * error_code)6386 void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
6387 {
6388 	*intr_info = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
6389 	if (is_exception_with_error_code(*intr_info))
6390 		*error_code = vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE);
6391 	else
6392 		*error_code = 0;
6393 }
6394 
vmx_destroy_pml_buffer(struct vcpu_vmx * vmx)6395 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
6396 {
6397 	if (vmx->pml_pg) {
6398 		__free_page(vmx->pml_pg);
6399 		vmx->pml_pg = NULL;
6400 	}
6401 }
6402 
vmx_flush_pml_buffer(struct kvm_vcpu * vcpu)6403 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
6404 {
6405 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6406 	u16 pml_idx, pml_tail_index;
6407 	u64 *pml_buf;
6408 	int i;
6409 
6410 	pml_idx = vmcs_read16(GUEST_PML_INDEX);
6411 
6412 	/* Do nothing if PML buffer is empty */
6413 	if (pml_idx == PML_HEAD_INDEX)
6414 		return;
6415 	/*
6416 	 * PML index always points to the next available PML buffer entity
6417 	 * unless PML log has just overflowed.
6418 	 */
6419 	pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;
6420 
6421 	/*
6422 	 * PML log is written backwards: the CPU first writes the entry 511
6423 	 * then the entry 510, and so on.
6424 	 *
6425 	 * Read the entries in the same order they were written, to ensure that
6426 	 * the dirty ring is filled in the same order the CPU wrote them.
6427 	 */
6428 	pml_buf = page_address(vmx->pml_pg);
6429 
6430 	for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
6431 		u64 gpa;
6432 
6433 		gpa = pml_buf[i];
6434 		WARN_ON(gpa & (PAGE_SIZE - 1));
6435 		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
6436 	}
6437 
6438 	/* reset PML index */
6439 	vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
6440 }
6441 
nested_vmx_mark_all_vmcs12_pages_dirty(struct kvm_vcpu * vcpu)6442 static void nested_vmx_mark_all_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
6443 {
6444 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6445 
6446 	kvm_vcpu_map_mark_dirty(vcpu, &vmx->nested.apic_access_page_map);
6447 	kvm_vcpu_map_mark_dirty(vcpu, &vmx->nested.virtual_apic_map);
6448 	kvm_vcpu_map_mark_dirty(vcpu, &vmx->nested.pi_desc_map);
6449 }
6450 
vmx_dump_sel(char * name,uint32_t sel)6451 static void vmx_dump_sel(char *name, uint32_t sel)
6452 {
6453 	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
6454 	       name, vmcs_read16(sel),
6455 	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
6456 	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
6457 	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
6458 }
6459 
vmx_dump_dtsel(char * name,uint32_t limit)6460 static void vmx_dump_dtsel(char *name, uint32_t limit)
6461 {
6462 	pr_err("%s                           limit=0x%08x, base=0x%016lx\n",
6463 	       name, vmcs_read32(limit),
6464 	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
6465 }
6466 
vmx_dump_msrs(char * name,struct vmx_msrs * m)6467 static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
6468 {
6469 	unsigned int i;
6470 	struct vmx_msr_entry *e;
6471 
6472 	pr_err("MSR %s:\n", name);
6473 	for (i = 0, e = m->val; i < m->nr; ++i, ++e)
6474 		pr_err("  %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value);
6475 }
6476 
dump_vmcs(struct kvm_vcpu * vcpu)6477 void dump_vmcs(struct kvm_vcpu *vcpu)
6478 {
6479 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6480 	u32 vmentry_ctl, vmexit_ctl;
6481 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
6482 	u64 tertiary_exec_control;
6483 	unsigned long cr4;
6484 	int efer_slot;
6485 
6486 	if (!dump_invalid_vmcs) {
6487 		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6488 		return;
6489 	}
6490 
6491 	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
6492 	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
6493 	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6494 	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
6495 	cr4 = vmcs_readl(GUEST_CR4);
6496 
6497 	if (cpu_has_secondary_exec_ctrls())
6498 		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6499 	else
6500 		secondary_exec_control = 0;
6501 
6502 	if (cpu_has_tertiary_exec_ctrls())
6503 		tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
6504 	else
6505 		tertiary_exec_control = 0;
6506 
6507 	pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6508 	       vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6509 	pr_err("*** Guest State ***\n");
6510 	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6511 	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
6512 	       vmcs_readl(CR0_GUEST_HOST_MASK));
6513 	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6514 	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
6515 	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
6516 	if (cpu_has_vmx_ept()) {
6517 		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
6518 		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
6519 		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
6520 		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
6521 	}
6522 	pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
6523 	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
6524 	pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
6525 	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
6526 	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6527 	       vmcs_readl(GUEST_SYSENTER_ESP),
6528 	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
6529 	vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
6530 	vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
6531 	vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
6532 	vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
6533 	vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
6534 	vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
6535 	vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
6536 	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
6537 	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
6538 	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
6539 	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6540 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
6541 		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
6542 	else if (efer_slot >= 0)
6543 		pr_err("EFER= 0x%016llx (autoload)\n",
6544 		       vmx->msr_autoload.guest.val[efer_slot].value);
6545 	else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
6546 		pr_err("EFER= 0x%016llx (effective)\n",
6547 		       vcpu->arch.efer | (EFER_LMA | EFER_LME));
6548 	else
6549 		pr_err("EFER= 0x%016llx (effective)\n",
6550 		       vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6551 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
6552 		pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
6553 	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
6554 	       vmcs_read64(GUEST_IA32_DEBUGCTL),
6555 	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
6556 	if (cpu_has_load_perf_global_ctrl() &&
6557 	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
6558 		pr_err("PerfGlobCtl = 0x%016llx\n",
6559 		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
6560 	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
6561 		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
6562 	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
6563 	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
6564 	       vmcs_read32(GUEST_ACTIVITY_STATE));
6565 	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
6566 		pr_err("InterruptStatus = %04x\n",
6567 		       vmcs_read16(GUEST_INTR_STATUS));
6568 	if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
6569 		vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6570 	if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
6571 		vmx_dump_msrs("autostore", &vmx->msr_autostore);
6572 
6573 	if (vmentry_ctl & VM_ENTRY_LOAD_CET_STATE)
6574 		pr_err("S_CET = 0x%016lx, SSP = 0x%016lx, SSP TABLE = 0x%016lx\n",
6575 		       vmcs_readl(GUEST_S_CET), vmcs_readl(GUEST_SSP),
6576 		       vmcs_readl(GUEST_INTR_SSP_TABLE));
6577 	pr_err("*** Host State ***\n");
6578 	pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
6579 	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
6580 	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6581 	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
6582 	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
6583 	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
6584 	       vmcs_read16(HOST_TR_SELECTOR));
6585 	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6586 	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
6587 	       vmcs_readl(HOST_TR_BASE));
6588 	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6589 	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
6590 	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6591 	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
6592 	       vmcs_readl(HOST_CR4));
6593 	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6594 	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
6595 	       vmcs_read32(HOST_IA32_SYSENTER_CS),
6596 	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
6597 	if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
6598 		pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
6599 	if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
6600 		pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
6601 	if (cpu_has_load_perf_global_ctrl() &&
6602 	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
6603 		pr_err("PerfGlobCtl = 0x%016llx\n",
6604 		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
6605 	if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
6606 		vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6607 	if (vmexit_ctl & VM_EXIT_LOAD_CET_STATE)
6608 		pr_err("S_CET = 0x%016lx, SSP = 0x%016lx, SSP TABLE = 0x%016lx\n",
6609 		       vmcs_readl(HOST_S_CET), vmcs_readl(HOST_SSP),
6610 		       vmcs_readl(HOST_INTR_SSP_TABLE));
6611 
6612 	pr_err("*** Control State ***\n");
6613 	pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6614 	       cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
6615 	pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6616 	       pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
6617 	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6618 	       vmcs_read32(EXCEPTION_BITMAP),
6619 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
6620 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
6621 	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6622 	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6623 	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
6624 	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
6625 	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6626 	       vmcs_read32(VM_EXIT_INTR_INFO),
6627 	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
6628 	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
6629 	pr_err("        reason=%08x qualification=%016lx\n",
6630 	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
6631 	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6632 	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
6633 	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
6634 	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
6635 	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
6636 		pr_err("TSC Multiplier = 0x%016llx\n",
6637 		       vmcs_read64(TSC_MULTIPLIER));
6638 	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
6639 		if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
6640 			u16 status = vmcs_read16(GUEST_INTR_STATUS);
6641 			pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
6642 		}
6643 		pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
6644 		if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
6645 			pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
6646 		pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
6647 	}
6648 	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6649 		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6650 	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6651 		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6652 	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6653 		pr_err("PLE Gap=%08x Window=%08x\n",
6654 		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
6655 	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6656 		pr_err("Virtual processor ID = 0x%04x\n",
6657 		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
6658 	if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
6659 		struct vmx_ve_information *ve_info = vmx->ve_info;
6660 		u64 ve_info_pa = vmcs_read64(VE_INFORMATION_ADDRESS);
6661 
6662 		/*
6663 		 * If KVM is dumping the VMCS, then something has gone wrong
6664 		 * already.  Derefencing an address from the VMCS, which could
6665 		 * very well be corrupted, is a terrible idea.  The virtual
6666 		 * address is known so use it.
6667 		 */
6668 		pr_err("VE info address = 0x%016llx%s\n", ve_info_pa,
6669 		       ve_info_pa == __pa(ve_info) ? "" : "(corrupted!)");
6670 		pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
6671 		       ve_info->exit_reason, ve_info->delivery,
6672 		       ve_info->exit_qualification,
6673 		       ve_info->guest_linear_address,
6674 		       ve_info->guest_physical_address, ve_info->eptp_index);
6675 	}
6676 }
6677 
6678 /*
6679  * The guest has exited.  See if we can fix it or if we need userspace
6680  * assistance.
6681  */
__vmx_handle_exit(struct kvm_vcpu * vcpu,fastpath_t exit_fastpath)6682 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6683 {
6684 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6685 	union vmx_exit_reason exit_reason = vmx_get_exit_reason(vcpu);
6686 	u32 vectoring_info = vmx->idt_vectoring_info;
6687 	u16 exit_handler_index;
6688 
6689 	/*
6690 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
6691 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
6692 	 * querying dirty_bitmap, we only need to kick all vcpus out of guest
6693 	 * mode as if vcpus is in root mode, the PML buffer must has been
6694 	 * flushed already.  Note, PML is never enabled in hardware while
6695 	 * running L2.
6696 	 */
6697 	if (enable_pml && !is_guest_mode(vcpu))
6698 		vmx_flush_pml_buffer(vcpu);
6699 
6700 	/*
6701 	 * KVM should never reach this point with a pending nested VM-Enter.
6702 	 * More specifically, short-circuiting VM-Entry to emulate L2 due to
6703 	 * invalid guest state should never happen as that means KVM knowingly
6704 	 * allowed a nested VM-Enter with an invalid vmcs12.  More below.
6705 	 */
6706 	if (KVM_BUG_ON(vcpu->arch.nested_run_pending, vcpu->kvm))
6707 		return -EIO;
6708 
6709 	if (is_guest_mode(vcpu)) {
6710 		/*
6711 		 * PML is never enabled when running L2, bail immediately if a
6712 		 * PML full exit occurs as something is horribly wrong.
6713 		 */
6714 		if (exit_reason.basic == EXIT_REASON_PML_FULL)
6715 			goto unexpected_vmexit;
6716 
6717 		/*
6718 		 * The host physical addresses of some pages of guest memory
6719 		 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
6720 		 * Page). The CPU may write to these pages via their host
6721 		 * physical address while L2 is running, bypassing any
6722 		 * address-translation-based dirty tracking (e.g. EPT write
6723 		 * protection).
6724 		 *
6725 		 * Mark them dirty on every exit from L2 to prevent them from
6726 		 * getting out of sync with dirty tracking.
6727 		 */
6728 		nested_vmx_mark_all_vmcs12_pages_dirty(vcpu);
6729 
6730 		/*
6731 		 * Synthesize a triple fault if L2 state is invalid.  In normal
6732 		 * operation, nested VM-Enter rejects any attempt to enter L2
6733 		 * with invalid state.  However, those checks are skipped if
6734 		 * state is being stuffed via RSM or KVM_SET_NESTED_STATE.  If
6735 		 * L2 state is invalid, it means either L1 modified SMRAM state
6736 		 * or userspace provided bad state.  Synthesize TRIPLE_FAULT as
6737 		 * doing so is architecturally allowed in the RSM case, and is
6738 		 * the least awful solution for the userspace case without
6739 		 * risking false positives.
6740 		 */
6741 		if (vmx->vt.emulation_required) {
6742 			nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
6743 			return 1;
6744 		}
6745 
6746 		if (nested_vmx_reflect_vmexit(vcpu))
6747 			return 1;
6748 	}
6749 
6750 	/* If guest state is invalid, start emulating.  L2 is handled above. */
6751 	if (vmx->vt.emulation_required)
6752 		return handle_invalid_guest_state(vcpu);
6753 
6754 	if (exit_reason.failed_vmentry) {
6755 		dump_vmcs(vcpu);
6756 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6757 		vcpu->run->fail_entry.hardware_entry_failure_reason
6758 			= exit_reason.full;
6759 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6760 		return 0;
6761 	}
6762 
6763 	if (unlikely(vmx->fail)) {
6764 		dump_vmcs(vcpu);
6765 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6766 		vcpu->run->fail_entry.hardware_entry_failure_reason
6767 			= vmcs_read32(VM_INSTRUCTION_ERROR);
6768 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6769 		return 0;
6770 	}
6771 
6772 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6773 	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
6774 	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
6775 	     exit_reason.basic != EXIT_REASON_PML_FULL &&
6776 	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
6777 	     exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
6778 	     exit_reason.basic != EXIT_REASON_NOTIFY &&
6779 	     exit_reason.basic != EXIT_REASON_EPT_MISCONFIG)) {
6780 		kvm_prepare_event_vectoring_exit(vcpu, INVALID_GPA);
6781 		return 0;
6782 	}
6783 
6784 	if (unlikely(!enable_vnmi &&
6785 		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
6786 		if (!vmx_interrupt_blocked(vcpu)) {
6787 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6788 		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6789 			   vcpu->arch.nmi_pending) {
6790 			/*
6791 			 * This CPU don't support us in finding the end of an
6792 			 * NMI-blocked window if the guest runs with IRQs
6793 			 * disabled. So we pull the trigger after 1 s of
6794 			 * futile waiting, but inform the user about this.
6795 			 */
6796 			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6797 			       "state on VCPU %d after 1 s timeout\n",
6798 			       __func__, vcpu->vcpu_id);
6799 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6800 		}
6801 	}
6802 
6803 	if (exit_fastpath != EXIT_FASTPATH_NONE)
6804 		return 1;
6805 
6806 	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
6807 		goto unexpected_vmexit;
6808 #ifdef CONFIG_MITIGATION_RETPOLINE
6809 	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6810 		return kvm_emulate_wrmsr(vcpu);
6811 	else if (exit_reason.basic == EXIT_REASON_MSR_WRITE_IMM)
6812 		return handle_wrmsr_imm(vcpu);
6813 	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
6814 		return handle_preemption_timer(vcpu);
6815 	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
6816 		return handle_interrupt_window(vcpu);
6817 	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6818 		return handle_external_interrupt(vcpu);
6819 	else if (exit_reason.basic == EXIT_REASON_HLT)
6820 		return kvm_emulate_halt(vcpu);
6821 	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
6822 		return handle_ept_misconfig(vcpu);
6823 #endif
6824 
6825 	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
6826 						kvm_vmx_max_exit_handlers);
6827 	if (!kvm_vmx_exit_handlers[exit_handler_index])
6828 		goto unexpected_vmexit;
6829 
6830 	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
6831 
6832 unexpected_vmexit:
6833 	dump_vmcs(vcpu);
6834 	kvm_prepare_unexpected_reason_exit(vcpu, exit_reason.full);
6835 	return 0;
6836 }
6837 
vmx_handle_exit(struct kvm_vcpu * vcpu,fastpath_t exit_fastpath)6838 int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6839 {
6840 	int ret = __vmx_handle_exit(vcpu, exit_fastpath);
6841 
6842 	/*
6843 	 * Exit to user space when bus lock detected to inform that there is
6844 	 * a bus lock in guest.
6845 	 */
6846 	if (vmx_get_exit_reason(vcpu).bus_lock_detected) {
6847 		if (ret > 0)
6848 			vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
6849 
6850 		vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
6851 		return 0;
6852 	}
6853 	return ret;
6854 }
6855 
vmx_update_cr8_intercept(struct kvm_vcpu * vcpu,int tpr,int irr)6856 void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6857 {
6858 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6859 	int tpr_threshold;
6860 
6861 	if (is_guest_mode(vcpu) &&
6862 		nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
6863 		return;
6864 
6865 	guard(vmx_vmcs01)(vcpu);
6866 
6867 	tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
6868 	vmcs_write32(TPR_THRESHOLD, tpr_threshold);
6869 }
6870 
vmx_set_virtual_apic_mode(struct kvm_vcpu * vcpu)6871 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
6872 {
6873 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6874 	u32 sec_exec_control;
6875 
6876 	if (!lapic_in_kernel(vcpu))
6877 		return;
6878 
6879 	if (!flexpriority_enabled &&
6880 	    !cpu_has_vmx_virtualize_x2apic_mode())
6881 		return;
6882 
6883 	guard(vmx_vmcs01)(vcpu);
6884 
6885 	sec_exec_control = secondary_exec_controls_get(vmx);
6886 	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6887 			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6888 
6889 	switch (kvm_get_apic_mode(vcpu)) {
6890 	case LAPIC_MODE_INVALID:
6891 		WARN_ONCE(true, "Invalid local APIC state");
6892 		break;
6893 	case LAPIC_MODE_DISABLED:
6894 		break;
6895 	case LAPIC_MODE_XAPIC:
6896 		if (flexpriority_enabled) {
6897 			sec_exec_control |=
6898 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6899 			kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6900 
6901 			/*
6902 			 * Flush the TLB, reloading the APIC access page will
6903 			 * only do so if its physical address has changed, but
6904 			 * the guest may have inserted a non-APIC mapping into
6905 			 * the TLB while the APIC access page was disabled.
6906 			 *
6907 			 * If L2 is active, immediately flush L1's TLB instead
6908 			 * of requesting a flush of the current TLB, because
6909 			 * the current TLB context is L2's.
6910 			 */
6911 			if (!is_guest_mode(vcpu))
6912 				kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6913 			else if (!enable_ept)
6914 				vpid_sync_context(vmx->vpid);
6915 			else if (VALID_PAGE(vcpu->arch.root_mmu.root.hpa))
6916 				vmx_flush_tlb_ept_root(vcpu->arch.root_mmu.root.hpa);
6917 		}
6918 		break;
6919 	case LAPIC_MODE_X2APIC:
6920 		if (cpu_has_vmx_virtualize_x2apic_mode())
6921 			sec_exec_control |=
6922 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6923 		break;
6924 	}
6925 	secondary_exec_controls_set(vmx, sec_exec_control);
6926 
6927 	vmx_update_msr_bitmap_x2apic(vcpu);
6928 }
6929 
vmx_set_apic_access_page_addr(struct kvm_vcpu * vcpu)6930 void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
6931 {
6932 	const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
6933 	struct kvm *kvm = vcpu->kvm;
6934 	struct kvm_memslots *slots = kvm_memslots(kvm);
6935 	struct kvm_memory_slot *slot;
6936 	struct page *refcounted_page;
6937 	unsigned long mmu_seq;
6938 	kvm_pfn_t pfn;
6939 	bool writable;
6940 
6941 	/* Note, the VIRTUALIZE_APIC_ACCESSES check needs to query vmcs01. */
6942 	guard(vmx_vmcs01)(vcpu);
6943 
6944 	if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6945 	    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
6946 		return;
6947 
6948 	/*
6949 	 * Explicitly grab the memslot using KVM's internal slot ID to ensure
6950 	 * KVM doesn't unintentionally grab a userspace memslot.  It _should_
6951 	 * be impossible for userspace to create a memslot for the APIC when
6952 	 * APICv is enabled, but paranoia won't hurt in this case.
6953 	 */
6954 	slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
6955 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
6956 		return;
6957 
6958 	/*
6959 	 * Ensure that the mmu_notifier sequence count is read before KVM
6960 	 * retrieves the pfn from the primary MMU.  Note, the memslot is
6961 	 * protected by SRCU, not the mmu_notifier.  Pairs with the smp_wmb()
6962 	 * in kvm_mmu_invalidate_end().
6963 	 */
6964 	mmu_seq = kvm->mmu_invalidate_seq;
6965 	smp_rmb();
6966 
6967 	/*
6968 	 * No need to retry if the memslot does not exist or is invalid.  KVM
6969 	 * controls the APIC-access page memslot, and only deletes the memslot
6970 	 * if APICv is permanently inhibited, i.e. the memslot won't reappear.
6971 	 */
6972 	pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &refcounted_page);
6973 	if (is_error_noslot_pfn(pfn))
6974 		return;
6975 
6976 	read_lock(&vcpu->kvm->mmu_lock);
6977 	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
6978 		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6979 	else
6980 		vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
6981 
6982 	/*
6983 	 * Do not pin the APIC access page in memory so that it can be freely
6984 	 * migrated, the MMU notifier will call us again if it is migrated or
6985 	 * swapped out.  KVM backs the memslot with anonymous memory, the pfn
6986 	 * should always point at a refcounted page (if the pfn is valid).
6987 	 */
6988 	if (!WARN_ON_ONCE(!refcounted_page))
6989 		kvm_release_page_clean(refcounted_page);
6990 
6991 	/*
6992 	 * No need for a manual TLB flush at this point, KVM has already done a
6993 	 * flush if there were SPTEs pointing at the previous page.
6994 	 */
6995 	read_unlock(&vcpu->kvm->mmu_lock);
6996 }
6997 
vmx_hwapic_isr_update(struct kvm_vcpu * vcpu,int max_isr)6998 void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
6999 {
7000 	u16 status;
7001 	u8 old;
7002 
7003 	if (max_isr == -1)
7004 		max_isr = 0;
7005 
7006 	/*
7007 	 * Always update SVI in vmcs01, as SVI is only relevant for L2 if and
7008 	 * only if Virtual Interrupt Delivery is enabled in vmcs12, and if VID
7009 	 * is enabled then L2 EOIs affect L2's vAPIC, not L1's vAPIC.
7010 	 */
7011 	guard(vmx_vmcs01)(vcpu);
7012 
7013 	status = vmcs_read16(GUEST_INTR_STATUS);
7014 	old = status >> 8;
7015 	if (max_isr != old) {
7016 		status &= 0xff;
7017 		status |= max_isr << 8;
7018 		vmcs_write16(GUEST_INTR_STATUS, status);
7019 	}
7020 }
7021 
vmx_set_rvi(int vector)7022 static void vmx_set_rvi(int vector)
7023 {
7024 	u16 status;
7025 	u8 old;
7026 
7027 	if (vector == -1)
7028 		vector = 0;
7029 
7030 	status = vmcs_read16(GUEST_INTR_STATUS);
7031 	old = (u8)status & 0xff;
7032 	if ((u8)vector != old) {
7033 		status &= ~0xff;
7034 		status |= (u8)vector;
7035 		vmcs_write16(GUEST_INTR_STATUS, status);
7036 	}
7037 }
7038 
vmx_sync_pir_to_irr(struct kvm_vcpu * vcpu)7039 int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
7040 {
7041 	struct vcpu_vt *vt = to_vt(vcpu);
7042 	bool max_irr_is_from_pir;
7043 	int max_irr;
7044 
7045 	if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
7046 		return -EIO;
7047 
7048 	if (pi_test_on(&vt->pi_desc)) {
7049 		pi_clear_on(&vt->pi_desc);
7050 		/*
7051 		 * IOMMU can write to PID.ON, so the barrier matters even on UP.
7052 		 * But on x86 this is just a compiler barrier anyway.
7053 		 */
7054 		smp_mb__after_atomic();
7055 		max_irr_is_from_pir = kvm_apic_update_irr(vcpu, vt->pi_desc.pir,
7056 							  &max_irr);
7057 	} else {
7058 		max_irr = kvm_lapic_find_highest_irr(vcpu);
7059 		max_irr_is_from_pir = false;
7060 	}
7061 
7062 	/*
7063 	 * If APICv is enabled and L2 is not active, then update the Requesting
7064 	 * Virtual Interrupt (RVI) portion of vmcs01.GUEST_INTR_STATUS with the
7065 	 * highest priority IRR to deliver the IRQ via Virtual Interrupt
7066 	 * Delivery.  Note, this is required even if the highest priority IRQ
7067 	 * was already pending in the IRR, as RVI isn't updated in lockstep with
7068 	 * the IRR (unlike apic->irr_pending).
7069 	 *
7070 	 * For the cases where Virtual Interrupt Delivery can't be used:
7071 	 *
7072 	 * 1) If L2 is running and the vCPU has a new pending interrupt.  If L1
7073 	 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
7074 	 * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
7075 	 * into L2, but KVM doesn't use virtual interrupt delivery to inject
7076 	 * interrupts into L2, and so KVM_REQ_EVENT is again needed.
7077 	 *
7078 	 * 2) If APICv is disabled for this vCPU, assigned devices may still
7079 	 * attempt to post interrupts.  The posted interrupt vector will cause
7080 	 * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
7081 	 *
7082 	 * In both cases, set KVM_REQ_EVENT if and only if the highest priority
7083 	 * pending IRQ came from the PIR, as setting KVM_REQ_EVENT if any IRQ
7084 	 * is pending may put the vCPU into an infinite loop, e.g. if the IRQ
7085 	 * is blocked, then it will stay pending until an IRQ window is opened.
7086 	 *
7087 	 * Note!  It's possible that one or more IRQs were moved from the PIR
7088 	 * to the IRR _without_ max_irr_is_from_pir being true!  I.e. if there
7089 	 * was a higher priority IRQ already pending in the IRR.  Not setting
7090 	 * KVM_REQ_EVENT in this case is intentional and safe.  If APICv is
7091 	 * inactive, or L2 is running with exit-on-interrupt off (in vmcs12),
7092 	 * i.e. without nested virtual interrupt delivery, then there's no need
7093 	 * to request an IRQ window as the lower priority IRQ only needs to be
7094 	 * delivered when the higher priority IRQ is dismissed from the ISR,
7095 	 * i.e. on the next EOI, and EOIs are always intercepted if APICv is
7096 	 * disabled or if L2 is running without nested VID.  If L2 is running
7097 	 * exit-on-interrupt on (in vmcs12), then the higher priority IRQ will
7098 	 * trigger a nested VM-Exit, at which point KVM will re-evaluate L1's
7099 	 * pending IRQs.
7100 	 */
7101 	if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
7102 		vmx_set_rvi(max_irr);
7103 	else if (max_irr_is_from_pir)
7104 		kvm_make_request(KVM_REQ_EVENT, vcpu);
7105 
7106 	return max_irr;
7107 }
7108 
vmx_load_eoi_exitmap(struct kvm_vcpu * vcpu,u64 * eoi_exit_bitmap)7109 void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
7110 {
7111 	if (!kvm_vcpu_apicv_active(vcpu))
7112 		return;
7113 
7114 	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
7115 	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
7116 	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
7117 	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
7118 }
7119 
7120 void vmx_do_interrupt_irqoff(unsigned long entry);
7121 void vmx_do_nmi_irqoff(void);
7122 
handle_nm_fault_irqoff(struct kvm_vcpu * vcpu)7123 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
7124 {
7125 	/*
7126 	 * Save xfd_err to guest_fpu before interrupt is enabled, so the
7127 	 * MSR value is not clobbered by the host activity before the guest
7128 	 * has chance to consume it.
7129 	 *
7130 	 * Update the guest's XFD_ERR if and only if XFD is enabled, as the #NM
7131 	 * interception may have been caused by L1 interception.  Per the SDM,
7132 	 * XFD_ERR is not modified for non-XFD #NM, i.e. if CR0.TS=1.
7133 	 *
7134 	 * Note, XFD_ERR is updated _before_ the #NM interception check, i.e.
7135 	 * unlike CR2 and DR6, the value is not a payload that is attached to
7136 	 * the #NM exception.
7137 	 */
7138 	if (is_xfd_nm_fault(vcpu))
7139 		rdmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
7140 }
7141 
handle_exception_irqoff(struct kvm_vcpu * vcpu,u32 intr_info)7142 static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
7143 {
7144 	/* if exit due to PF check for async PF */
7145 	if (is_page_fault(intr_info))
7146 		vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
7147 	/* if exit due to NM, handle before interrupts are enabled */
7148 	else if (is_nm_fault(intr_info))
7149 		handle_nm_fault_irqoff(vcpu);
7150 	/* Handle machine checks before interrupts are enabled */
7151 	else if (is_machine_check(intr_info))
7152 		kvm_machine_check();
7153 }
7154 
handle_external_interrupt_irqoff(struct kvm_vcpu * vcpu,u32 intr_info)7155 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
7156 					     u32 intr_info)
7157 {
7158 	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
7159 
7160 	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
7161 	    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
7162 		return;
7163 
7164 	/*
7165 	 * Invoke the kernel's IRQ handler for the vector.  Use the FRED path
7166 	 * when it's available even if FRED isn't fully enabled, e.g. even if
7167 	 * FRED isn't supported in hardware, in order to avoid the indirect
7168 	 * CALL in the non-FRED path.
7169 	 */
7170 	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
7171 	if (IS_ENABLED(CONFIG_X86_FRED))
7172 		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
7173 	else
7174 		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
7175 	kvm_after_interrupt(vcpu);
7176 
7177 	vcpu->arch.at_instruction_boundary = true;
7178 }
7179 
vmx_handle_exit_irqoff(struct kvm_vcpu * vcpu)7180 void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
7181 {
7182 	if (to_vt(vcpu)->emulation_required)
7183 		return;
7184 
7185 	switch (vmx_get_exit_reason(vcpu).basic) {
7186 	case EXIT_REASON_EXTERNAL_INTERRUPT:
7187 		handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
7188 		break;
7189 	case EXIT_REASON_EXCEPTION_NMI:
7190 		handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
7191 		break;
7192 	case EXIT_REASON_MCE_DURING_VMENTRY:
7193 		kvm_machine_check();
7194 		break;
7195 	default:
7196 		break;
7197 	}
7198 }
7199 
7200 /*
7201  * The kvm parameter can be NULL (module initialization, or invocation before
7202  * VM creation). Be sure to check the kvm parameter before using it.
7203  */
vmx_has_emulated_msr(struct kvm * kvm,u32 index)7204 bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
7205 {
7206 	switch (index) {
7207 	case MSR_IA32_SMBASE:
7208 		if (!IS_ENABLED(CONFIG_KVM_SMM))
7209 			return false;
7210 		/*
7211 		 * We cannot do SMM unless we can run the guest in big
7212 		 * real mode.
7213 		 */
7214 		return enable_unrestricted_guest || emulate_invalid_guest_state;
7215 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
7216 		return nested;
7217 	case MSR_AMD64_VIRT_SPEC_CTRL:
7218 	case MSR_AMD64_TSC_RATIO:
7219 		/* This is AMD only.  */
7220 		return false;
7221 	default:
7222 		return true;
7223 	}
7224 }
7225 
vmx_recover_nmi_blocking(struct vcpu_vmx * vmx)7226 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
7227 {
7228 	u32 exit_intr_info;
7229 	bool unblock_nmi;
7230 	u8 vector;
7231 	bool idtv_info_valid;
7232 
7233 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7234 
7235 	if (enable_vnmi) {
7236 		if (vmx->loaded_vmcs->nmi_known_unmasked)
7237 			return;
7238 
7239 		exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
7240 		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
7241 		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
7242 		/*
7243 		 * SDM 3: 27.7.1.2 (September 2008)
7244 		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
7245 		 * a guest IRET fault.
7246 		 * SDM 3: 23.2.2 (September 2008)
7247 		 * Bit 12 is undefined in any of the following cases:
7248 		 *  If the VM exit sets the valid bit in the IDT-vectoring
7249 		 *   information field.
7250 		 *  If the VM exit is due to a double fault.
7251 		 */
7252 		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
7253 		    vector != DF_VECTOR && !idtv_info_valid)
7254 			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7255 				      GUEST_INTR_STATE_NMI);
7256 		else
7257 			vmx->loaded_vmcs->nmi_known_unmasked =
7258 				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
7259 				  & GUEST_INTR_STATE_NMI);
7260 	} else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
7261 		vmx->loaded_vmcs->vnmi_blocked_time +=
7262 			ktime_to_ns(ktime_sub(ktime_get(),
7263 					      vmx->loaded_vmcs->entry_time));
7264 }
7265 
__vmx_complete_interrupts(struct kvm_vcpu * vcpu,u32 idt_vectoring_info,int instr_len_field,int error_code_field)7266 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7267 				      u32 idt_vectoring_info,
7268 				      int instr_len_field,
7269 				      int error_code_field)
7270 {
7271 	u8 vector;
7272 	int type;
7273 	bool idtv_info_valid;
7274 
7275 	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7276 
7277 	vcpu->arch.nmi_injected = false;
7278 	kvm_clear_exception_queue(vcpu);
7279 	kvm_clear_interrupt_queue(vcpu);
7280 
7281 	if (!idtv_info_valid)
7282 		return;
7283 
7284 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7285 
7286 	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
7287 	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
7288 
7289 	switch (type) {
7290 	case INTR_TYPE_NMI_INTR:
7291 		vcpu->arch.nmi_injected = true;
7292 		/*
7293 		 * SDM 3: 27.7.1.2 (September 2008)
7294 		 * Clear bit "block by NMI" before VM entry if a NMI
7295 		 * delivery faulted.
7296 		 */
7297 		vmx_set_nmi_mask(vcpu, false);
7298 		break;
7299 	case INTR_TYPE_SOFT_EXCEPTION:
7300 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7301 		fallthrough;
7302 	case INTR_TYPE_HARD_EXCEPTION: {
7303 		u32 error_code = 0;
7304 
7305 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK)
7306 			error_code = vmcs_read32(error_code_field);
7307 
7308 		kvm_requeue_exception(vcpu, vector,
7309 				      idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK,
7310 				      error_code);
7311 		break;
7312 	}
7313 	case INTR_TYPE_SOFT_INTR:
7314 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7315 		fallthrough;
7316 	case INTR_TYPE_EXT_INTR:
7317 		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7318 		break;
7319 	default:
7320 		break;
7321 	}
7322 }
7323 
vmx_complete_interrupts(struct vcpu_vmx * vmx)7324 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7325 {
7326 	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7327 				  VM_EXIT_INSTRUCTION_LEN,
7328 				  IDT_VECTORING_ERROR_CODE);
7329 }
7330 
vmx_cancel_injection(struct kvm_vcpu * vcpu)7331 void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7332 {
7333 	__vmx_complete_interrupts(vcpu,
7334 				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7335 				  VM_ENTRY_INSTRUCTION_LEN,
7336 				  VM_ENTRY_EXCEPTION_ERROR_CODE);
7337 
7338 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
7339 }
7340 
atomic_switch_perf_msrs(struct vcpu_vmx * vmx)7341 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7342 {
7343 	int i, nr_msrs;
7344 	struct perf_guest_switch_msr *msrs;
7345 	struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7346 
7347 	if (kvm_vcpu_has_mediated_pmu(&vmx->vcpu))
7348 		return;
7349 
7350 	pmu->host_cross_mapped_mask = 0;
7351 	if (pmu->pebs_enable & pmu->global_ctrl)
7352 		intel_pmu_cross_mapped_check(pmu);
7353 
7354 	/* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
7355 	msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
7356 	if (!msrs)
7357 		return;
7358 
7359 	for (i = 0; i < nr_msrs; i++)
7360 		if (msrs[i].host == msrs[i].guest)
7361 			clear_atomic_switch_msr(vmx, msrs[i].msr);
7362 		else
7363 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7364 					      msrs[i].host);
7365 }
7366 
vmx_refresh_guest_perf_global_control(struct kvm_vcpu * vcpu)7367 static void vmx_refresh_guest_perf_global_control(struct kvm_vcpu *vcpu)
7368 {
7369 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
7370 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7371 
7372 	if (msr_write_intercepted(vmx, MSR_CORE_PERF_GLOBAL_CTRL))
7373 		return;
7374 
7375 	if (!cpu_has_save_perf_global_ctrl()) {
7376 		int slot = vmx_find_loadstore_msr_slot(&vmx->msr_autostore,
7377 						       MSR_CORE_PERF_GLOBAL_CTRL);
7378 
7379 		if (WARN_ON_ONCE(slot < 0))
7380 			return;
7381 
7382 		pmu->global_ctrl = vmx->msr_autostore.val[slot].value;
7383 		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, pmu->global_ctrl);
7384 		return;
7385 	}
7386 
7387 	pmu->global_ctrl = vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL);
7388 }
7389 
vmx_update_hv_timer(struct kvm_vcpu * vcpu,bool force_immediate_exit)7390 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7391 {
7392 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7393 	u64 tscl;
7394 	u32 delta_tsc;
7395 
7396 	if (force_immediate_exit) {
7397 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
7398 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7399 	} else if (vmx->hv_deadline_tsc != -1) {
7400 		tscl = rdtsc();
7401 		if (vmx->hv_deadline_tsc > tscl)
7402 			/* set_hv_timer ensures the delta fits in 32-bits */
7403 			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
7404 				cpu_preemption_timer_multi);
7405 		else
7406 			delta_tsc = 0;
7407 
7408 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
7409 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7410 	} else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
7411 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
7412 		vmx->loaded_vmcs->hv_timer_soft_disabled = true;
7413 	}
7414 }
7415 
vmx_update_host_rsp(struct vcpu_vmx * vmx,unsigned long host_rsp)7416 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
7417 {
7418 	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7419 		vmx->loaded_vmcs->host_state.rsp = host_rsp;
7420 		vmcs_writel(HOST_RSP, host_rsp);
7421 	}
7422 }
7423 
vmx_spec_ctrl_restore_host(struct vcpu_vmx * vmx,unsigned int flags)7424 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
7425 					unsigned int flags)
7426 {
7427 	u64 hostval = this_cpu_read(x86_spec_ctrl_current);
7428 
7429 	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
7430 		return;
7431 
7432 	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
7433 		vmx->spec_ctrl = native_rdmsrq(MSR_IA32_SPEC_CTRL);
7434 
7435 	/*
7436 	 * If the guest/host SPEC_CTRL values differ, restore the host value.
7437 	 *
7438 	 * For legacy IBRS, the IBRS bit always needs to be written after
7439 	 * transitioning from a less privileged predictor mode, regardless of
7440 	 * whether the guest/host values differ.
7441 	 */
7442 	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
7443 	    vmx->spec_ctrl != hostval)
7444 		native_wrmsrq(MSR_IA32_SPEC_CTRL, hostval);
7445 
7446 	barrier_nospec();
7447 }
7448 
vmx_exit_handlers_fastpath(struct kvm_vcpu * vcpu,bool force_immediate_exit)7449 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
7450 					     bool force_immediate_exit)
7451 {
7452 	/*
7453 	 * If L2 is active, some VMX preemption timer exits can be handled in
7454 	 * the fastpath even, all other exits must use the slow path.
7455 	 */
7456 	if (is_guest_mode(vcpu) &&
7457 	    vmx_get_exit_reason(vcpu).basic != EXIT_REASON_PREEMPTION_TIMER)
7458 		return EXIT_FASTPATH_NONE;
7459 
7460 	switch (vmx_get_exit_reason(vcpu).basic) {
7461 	case EXIT_REASON_MSR_WRITE:
7462 		return handle_fastpath_wrmsr(vcpu);
7463 	case EXIT_REASON_MSR_WRITE_IMM:
7464 		return handle_fastpath_wrmsr_imm(vcpu, vmx_get_exit_qual(vcpu),
7465 						 vmx_get_msr_imm_reg(vcpu));
7466 	case EXIT_REASON_PREEMPTION_TIMER:
7467 		return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
7468 	case EXIT_REASON_HLT:
7469 		return handle_fastpath_hlt(vcpu);
7470 	case EXIT_REASON_INVD:
7471 		return handle_fastpath_invd(vcpu);
7472 	default:
7473 		return EXIT_FASTPATH_NONE;
7474 	}
7475 }
7476 
vmx_handle_nmi(struct kvm_vcpu * vcpu)7477 noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu)
7478 {
7479 	if ((u16)vmx_get_exit_reason(vcpu).basic != EXIT_REASON_EXCEPTION_NMI ||
7480 	    !is_nmi(vmx_get_intr_info(vcpu)))
7481 		return;
7482 
7483 	kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
7484 	if (cpu_feature_enabled(X86_FEATURE_FRED))
7485 		fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
7486 	else
7487 		vmx_do_nmi_irqoff();
7488 	kvm_after_interrupt(vcpu);
7489 }
7490 
vmx_vcpu_enter_exit(struct kvm_vcpu * vcpu,unsigned int flags)7491 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
7492 					unsigned int flags)
7493 {
7494 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7495 
7496 	guest_state_enter_irqoff();
7497 
7498 	vmx_l1d_flush(vcpu);
7499 
7500 	vmx_disable_fb_clear(vmx);
7501 
7502 	if (vcpu->arch.cr2 != native_read_cr2())
7503 		native_write_cr2(vcpu->arch.cr2);
7504 
7505 	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7506 				   flags);
7507 
7508 	vcpu->arch.cr2 = native_read_cr2();
7509 	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
7510 
7511 	vmx->idt_vectoring_info = 0;
7512 
7513 	vmx_enable_fb_clear(vmx);
7514 
7515 	if (unlikely(vmx->fail)) {
7516 		vmx->vt.exit_reason.full = 0xdead;
7517 		goto out;
7518 	}
7519 
7520 	vmx->vt.exit_reason.full = vmcs_read32(VM_EXIT_REASON);
7521 	if (likely(!vmx_get_exit_reason(vcpu).failed_vmentry))
7522 		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7523 
7524 	vmx_handle_nmi(vcpu);
7525 
7526 out:
7527 	guest_state_exit_irqoff();
7528 }
7529 
vmx_vcpu_run(struct kvm_vcpu * vcpu,u64 run_flags)7530 fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
7531 {
7532 	bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
7533 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7534 	unsigned long cr3, cr4;
7535 
7536 	/* Record the guest's net vcpu time for enforced NMI injections. */
7537 	if (unlikely(!enable_vnmi &&
7538 		     vmx->loaded_vmcs->soft_vnmi_blocked))
7539 		vmx->loaded_vmcs->entry_time = ktime_get();
7540 
7541 	/*
7542 	 * Don't enter VMX if guest state is invalid, let the exit handler
7543 	 * start emulation until we arrive back to a valid state.  Synthesize a
7544 	 * consistency check VM-Exit due to invalid guest state and bail.
7545 	 */
7546 	if (unlikely(vmx->vt.emulation_required)) {
7547 		vmx->fail = 0;
7548 
7549 		vmx->vt.exit_reason.full = EXIT_REASON_INVALID_STATE;
7550 		vmx->vt.exit_reason.failed_vmentry = 1;
7551 		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
7552 		vmx->vt.exit_qualification = ENTRY_FAIL_DEFAULT;
7553 		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
7554 		vmx->vt.exit_intr_info = 0;
7555 		return EXIT_FASTPATH_NONE;
7556 	}
7557 
7558 	trace_kvm_entry(vcpu, force_immediate_exit);
7559 
7560 	if (vmx->ple_window_dirty) {
7561 		vmx->ple_window_dirty = false;
7562 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
7563 	}
7564 
7565 	/*
7566 	 * We did this in prepare_switch_to_guest, because it needs to
7567 	 * be within srcu_read_lock.
7568 	 */
7569 	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
7570 
7571 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
7572 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7573 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
7574 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7575 	vcpu->arch.regs_dirty = 0;
7576 
7577 	if (run_flags & KVM_RUN_LOAD_GUEST_DR6)
7578 		set_debugreg(vcpu->arch.dr6, 6);
7579 
7580 	if (run_flags & KVM_RUN_LOAD_DEBUGCTL)
7581 		vmx_reload_guest_debugctl(vcpu);
7582 
7583 	/*
7584 	 * Refresh vmcs.HOST_CR3 if necessary.  This must be done immediately
7585 	 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
7586 	 * it switches back to the current->mm, which can occur in KVM context
7587 	 * when switching to a temporary mm to patch kernel code, e.g. if KVM
7588 	 * toggles a static key while handling a VM-Exit.
7589 	 */
7590 	cr3 = __get_current_cr3_fast();
7591 	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
7592 		vmcs_writel(HOST_CR3, cr3);
7593 		vmx->loaded_vmcs->host_state.cr3 = cr3;
7594 	}
7595 
7596 	cr4 = cr4_read_shadow();
7597 	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
7598 		vmcs_writel(HOST_CR4, cr4);
7599 		vmx->loaded_vmcs->host_state.cr4 = cr4;
7600 	}
7601 
7602 	/* When single-stepping over STI and MOV SS, we must clear the
7603 	 * corresponding interruptibility bits in the guest state. Otherwise
7604 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
7605 	 * exceptions being set, but that's not correct for the guest debugging
7606 	 * case. */
7607 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7608 		vmx_set_interrupt_shadow(vcpu, 0);
7609 
7610 	pt_guest_enter(vmx);
7611 
7612 	atomic_switch_perf_msrs(vmx);
7613 	if (intel_pmu_lbr_is_enabled(vcpu))
7614 		vmx_passthrough_lbr_msrs(vcpu);
7615 
7616 	if (enable_preemption_timer)
7617 		vmx_update_hv_timer(vcpu, force_immediate_exit);
7618 	else if (force_immediate_exit)
7619 		smp_send_reschedule(vcpu->cpu);
7620 
7621 	kvm_wait_lapic_expire(vcpu);
7622 
7623 	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
7624 	vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7625 
7626 	/* All fields are clean at this point */
7627 	if (kvm_is_using_evmcs()) {
7628 		current_evmcs->hv_clean_fields |=
7629 			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
7630 
7631 		current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
7632 	}
7633 
7634 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7635 	if (vcpu->arch.host_debugctl)
7636 		update_debugctlmsr(vcpu->arch.host_debugctl);
7637 
7638 #ifndef CONFIG_X86_64
7639 	/*
7640 	 * The sysexit path does not restore ds/es, so we must set them to
7641 	 * a reasonable value ourselves.
7642 	 *
7643 	 * We can't defer this to vmx_prepare_switch_to_host() since that
7644 	 * function may be executed in interrupt context, which saves and
7645 	 * restore segments around it, nullifying its effect.
7646 	 */
7647 	loadsegment(ds, __USER_DS);
7648 	loadsegment(es, __USER_DS);
7649 #endif
7650 
7651 	pt_guest_exit(vmx);
7652 
7653 	if (is_guest_mode(vcpu)) {
7654 		/*
7655 		 * Track VMLAUNCH/VMRESUME that have made past guest state
7656 		 * checking.
7657 		 */
7658 		if (vcpu->arch.nested_run_pending &&
7659 		    !vmx_get_exit_reason(vcpu).failed_vmentry)
7660 			++vcpu->stat.nested_run;
7661 
7662 		vcpu->arch.nested_run_pending = 0;
7663 	}
7664 
7665 	if (unlikely(vmx->fail))
7666 		return EXIT_FASTPATH_NONE;
7667 
7668 	trace_kvm_exit(vcpu, KVM_ISA_VMX);
7669 
7670 	if (unlikely(vmx_get_exit_reason(vcpu).failed_vmentry))
7671 		return EXIT_FASTPATH_NONE;
7672 
7673 	vmx->loaded_vmcs->launched = 1;
7674 
7675 	vmx_refresh_guest_perf_global_control(vcpu);
7676 
7677 	vmx_recover_nmi_blocking(vmx);
7678 	vmx_complete_interrupts(vmx);
7679 
7680 	return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
7681 }
7682 
vmx_vcpu_free(struct kvm_vcpu * vcpu)7683 void vmx_vcpu_free(struct kvm_vcpu *vcpu)
7684 {
7685 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7686 
7687 	if (enable_pml)
7688 		vmx_destroy_pml_buffer(vmx);
7689 	free_vpid(vmx->vpid);
7690 	nested_vmx_free_vcpu(vcpu);
7691 	free_loaded_vmcs(vmx->loaded_vmcs);
7692 	free_page((unsigned long)vmx->ve_info);
7693 }
7694 
vmx_vcpu_create(struct kvm_vcpu * vcpu)7695 int vmx_vcpu_create(struct kvm_vcpu *vcpu)
7696 {
7697 	struct vmx_uret_msr *tsx_ctrl;
7698 	struct vcpu_vmx *vmx;
7699 	int i, err;
7700 
7701 	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
7702 	vmx = to_vmx(vcpu);
7703 
7704 	INIT_LIST_HEAD(&vmx->vt.pi_wakeup_list);
7705 
7706 	err = -ENOMEM;
7707 
7708 	vmx->vpid = allocate_vpid();
7709 
7710 	/*
7711 	 * If PML is turned on, failure on enabling PML just results in failure
7712 	 * of creating the vcpu, therefore we can simplify PML logic (by
7713 	 * avoiding dealing with cases, such as enabling PML partially on vcpus
7714 	 * for the guest), etc.
7715 	 */
7716 	if (enable_pml) {
7717 		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7718 		if (!vmx->pml_pg)
7719 			goto free_vpid;
7720 	}
7721 
7722 	for (i = 0; i < kvm_nr_uret_msrs; ++i)
7723 		vmx->guest_uret_msrs[i].mask = -1ull;
7724 	if (boot_cpu_has(X86_FEATURE_RTM)) {
7725 		/*
7726 		 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
7727 		 * Keep the host value unchanged to avoid changing CPUID bits
7728 		 * under the host kernel's feet.
7729 		 */
7730 		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7731 		if (tsx_ctrl)
7732 			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
7733 	}
7734 
7735 	err = alloc_loaded_vmcs(&vmx->vmcs01);
7736 	if (err < 0)
7737 		goto free_pml;
7738 
7739 	/*
7740 	 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
7741 	 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
7742 	 * feature only for vmcs01, KVM currently isn't equipped to realize any
7743 	 * performance benefits from enabling it for vmcs02.
7744 	 */
7745 	if (kvm_is_using_evmcs() &&
7746 	    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
7747 		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7748 
7749 		evmcs->hv_enlightenments_control.msr_bitmap = 1;
7750 	}
7751 
7752 	vmx->loaded_vmcs = &vmx->vmcs01;
7753 
7754 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
7755 		err = kvm_alloc_apic_access_page(vcpu->kvm);
7756 		if (err)
7757 			goto free_vmcs;
7758 	}
7759 
7760 	if (enable_ept && !enable_unrestricted_guest) {
7761 		err = init_rmode_identity_map(vcpu->kvm);
7762 		if (err)
7763 			goto free_vmcs;
7764 	}
7765 
7766 	err = -ENOMEM;
7767 	if (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_EPT_VIOLATION_VE) {
7768 		struct page *page;
7769 
7770 		BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
7771 
7772 		/* ve_info must be page aligned. */
7773 		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7774 		if (!page)
7775 			goto free_vmcs;
7776 
7777 		vmx->ve_info = page_to_virt(page);
7778 	}
7779 
7780 	if (vmx_can_use_ipiv(vcpu))
7781 		WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
7782 			   __pa(&vmx->vt.pi_desc) | PID_TABLE_ENTRY_VALID);
7783 
7784 	return 0;
7785 
7786 free_vmcs:
7787 	free_loaded_vmcs(vmx->loaded_vmcs);
7788 free_pml:
7789 	vmx_destroy_pml_buffer(vmx);
7790 free_vpid:
7791 	free_vpid(vmx->vpid);
7792 	return err;
7793 }
7794 
7795 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7796 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7797 
vmx_vm_init(struct kvm * kvm)7798 int vmx_vm_init(struct kvm *kvm)
7799 {
7800 	if (!ple_gap)
7801 		kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
7802 
7803 	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
7804 		switch (l1tf_mitigation) {
7805 		case L1TF_MITIGATION_OFF:
7806 		case L1TF_MITIGATION_FLUSH_NOWARN:
7807 			/* 'I explicitly don't care' is set */
7808 			break;
7809 		case L1TF_MITIGATION_AUTO:
7810 		case L1TF_MITIGATION_FLUSH:
7811 		case L1TF_MITIGATION_FLUSH_NOSMT:
7812 		case L1TF_MITIGATION_FULL:
7813 			/*
7814 			 * Warn upon starting the first VM in a potentially
7815 			 * insecure environment.
7816 			 */
7817 			if (sched_smt_active())
7818 				pr_warn_once(L1TF_MSG_SMT);
7819 			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
7820 				pr_warn_once(L1TF_MSG_L1D);
7821 			break;
7822 		case L1TF_MITIGATION_FULL_FORCE:
7823 			/* Flush is enforced */
7824 			break;
7825 		}
7826 	}
7827 
7828 	if (enable_pml)
7829 		kvm->arch.cpu_dirty_log_size = PML_LOG_NR_ENTRIES;
7830 	return 0;
7831 }
7832 
vmx_ignore_guest_pat(struct kvm * kvm)7833 static inline bool vmx_ignore_guest_pat(struct kvm *kvm)
7834 {
7835 	/*
7836 	 * Non-coherent DMA devices need the guest to flush CPU properly.
7837 	 * In that case it is not possible to map all guest RAM as WB, so
7838 	 * always trust guest PAT.
7839 	 */
7840 	return !kvm_arch_has_noncoherent_dma(kvm) &&
7841 	       kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT);
7842 }
7843 
vmx_get_mt_mask(struct kvm_vcpu * vcpu,gfn_t gfn,bool is_mmio)7844 u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7845 {
7846 	/*
7847 	 * Force UC for host MMIO regions, as allowing the guest to access MMIO
7848 	 * with cacheable accesses will result in Machine Checks.
7849 	 */
7850 	if (is_mmio)
7851 		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7852 
7853 	/* Force WB if ignoring guest PAT */
7854 	if (vmx_ignore_guest_pat(vcpu->kvm))
7855 		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7856 
7857 	return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT);
7858 }
7859 
vmcs_set_secondary_exec_control(struct vcpu_vmx * vmx,u32 new_ctl)7860 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
7861 {
7862 	/*
7863 	 * These bits in the secondary execution controls field
7864 	 * are dynamic, the others are mostly based on the hypervisor
7865 	 * architecture and the guest's CPUID.  Do not touch the
7866 	 * dynamic bits.
7867 	 */
7868 	u32 mask =
7869 		SECONDARY_EXEC_SHADOW_VMCS |
7870 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
7871 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7872 		SECONDARY_EXEC_DESC;
7873 
7874 	u32 cur_ctl = secondary_exec_controls_get(vmx);
7875 
7876 	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7877 }
7878 
7879 /*
7880  * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
7881  * (indicating "allowed-1") if they are supported in the guest's CPUID.
7882  */
nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu * vcpu)7883 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
7884 {
7885 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7886 	struct kvm_cpuid_entry2 *entry;
7887 
7888 	vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7889 	vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7890 
7891 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {		\
7892 	if (entry && (entry->_reg & (_cpuid_mask)))			\
7893 		vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);	\
7894 } while (0)
7895 
7896 	entry = kvm_find_cpuid_entry(vcpu, 0x1);
7897 	cr4_fixed1_update(X86_CR4_VME,        edx, feature_bit(VME));
7898 	cr4_fixed1_update(X86_CR4_PVI,        edx, feature_bit(VME));
7899 	cr4_fixed1_update(X86_CR4_TSD,        edx, feature_bit(TSC));
7900 	cr4_fixed1_update(X86_CR4_DE,         edx, feature_bit(DE));
7901 	cr4_fixed1_update(X86_CR4_PSE,        edx, feature_bit(PSE));
7902 	cr4_fixed1_update(X86_CR4_PAE,        edx, feature_bit(PAE));
7903 	cr4_fixed1_update(X86_CR4_MCE,        edx, feature_bit(MCE));
7904 	cr4_fixed1_update(X86_CR4_PGE,        edx, feature_bit(PGE));
7905 	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, feature_bit(FXSR));
7906 	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
7907 	cr4_fixed1_update(X86_CR4_VMXE,       ecx, feature_bit(VMX));
7908 	cr4_fixed1_update(X86_CR4_SMXE,       ecx, feature_bit(SMX));
7909 	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, feature_bit(PCID));
7910 	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, feature_bit(XSAVE));
7911 
7912 	entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 0);
7913 	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, feature_bit(FSGSBASE));
7914 	cr4_fixed1_update(X86_CR4_SMEP,       ebx, feature_bit(SMEP));
7915 	cr4_fixed1_update(X86_CR4_SMAP,       ebx, feature_bit(SMAP));
7916 	cr4_fixed1_update(X86_CR4_PKE,        ecx, feature_bit(PKU));
7917 	cr4_fixed1_update(X86_CR4_UMIP,       ecx, feature_bit(UMIP));
7918 	cr4_fixed1_update(X86_CR4_LA57,       ecx, feature_bit(LA57));
7919 	cr4_fixed1_update(X86_CR4_CET,	      ecx, feature_bit(SHSTK));
7920 	cr4_fixed1_update(X86_CR4_CET,	      edx, feature_bit(IBT));
7921 
7922 	entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
7923 	cr4_fixed1_update(X86_CR4_LAM_SUP,    eax, feature_bit(LAM));
7924 
7925 #undef cr4_fixed1_update
7926 }
7927 
update_intel_pt_cfg(struct kvm_vcpu * vcpu)7928 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7929 {
7930 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7931 	struct kvm_cpuid_entry2 *best = NULL;
7932 	int i;
7933 
7934 	for (i = 0; i < PT_CPUID_LEAVES; i++) {
7935 		best = kvm_find_cpuid_entry_index(vcpu, 0x14, i);
7936 		if (!best)
7937 			return;
7938 		vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7939 		vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7940 		vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7941 		vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7942 	}
7943 
7944 	/* Get the number of configurable Address Ranges for filtering */
7945 	vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps,
7946 						PT_CAP_num_address_ranges);
7947 
7948 	/* Initialize and clear the no dependency bits */
7949 	vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7950 			RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC |
7951 			RTIT_CTL_BRANCH_EN);
7952 
7953 	/*
7954 	 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise
7955 	 * will inject an #GP
7956 	 */
7957 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7958 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7959 
7960 	/*
7961 	 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
7962 	 * PSBFreq can be set
7963 	 */
7964 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7965 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7966 				RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
7967 
7968 	/*
7969 	 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set
7970 	 */
7971 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7972 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7973 					      RTIT_CTL_MTC_RANGE);
7974 
7975 	/* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
7976 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7977 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7978 							RTIT_CTL_PTW_EN);
7979 
7980 	/* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
7981 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7982 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7983 
7984 	/* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
7985 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7986 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7987 
7988 	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
7989 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7990 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7991 
7992 	/* unmask address range configure area */
7993 	for (i = 0; i < vmx->pt_desc.num_address_ranges; i++)
7994 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7995 }
7996 
vmx_vcpu_after_set_cpuid(struct kvm_vcpu * vcpu)7997 void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
7998 {
7999 	struct vcpu_vmx *vmx = to_vmx(vcpu);
8000 
8001 	/*
8002 	 * XSAVES is effectively enabled if and only if XSAVE is also exposed
8003 	 * to the guest.  XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
8004 	 * set if and only if XSAVE is supported.
8005 	 */
8006 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE))
8007 		guest_cpu_cap_clear(vcpu, X86_FEATURE_XSAVES);
8008 
8009 	vmx_setup_uret_msrs(vmx);
8010 
8011 	if (cpu_has_secondary_exec_ctrls())
8012 		vmcs_set_secondary_exec_control(vmx,
8013 						vmx_secondary_exec_control(vmx));
8014 
8015 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
8016 		vmx->msr_ia32_feature_control_valid_bits |=
8017 			FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
8018 			FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
8019 	else
8020 		vmx->msr_ia32_feature_control_valid_bits &=
8021 			~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
8022 			  FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
8023 
8024 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
8025 		nested_vmx_cr_fixed1_bits_update(vcpu);
8026 
8027 	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
8028 			guest_cpu_cap_has(vcpu, X86_FEATURE_INTEL_PT))
8029 		update_intel_pt_cfg(vcpu);
8030 
8031 	if (boot_cpu_has(X86_FEATURE_RTM)) {
8032 		struct vmx_uret_msr *msr;
8033 		msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
8034 		if (msr) {
8035 			bool enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_RTM);
8036 			vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
8037 		}
8038 	}
8039 
8040 	set_cr4_guest_host_mask(vmx);
8041 
8042 	vmx_write_encls_bitmap(vcpu, NULL);
8043 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX))
8044 		vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
8045 	else
8046 		vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
8047 
8048 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC))
8049 		vmx->msr_ia32_feature_control_valid_bits |=
8050 			FEAT_CTL_SGX_LC_ENABLED;
8051 	else
8052 		vmx->msr_ia32_feature_control_valid_bits &=
8053 			~FEAT_CTL_SGX_LC_ENABLED;
8054 
8055 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
8056 	vmx_update_exception_bitmap(vcpu);
8057 }
8058 
vmx_get_perf_capabilities(void)8059 static __init u64 vmx_get_perf_capabilities(void)
8060 {
8061 	u64 perf_cap = PERF_CAP_FW_WRITES;
8062 	u64 host_perf_cap = 0;
8063 
8064 	if (!enable_pmu)
8065 		return 0;
8066 
8067 	if (boot_cpu_has(X86_FEATURE_PDCM))
8068 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
8069 
8070 	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR) &&
8071 	    !enable_mediated_pmu) {
8072 		x86_perf_get_lbr(&vmx_lbr_caps);
8073 
8074 		/*
8075 		 * KVM requires LBR callstack support, as the overhead due to
8076 		 * context switching LBRs without said support is too high.
8077 		 * See intel_pmu_create_guest_lbr_event() for more info.
8078 		 */
8079 		if (!vmx_lbr_caps.has_callstack)
8080 			memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
8081 		else if (vmx_lbr_caps.nr)
8082 			perf_cap |= host_perf_cap & PERF_CAP_LBR_FMT;
8083 	}
8084 
8085 	if (vmx_pebs_supported()) {
8086 		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
8087 
8088 		/*
8089 		 * Disallow adaptive PEBS as it is functionally broken, can be
8090 		 * used by the guest to read *host* LBRs, and can be used to
8091 		 * bypass userspace event filters.  To correctly and safely
8092 		 * support adaptive PEBS, KVM needs to:
8093 		 *
8094 		 * 1. Account for the ADAPTIVE flag when (re)programming fixed
8095 		 *    counters.
8096 		 *
8097 		 * 2. Gain support from perf (or take direct control of counter
8098 		 *    programming) to support events without adaptive PEBS
8099 		 *    enabled for the hardware counter.
8100 		 *
8101 		 * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
8102 		 *    adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
8103 		 *
8104 		 * 4. Document which PMU events are effectively exposed to the
8105 		 *    guest via adaptive PEBS, and make adaptive PEBS mutually
8106 		 *    exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
8107 		 */
8108 		perf_cap &= ~PERF_CAP_PEBS_BASELINE;
8109 	}
8110 
8111 	return perf_cap;
8112 }
8113 
vmx_set_cpu_caps(void)8114 static __init void vmx_set_cpu_caps(void)
8115 {
8116 	kvm_initialize_cpu_caps();
8117 
8118 	/* CPUID 0x1 */
8119 	if (nested)
8120 		kvm_cpu_cap_set(X86_FEATURE_VMX);
8121 
8122 	/* CPUID 0x7 */
8123 	if (kvm_mpx_supported())
8124 		kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
8125 	if (!cpu_has_vmx_invpcid())
8126 		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
8127 	if (vmx_pt_mode_is_host_guest())
8128 		kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
8129 	if (vmx_pebs_supported()) {
8130 		kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
8131 		kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
8132 	}
8133 
8134 	if (!enable_pmu)
8135 		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
8136 	kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
8137 
8138 	if (!enable_sgx) {
8139 		kvm_cpu_cap_clear(X86_FEATURE_SGX);
8140 		kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
8141 		kvm_cpu_cap_clear(X86_FEATURE_SGX1);
8142 		kvm_cpu_cap_clear(X86_FEATURE_SGX2);
8143 		kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA);
8144 	}
8145 
8146 	if (vmx_umip_emulated())
8147 		kvm_cpu_cap_set(X86_FEATURE_UMIP);
8148 
8149 	/* CPUID 0xD.1 */
8150 	if (!cpu_has_vmx_xsaves())
8151 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
8152 
8153 	/* CPUID 0x80000001 and 0x7 (RDPID) */
8154 	if (!cpu_has_vmx_rdtscp()) {
8155 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
8156 		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
8157 	}
8158 
8159 	if (cpu_has_vmx_waitpkg())
8160 		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
8161 
8162 	/*
8163 	 * Disable CET if unrestricted_guest is unsupported as KVM doesn't
8164 	 * enforce CET HW behaviors in emulator. On platforms with
8165 	 * VMX_BASIC[bit56] == 0, inject #CP at VMX entry with error code
8166 	 * fails, so disable CET in this case too.
8167 	 */
8168 	if (!enable_cet || !enable_unrestricted_guest ||
8169 	    !cpu_has_vmx_basic_no_hw_errcode_cc()) {
8170 		kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
8171 		kvm_cpu_cap_clear(X86_FEATURE_IBT);
8172 	}
8173 
8174 	kvm_setup_xss_caps();
8175 	kvm_finalize_cpu_caps();
8176 }
8177 
vmx_is_io_intercepted(struct kvm_vcpu * vcpu,struct x86_instruction_info * info,unsigned long * exit_qualification)8178 static bool vmx_is_io_intercepted(struct kvm_vcpu *vcpu,
8179 				  struct x86_instruction_info *info,
8180 				  unsigned long *exit_qualification)
8181 {
8182 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8183 	unsigned short port;
8184 	int size;
8185 	bool imm;
8186 
8187 	/*
8188 	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
8189 	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
8190 	 * control.
8191 	 *
8192 	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
8193 	 */
8194 	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
8195 		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
8196 
8197 	if (info->intercept == x86_intercept_in ||
8198 	    info->intercept == x86_intercept_ins) {
8199 		port = info->src_val;
8200 		size = info->dst_bytes;
8201 		imm  = info->src_type == OP_IMM;
8202 	} else {
8203 		port = info->dst_val;
8204 		size = info->src_bytes;
8205 		imm  = info->dst_type == OP_IMM;
8206 	}
8207 
8208 
8209 	*exit_qualification = ((unsigned long)port << 16) | (size - 1);
8210 
8211 	if (info->intercept == x86_intercept_ins ||
8212 	    info->intercept == x86_intercept_outs)
8213 		*exit_qualification |= BIT(4);
8214 
8215 	if (info->rep_prefix)
8216 		*exit_qualification |= BIT(5);
8217 
8218 	if (imm)
8219 		*exit_qualification |= BIT(6);
8220 
8221 	return nested_vmx_check_io_bitmaps(vcpu, port, size);
8222 }
8223 
vmx_check_intercept(struct kvm_vcpu * vcpu,struct x86_instruction_info * info,enum x86_intercept_stage stage,struct x86_exception * exception)8224 int vmx_check_intercept(struct kvm_vcpu *vcpu,
8225 			struct x86_instruction_info *info,
8226 			enum x86_intercept_stage stage,
8227 			struct x86_exception *exception)
8228 {
8229 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8230 	unsigned long exit_qualification = 0;
8231 	u32 vm_exit_reason;
8232 	u64 exit_insn_len;
8233 
8234 	switch (info->intercept) {
8235 	case x86_intercept_rdpid:
8236 		/*
8237 		 * RDPID causes #UD if not enabled through secondary execution
8238 		 * controls (ENABLE_RDTSCP).  Note, the implicit MSR access to
8239 		 * TSC_AUX is NOT subject to interception, i.e. checking only
8240 		 * the dedicated execution control is architecturally correct.
8241 		 */
8242 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
8243 			exception->vector = UD_VECTOR;
8244 			exception->error_code_valid = false;
8245 			return X86EMUL_PROPAGATE_FAULT;
8246 		}
8247 		return X86EMUL_CONTINUE;
8248 
8249 	case x86_intercept_in:
8250 	case x86_intercept_ins:
8251 	case x86_intercept_out:
8252 	case x86_intercept_outs:
8253 		if (!vmx_is_io_intercepted(vcpu, info, &exit_qualification))
8254 			return X86EMUL_CONTINUE;
8255 
8256 		vm_exit_reason = EXIT_REASON_IO_INSTRUCTION;
8257 		break;
8258 
8259 	case x86_intercept_lgdt:
8260 	case x86_intercept_lidt:
8261 	case x86_intercept_lldt:
8262 	case x86_intercept_ltr:
8263 	case x86_intercept_sgdt:
8264 	case x86_intercept_sidt:
8265 	case x86_intercept_sldt:
8266 	case x86_intercept_str:
8267 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
8268 			return X86EMUL_CONTINUE;
8269 
8270 		if (info->intercept == x86_intercept_lldt ||
8271 		    info->intercept == x86_intercept_ltr ||
8272 		    info->intercept == x86_intercept_sldt ||
8273 		    info->intercept == x86_intercept_str)
8274 			vm_exit_reason = EXIT_REASON_LDTR_TR;
8275 		else
8276 			vm_exit_reason = EXIT_REASON_GDTR_IDTR;
8277 		/*
8278 		 * FIXME: Decode the ModR/M to generate the correct exit
8279 		 *        qualification for memory operands.
8280 		 */
8281 		break;
8282 
8283 	case x86_intercept_hlt:
8284 		if (!nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING))
8285 			return X86EMUL_CONTINUE;
8286 
8287 		vm_exit_reason = EXIT_REASON_HLT;
8288 		break;
8289 
8290 	case x86_intercept_pause:
8291 		/*
8292 		 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
8293 		 * with vanilla NOPs in the emulator.  Apply the interception
8294 		 * check only to actual PAUSE instructions.  Don't check
8295 		 * PAUSE-loop-exiting, software can't expect a given PAUSE to
8296 		 * exit, i.e. KVM is within its rights to allow L2 to execute
8297 		 * the PAUSE.
8298 		 */
8299 		if ((info->rep_prefix != REPE_PREFIX) ||
8300 		    !nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING))
8301 			return X86EMUL_CONTINUE;
8302 
8303 		vm_exit_reason = EXIT_REASON_PAUSE_INSTRUCTION;
8304 		break;
8305 
8306 	/* TODO: check more intercepts... */
8307 	default:
8308 		return X86EMUL_UNHANDLEABLE;
8309 	}
8310 
8311 	exit_insn_len = abs_diff((s64)info->next_rip, (s64)info->rip);
8312 	if (!exit_insn_len || exit_insn_len > X86_MAX_INSTRUCTION_LENGTH)
8313 		return X86EMUL_UNHANDLEABLE;
8314 
8315 	__nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification,
8316 			    exit_insn_len);
8317 	return X86EMUL_INTERCEPTED;
8318 }
8319 
8320 #ifdef CONFIG_X86_64
8321 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */
u64_shl_div_u64(u64 a,unsigned int shift,u64 divisor,u64 * result)8322 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
8323 				  u64 divisor, u64 *result)
8324 {
8325 	u64 low = a << shift, high = a >> (64 - shift);
8326 
8327 	/* To avoid the overflow on divq */
8328 	if (high >= divisor)
8329 		return 1;
8330 
8331 	/* Low hold the result, high hold rem which is discarded */
8332 	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
8333 	    "rm" (divisor), "0" (low), "1" (high));
8334 	*result = low;
8335 
8336 	return 0;
8337 }
8338 
vmx_set_hv_timer(struct kvm_vcpu * vcpu,u64 guest_deadline_tsc,bool * expired)8339 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
8340 		     bool *expired)
8341 {
8342 	struct vcpu_vmx *vmx;
8343 	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
8344 	struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
8345 
8346 	vmx = to_vmx(vcpu);
8347 	tscl = rdtsc();
8348 	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
8349 	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
8350 	lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
8351 						    ktimer->timer_advance_ns);
8352 
8353 	if (delta_tsc > lapic_timer_advance_cycles)
8354 		delta_tsc -= lapic_timer_advance_cycles;
8355 	else
8356 		delta_tsc = 0;
8357 
8358 	/* Convert to host delta tsc if tsc scaling is enabled */
8359 	if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
8360 	    delta_tsc && u64_shl_div_u64(delta_tsc,
8361 				kvm_caps.tsc_scaling_ratio_frac_bits,
8362 				vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
8363 		return -ERANGE;
8364 
8365 	/*
8366 	 * If the delta tsc can't fit in the 32 bit after the multi shift,
8367 	 * we can't use the preemption timer.
8368 	 * It's possible that it fits on later vmentries, but checking
8369 	 * on every vmentry is costly so we just use an hrtimer.
8370 	 */
8371 	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
8372 		return -ERANGE;
8373 
8374 	vmx->hv_deadline_tsc = tscl + delta_tsc;
8375 	*expired = !delta_tsc;
8376 	return 0;
8377 }
8378 
vmx_cancel_hv_timer(struct kvm_vcpu * vcpu)8379 void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
8380 {
8381 	to_vmx(vcpu)->hv_deadline_tsc = -1;
8382 }
8383 #endif
8384 
vmx_update_cpu_dirty_logging(struct kvm_vcpu * vcpu)8385 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
8386 {
8387 	struct vcpu_vmx *vmx = to_vmx(vcpu);
8388 
8389 	if (WARN_ON_ONCE(!enable_pml))
8390 		return;
8391 
8392 	guard(vmx_vmcs01)(vcpu);
8393 
8394 	/*
8395 	 * Note, nr_memslots_dirty_logging can be changed concurrent with this
8396 	 * code, but in that case another update request will be made and so
8397 	 * the guest will never run with a stale PML value.
8398 	 */
8399 	if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8400 		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8401 	else
8402 		secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8403 }
8404 
vmx_setup_mce(struct kvm_vcpu * vcpu)8405 void vmx_setup_mce(struct kvm_vcpu *vcpu)
8406 {
8407 	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8408 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
8409 			FEAT_CTL_LMCE_ENABLED;
8410 	else
8411 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
8412 			~FEAT_CTL_LMCE_ENABLED;
8413 }
8414 
8415 #ifdef CONFIG_KVM_SMM
vmx_smi_allowed(struct kvm_vcpu * vcpu,bool for_injection)8416 int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
8417 {
8418 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
8419 	if (vcpu->arch.nested_run_pending)
8420 		return -EBUSY;
8421 	return !is_smm(vcpu);
8422 }
8423 
vmx_enter_smm(struct kvm_vcpu * vcpu,union kvm_smram * smram)8424 int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
8425 {
8426 	struct vcpu_vmx *vmx = to_vmx(vcpu);
8427 
8428 	/*
8429 	 * TODO: Implement custom flows for forcing the vCPU out/in of L2 on
8430 	 * SMI and RSM.  Using the common VM-Exit + VM-Enter routines is wrong
8431 	 * SMI and RSM only modify state that is saved and restored via SMRAM.
8432 	 * E.g. most MSRs are left untouched, but many are modified by VM-Exit
8433 	 * and VM-Enter, and thus L2's values may be corrupted on SMI+RSM.
8434 	 */
8435 	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8436 	if (vmx->nested.smm.guest_mode)
8437 		nested_vmx_vmexit(vcpu, -1, 0, 0);
8438 
8439 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
8440 	vmx->nested.vmxon = false;
8441 	vmx_clear_hlt(vcpu);
8442 	return 0;
8443 }
8444 
vmx_leave_smm(struct kvm_vcpu * vcpu,const union kvm_smram * smram)8445 int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
8446 {
8447 	struct vcpu_vmx *vmx = to_vmx(vcpu);
8448 	int ret;
8449 
8450 	if (vmx->nested.smm.vmxon) {
8451 		vmx->nested.vmxon = true;
8452 		vmx->nested.smm.vmxon = false;
8453 	}
8454 
8455 	if (vmx->nested.smm.guest_mode) {
8456 		/* Triple fault if the state is invalid.  */
8457 		if (nested_vmx_check_restored_vmcs12(vcpu) < 0)
8458 			return 1;
8459 
8460 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
8461 		if (ret != NVMX_VMENTRY_SUCCESS)
8462 			return 1;
8463 
8464 		vcpu->arch.nested_run_pending = KVM_NESTED_RUN_PENDING;
8465 		vmx->nested.smm.guest_mode = false;
8466 	}
8467 	return 0;
8468 }
8469 
vmx_enable_smi_window(struct kvm_vcpu * vcpu)8470 void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
8471 {
8472 	/* RSM will cause a vmexit anyway.  */
8473 }
8474 #endif
8475 
vmx_apic_init_signal_blocked(struct kvm_vcpu * vcpu)8476 bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
8477 {
8478 	return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
8479 }
8480 
vmx_migrate_timers(struct kvm_vcpu * vcpu)8481 void vmx_migrate_timers(struct kvm_vcpu *vcpu)
8482 {
8483 	if (is_guest_mode(vcpu)) {
8484 		struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
8485 
8486 		if (hrtimer_try_to_cancel(timer) == 1)
8487 			hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
8488 	}
8489 }
8490 
vmx_hardware_unsetup(void)8491 void vmx_hardware_unsetup(void)
8492 {
8493 	kvm_set_posted_intr_wakeup_handler(NULL);
8494 
8495 	if (nested)
8496 		nested_vmx_hardware_unsetup();
8497 }
8498 
vmx_vm_destroy(struct kvm * kvm)8499 void vmx_vm_destroy(struct kvm *kvm)
8500 {
8501 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
8502 
8503 	free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
8504 }
8505 
8506 /*
8507  * Note, the SDM states that the linear address is masked *after* the modified
8508  * canonicality check, whereas KVM masks (untags) the address and then performs
8509  * a "normal" canonicality check.  Functionally, the two methods are identical,
8510  * and when the masking occurs relative to the canonicality check isn't visible
8511  * to software, i.e. KVM's behavior doesn't violate the SDM.
8512  */
vmx_get_untagged_addr(struct kvm_vcpu * vcpu,gva_t gva,unsigned int flags)8513 gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
8514 {
8515 	int lam_bit;
8516 	unsigned long cr3_bits;
8517 
8518 	if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
8519 		return gva;
8520 
8521 	if (!is_64_bit_mode(vcpu))
8522 		return gva;
8523 
8524 	/*
8525 	 * Bit 63 determines if the address should be treated as user address
8526 	 * or a supervisor address.
8527 	 */
8528 	if (!(gva & BIT_ULL(63))) {
8529 		cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
8530 		if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
8531 			return gva;
8532 
8533 		/* LAM_U48 is ignored if LAM_U57 is set. */
8534 		lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
8535 	} else {
8536 		if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
8537 			return gva;
8538 
8539 		lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
8540 	}
8541 
8542 	/*
8543 	 * Untag the address by sign-extending the lam_bit, but NOT to bit 63.
8544 	 * Bit 63 is retained from the raw virtual address so that untagging
8545 	 * doesn't change a user access to a supervisor access, and vice versa.
8546 	 */
8547 	return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
8548 }
8549 
vmx_handle_intel_pt_intr(void)8550 static unsigned int vmx_handle_intel_pt_intr(void)
8551 {
8552 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
8553 
8554 	/* '0' on failure so that the !PT case can use a RET0 static call. */
8555 	if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
8556 		return 0;
8557 
8558 	kvm_make_request(KVM_REQ_PMI, vcpu);
8559 	__set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
8560 		  (unsigned long *)&vcpu->arch.pmu.global_status);
8561 	return 1;
8562 }
8563 
vmx_setup_user_return_msrs(void)8564 static __init void vmx_setup_user_return_msrs(void)
8565 {
8566 
8567 	/*
8568 	 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
8569 	 * will emulate SYSCALL in legacy mode if the vendor string in guest
8570 	 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
8571 	 * support this emulation, MSR_STAR is included in the list for i386,
8572 	 * but is never loaded into hardware.  MSR_CSTAR is also never loaded
8573 	 * into hardware and is here purely for emulation purposes.
8574 	 */
8575 	const u32 vmx_uret_msrs_list[] = {
8576 	#ifdef CONFIG_X86_64
8577 		MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
8578 	#endif
8579 		MSR_EFER, MSR_TSC_AUX, MSR_STAR,
8580 		MSR_IA32_TSX_CTRL,
8581 	};
8582 	int i;
8583 
8584 	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
8585 
8586 	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
8587 		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
8588 }
8589 
vmx_setup_me_spte_mask(void)8590 static void __init vmx_setup_me_spte_mask(void)
8591 {
8592 	u64 me_mask = 0;
8593 
8594 	/*
8595 	 * On pre-MKTME system, boot_cpu_data.x86_phys_bits equals to
8596 	 * kvm_host.maxphyaddr.  On MKTME and/or TDX capable systems,
8597 	 * boot_cpu_data.x86_phys_bits holds the actual physical address
8598 	 * w/o the KeyID bits, and kvm_host.maxphyaddr equals to
8599 	 * MAXPHYADDR reported by CPUID.  Those bits between are KeyID bits.
8600 	 */
8601 	if (boot_cpu_data.x86_phys_bits != kvm_host.maxphyaddr)
8602 		me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
8603 				    kvm_host.maxphyaddr - 1);
8604 
8605 	/*
8606 	 * Unlike SME, host kernel doesn't support setting up any
8607 	 * MKTME KeyID on Intel platforms.  No memory encryption
8608 	 * bits should be included into the SPTE.
8609 	 */
8610 	kvm_mmu_set_me_spte_mask(0, me_mask);
8611 }
8612 
vmx_hardware_setup(void)8613 __init int vmx_hardware_setup(void)
8614 {
8615 	unsigned long host_bndcfgs;
8616 	struct desc_ptr dt;
8617 	int r;
8618 
8619 	store_idt(&dt);
8620 	host_idt_base = dt.address;
8621 
8622 	vmx_setup_user_return_msrs();
8623 
8624 	if (boot_cpu_has(X86_FEATURE_MPX)) {
8625 		rdmsrq(MSR_IA32_BNDCFGS, host_bndcfgs);
8626 		WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
8627 	}
8628 
8629 	if (!cpu_has_vmx_mpx())
8630 		kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
8631 					     XFEATURE_MASK_BNDCSR);
8632 
8633 	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
8634 	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
8635 		enable_vpid = 0;
8636 
8637 	if (!cpu_has_vmx_ept() ||
8638 	    !cpu_has_vmx_ept_4levels() ||
8639 	    !cpu_has_vmx_ept_mt_wb() ||
8640 	    !cpu_has_vmx_invept_global())
8641 		enable_ept = 0;
8642 
8643 	if (!cpu_has_load_cet_ctrl())
8644 		enable_cet = 0;
8645 
8646 	/* NX support is required for shadow paging. */
8647 	if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
8648 		pr_err_ratelimited("NX (Execute Disable) not supported\n");
8649 		return -EOPNOTSUPP;
8650 	}
8651 
8652 	/*
8653 	 * Shadow paging doesn't have a (further) performance penalty
8654 	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
8655 	 * by default
8656 	 */
8657 	if (!enable_ept)
8658 		allow_smaller_maxphyaddr = true;
8659 
8660 	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
8661 		enable_ept_ad_bits = 0;
8662 
8663 	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
8664 		enable_unrestricted_guest = 0;
8665 
8666 	if (!cpu_has_vmx_flexpriority())
8667 		flexpriority_enabled = 0;
8668 
8669 	if (!cpu_has_virtual_nmis())
8670 		enable_vnmi = 0;
8671 
8672 #ifdef CONFIG_X86_SGX_KVM
8673 	if (!cpu_has_vmx_encls_vmexit())
8674 		enable_sgx = false;
8675 #endif
8676 
8677 	/*
8678 	 * set_apic_access_page_addr() is used to reload apic access
8679 	 * page upon invalidation.  No need to do anything if not
8680 	 * using the APIC_ACCESS_ADDR VMCS field.
8681 	 */
8682 	if (!flexpriority_enabled)
8683 		vt_x86_ops.set_apic_access_page_addr = NULL;
8684 
8685 	if (!cpu_has_vmx_tpr_shadow())
8686 		vt_x86_ops.update_cr8_intercept = NULL;
8687 
8688 #if IS_ENABLED(CONFIG_HYPERV)
8689 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
8690 	    && enable_ept) {
8691 		vt_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
8692 		vt_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
8693 	}
8694 #endif
8695 
8696 	if (!cpu_has_vmx_ple()) {
8697 		ple_gap = 0;
8698 		ple_window = 0;
8699 		ple_window_grow = 0;
8700 		ple_window_max = 0;
8701 		ple_window_shrink = 0;
8702 	}
8703 
8704 	if (!cpu_has_vmx_apicv())
8705 		enable_apicv = 0;
8706 	if (!enable_apicv)
8707 		vt_x86_ops.sync_pir_to_irr = NULL;
8708 
8709 	if (!enable_apicv || !cpu_has_vmx_ipiv())
8710 		enable_ipiv = false;
8711 
8712 	if (cpu_has_vmx_tsc_scaling())
8713 		kvm_caps.has_tsc_control = true;
8714 
8715 	kvm_caps.max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
8716 	kvm_caps.tsc_scaling_ratio_frac_bits = 48;
8717 	kvm_caps.has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
8718 	kvm_caps.has_notify_vmexit = cpu_has_notify_vmexit();
8719 
8720 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
8721 
8722 	if (enable_ept)
8723 		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
8724 				      cpu_has_vmx_ept_execute_only());
8725 	else
8726 		vt_x86_ops.get_mt_mask = NULL;
8727 
8728 	/*
8729 	 * Setup shadow_me_value/shadow_me_mask to include MKTME KeyID
8730 	 * bits to shadow_zero_check.
8731 	 */
8732 	vmx_setup_me_spte_mask();
8733 
8734 	kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
8735 			  ept_caps_to_lpage_level(vmx_capability.ept));
8736 
8737 	/*
8738 	 * Only enable PML when hardware supports PML feature, and both EPT
8739 	 * and EPT A/D bit features are enabled -- PML depends on them to work.
8740 	 */
8741 	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
8742 		enable_pml = 0;
8743 
8744 	if (!cpu_has_vmx_preemption_timer())
8745 		enable_preemption_timer = false;
8746 
8747 	if (enable_preemption_timer) {
8748 		u64 use_timer_freq = 5000ULL * 1000 * 1000;
8749 
8750 		cpu_preemption_timer_multi =
8751 			vmx_misc_preemption_timer_rate(vmcs_config.misc);
8752 
8753 		if (tsc_khz)
8754 			use_timer_freq = (u64)tsc_khz * 1000;
8755 		use_timer_freq >>= cpu_preemption_timer_multi;
8756 
8757 		/*
8758 		 * KVM "disables" the preemption timer by setting it to its max
8759 		 * value.  Don't use the timer if it might cause spurious exits
8760 		 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
8761 		 */
8762 		if (use_timer_freq > 0xffffffffu / 10)
8763 			enable_preemption_timer = false;
8764 	}
8765 
8766 	if (!enable_preemption_timer) {
8767 		vt_x86_ops.set_hv_timer = NULL;
8768 		vt_x86_ops.cancel_hv_timer = NULL;
8769 	}
8770 
8771 	kvm_caps.supported_mce_cap |= MCG_LMCE_P;
8772 	kvm_caps.supported_mce_cap |= MCG_CMCI_P;
8773 
8774 	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
8775 		return -EINVAL;
8776 	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
8777 		pt_mode = PT_MODE_SYSTEM;
8778 	if (pt_mode == PT_MODE_HOST_GUEST)
8779 		vt_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
8780 	else
8781 		vt_init_ops.handle_intel_pt_intr = NULL;
8782 
8783 	setup_default_sgx_lepubkeyhash();
8784 
8785 	vmx_set_cpu_caps();
8786 
8787 	/*
8788 	 * Configure nested capabilities after core CPU capabilities so that
8789 	 * nested support can be conditional on base support, e.g. so that KVM
8790 	 * can hide/show features based on kvm_cpu_cap_has().
8791 	 */
8792 	if (nested) {
8793 		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
8794 		if (r)
8795 			return r;
8796 	}
8797 
8798 	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
8799 
8800 	/*
8801 	 * On Intel CPUs that lack self-snoop feature, letting the guest control
8802 	 * memory types may result in unexpected behavior. So always ignore guest
8803 	 * PAT on those CPUs and map VM as writeback, not allowing userspace to
8804 	 * disable the quirk.
8805 	 *
8806 	 * On certain Intel CPUs (e.g. SPR, ICX), though self-snoop feature is
8807 	 * supported, UC is slow enough to cause issues with some older guests (e.g.
8808 	 * an old version of bochs driver uses ioremap() instead of ioremap_wc() to
8809 	 * map the video RAM, causing wayland desktop to fail to get started
8810 	 * correctly). To avoid breaking those older guests that rely on KVM to force
8811 	 * memory type to WB, provide KVM_X86_QUIRK_IGNORE_GUEST_PAT to preserve the
8812 	 * safer (for performance) default behavior.
8813 	 *
8814 	 * On top of this, non-coherent DMA devices need the guest to flush CPU
8815 	 * caches properly.  This also requires honoring guest PAT, and is forced
8816 	 * independent of the quirk in vmx_ignore_guest_pat().
8817 	 */
8818 	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
8819 		kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
8820 
8821 	kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
8822 
8823 	return 0;
8824 }
8825 
vmx_exit(void)8826 void vmx_exit(void)
8827 {
8828 	allow_smaller_maxphyaddr = false;
8829 
8830 	vmx_cleanup_l1d_flush();
8831 
8832 	kvm_x86_vendor_exit();
8833 }
8834 
vmx_init(void)8835 int __init vmx_init(void)
8836 {
8837 	int r, cpu;
8838 
8839 	KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_vmx);
8840 
8841 	if (!kvm_is_vmx_supported())
8842 		return -EOPNOTSUPP;
8843 
8844 	/*
8845 	 * Note, VMCS and eVMCS configuration only touch VMX knobs/variables,
8846 	 * i.e. there's nothing to unwind if a later step fails.
8847 	 */
8848 	hv_init_evmcs();
8849 
8850 	/*
8851 	 * Parse the VMCS config and VMX capabilities before anything else, so
8852 	 * that the information is available to all setup flows.
8853 	 */
8854 	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
8855 		return -EIO;
8856 
8857 	r = kvm_x86_vendor_init(&vt_init_ops);
8858 	if (r)
8859 		return r;
8860 
8861 	/* Must be called after common x86 init so enable_ept is setup. */
8862 	r = vmx_setup_l1d_flush();
8863 	if (r)
8864 		goto err_l1d_flush;
8865 
8866 	for_each_possible_cpu(cpu) {
8867 		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
8868 
8869 		pi_init_cpu(cpu);
8870 	}
8871 
8872 	vmx_check_vmcs12_offsets();
8873 
8874 	return 0;
8875 
8876 err_l1d_flush:
8877 	kvm_x86_vendor_exit();
8878 	return r;
8879 }
8880