xref: /linux/arch/x86/kvm/svm/svm.c (revision e1914add2799225a87502051415fc5c32aeb02ae)
1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2 
3 #include <linux/kvm_host.h>
4 
5 #include "irq.h"
6 #include "mmu.h"
7 #include "kvm_cache_regs.h"
8 #include "x86.h"
9 #include "smm.h"
10 #include "cpuid.h"
11 #include "pmu.h"
12 
13 #include <linux/module.h>
14 #include <linux/mod_devicetable.h>
15 #include <linux/kernel.h>
16 #include <linux/vmalloc.h>
17 #include <linux/highmem.h>
18 #include <linux/amd-iommu.h>
19 #include <linux/sched.h>
20 #include <linux/trace_events.h>
21 #include <linux/slab.h>
22 #include <linux/hashtable.h>
23 #include <linux/objtool.h>
24 #include <linux/psp-sev.h>
25 #include <linux/file.h>
26 #include <linux/pagemap.h>
27 #include <linux/swap.h>
28 #include <linux/rwsem.h>
29 #include <linux/cc_platform.h>
30 #include <linux/smp.h>
31 #include <linux/string_choices.h>
32 #include <linux/mutex.h>
33 
34 #include <asm/apic.h>
35 #include <asm/msr.h>
36 #include <asm/perf_event.h>
37 #include <asm/tlbflush.h>
38 #include <asm/desc.h>
39 #include <asm/debugreg.h>
40 #include <asm/kvm_para.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/spec-ctrl.h>
43 #include <asm/cpu_device_id.h>
44 #include <asm/traps.h>
45 #include <asm/reboot.h>
46 #include <asm/fpu/api.h>
47 #include <asm/virt.h>
48 
49 #include <trace/events/ipi.h>
50 
51 #include "trace.h"
52 
53 #include "svm.h"
54 #include "svm_ops.h"
55 
56 #include "hyperv.h"
57 #include "kvm_onhyperv.h"
58 #include "svm_onhyperv.h"
59 
60 MODULE_AUTHOR("Qumranet");
61 MODULE_DESCRIPTION("KVM support for SVM (AMD-V) extensions");
62 MODULE_LICENSE("GPL");
63 
64 #ifdef MODULE
65 static const struct x86_cpu_id svm_cpu_id[] = {
66 	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
67 	{}
68 };
69 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
70 #endif
71 
72 #define SEG_TYPE_LDT 2
73 #define SEG_TYPE_BUSY_TSS16 3
74 
75 static bool erratum_383_found __read_mostly;
76 
77 /*
78  * Set osvw_len to higher value when updated Revision Guides
79  * are published and we know what the new status bits are
80  */
81 static uint64_t osvw_len = 4, osvw_status;
82 static DEFINE_SPINLOCK(osvw_lock);
83 
84 static DEFINE_PER_CPU(u64, current_tsc_ratio);
85 
86 /*
87  * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
88  * pause_filter_count: On processors that support Pause filtering (indicated
89  *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
90  *	count value. On VMRUN this value is loaded into an internal counter.
91  *	Each time a pause instruction is executed, this counter is decremented
92  *	until it reaches zero, at which time a #VMEXIT is generated if pause
93  *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
94  *	Intercept Filtering for more details.
95  *	This also indicates whether the PLE logic is enabled.
96  *
97  * pause_filter_thresh: In addition, some processor families support advanced
98  *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which bounds
99  *	the amount of time a guest is allowed to execute in a pause loop.
100  *	In this mode, a 16-bit pause filter threshold field is added in the
101  *	VMCB. The threshold value is a cycle count that is used to reset the
102  *	pause counter. As with simple pause filtering, VMRUN loads the pause
103  *	count value from VMCB into an internal counter. Then, on each pause
104  *	instruction the hardware checks the elapsed number of cycles since
105  *	the most recent pause instruction against the pause filter threshold.
106  *	If the elapsed cycle count is greater than the pause filter threshold,
107  *	then the internal pause count is reloaded from the VMCB and execution
108  *	continues. If the elapsed cycle count is less than the pause filter
109  *	threshold, then the internal pause count is decremented. If the count
110  *	value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
111  *	triggered. If advanced pause filtering is supported and pause filter
112  *	threshold field is set to zero, the filter will operate in the simpler,
113  *	count only mode.
114  */
115 
116 static unsigned short __ro_after_init pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
117 module_param(pause_filter_thresh, ushort, 0444);
118 
119 static unsigned short __ro_after_init pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
120 module_param(pause_filter_count, ushort, 0444);
121 
122 /* Default doubles per-vcpu window every exit. */
123 static unsigned short __ro_after_init pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
124 module_param(pause_filter_count_grow, ushort, 0444);
125 
126 /* Default resets per-vcpu window every exit to pause_filter_count. */
127 static unsigned short __ro_after_init pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
128 module_param(pause_filter_count_shrink, ushort, 0444);
129 
130 /* Default is to compute the maximum so we can never overflow. */
131 static unsigned short __ro_after_init pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
132 module_param(pause_filter_count_max, ushort, 0444);
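
/*
 * With the defaults above, each PAUSE-loop exit doubles the per-vCPU
 * window (clamped at pause_filter_count_max), and re-loading a vCPU that
 * was scheduled out resets the window to pause_filter_count; see
 * grow_ple_window(), shrink_ple_window() and svm_vcpu_load() below.
 */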
133 
134 /*
135  * Use nested page tables by default.  Note, NPT may get forced off by
136  * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
137  */
138 bool __ro_after_init npt_enabled = true;
139 module_param_named(npt, npt_enabled, bool, 0444);
140 
141 /* allow nested virtualization in KVM/SVM */
142 static int __ro_after_init nested = true;
143 module_param(nested, int, 0444);
144 
145 /* enable/disable Next RIP Save */
146 int __ro_after_init nrips = true;
147 module_param(nrips, int, 0444);
148 
149 /* enable/disable Virtual VMLOAD VMSAVE */
150 static int __ro_after_init vls = true;
151 module_param(vls, int, 0444);
152 
153 /* enable/disable Virtual GIF */
154 int __ro_after_init vgif = true;
155 module_param(vgif, int, 0444);
156 
157 /* enable/disable LBR virtualization */
158 int __ro_after_init lbrv = true;
159 module_param(lbrv, int, 0444);
160 
161 static int __ro_after_init tsc_scaling = true;
162 module_param(tsc_scaling, int, 0444);
163 
164 module_param(enable_device_posted_irqs, bool, 0444);
165 
166 bool __read_mostly dump_invalid_vmcb;
167 module_param(dump_invalid_vmcb, bool, 0644);
168 
169 
170 bool __ro_after_init intercept_smi = true;
171 module_param(intercept_smi, bool, 0444);
172 
173 bool __ro_after_init vnmi = true;
174 module_param(vnmi, bool, 0444);
175 
176 module_param(enable_mediated_pmu, bool, 0444);
177 
178 static bool __ro_after_init svm_gp_erratum_intercept = true;
179 
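/* "\x0f\xaa" is the two-byte RSM opcode, fed to the emulator on RSM intercepts. */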
180 static u8 rsm_ins_bytes[] = "\x0f\xaa";
181 
182 static unsigned long __read_mostly iopm_base;
183 
184 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
185 
186 static DEFINE_MUTEX(vmcb_dump_mutex);
187 
188 /*
189  * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
190  * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
191  *
192  * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
193  * defer the restoration of TSC_AUX until the CPU returns to userspace.
194  */
195 int tsc_aux_uret_slot __ro_after_init = -1;
196 
197 static int get_npt_level(void)
198 {
199 #ifdef CONFIG_X86_64
200 	return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
201 #else
202 	return PT32E_ROOT_LEVEL;
203 #endif
204 }
205 
206 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
207 {
208 	struct vcpu_svm *svm = to_svm(vcpu);
209 	u64 old_efer = vcpu->arch.efer;
210 	vcpu->arch.efer = efer;
211 
212 	if (!npt_enabled) {
213 		/* Shadow paging assumes NX to be available.  */
214 		efer |= EFER_NX;
215 
216 		if (!(efer & EFER_LMA))
217 			efer &= ~EFER_LME;
218 	}
219 
220 	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
221 		if (!(efer & EFER_SVME)) {
222 			/*
223 			 * Architecturally, clearing EFER.SVME while a guest is
224 			 * running yields undefined behavior, i.e. KVM can do
225 			 * literally anything.  Force the vCPU back into L1 as
226 			 * that is the safest option for KVM, but synthesize a
227 			 * triple fault (for L1!) so that KVM at least doesn't
228 			 * run random L2 code in the context of L1.  Do so if
229 			 * and only if the vCPU is actively running, e.g. to
230 			 * avoid false positives if userspace is stuffing state.
231 			 */
232 			if (is_guest_mode(vcpu) && vcpu->wants_to_run)
233 				kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
234 
235 			svm_leave_nested(vcpu);
236 			/* #GP intercept is still needed for vmware backdoor */
237 			if (!enable_vmware_backdoor)
238 				clr_exception_intercept(svm, GP_VECTOR);
239 
240 			/*
241 			 * Free the nested guest state, unless we are in SMM.
242 			 * In this case we will return to the nested guest
243 			 * as soon as we leave SMM.
244 			 */
245 			if (!is_smm(vcpu))
246 				svm_free_nested(svm);
247 
248 		} else {
249 			int ret = svm_allocate_nested(svm);
250 
251 			if (ret) {
252 				vcpu->arch.efer = old_efer;
253 				return ret;
254 			}
255 
256 			/*
257 			 * Never intercept #GP for SEV guests, KVM can't
258 			 * decrypt guest memory to workaround the erratum.
259 			 */
260 			if (svm_gp_erratum_intercept && !is_sev_guest(vcpu))
261 				set_exception_intercept(svm, GP_VECTOR);
262 		}
263 
264 		kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
265 	}
266 
267 	svm->vmcb->save.efer = efer | EFER_SVME;
268 	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
269 	return 0;
270 }
271 
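/*
 * The VMCB tracks a single interrupt-shadow flag, so KVM cannot tell STI
 * blocking apart from MOV SS blocking: reads below report both shadow
 * bits when the flag is set, and any non-zero mask sets the one bit.
 */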
272 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
273 {
274 	struct vcpu_svm *svm = to_svm(vcpu);
275 	u32 ret = 0;
276 
277 	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
278 		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
279 	return ret;
280 }
281 
282 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
283 {
284 	struct vcpu_svm *svm = to_svm(vcpu);
285 
286 	if (mask == 0)
287 		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
288 	else
289 		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
290 
291 }
292 
293 static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
294 					   int emul_type,
295 					   bool commit_side_effects)
296 {
297 	struct vcpu_svm *svm = to_svm(vcpu);
298 	unsigned long old_rflags;
299 
300 	/*
301 	 * SEV-ES does not expose the next RIP. The RIP update is controlled by
302 	 * the type of exit and the #VC handler in the guest.
303 	 */
304 	if (is_sev_es_guest(vcpu))
305 		goto done;
306 
307 	if (nrips && svm->vmcb->control.next_rip != 0) {
308 		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
309 		svm->next_rip = svm->vmcb->control.next_rip;
310 	}
311 
312 	if (!svm->next_rip) {
313 		if (unlikely(!commit_side_effects))
314 			old_rflags = svm->vmcb->save.rflags;
315 
316 		if (!kvm_emulate_instruction(vcpu, emul_type))
317 			return 0;
318 
319 		if (unlikely(!commit_side_effects))
320 			svm->vmcb->save.rflags = old_rflags;
321 	} else {
322 		kvm_rip_write(vcpu, svm->next_rip);
323 	}
324 
325 done:
326 	if (likely(commit_side_effects))
327 		svm_set_interrupt_shadow(vcpu, 0);
328 
329 	return 1;
330 }
331 
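/*
 * The wrapper below always commits side effects (e.g. clearing the
 * interrupt shadow); the commit_side_effects == false "dry run" mode is
 * reserved for svm_update_soft_interrupt_rip(), which must not clobber
 * RFLAGS or the interrupt shadow while probing for the next RIP.
 */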
332 static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
333 {
334 	return __svm_skip_emulated_instruction(vcpu, EMULTYPE_SKIP, true);
335 }
336 
337 static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu, u8 vector)
338 {
339 	const int emul_type = EMULTYPE_SKIP | EMULTYPE_SKIP_SOFT_INT |
340 			      EMULTYPE_SET_SOFT_INT_VECTOR(vector);
341 	unsigned long rip, old_rip = kvm_rip_read(vcpu);
342 	struct vcpu_svm *svm = to_svm(vcpu);
343 
344 	/*
345 	 * Due to architectural shortcomings, the CPU doesn't always provide
346 	 * NextRIP, e.g. if KVM intercepted an exception that occurred while
347 	 * the CPU was vectoring an INTO/INT3 in the guest.  Temporarily skip
348 	 * the instruction even if NextRIP is supported to acquire the next
349 	 * RIP so that it can be shoved into the NextRIP field, otherwise
350 	 * hardware will fail to advance guest RIP during event injection.
351 	 * Drop the exception/interrupt if emulation fails and effectively
352 	 * retry the instruction, it's the least awful option.  If NRIPS is
353 	 * in use, the skip must not commit any side effects such as clearing
354 	 * the interrupt shadow or RFLAGS.RF.
355 	 */
356 	if (!__svm_skip_emulated_instruction(vcpu, emul_type, !nrips))
357 		return -EIO;
358 
359 	rip = kvm_rip_read(vcpu);
360 
361 	/*
362 	 * Save the injection information, even when using next_rip, as the
363 	 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
364 	 * doesn't complete due to a VM-Exit occurring while the CPU is
365 	 * vectoring the event.  Decoding the instruction isn't guaranteed to
366 	 * work as there may be no backing instruction, e.g. if the event is
367 	 * being injected by L1 for L2, or if the guest is patching INT3 into
368 	 * a different instruction.
369 	 */
370 	svm->soft_int_injected = true;
371 	svm->soft_int_csbase = svm->vmcb->save.cs.base;
372 	svm->soft_int_old_rip = old_rip;
373 	svm->soft_int_next_rip = rip;
374 
375 	if (nrips)
376 		kvm_rip_write(vcpu, old_rip);
377 
378 	if (static_cpu_has(X86_FEATURE_NRIPS))
379 		svm->vmcb->control.next_rip = rip;
380 
381 	return 0;
382 }
383 
384 static void svm_inject_exception(struct kvm_vcpu *vcpu)
385 {
386 	struct kvm_queued_exception *ex = &vcpu->arch.exception;
387 	struct vcpu_svm *svm = to_svm(vcpu);
388 
389 	kvm_deliver_exception_payload(vcpu, ex);
390 
391 	if (kvm_exception_is_soft(ex->vector) &&
392 	    svm_update_soft_interrupt_rip(vcpu, ex->vector))
393 		return;
394 
395 	svm->vmcb->control.event_inj = ex->vector
396 		| SVM_EVTINJ_VALID
397 		| (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
398 		| SVM_EVTINJ_TYPE_EXEPT;
399 	svm->vmcb->control.event_inj_err = ex->error_code;
400 }
401 
402 static void svm_init_erratum_383(void)
403 {
404 	u64 val;
405 
406 	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
407 		return;
408 
409 	/* Use _safe variants to not break nested virtualization */
410 	if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
411 		return;
412 
413 	val |= (1ULL << 47);
414 
415 	native_write_msr_safe(MSR_AMD64_DC_CFG, val);
416 
417 	erratum_383_found = true;
418 }
419 
420 static void svm_init_osvw(struct kvm_vcpu *vcpu)
421 {
422 	/*
423 	 * Guests should see errata 400 and 415 as fixed (assuming that
424 	 * HLT and IO instructions are intercepted).
425 	 */
426 	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
427 	vcpu->arch.osvw.status = osvw_status & ~(6ULL);
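	/*
	 * Masking with ~(6ULL) clears status bits 1 and 2, the OSVW bits
	 * for errata 400 and 415, which is what reports them as fixed.
	 */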
428 
429 	/*
430 	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
431 	 * all osvw.status bits inside that length, including bit 0 (which is
432 	 * reserved for erratum 298), are valid. However, if host processor's
433 	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
434 	 * be conservative here and therefore we tell the guest that erratum 298
435 	 * is present (because we really don't know).
436 	 */
437 	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
438 		vcpu->arch.osvw.status |= 1;
439 }
440 
441 static void svm_init_os_visible_workarounds(void)
442 {
443 	u64 len, status;
444 
445 	/*
446 	 * Get OS-Visible Workarounds (OSVW) bits.
447 	 *
448 	 * Note that it is possible to have a system with mixed processor
449 	 * revisions and therefore different OSVW bits. If bits are not the same
450 	 * on different processors then choose the worst case (i.e. if erratum
451 	 * is present on one processor and not on another then assume that the
452 	 * erratum is present everywhere).
453 	 *
454  * Note #2!  The OSVW MSRs are used to communicate that an erratum is
455 	 * NOT present!  Software must assume erratum as present if its bit is
456 	 * set in OSVW_STATUS *or* the bit number exceeds OSVW_ID_LENGTH.  If
457 	 * either RDMSR fails, simply zero out the length to treat all errata
458 	 * as being present.  Similarly, use the *minimum* length across all
459 	 * CPUs, not the maximum length.
460 	 *
461  * If the length is zero, then KVM is already treating all errata as
462 	 * being present and there's nothing left to do.
463 	 */
464 	if (!osvw_len)
465 		return;
466 
467 	if (!this_cpu_has(X86_FEATURE_OSVW) ||
468 	    native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len) ||
469 	    native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status))
470 		len = status = 0;
471 
472 	if (status == READ_ONCE(osvw_status) && len >= READ_ONCE(osvw_len))
473 		return;
474 
475 	guard(spinlock)(&osvw_lock);
476 
477 	if (len < osvw_len)
478 		osvw_len = len;
479 	osvw_status |= status;
480 	osvw_status &= (1ULL << osvw_len) - 1;
481 }
482 
483 static bool __kvm_is_svm_supported(void)
484 {
485 	int cpu = smp_processor_id();
486 	struct cpuinfo_x86 *c = &cpu_data(cpu);
487 
488 	if (c->x86_vendor != X86_VENDOR_AMD &&
489 	    c->x86_vendor != X86_VENDOR_HYGON) {
490 		pr_err("CPU %d isn't AMD or Hygon\n", cpu);
491 		return false;
492 	}
493 
494 	if (!cpu_has(c, X86_FEATURE_SVM)) {
495 		pr_err("SVM not supported by CPU %d\n", cpu);
496 		return false;
497 	}
498 
499 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
500 		pr_info("KVM is unsupported when running as an SEV guest\n");
501 		return false;
502 	}
503 
504 	return true;
505 }
506 
507 static bool kvm_is_svm_supported(void)
508 {
509 	bool supported;
510 
511 	migrate_disable();
512 	supported = __kvm_is_svm_supported();
513 	migrate_enable();
514 
515 	return supported;
516 }
517 
518 static int svm_check_processor_compat(void)
519 {
520 	if (!__kvm_is_svm_supported())
521 		return -EIO;
522 
523 	return 0;
524 }
525 
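/*
 * current_tsc_ratio caches the last ratio written on this CPU so that
 * redundant writes to MSR_AMD64_TSC_RATIO are elided on the hot path.
 */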
526 static void __svm_write_tsc_multiplier(u64 multiplier)
527 {
528 	if (multiplier == __this_cpu_read(current_tsc_ratio))
529 		return;
530 
531 	wrmsrq(MSR_AMD64_TSC_RATIO, multiplier);
532 	__this_cpu_write(current_tsc_ratio, multiplier);
533 }
534 
535 static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
536 {
537 	return &sd->save_area->host_sev_es_save;
538 }
539 
540 static void svm_emergency_disable_virtualization_cpu(void)
541 {
542 	wrmsrq(MSR_VM_HSAVE_PA, 0);
543 }
544 
545 static void svm_disable_virtualization_cpu(void)
546 {
547 	/* Make sure we clean up behind us */
548 	if (tsc_scaling)
549 		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
550 
551 	x86_virt_put_ref(X86_FEATURE_SVM);
552 	wrmsrq(MSR_VM_HSAVE_PA, 0);
553 
554 	amd_pmu_disable_virt();
555 }
556 
557 static int svm_enable_virtualization_cpu(void)
558 {
559 
560 	struct svm_cpu_data *sd;
561 	int me = raw_smp_processor_id();
562 	int r;
563 
564 	r = x86_virt_get_ref(X86_FEATURE_SVM);
565 	if (r)
566 		return r;
567 
568 	sd = per_cpu_ptr(&svm_data, me);
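	/*
	 * Carve up the ASID space: EBX of CPUID Fn8000_000A reports the
	 * number of ASIDs, ASIDs 1..max_sev_asid are reserved for SEV
	 * guests, and non-SEV guests are assigned ASIDs from min_asid up.
	 */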
569 	sd->asid_generation = 1;
570 	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
571 	sd->next_asid = sd->max_asid + 1;
572 	sd->min_asid = max_sev_asid + 1;
573 
574 	wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
575 
576 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
577 		/*
578 		 * Set the default value, even if we don't use TSC scaling,
579 		 * to avoid leaving a stale value in the MSR.
580 		 */
581 		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
582 	}
583 
584 	svm_init_os_visible_workarounds();
585 
586 	svm_init_erratum_383();
587 
588 	amd_pmu_enable_virt();
589 
590 	return 0;
591 }
592 
593 static void svm_cpu_uninit(int cpu)
594 {
595 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
596 
597 	if (!sd->save_area)
598 		return;
599 
600 	kfree(sd->sev_vmcbs);
601 	__free_page(__sme_pa_to_page(sd->save_area_pa));
602 	sd->save_area_pa = 0;
603 	sd->save_area = NULL;
604 }
605 
606 static int svm_cpu_init(int cpu)
607 {
608 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
609 	struct page *save_area_page;
610 	int ret = -ENOMEM;
611 
612 	memset(sd, 0, sizeof(struct svm_cpu_data));
613 	save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
614 	if (!save_area_page)
615 		return ret;
616 
617 	ret = sev_cpu_init(sd);
618 	if (ret)
619 		goto free_save_area;
620 
621 	sd->save_area = page_address(save_area_page);
622 	sd->save_area_pa = __sme_page_pa(save_area_page);
623 	return 0;
624 
625 free_save_area:
626 	__free_page(save_area_page);
627 	return ret;
628 
629 }
630 
631 static void set_dr_intercepts(struct vcpu_svm *svm)
632 {
633 	struct vmcb *vmcb = svm->vmcb01.ptr;
634 
635 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
636 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
637 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
638 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
639 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
640 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
641 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
642 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
643 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
644 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
645 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
646 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
647 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
648 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
649 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
650 	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
651 
652 	svm_mark_intercepts_dirty(svm);
653 }
654 
655 static void clr_dr_intercepts(struct vcpu_svm *svm)
656 {
657 	struct vmcb *vmcb = svm->vmcb01.ptr;
658 
659 	vmcb->control.intercepts[INTERCEPT_DR] = 0;
660 
661 	svm_mark_intercepts_dirty(svm);
662 }
663 
664 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
665 {
666 	/*
667 	 * For non-nested case:
668 	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
669 	 * save it.
670 	 *
671 	 * For nested case:
672 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
673 	 * save it.
674 	 */
675 	void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
676 					    to_svm(vcpu)->msrpm;
677 
678 	return svm_test_msr_bitmap_write(msrpm, msr);
679 }
680 
681 void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
682 {
683 	struct vcpu_svm *svm = to_svm(vcpu);
684 	void *msrpm = svm->msrpm;
685 
686 	/* Don't disable interception for MSRs userspace wants to handle. */
687 	if (type & MSR_TYPE_R) {
688 		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
689 			svm_clear_msr_bitmap_read(msrpm, msr);
690 		else
691 			svm_set_msr_bitmap_read(msrpm, msr);
692 	}
693 
694 	if (type & MSR_TYPE_W) {
695 		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
696 			svm_clear_msr_bitmap_write(msrpm, msr);
697 		else
698 			svm_set_msr_bitmap_write(msrpm, msr);
699 	}
700 
701 	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
702 	svm->nested.force_msr_bitmap_recalc = true;
703 }
704 
705 void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
706 {
707 	unsigned int order = get_order(size);
708 	struct page *pages = alloc_pages(gfp_mask, order);
709 	void *pm;
710 
711 	if (!pages)
712 		return NULL;
713 
714 	/*
715 	 * Set all bits in the permissions map so that all MSR and I/O accesses
716 	 * are intercepted by default.
717 	 */
718 	pm = page_address(pages);
719 	memset(pm, 0xff, PAGE_SIZE * (1 << order));
720 
721 	return pm;
722 }
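
/*
 * Callers pass MSRPM_SIZE or IOPM_SIZE; the matching frees use
 * __free_pages() with the same get_order() rounding, see
 * svm_vcpu_free_msrpm() and svm_hardware_unsetup().
 */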
723 
724 static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
725 {
726 	struct vcpu_svm *svm = to_svm(vcpu);
727 	bool intercept = !(svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR);
728 
729 	if (intercept == svm->lbr_msrs_intercepted)
730 		return;
731 
732 	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
733 	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
734 	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTFROMIP, MSR_TYPE_RW, intercept);
735 	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTTOIP, MSR_TYPE_RW, intercept);
736 
737 	if (is_sev_es_guest(vcpu))
738 		svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
739 
740 	svm->lbr_msrs_intercepted = intercept;
741 }
742 
743 void svm_vcpu_free_msrpm(void *msrpm)
744 {
745 	__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
746 }
747 
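/*
 * PMU MSRs fall into three tiers: the legacy K7 counters, the core
 * PERF_CTL/PERF_CTR pairs (interleaved, hence the 2 * i stride), and the
 * global control/status MSRs; pass-through is granted only for counters
 * the guest's CPUID actually enumerates, everything else stays intercepted.
 */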
748 static void svm_recalc_pmu_msr_intercepts(struct kvm_vcpu *vcpu)
749 {
750 	bool intercept = !kvm_vcpu_has_mediated_pmu(vcpu);
751 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
752 	int i;
753 
754 	if (!enable_mediated_pmu)
755 		return;
756 
757 	/* Legacy counters are always available for AMD CPUs with a PMU. */
758 	for (i = 0; i < min(pmu->nr_arch_gp_counters, AMD64_NUM_COUNTERS); i++)
759 		svm_set_intercept_for_msr(vcpu, MSR_K7_PERFCTR0 + i,
760 					  MSR_TYPE_RW, intercept);
761 
762 	intercept |= !guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE);
763 	for (i = 0; i < pmu->nr_arch_gp_counters; i++)
764 		svm_set_intercept_for_msr(vcpu, MSR_F15H_PERF_CTR + 2 * i,
765 					  MSR_TYPE_RW, intercept);
766 
767 	for ( ; i < kvm_pmu_cap.num_counters_gp; i++)
768 		svm_enable_intercept_for_msr(vcpu, MSR_F15H_PERF_CTR + 2 * i,
769 					     MSR_TYPE_RW);
770 
771 	intercept = kvm_need_perf_global_ctrl_intercept(vcpu);
772 	svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
773 				  MSR_TYPE_RW, intercept);
774 	svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
775 				  MSR_TYPE_RW, intercept);
776 	svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
777 				  MSR_TYPE_RW, intercept);
778 	svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
779 				  MSR_TYPE_RW, intercept);
780 }
781 
782 static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
783 {
784 	struct vcpu_svm *svm = to_svm(vcpu);
785 
786 	svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
787 	svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
788 
789 #ifdef CONFIG_X86_64
790 	svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
791 	svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
792 	svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
793 	svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
794 	svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
795 	svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
796 #endif
797 
798 	if (lbrv)
799 		svm_recalc_lbr_msr_intercepts(vcpu);
800 
801 	if (cpu_feature_enabled(X86_FEATURE_IBPB))
802 		svm_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
803 					  !guest_has_pred_cmd_msr(vcpu));
804 
805 	if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D))
806 		svm_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
807 					  !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
808 
809 	/*
810 	 * Disable interception of SPEC_CTRL if KVM doesn't need to manually
811 	 * context switch the MSR (SPEC_CTRL is virtualized by the CPU), or if
812 	 * the guest has a non-zero SPEC_CTRL value, i.e. is likely actively
813 	 * using SPEC_CTRL.
814 	 */
815 	if (cpu_feature_enabled(X86_FEATURE_V_SPEC_CTRL))
816 		svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
817 					  !guest_has_spec_ctrl_msr(vcpu));
818 	else
819 		svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
820 					  !svm->spec_ctrl);
821 
822 	/*
823 	 * Intercept SYSENTER_EIP and SYSENTER_ESP when emulating an Intel CPU,
824 	 * as AMD hardware only stores 32 bits, whereas Intel CPUs track 64 bits.
825 	 */
826 	svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW,
827 				  guest_cpuid_is_intel_compatible(vcpu));
828 	svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW,
829 				  guest_cpuid_is_intel_compatible(vcpu));
830 
831 	if (kvm_aperfmperf_in_guest(vcpu->kvm)) {
832 		svm_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, MSR_TYPE_R);
833 		svm_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
834 	}
835 
836 	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
837 		bool shstk_enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
838 
839 		svm_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, !shstk_enabled);
840 		svm_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, !shstk_enabled);
841 		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, !shstk_enabled);
842 		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, !shstk_enabled);
843 		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, !shstk_enabled);
844 		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, !shstk_enabled);
845 	}
846 
847 	if (is_sev_es_guest(vcpu))
848 		sev_es_recalc_msr_intercepts(vcpu);
849 
850 	svm_recalc_pmu_msr_intercepts(vcpu);
851 
852 	/*
853 	 * x2APIC intercepts are modified on-demand and cannot be filtered by
854 	 * userspace.
855 	 */
856 }
857 
858 static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
859 {
860 	to_svm(vcpu)->vmcb->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_LBR;
861 }
862 
863 void svm_enable_lbrv(struct kvm_vcpu *vcpu)
864 {
865 	__svm_enable_lbrv(vcpu);
866 	svm_recalc_lbr_msr_intercepts(vcpu);
867 }
868 
869 static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
870 {
871 	KVM_BUG_ON(is_sev_es_guest(vcpu), vcpu->kvm);
872 	to_svm(vcpu)->vmcb->control.misc_ctl2 &= ~SVM_MISC2_ENABLE_V_LBR;
873 }
874 
875 void svm_update_lbrv(struct kvm_vcpu *vcpu)
876 {
877 	struct vcpu_svm *svm = to_svm(vcpu);
878 	bool current_enable_lbrv = svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR;
879 	bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
880 			    (is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
881 			    (svm->nested.ctl.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR));
882 
883 	if (enable_lbrv && !current_enable_lbrv)
884 		__svm_enable_lbrv(vcpu);
885 	else if (!enable_lbrv && current_enable_lbrv)
886 		__svm_disable_lbrv(vcpu);
887 
888 	/*
889 	 * During nested transitions, it is possible that the current VMCB has
890 	 * LBR_CTL set, but the previous LBR_CTL had it cleared (or vice versa).
891 	 * In this case, even though LBR_CTL does not need an update, intercepts
892 	 * do, so always recalculate the intercepts here.
893 	 */
894 	svm_recalc_lbr_msr_intercepts(vcpu);
895 }
896 
897 void disable_nmi_singlestep(struct vcpu_svm *svm)
898 {
899 	svm->nmi_singlestep = false;
900 
901 	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
902 		/* Clear our flags if they were not set by the guest */
903 		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
904 			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
905 		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
906 			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
907 	}
908 }
909 
910 static void grow_ple_window(struct kvm_vcpu *vcpu)
911 {
912 	struct vcpu_svm *svm = to_svm(vcpu);
913 	struct vmcb_control_area *control = &svm->vmcb->control;
914 	int old = control->pause_filter_count;
915 
916 	/* Adjusting pause_filter_count makes no sense if PLE is disabled.  */
917 	WARN_ON_ONCE(kvm_pause_in_guest(vcpu->kvm));
918 
919 	/*
920 	 * While running L2, KVM should intercept PAUSE if and only if L1 wants
921 	 * to intercept PAUSE, and L1's intercept should take priority, i.e.
922 	 * KVM should never handle a PAUSE intercept from L2.
923 	 */
924 	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
925 		return;
926 
927 	control->pause_filter_count = __grow_ple_window(old,
928 							pause_filter_count,
929 							pause_filter_count_grow,
930 							pause_filter_count_max);
931 
932 	if (control->pause_filter_count != old) {
933 		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
934 		trace_kvm_ple_window_update(vcpu->vcpu_id,
935 					    control->pause_filter_count, old);
936 	}
937 }
938 
939 static void shrink_ple_window(struct kvm_vcpu *vcpu)
940 {
941 	struct vcpu_svm *svm = to_svm(vcpu);
942 	struct vmcb_control_area *control = &svm->vmcb->control;
943 	int old = control->pause_filter_count;
944 
945 	/* Adjusting pause_filter_count makes no sense if PLE is disabled.  */
946 	WARN_ON_ONCE(kvm_pause_in_guest(vcpu->kvm));
947 
948 	if (is_guest_mode(vcpu))
949 		return;
950 
951 	control->pause_filter_count =
952 				__shrink_ple_window(old,
953 						    pause_filter_count,
954 						    pause_filter_count_shrink,
955 						    pause_filter_count);
956 	if (control->pause_filter_count != old) {
957 		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
958 		trace_kvm_ple_window_update(vcpu->vcpu_id,
959 					    control->pause_filter_count, old);
960 	}
961 }
962 
963 static void svm_hardware_unsetup(void)
964 {
965 	int cpu;
966 
967 	avic_hardware_unsetup();
968 
969 	sev_hardware_unsetup();
970 
971 	for_each_possible_cpu(cpu)
972 		svm_cpu_uninit(cpu);
973 
974 	__free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
975 	iopm_base = 0;
976 }
977 
978 static void init_seg(struct vmcb_seg *seg)
979 {
980 	seg->selector = 0;
981 	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
982 		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
983 	seg->limit = 0xffff;
984 	seg->base = 0;
985 }
986 
987 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
988 {
989 	seg->selector = 0;
990 	seg->attrib = SVM_SELECTOR_P_MASK | type;
991 	seg->limit = 0xffff;
992 	seg->base = 0;
993 }
994 
995 static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
996 {
997 	struct vcpu_svm *svm = to_svm(vcpu);
998 
999 	return svm->nested.ctl.tsc_offset;
1000 }
1001 
1002 static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1003 {
1004 	struct vcpu_svm *svm = to_svm(vcpu);
1005 
1006 	return svm->tsc_ratio_msr;
1007 }
1008 
1009 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
1010 {
1011 	struct vcpu_svm *svm = to_svm(vcpu);
1012 
1013 	svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
1014 	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
1015 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1016 }
1017 
1018 void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
1019 {
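	/*
	 * Disabling preemption pins the task to one CPU so that the per-CPU
	 * current_tsc_ratio cache and the eventual WRMSR hit the same CPU.
	 */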
1020 	preempt_disable();
1021 	if (to_svm(vcpu)->guest_state_loaded)
1022 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1023 	preempt_enable();
1024 }
1025 
1026 static bool svm_has_pending_gif_event(struct vcpu_svm *svm)
1027 {
1028 	return svm->vcpu.arch.smi_pending ||
1029 	       svm->vcpu.arch.nmi_pending ||
1030 	       kvm_cpu_has_injectable_intr(&svm->vcpu) ||
1031 	       kvm_apic_has_pending_init_or_sipi(&svm->vcpu);
1032 }
1033 
1034 /* Evaluate instruction intercepts that depend on guest CPUID features. */
1035 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
1036 {
1037 	struct vcpu_svm *svm = to_svm(vcpu);
1038 
1039 	/*
1040 	 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
1041 	 * roots, or if INVPCID is disabled in the guest to inject #UD.
1042 	 */
1043 	if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
1044 		if (!npt_enabled ||
1045 		    !guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_INVPCID))
1046 			svm_set_intercept(svm, INTERCEPT_INVPCID);
1047 		else
1048 			svm_clr_intercept(svm, INTERCEPT_INVPCID);
1049 	}
1050 
1051 	if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
1052 		if (guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP))
1053 			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
1054 		else
1055 			svm_set_intercept(svm, INTERCEPT_RDTSCP);
1056 	}
1057 
1058 	/*
1059 	 * Intercept instructions that #UD if EFER.SVME=0, as SVME must be set
1060 	 * even when running the guest, i.e. hardware will only ever see
1061 	 * EFER.SVME=1.
1062 	 *
1063 	 * No need to toggle any of the vgif/vls/etc. enable bits here, as they
1064 	 * are set when the VMCB is initialized and never cleared (if the
1065 	 * relevant intercepts are set, the enablements are meaningless anyway).
1066 	 *
1067 	 * FIXME: When #GP is not intercepted, a #GP on these instructions (e.g.
1068 	 * due to CPL > 0) could be injected by hardware before the instruction
1069 	 * is intercepted, leading to #GP taking precedence over #UD from the
1070 	 * guest's perspective.
1071 	 */
1072 	if (!(vcpu->arch.efer & EFER_SVME)) {
1073 		svm_set_intercept(svm, INTERCEPT_VMLOAD);
1074 		svm_set_intercept(svm, INTERCEPT_VMSAVE);
1075 		svm_set_intercept(svm, INTERCEPT_CLGI);
1076 		svm_set_intercept(svm, INTERCEPT_STGI);
1077 	} else {
1078 		/*
1079 		 * If hardware supports Virtual VMLOAD VMSAVE then enable it
1080 		 * in VMCB and clear intercepts to avoid #VMEXIT.
1081 		 */
1082 		if (guest_cpuid_is_intel_compatible(vcpu)) {
1083 			svm_set_intercept(svm, INTERCEPT_VMLOAD);
1084 			svm_set_intercept(svm, INTERCEPT_VMSAVE);
1085 		} else if (vls) {
1086 			svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1087 			svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1088 		}
1089 
1090 		/*
1091 		 * Process pending events when clearing STGI/CLGI intercepts if
1092 		 * there's at least one pending event that is masked by GIF, so
1093 		 * that KVM re-evaluates if the intercept needs to be set again
1094 		 * to track when GIF is re-enabled (e.g. for NMI injection).
1095 		 */
1096 		if (vgif) {
1097 			svm_clr_intercept(svm, INTERCEPT_CLGI);
1098 			svm_clr_intercept(svm, INTERCEPT_STGI);
1099 
1100 			if (svm_has_pending_gif_event(svm))
1101 				kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1102 		}
1103 	}
1104 
1105 	if (kvm_need_rdpmc_intercept(vcpu))
1106 		svm_set_intercept(svm, INTERCEPT_RDPMC);
1107 	else
1108 		svm_clr_intercept(svm, INTERCEPT_RDPMC);
1109 }
1110 
1111 static void svm_recalc_intercepts(struct kvm_vcpu *vcpu)
1112 {
1113 	svm_recalc_instruction_intercepts(vcpu);
1114 	svm_recalc_msr_intercepts(vcpu);
1115 }
1116 
1117 static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
1118 {
1119 	struct vcpu_svm *svm = to_svm(vcpu);
1120 	struct vmcb *vmcb = svm->vmcb01.ptr;
1121 	struct vmcb_control_area *control = &vmcb->control;
1122 	struct vmcb_save_area *save = &vmcb->save;
1123 
1124 	svm_set_intercept(svm, INTERCEPT_CR0_READ);
1125 	svm_set_intercept(svm, INTERCEPT_CR3_READ);
1126 	svm_set_intercept(svm, INTERCEPT_CR4_READ);
1127 	svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1128 	svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
1129 	svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
1130 	svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
1131 
1132 	set_dr_intercepts(svm);
1133 
1134 	set_exception_intercept(svm, PF_VECTOR);
1135 	set_exception_intercept(svm, UD_VECTOR);
1136 	set_exception_intercept(svm, MC_VECTOR);
1137 	set_exception_intercept(svm, AC_VECTOR);
1138 	set_exception_intercept(svm, DB_VECTOR);
1139 	/*
1140 	 * Guest access to VMware backdoor ports could legitimately
1141 	 * trigger #GP because of the TSS I/O permission bitmap.
1142 	 * We intercept those #GP and allow access to them anyway
1143 	 * as VMware does.
1144 	 */
1145 	if (enable_vmware_backdoor)
1146 		set_exception_intercept(svm, GP_VECTOR);
1147 
1148 	svm_set_intercept(svm, INTERCEPT_INTR);
1149 	svm_set_intercept(svm, INTERCEPT_NMI);
1150 
1151 	if (intercept_smi)
1152 		svm_set_intercept(svm, INTERCEPT_SMI);
1153 
1154 	svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1155 	svm_set_intercept(svm, INTERCEPT_RDPMC);
1156 	svm_set_intercept(svm, INTERCEPT_CPUID);
1157 	svm_set_intercept(svm, INTERCEPT_INVD);
1158 	svm_set_intercept(svm, INTERCEPT_INVLPG);
1159 	svm_set_intercept(svm, INTERCEPT_INVLPGA);
1160 	svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
1161 	svm_set_intercept(svm, INTERCEPT_MSR_PROT);
1162 	svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
1163 	svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
1164 	svm_set_intercept(svm, INTERCEPT_VMRUN);
1165 	svm_set_intercept(svm, INTERCEPT_VMMCALL);
1166 	svm_set_intercept(svm, INTERCEPT_VMLOAD);
1167 	svm_set_intercept(svm, INTERCEPT_VMSAVE);
1168 	svm_set_intercept(svm, INTERCEPT_STGI);
1169 	svm_set_intercept(svm, INTERCEPT_CLGI);
1170 	svm_set_intercept(svm, INTERCEPT_SKINIT);
1171 	svm_set_intercept(svm, INTERCEPT_WBINVD);
1172 	svm_set_intercept(svm, INTERCEPT_XSETBV);
1173 	svm_set_intercept(svm, INTERCEPT_RDPRU);
1174 	svm_set_intercept(svm, INTERCEPT_RSM);
1175 
1176 	if (!kvm_mwait_in_guest(vcpu->kvm)) {
1177 		svm_set_intercept(svm, INTERCEPT_MONITOR);
1178 		svm_set_intercept(svm, INTERCEPT_MWAIT);
1179 	}
1180 
1181 	if (!kvm_hlt_in_guest(vcpu->kvm)) {
1182 		if (cpu_feature_enabled(X86_FEATURE_IDLE_HLT))
1183 			svm_set_intercept(svm, INTERCEPT_IDLE_HLT);
1184 		else
1185 			svm_set_intercept(svm, INTERCEPT_HLT);
1186 	}
1187 
1188 	control->iopm_base_pa = iopm_base;
1189 	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1190 	control->int_ctl = V_INTR_MASKING_MASK;
1191 
1192 	init_seg(&save->es);
1193 	init_seg(&save->ss);
1194 	init_seg(&save->ds);
1195 	init_seg(&save->fs);
1196 	init_seg(&save->gs);
1197 
1198 	save->cs.selector = 0xf000;
1199 	save->cs.base = 0xffff0000;
1200 	/* Executable/Readable Code Segment */
1201 	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1202 		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1203 	save->cs.limit = 0xffff;
1204 
1205 	save->gdtr.base = 0;
1206 	save->gdtr.limit = 0xffff;
1207 	save->idtr.base = 0;
1208 	save->idtr.limit = 0xffff;
1209 
1210 	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1211 	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1212 
1213 	if (npt_enabled) {
1214 		/* Setup VMCB for Nested Paging */
1215 		control->misc_ctl |= SVM_MISC_ENABLE_NP;
1216 		svm_clr_intercept(svm, INTERCEPT_INVLPG);
1217 		clr_exception_intercept(svm, PF_VECTOR);
1218 		svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1219 		svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
1220 		save->g_pat = vcpu->arch.pat;
1221 		save->cr3 = 0;
1222 	}
1223 	svm->current_vmcb->asid_generation = 0;
1224 	svm->asid = 0;
1225 
1226 	svm->nested.vmcb12_gpa = INVALID_GPA;
1227 	svm->nested.last_vmcb12_gpa = INVALID_GPA;
1228 
1229 	if (!kvm_pause_in_guest(vcpu->kvm)) {
1230 		control->pause_filter_count = pause_filter_count;
1231 		if (pause_filter_thresh)
1232 			control->pause_filter_thresh = pause_filter_thresh;
1233 		svm_set_intercept(svm, INTERCEPT_PAUSE);
1234 	} else {
1235 		svm_clr_intercept(svm, INTERCEPT_PAUSE);
1236 	}
1237 
1238 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
1239 		svm->vmcb->control.erap_ctl |= ERAP_CONTROL_ALLOW_LARGER_RAP;
1240 
1241 	if (enable_apicv && irqchip_in_kernel(vcpu->kvm))
1242 		avic_init_vmcb(svm, vmcb);
1243 
1244 	if (vnmi)
1245 		svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;
1246 
1247 	if (vgif)
1248 		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1249 
1250 	if (vls)
1251 		svm->vmcb->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
1252 
1253 	if (vcpu->kvm->arch.bus_lock_detection_enabled)
1254 		svm_set_intercept(svm, INTERCEPT_BUSLOCK);
1255 
1256 	if (is_sev_guest(vcpu))
1257 		sev_init_vmcb(svm, init_event);
1258 
1259 	svm_hv_init_vmcb(vmcb);
1260 
1261 	kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
1262 
1263 	vmcb_mark_all_dirty(vmcb);
1264 
1265 	enable_gif(svm);
1266 }
1267 
1268 static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
1269 {
1270 	struct vcpu_svm *svm = to_svm(vcpu);
1271 
1272 	svm_init_osvw(vcpu);
1273 
1274 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
1275 		vcpu->arch.microcode_version = 0x01000065;
1276 	svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
1277 
1278 	svm->nmi_masked = false;
1279 	svm->awaiting_iret_completion = false;
1280 }
1281 
1282 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1283 {
1284 	struct vcpu_svm *svm = to_svm(vcpu);
1285 
1286 	svm->spec_ctrl = 0;
1287 	svm->virt_spec_ctrl = 0;
1288 
1289 	init_vmcb(vcpu, init_event);
1290 
1291 	if (!init_event)
1292 		__svm_vcpu_reset(vcpu);
1293 }
1294 
1295 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
1296 {
1297 	svm->current_vmcb = target_vmcb;
1298 	svm->vmcb = target_vmcb->ptr;
1299 }
1300 
1301 static int svm_vcpu_precreate(struct kvm *kvm)
1302 {
1303 	return avic_alloc_physical_id_table(kvm);
1304 }
1305 
1306 static int svm_vcpu_create(struct kvm_vcpu *vcpu)
1307 {
1308 	struct vcpu_svm *svm;
1309 	struct page *vmcb01_page;
1310 	int err;
1311 
1312 	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1313 	svm = to_svm(vcpu);
1314 
1315 	err = -ENOMEM;
1316 	vmcb01_page = snp_safe_alloc_page();
1317 	if (!vmcb01_page)
1318 		goto out;
1319 
1320 	err = sev_vcpu_create(vcpu);
1321 	if (err)
1322 		goto error_free_vmcb_page;
1323 
1324 	err = avic_init_vcpu(svm);
1325 	if (err)
1326 		goto error_free_sev;
1327 
1328 	svm->msrpm = svm_vcpu_alloc_msrpm();
1329 	if (!svm->msrpm) {
1330 		err = -ENOMEM;
1331 		goto error_free_sev;
1332 	}
1333 
1334 	svm->x2avic_msrs_intercepted = true;
1335 	svm->lbr_msrs_intercepted = true;
1336 
1337 	svm->vmcb01.ptr = page_address(vmcb01_page);
1338 	svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
1339 	svm_switch_vmcb(svm, &svm->vmcb01);
1340 
1341 	svm->guest_state_loaded = false;
1342 
1343 	return 0;
1344 
1345 error_free_sev:
1346 	sev_free_vcpu(vcpu);
1347 error_free_vmcb_page:
1348 	__free_page(vmcb01_page);
1349 out:
1350 	return err;
1351 }
1352 
1353 static void svm_vcpu_free(struct kvm_vcpu *vcpu)
1354 {
1355 	struct vcpu_svm *svm = to_svm(vcpu);
1356 
1357 	WARN_ON_ONCE(!list_empty(&svm->ir_list));
1358 
1359 	svm_leave_nested(vcpu);
1360 	svm_free_nested(svm);
1361 
1362 	sev_free_vcpu(vcpu);
1363 
1364 	__free_page(__sme_pa_to_page(svm->vmcb01.pa));
1365 	svm_vcpu_free_msrpm(svm->msrpm);
1366 }
1367 
1368 #ifdef CONFIG_CPU_MITIGATIONS
1369 static DEFINE_SPINLOCK(srso_lock);
1370 static atomic_t srso_nr_vms;
1371 
1372 static void svm_srso_clear_bp_spec_reduce(void *ign)
1373 {
1374 	struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
1375 
1376 	if (!sd->bp_spec_reduce_set)
1377 		return;
1378 
1379 	msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
1380 	sd->bp_spec_reduce_set = false;
1381 }
1382 
1383 static void svm_srso_vm_destroy(void)
1384 {
1385 	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
1386 		return;
1387 
1388 	if (atomic_dec_return(&srso_nr_vms))
1389 		return;
1390 
1391 	guard(spinlock)(&srso_lock);
1392 
1393 	/*
1394 	 * Verify a new VM didn't come along, acquire the lock, and increment
1395 	 * the count before this task acquired the lock.
1396 	 */
1397 	if (atomic_read(&srso_nr_vms))
1398 		return;
1399 
1400 	on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
1401 }
1402 
1403 static void svm_srso_vm_init(void)
1404 {
1405 	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
1406 		return;
1407 
1408 	/*
1409 	 * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
1410 	 * transition, i.e. destroying the last VM, is fully complete, e.g. so
1411 	 * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
1412 	 */
1413 	if (atomic_inc_not_zero(&srso_nr_vms))
1414 		return;
1415 
1416 	guard(spinlock)(&srso_lock);
1417 
1418 	atomic_inc(&srso_nr_vms);
1419 }
1420 #else
1421 static void svm_srso_vm_init(void) { }
1422 static void svm_srso_vm_destroy(void) { }
1423 #endif
1424 
1425 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1426 {
1427 	struct vcpu_svm *svm = to_svm(vcpu);
1428 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
1429 
1430 	if (is_sev_es_guest(vcpu))
1431 		sev_es_unmap_ghcb(svm);
1432 
1433 	if (svm->guest_state_loaded)
1434 		return;
1435 
1436 	/*
1437 	 * Save additional host state that will be restored on VMEXIT (sev-es)
1438 	 * or subsequent vmload of host save area.
1439 	 */
1440 	vmsave(sd->save_area_pa);
1441 	if (is_sev_es_guest(vcpu))
1442 		sev_es_prepare_switch_to_guest(svm, sev_es_host_save_area(sd));
1443 
1444 	if (tsc_scaling)
1445 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1446 
1447 	/*
1448 	 * TSC_AUX is always virtualized (context switched by hardware) for
1449 	 * SEV-ES guests when the feature is available.  For non-SEV-ES guests,
1450 	 * context switch TSC_AUX via the user_return MSR infrastructure (not
1451 	 * all CPUs support TSC_AUX virtualization).
1452 	 */
1453 	if (likely(tsc_aux_uret_slot >= 0) &&
1454 	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !is_sev_es_guest(vcpu)))
1455 		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
1456 
1457 	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
1458 	    !sd->bp_spec_reduce_set) {
1459 		sd->bp_spec_reduce_set = true;
1460 		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
1461 	}
1462 	svm->guest_state_loaded = true;
1463 }
1464 
1465 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
1466 {
1467 	to_svm(vcpu)->guest_state_loaded = false;
1468 }
1469 
1470 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1471 {
1472 	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
1473 		shrink_ple_window(vcpu);
1474 
1475 	if (kvm_vcpu_apicv_active(vcpu))
1476 		avic_vcpu_load(vcpu, cpu);
1477 }
1478 
1479 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1480 {
1481 	if (kvm_vcpu_apicv_active(vcpu))
1482 		avic_vcpu_put(vcpu);
1483 
1484 	svm_prepare_host_switch(vcpu);
1485 
1486 	++vcpu->stat.host_state_reload;
1487 }
1488 
1489 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1490 {
1491 	struct vcpu_svm *svm = to_svm(vcpu);
1492 	unsigned long rflags = svm->vmcb->save.rflags;
1493 
1494 	if (svm->nmi_singlestep) {
1495 		/* Hide our flags if they were not set by the guest */
1496 		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1497 			rflags &= ~X86_EFLAGS_TF;
1498 		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1499 			rflags &= ~X86_EFLAGS_RF;
1500 	}
1501 	return rflags;
1502 }
1503 
1504 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1505 {
1506 	if (to_svm(vcpu)->nmi_singlestep)
1507 		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1508 
1509 	/*
1510 	 * Any change of EFLAGS.VM is accompanied by a reload of SS
1511 	 * (caused by either a task switch or an inter-privilege IRET),
1512 	 * so we do not need to update the CPL here.
1513 	 */
1514 	to_svm(vcpu)->vmcb->save.rflags = rflags;
1515 }
1516 
1517 static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
1518 {
1519 	struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1520 
1521 	return is_sev_es_guest(vcpu)
1522 		? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
1523 		: kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
1524 }
1525 
1526 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1527 {
1528 	kvm_register_mark_available(vcpu, reg);
1529 
1530 	switch (reg) {
1531 	case VCPU_EXREG_PDPTR:
1532 		/*
1533 		 * When !npt_enabled, mmu->pdptrs[] is already available since
1534 		 * it is always updated per SDM when moving to CRs.
1535 		 */
1536 		if (npt_enabled)
1537 			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
1538 		break;
1539 	default:
1540 		KVM_BUG_ON(1, vcpu->kvm);
1541 	}
1542 }
1543 
1544 static void svm_set_vintr(struct vcpu_svm *svm)
1545 {
1546 	struct vmcb_control_area *control;
1547 
1548 	/*
1549 	 * The following fields are ignored when AVIC is enabled
1550 	 */
1551 	WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
1552 
1553 	svm_set_intercept(svm, INTERCEPT_VINTR);
1554 
1555 	/*
1556 	 * Recalculating intercepts may have cleared the VINTR intercept.  If
1557 	 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
1558 	 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
1559 	 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
1560 	 * interrupts will never be unblocked while L2 is running.
1561 	 */
1562 	if (!svm_is_intercept(svm, INTERCEPT_VINTR))
1563 		return;
1564 
1565 	/*
1566 	 * This is just a dummy VINTR to actually cause a vmexit to happen.
1567 	 * Actual injection of virtual interrupts happens through EVENTINJ.
1568 	 */
1569 	control = &svm->vmcb->control;
1570 	control->int_vector = 0x0;
1571 	control->int_ctl &= ~V_INTR_PRIO_MASK;
1572 	control->int_ctl |= V_IRQ_MASK |
1573 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1574 	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1575 }
1576 
1577 static void svm_clear_vintr(struct vcpu_svm *svm)
1578 {
1579 	svm_clr_intercept(svm, INTERCEPT_VINTR);
1580 
1581 	/* Drop int_ctl fields related to VINTR injection.  */
1582 	svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1583 	if (is_guest_mode(&svm->vcpu)) {
1584 		svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1585 
1586 		WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1587 			(svm->nested.ctl.int_ctl & V_TPR_MASK));
1588 
1589 		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1590 			V_IRQ_INJECTION_BITS_MASK;
1591 
1592 		svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1593 	}
1594 
1595 	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1596 }
1597 
1598 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1599 {
1600 	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1601 	struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
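
	/*
	 * FS/GS/TR/LDTR are read from vmcb01 even while L2 is active: those
	 * segments are context switched via VMLOAD/VMSAVE, which KVM always
	 * performs against vmcb01, so vmcb02 never holds current values.
	 */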
1602 
1603 	switch (seg) {
1604 	case VCPU_SREG_CS: return &save->cs;
1605 	case VCPU_SREG_DS: return &save->ds;
1606 	case VCPU_SREG_ES: return &save->es;
1607 	case VCPU_SREG_FS: return &save01->fs;
1608 	case VCPU_SREG_GS: return &save01->gs;
1609 	case VCPU_SREG_SS: return &save->ss;
1610 	case VCPU_SREG_TR: return &save01->tr;
1611 	case VCPU_SREG_LDTR: return &save01->ldtr;
1612 	}
1613 	BUG();
1614 	return NULL;
1615 }
1616 
1617 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1618 {
1619 	struct vmcb_seg *s = svm_seg(vcpu, seg);
1620 
1621 	return s->base;
1622 }
1623 
1624 static void svm_get_segment(struct kvm_vcpu *vcpu,
1625 			    struct kvm_segment *var, int seg)
1626 {
1627 	struct vmcb_seg *s = svm_seg(vcpu, seg);
1628 
1629 	var->base = s->base;
1630 	var->limit = s->limit;
1631 	var->selector = s->selector;
1632 	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1633 	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1634 	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1635 	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1636 	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1637 	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1638 	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1639 
1640 	/*
1641 	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
1642 	 * However, the SVM spec states that the G bit is not observed by the
1643 	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1644 	 * So let's synthesize a legal G bit for all segments, this helps
1645 	 * running KVM nested. It also helps cross-vendor migration, because
1646 	 * Intel's vmentry has a check on the 'G' bit.
1647 	 */
1648 	var->g = s->limit > 0xfffff;
1649 
1650 	/*
1651 	 * AMD's VMCB does not have an explicit unusable field, so emulate it
1652 	 * for cross-vendor migration purposes by treating "not present" as unusable.
1653 	 */
1654 	var->unusable = !var->present;
1655 
1656 	switch (seg) {
1657 	case VCPU_SREG_TR:
1658 		/*
1659 		 * Work around a bug where the busy flag in the tr selector
1660 		 * isn't exposed
1661 		 */
1662 		var->type |= 0x2;
1663 		break;
1664 	case VCPU_SREG_DS:
1665 	case VCPU_SREG_ES:
1666 	case VCPU_SREG_FS:
1667 	case VCPU_SREG_GS:
1668 		/*
1669 		 * The accessed bit must always be set in the segment
1670 		 * descriptor cache: even though it can be cleared in the
1671 		 * in-memory descriptor, the cached bit remains 1.  Intel
1672 		 * checks this on VM-entry, so set it here to support
1673 		 * cross-vendor migration.
1674 		 */
1675 		if (!var->unusable)
1676 			var->type |= 0x1;
1677 		break;
1678 	case VCPU_SREG_SS:
1679 		/*
1680 		 * On AMD CPUs sometimes the DB bit in the segment
1681 		 * descriptor is left as 1, although the whole segment has
1682 		 * been made unusable. Clear it here to pass an Intel VMX
1683 		 * entry check when cross vendor migrating.
1684 		 */
1685 		if (var->unusable)
1686 			var->db = 0;
1687 		/* This is symmetric with svm_set_segment() */
1688 		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1689 		break;
1690 	}
1691 }
1692 
1693 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1694 {
1695 	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1696 
1697 	return save->cpl;
1698 }
1699 
1700 static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1701 {
1702 	struct kvm_segment cs;
1703 
1704 	svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
1705 	*db = cs.db;
1706 	*l = cs.l;
1707 }
1708 
1709 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1710 {
1711 	struct vcpu_svm *svm = to_svm(vcpu);
1712 
1713 	dt->size = svm->vmcb->save.idtr.limit;
1714 	dt->address = svm->vmcb->save.idtr.base;
1715 }
1716 
1717 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1718 {
1719 	struct vcpu_svm *svm = to_svm(vcpu);
1720 
1721 	svm->vmcb->save.idtr.limit = dt->size;
1722 	svm->vmcb->save.idtr.base = dt->address;
1723 	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1724 }
1725 
1726 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1727 {
1728 	struct vcpu_svm *svm = to_svm(vcpu);
1729 
1730 	dt->size = svm->vmcb->save.gdtr.limit;
1731 	dt->address = svm->vmcb->save.gdtr.base;
1732 }
1733 
1734 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1735 {
1736 	struct vcpu_svm *svm = to_svm(vcpu);
1737 
1738 	svm->vmcb->save.gdtr.limit = dt->size;
1739 	svm->vmcb->save.gdtr.base = dt->address;
1740 	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1741 }
1742 
1743 static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1744 {
1745 	struct vcpu_svm *svm = to_svm(vcpu);
1746 
1747 	/*
1748 	 * For guests that don't set guest_state_protected, the cr3 update is
1749 	 * handled via kvm_mmu_load() while entering the guest. For guests
1750 	 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
1751 	 * VMCB save area now, since the save area will become the initial
1752 	 * contents of the VMSA, and future VMCB save area updates won't be
1753 	 * seen.
1754 	 */
1755 	if (is_sev_es_guest(vcpu)) {
1756 		svm->vmcb->save.cr3 = cr3;
1757 		vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1758 	}
1759 }
1760 
1761 static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1762 {
1763 	return true;
1764 }
1765 
1766 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1767 {
1768 	struct vcpu_svm *svm = to_svm(vcpu);
1769 	u64 hcr0 = cr0;
1770 	bool old_paging = is_paging(vcpu);
1771 
1772 #ifdef CONFIG_X86_64
1773 	if (vcpu->arch.efer & EFER_LME) {
1774 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1775 			vcpu->arch.efer |= EFER_LMA;
1776 			if (!vcpu->arch.guest_state_protected)
1777 				svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1778 		}
1779 
1780 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1781 			vcpu->arch.efer &= ~EFER_LMA;
1782 			if (!vcpu->arch.guest_state_protected)
1783 				svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1784 		}
1785 	}
1786 #endif
1787 	vcpu->arch.cr0 = cr0;
1788 
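	/*
	 * With shadow paging (!npt_enabled), hardware must always run with
	 * paging enabled and with CR0.WP set so that KVM's shadow page
	 * tables can enforce write protection even for supervisor accesses.
	 */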
1789 	if (!npt_enabled) {
1790 		hcr0 |= X86_CR0_PG | X86_CR0_WP;
1791 		if (old_paging != is_paging(vcpu))
1792 			svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
1793 	}
1794 
1795 	/*
1796 	 * Re-enable caching here because the QEMU BIOS does not do
1797 	 * it; leaving CD/NW set would cause a noticeable delay at
1798 	 * reboot.
1799 	 */
1800 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1801 		hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1802 
1803 	svm->vmcb->save.cr0 = hcr0;
1804 	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1805 
1806 	/*
1807 	 * SEV-ES guests must always keep the CR intercepts cleared. CR
1808 	 * tracking is done using the CR write traps.
1809 	 */
1810 	if (is_sev_es_guest(vcpu))
1811 		return;
1812 
1813 	if (hcr0 == cr0) {
1814 		/* Selective CR0 write remains on.  */
1815 		svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1816 		svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1817 	} else {
1818 		svm_set_intercept(svm, INTERCEPT_CR0_READ);
1819 		svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1820 	}
1821 }
1822 
1823 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1824 {
1825 	return true;
1826 }
1827 
1828 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1829 {
1830 	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1831 	unsigned long old_cr4 = vcpu->arch.cr4;
1832 
1833 	vcpu->arch.cr4 = cr4;
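	/*
	 * With shadow paging, hardware paging is always on, so PAE must be
	 * forced on (the shadow page tables use 64-bit PTEs), and
	 * SMEP/SMAP/PKE must be hidden while the guest itself is not paging.
	 */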
1834 	if (!npt_enabled) {
1835 		cr4 |= X86_CR4_PAE;
1836 
1837 		if (!is_paging(vcpu))
1838 			cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
1839 	}
1840 	cr4 |= host_cr4_mce;
1841 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
1842 	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1843 
1844 	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1845 		vcpu->arch.cpuid_dynamic_bits_dirty = true;
1846 }
1847 
1848 static void svm_set_segment(struct kvm_vcpu *vcpu,
1849 			    struct kvm_segment *var, int seg)
1850 {
1851 	struct vcpu_svm *svm = to_svm(vcpu);
1852 	struct vmcb_seg *s = svm_seg(vcpu, seg);
1853 
1854 	s->base = var->base;
1855 	s->limit = var->limit;
1856 	s->selector = var->selector;
1857 	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1858 	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1859 	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1860 	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1861 	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1862 	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1863 	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1864 	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1865 
1866 	/*
1867 	 * This is always accurate, except if SYSRET returned to a segment
1868 	 * with SS.DPL != 3.  Intel does not have this quirk, and always
1869 	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1870 	 * would entail passing the CPL to userspace and back.
1871 	 */
1872 	if (seg == VCPU_SREG_SS)
1873 		/* This is symmetric with svm_get_segment() */
1874 		svm->vmcb->save.cpl = (var->dpl & 3);
1875 
1876 	vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1877 }
1878 
1879 static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
1880 {
1881 	struct vcpu_svm *svm = to_svm(vcpu);
1882 
1883 	clr_exception_intercept(svm, BP_VECTOR);
1884 
1885 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1886 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1887 			set_exception_intercept(svm, BP_VECTOR);
1888 	}
1889 }
1890 
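/*
 * Allocate a fresh ASID.  When the per-CPU pool is exhausted, bump the
 * generation, restart from min_asid, and request a full TLB flush on the
 * next VMRUN to evict translations tagged with recycled ASIDs.
 */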
1891 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1892 {
1893 	if (sd->next_asid > sd->max_asid) {
1894 		++sd->asid_generation;
1895 		sd->next_asid = sd->min_asid;
1896 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1897 		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1898 	}
1899 
1900 	svm->current_vmcb->asid_generation = sd->asid_generation;
1901 	svm->asid = sd->next_asid++;
1902 }
1903 
1904 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1905 {
1906 	struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1907 
1908 	if (vcpu->arch.guest_state_protected)
1909 		return;
1910 
1911 	if (unlikely(value != vmcb->save.dr6)) {
1912 		vmcb->save.dr6 = value;
1913 		vmcb_mark_dirty(vmcb, VMCB_DR);
1914 	}
1915 }
1916 
1917 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1918 {
1919 	struct vcpu_svm *svm = to_svm(vcpu);
1920 
1921 	if (WARN_ON_ONCE(is_sev_es_guest(vcpu)))
1922 		return;
1923 
1924 	get_debugreg(vcpu->arch.db[0], 0);
1925 	get_debugreg(vcpu->arch.db[1], 1);
1926 	get_debugreg(vcpu->arch.db[2], 2);
1927 	get_debugreg(vcpu->arch.db[3], 3);
1928 	/*
1929 	 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
1930 	 * because db_interception might need it.  We can do it before vmentry.
1931 	 */
1932 	vcpu->arch.dr6 = svm->vmcb->save.dr6;
1933 	vcpu->arch.dr7 = svm->vmcb->save.dr7;
1934 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1935 	set_dr_intercepts(svm);
1936 }
1937 
1938 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1939 {
1940 	struct vcpu_svm *svm = to_svm(vcpu);
1941 
1942 	if (vcpu->arch.guest_state_protected)
1943 		return;
1944 
1945 	svm->vmcb->save.dr7 = value;
1946 	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1947 }
1948 
1949 static int pf_interception(struct kvm_vcpu *vcpu)
1950 {
1951 	struct vcpu_svm *svm = to_svm(vcpu);
1952 
1953 	u64 fault_address = svm->vmcb->control.exit_info_2;
1954 	u64 error_code = svm->vmcb->control.exit_info_1;
1955 
1956 	return kvm_handle_page_fault(vcpu, error_code, fault_address,
1957 			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1958 			svm->vmcb->control.insn_bytes : NULL,
1959 			svm->vmcb->control.insn_len);
1960 }
1961 
1962 static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1963 					 void *insn, int insn_len);
1964 
1965 static int npf_interception(struct kvm_vcpu *vcpu)
1966 {
1967 	struct vcpu_svm *svm = to_svm(vcpu);
1968 	int rc;
1969 
1970 	u64 error_code = svm->vmcb->control.exit_info_1;
1971 	gpa_t gpa = svm->vmcb->control.exit_info_2;
1972 
1973 	/*
1974 	 * WARN if hardware generates a fault with an error code that collides
1975 	 * with KVM-defined synthetic flags.  Clear the flags and continue on,
1976 	 * i.e. don't terminate the VM, as KVM can't possibly be relying on a
1977 	 * flag that KVM doesn't know about.
1978 	 */
1979 	if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK))
1980 		error_code &= ~PFERR_SYNTHETIC_MASK;
1981 
1982 	/*
1983 	 * Expedite fast MMIO kicks only if the next RIP is known and KVM is
1984 	 * allowed to emulate a page fault, e.g. skipping the current
1985 	 * instruction is wrong if the #NPF occurred while vectoring an event.
1986 	 */
1987 	if ((error_code & PFERR_RSVD_MASK) && !is_guest_mode(vcpu)) {
1988 		const int emul_type = EMULTYPE_PF | EMULTYPE_NO_DECODE;
1989 
1990 		if (svm_check_emulate_instruction(vcpu, emul_type, NULL, 0))
1991 			return 1;
1992 
1993 		if (nrips && svm->vmcb->control.next_rip &&
1994 		    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
1995 			trace_kvm_fast_mmio(gpa);
1996 			return kvm_skip_emulated_instruction(vcpu);
1997 		}
1998 	}
1999 
2000 	if (is_sev_snp_guest(vcpu) && (error_code & PFERR_GUEST_ENC_MASK))
2001 		error_code |= PFERR_PRIVATE_ACCESS;
2002 
2003 	trace_kvm_page_fault(vcpu, gpa, error_code);
2004 	rc = kvm_mmu_page_fault(vcpu, gpa, error_code,
2005 				static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2006 				svm->vmcb->control.insn_bytes : NULL,
2007 				svm->vmcb->control.insn_len);
2008 
2009 	if (rc > 0 && error_code & PFERR_GUEST_RMP_MASK)
2010 		sev_handle_rmp_fault(vcpu, gpa, error_code);
2011 
2012 	return rc;
2013 }
2014 
2015 static int db_interception(struct kvm_vcpu *vcpu)
2016 {
2017 	struct kvm_run *kvm_run = vcpu->run;
2018 	struct vcpu_svm *svm = to_svm(vcpu);
2019 
2020 	if (!(vcpu->guest_debug &
2021 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2022 		!svm->nmi_singlestep) {
2023 		u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
2024 		kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
2025 		return 1;
2026 	}
2027 
2028 	if (svm->nmi_singlestep) {
2029 		disable_nmi_singlestep(svm);
2030 		/* Make sure we check for pending NMIs upon entry */
2031 		kvm_make_request(KVM_REQ_EVENT, vcpu);
2032 	}
2033 
2034 	if (vcpu->guest_debug &
2035 	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2036 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
2037 		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2038 		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
2039 		kvm_run->debug.arch.pc =
2040 			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2041 		kvm_run->debug.arch.exception = DB_VECTOR;
2042 		return 0;
2043 	}
2044 
2045 	return 1;
2046 }
2047 
2048 static int bp_interception(struct kvm_vcpu *vcpu)
2049 {
2050 	struct vcpu_svm *svm = to_svm(vcpu);
2051 	struct kvm_run *kvm_run = vcpu->run;
2052 
2053 	kvm_run->exit_reason = KVM_EXIT_DEBUG;
2054 	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2055 	kvm_run->debug.arch.exception = BP_VECTOR;
2056 	return 0;
2057 }
2058 
2059 static int ud_interception(struct kvm_vcpu *vcpu)
2060 {
2061 	return handle_ud(vcpu);
2062 }
2063 
2064 static int ac_interception(struct kvm_vcpu *vcpu)
2065 {
2066 	kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
2067 	return 1;
2068 }
2069 
2070 static bool is_erratum_383(void)
2071 {
2072 	int i;
2073 	u64 value;
2074 
2075 	if (!erratum_383_found)
2076 		return false;
2077 
2078 	if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
2079 		return false;
2080 
2081 	/* Bit 62 may or may not be set for this mce */
2082 	value &= ~(1ULL << 62);
2083 
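	/* MC0_STATUS signature that identifies an erratum 383 machine check. */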
2084 	if (value != 0xb600000000010015ULL)
2085 		return false;
2086 
2087 	/* Clear MCi_STATUS registers */
2088 	for (i = 0; i < 6; ++i)
2089 		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
2090 
2091 	if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
2092 		value &= ~(1ULL << 2);
2093 		native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
2094 	}
2095 
2096 	/* Flush tlb to evict multi-match entries */
2097 	__flush_tlb_all();
2098 
2099 	return true;
2100 }
2101 
2102 static void svm_handle_mce(struct kvm_vcpu *vcpu)
2103 {
2104 	if (is_erratum_383()) {
2105 		/*
2106 		 * Erratum 383 triggered. Guest state is corrupt so kill the
2107 		 * guest.
2108 		 */
2109 		pr_err("Guest triggered AMD Erratum 383\n");
2110 
2111 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2112 
2113 		return;
2114 	}
2115 
2116 	/*
2117 	 * On an #MC intercept the MCE handler is not called automatically in
2118 	 * the host. So do it by hand here.
2119 	 */
2120 	kvm_machine_check();
2121 }
2122 
2123 static int mc_interception(struct kvm_vcpu *vcpu)
2124 {
2125 	return 1;
2126 }
2127 
2128 static int shutdown_interception(struct kvm_vcpu *vcpu)
2129 {
2130 	struct kvm_run *kvm_run = vcpu->run;
2131 	struct vcpu_svm *svm = to_svm(vcpu);
2132 
2133 
2134 	/*
2135 	 * VMCB is undefined after a SHUTDOWN intercept.  INIT the vCPU to put
2136 	 * the VMCB in a known good state.  Unfortunately, KVM doesn't have
2137 	 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2138 	 * userspace.  From a platform perspective, INIT is acceptable behavior as
2139 	 * there exist bare metal platforms that automatically INIT the CPU
2140 	 * in response to shutdown.
2141 	 *
2142 	 * The VM save area for SEV-ES guests has already been encrypted so it
2143 	 * cannot be reinitialized, i.e. synthesizing INIT is futile.
2144 	 */
2145 	if (!is_sev_es_guest(vcpu)) {
2146 		clear_page(svm->vmcb);
2147 #ifdef CONFIG_KVM_SMM
2148 		if (is_smm(vcpu))
2149 			kvm_smm_changed(vcpu, false);
2150 #endif
2151 		kvm_vcpu_reset(vcpu, true);
2152 	}
2153 
2154 	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2155 	return 0;
2156 }
2157 
2158 static int io_interception(struct kvm_vcpu *vcpu)
2159 {
2160 	struct vcpu_svm *svm = to_svm(vcpu);
2161 	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2162 	int size, in, string;
2163 	unsigned port;
2164 
2165 	++vcpu->stat.io_exits;
2166 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
2167 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2168 	port = io_info >> 16;
2169 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2170 
2171 	if (string) {
2172 		if (is_sev_es_guest(vcpu))
2173 			return sev_es_string_io(svm, size, port, in);
2174 		else
2175 			return kvm_emulate_instruction(vcpu, 0);
2176 	}
2177 
2178 	svm->next_rip = svm->vmcb->control.exit_info_2;
2179 
2180 	return kvm_fast_pio(vcpu, size, port, in);
2181 }
2182 
2183 static int nmi_interception(struct kvm_vcpu *vcpu)
2184 {
2185 	return 1;
2186 }
2187 
2188 static int smi_interception(struct kvm_vcpu *vcpu)
2189 {
2190 	return 1;
2191 }
2192 
2193 static int intr_interception(struct kvm_vcpu *vcpu)
2194 {
2195 	++vcpu->stat.irq_exits;
2196 	return 1;
2197 }
2198 
2199 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2200 {
2201 	u64 vmcb12_gpa = kvm_register_read(vcpu, VCPU_REGS_RAX);
2202 	struct vcpu_svm *svm = to_svm(vcpu);
2203 	struct vmcb *vmcb12;
2204 	struct kvm_host_map map;
2205 	int ret;
2206 
2207 	if (nested_svm_check_permissions(vcpu))
2208 		return 1;
2209 
2210 	if (!page_address_valid(vcpu, vmcb12_gpa)) {
2211 		kvm_inject_gp(vcpu, 0);
2212 		return 1;
2213 	}
2214 
2215 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map))
2216 		return kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
2217 
2218 	vmcb12 = map.hva;
2219 
2220 	ret = kvm_skip_emulated_instruction(vcpu);
2221 
2222 	/* KVM always performs VMLOAD/VMSAVE on VMCB01 (see __svm_vcpu_run()) */
2223 	if (vmload) {
2224 		svm_copy_vmloadsave_state(svm->vmcb01.ptr, vmcb12);
2225 		svm->sysenter_eip_hi = 0;
2226 		svm->sysenter_esp_hi = 0;
2227 	} else {
2228 		svm_copy_vmloadsave_state(vmcb12, svm->vmcb01.ptr);
2229 	}
2230 
2231 	kvm_vcpu_unmap(vcpu, &map);
2232 
2233 	return ret;
2234 }
2235 
2236 static int vmload_interception(struct kvm_vcpu *vcpu)
2237 {
2238 	return vmload_vmsave_interception(vcpu, true);
2239 }
2240 
2241 static int vmsave_interception(struct kvm_vcpu *vcpu)
2242 {
2243 	return vmload_vmsave_interception(vcpu, false);
2244 }
2245 
2246 static int vmrun_interception(struct kvm_vcpu *vcpu)
2247 {
2248 	if (nested_svm_check_permissions(vcpu))
2249 		return 1;
2250 
2251 	return nested_svm_vmrun(vcpu);
2252 }
2253 
2254 /* Return 0 if not SVM instr, otherwise return associated exit_code */
2255 static u64 svm_get_decoded_instr_exit_code(struct kvm_vcpu *vcpu)
2256 {
2257 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2258 
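	/*
	 * VMRUN, VMLOAD and VMSAVE are all two-byte 0F 01 opcodes that are
	 * distinguished only by their ModRM byte (see the cases below).
	 */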
2259 	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2260 		return 0;
2261 
2262 	BUILD_BUG_ON(!SVM_EXIT_VMRUN || !SVM_EXIT_VMLOAD || !SVM_EXIT_VMSAVE);
2263 
2264 	switch (ctxt->modrm) {
2265 	case 0xd8: /* VMRUN */
2266 		return SVM_EXIT_VMRUN;
2267 	case 0xda: /* VMLOAD */
2268 		return SVM_EXIT_VMLOAD;
2269 	case 0xdb: /* VMSAVE */
2270 		return SVM_EXIT_VMSAVE;
2271 	default:
2272 		break;
2273 	}
2274 
2275 	return 0;
2276 }
2277 
2278 /*
2279  * #GP handling code. Note that #GP can be triggered under the following two
2280  * cases:
2281  *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2282  *      some AMD CPUs when EAX of these instructions are in the reserved memory
2283  *      regions (e.g. SMM memory on host).
2284  *   2) VMware backdoor
2285  */
2286 static int gp_interception(struct kvm_vcpu *vcpu)
2287 {
2288 	struct vcpu_svm *svm = to_svm(vcpu);
2289 	u32 error_code = svm->vmcb->control.exit_info_1;
2290 	u64 svm_exit_code;
2291 
2292 	/* Both #GP cases have zero error_code */
2293 	if (error_code)
2294 		goto reinject;
2295 
2296 	/* Decode the instruction for usage later */
2297 	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2298 		goto reinject;
2299 
2300 	/* FIXME: Handle SVM instructions through the emulator */
2301 	svm_exit_code = svm_get_decoded_instr_exit_code(vcpu);
2302 	if (svm_exit_code) {
2303 		if (!is_guest_mode(vcpu))
2304 			return svm_invoke_exit_handler(vcpu, svm_exit_code);
2305 
2306 		if (nested_svm_check_permissions(vcpu))
2307 			return 1;
2308 
2309 		if (!page_address_valid(vcpu, kvm_register_read(vcpu, VCPU_REGS_RAX)))
2310 			goto reinject;
2311 
2312 		/*
2313 		 * FIXME: Only synthesize a #VMEXIT if L1 sets the intercept,
2314 		 * but only after the VMLOAD/VMSAVE exit handlers can properly
2315 		 * handle VMLOAD/VMSAVE from L2 with VLS enabled in L1 (i.e.
2316 		 * RAX is an L2 GPA that needs translation through L1's NPT).
2317 		 */
2318 		nested_svm_simple_vmexit(svm, svm_exit_code);
2319 		return 1;
2320 	}
2321 
2322 	/*
2323 	 * VMware backdoor emulation on #GP interception only handles
2324 	 * IN{S}, OUT{S}, and RDPMC, and only for L1.
2325 	 */
2326 	if (!enable_vmware_backdoor || is_guest_mode(vcpu))
2327 		goto reinject;
2328 
2329 	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2330 
2331 reinject:
2332 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2333 	return 1;
2334 }
2335 
2336 void svm_set_gif(struct vcpu_svm *svm, bool value)
2337 {
2338 	if (value) {
2339 		/*
2340 		 * If VGIF is enabled, the STGI intercept is only added to
2341 		 * detect the opening of the SMI/NMI window; remove it now.
2342 		 * Likewise, clear the VINTR intercept, we will set it
2343 		 * again while processing KVM_REQ_EVENT if needed.
2344 		 */
2345 		if (vgif)
2346 			svm_clr_intercept(svm, INTERCEPT_STGI);
2347 		if (svm_is_intercept(svm, INTERCEPT_VINTR))
2348 			svm_clear_vintr(svm);
2349 
2350 		enable_gif(svm);
2351 		if (svm_has_pending_gif_event(svm))
2352 			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2353 	} else {
2354 		disable_gif(svm);
2355 
2356 		/*
2357 		 * After a CLGI no interrupts should come.  But if vGIF is
2358 		 * in use, we still rely on the VINTR intercept (rather than
2359 		 * STGI) to detect an open interrupt window.
2360 		*/
2361 		if (!vgif)
2362 			svm_clear_vintr(svm);
2363 	}
2364 }
2365 
2366 static int stgi_interception(struct kvm_vcpu *vcpu)
2367 {
2368 	int ret;
2369 
2370 	if (nested_svm_check_permissions(vcpu))
2371 		return 1;
2372 
2373 	ret = kvm_skip_emulated_instruction(vcpu);
2374 	svm_set_gif(to_svm(vcpu), true);
2375 	return ret;
2376 }
2377 
2378 static int clgi_interception(struct kvm_vcpu *vcpu)
2379 {
2380 	int ret;
2381 
2382 	if (nested_svm_check_permissions(vcpu))
2383 		return 1;
2384 
2385 	ret = kvm_skip_emulated_instruction(vcpu);
2386 	svm_set_gif(to_svm(vcpu), false);
2387 	return ret;
2388 }
2389 
2390 static int invlpga_interception(struct kvm_vcpu *vcpu)
2391 {
2392 	gva_t gva = kvm_rax_read(vcpu);
2393 	u32 asid = kvm_rcx_read(vcpu);
2394 
2395 	if (nested_svm_check_permissions(vcpu))
2396 		return 1;
2397 
2398 	/* FIXME: Handle an address size prefix. */
2399 	if (!is_long_mode(vcpu))
2400 		gva = (u32)gva;
2401 
2402 	trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2403 
2404 	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2405 	kvm_mmu_invlpg(vcpu, gva);
2406 
2407 	return kvm_skip_emulated_instruction(vcpu);
2408 }
2409 
2410 static int skinit_interception(struct kvm_vcpu *vcpu)
2411 {
2412 	trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2413 
2414 	kvm_queue_exception(vcpu, UD_VECTOR);
2415 	return 1;
2416 }
2417 
2418 static int task_switch_interception(struct kvm_vcpu *vcpu)
2419 {
2420 	struct vcpu_svm *svm = to_svm(vcpu);
2421 	u16 tss_selector;
2422 	int reason;
2423 	int int_type = svm->vmcb->control.exit_int_info &
2424 		SVM_EXITINTINFO_TYPE_MASK;
2425 	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2426 	uint32_t type =
2427 		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2428 	uint32_t idt_v =
2429 		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2430 	bool has_error_code = false;
2431 	u32 error_code = 0;
2432 
2433 	tss_selector = (u16)svm->vmcb->control.exit_info_1;
2434 
2435 	if (svm->vmcb->control.exit_info_2 &
2436 	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2437 		reason = TASK_SWITCH_IRET;
2438 	else if (svm->vmcb->control.exit_info_2 &
2439 		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2440 		reason = TASK_SWITCH_JMP;
2441 	else if (idt_v)
2442 		reason = TASK_SWITCH_GATE;
2443 	else
2444 		reason = TASK_SWITCH_CALL;
2445 
2446 	if (reason == TASK_SWITCH_GATE) {
2447 		switch (type) {
2448 		case SVM_EXITINTINFO_TYPE_NMI:
2449 			vcpu->arch.nmi_injected = false;
2450 			break;
2451 		case SVM_EXITINTINFO_TYPE_EXEPT:
2452 			if (svm->vmcb->control.exit_info_2 &
2453 			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2454 				has_error_code = true;
2455 				error_code =
2456 					(u32)svm->vmcb->control.exit_info_2;
2457 			}
2458 			kvm_clear_exception_queue(vcpu);
2459 			break;
2460 		case SVM_EXITINTINFO_TYPE_INTR:
2461 		case SVM_EXITINTINFO_TYPE_SOFT:
2462 			kvm_clear_interrupt_queue(vcpu);
2463 			break;
2464 		default:
2465 			break;
2466 		}
2467 	}
2468 
2469 	if (reason != TASK_SWITCH_GATE ||
2470 	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2471 	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2472 	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2473 		if (!svm_skip_emulated_instruction(vcpu))
2474 			return 0;
2475 	}
2476 
2477 	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2478 		int_vec = -1;
2479 
2480 	return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
2481 			       has_error_code, error_code);
2482 }
2483 
2484 static void svm_clr_iret_intercept(struct vcpu_svm *svm)
2485 {
2486 	if (!is_sev_es_guest(&svm->vcpu))
2487 		svm_clr_intercept(svm, INTERCEPT_IRET);
2488 }
2489 
2490 static void svm_set_iret_intercept(struct vcpu_svm *svm)
2491 {
2492 	if (!is_sev_es_guest(&svm->vcpu))
2493 		svm_set_intercept(svm, INTERCEPT_IRET);
2494 }
2495 
2496 static int iret_interception(struct kvm_vcpu *vcpu)
2497 {
2498 	struct vcpu_svm *svm = to_svm(vcpu);
2499 
2500 	WARN_ON_ONCE(is_sev_es_guest(vcpu));
2501 
2502 	++vcpu->stat.nmi_window_exits;
2503 	svm->awaiting_iret_completion = true;
2504 
2505 	svm_clr_iret_intercept(svm);
2506 	svm->nmi_iret_rip = kvm_rip_read(vcpu);
2507 
2508 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2509 	return 1;
2510 }
2511 
2512 static int invlpg_interception(struct kvm_vcpu *vcpu)
2513 {
2514 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2515 		return kvm_emulate_instruction(vcpu, 0);
2516 
2517 	kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2518 	return kvm_skip_emulated_instruction(vcpu);
2519 }
2520 
2521 static int emulate_on_interception(struct kvm_vcpu *vcpu)
2522 {
2523 	return kvm_emulate_instruction(vcpu, 0);
2524 }
2525 
2526 static int rsm_interception(struct kvm_vcpu *vcpu)
2527 {
2528 	return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
2529 }
2530 
2531 static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
2532 					    unsigned long val)
2533 {
2534 	struct vcpu_svm *svm = to_svm(vcpu);
2535 	unsigned long cr0 = vcpu->arch.cr0;
2536 	bool ret = false;
2537 
2538 	if (!is_guest_mode(vcpu) ||
2539 	    (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2540 		return false;
2541 
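	/*
	 * Per the APM, the selective CR0 write intercept fires only when
	 * bits other than TS and MP change, so compare the old and new
	 * values with those bits masked off.
	 */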
2542 	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2543 	val &= ~SVM_CR0_SELECTIVE_MASK;
2544 
2545 	if (cr0 ^ val) {
2546 		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2547 		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2548 	}
2549 
2550 	return ret;
2551 }
2552 
2553 #define CR_VALID (1ULL << 63)
2554 
2555 static int cr_interception(struct kvm_vcpu *vcpu)
2556 {
2557 	struct vcpu_svm *svm = to_svm(vcpu);
2558 	int reg, cr;
2559 	unsigned long val;
2560 	int err;
2561 
2562 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2563 		return emulate_on_interception(vcpu);
2564 
2565 	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2566 		return emulate_on_interception(vcpu);
2567 
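	/*
	 * SVM encodes CR reads as exit codes 0x0-0xf and CR writes as
	 * 0x10-0x1f, so a decoded value of 16 or above is a write to
	 * CR(cr - 16).
	 */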
2568 	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2569 	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2570 		cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2571 	else
2572 		cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2573 
2574 	err = 0;
2575 	if (cr >= 16) { /* mov to cr */
2576 		cr -= 16;
2577 		val = kvm_register_read(vcpu, reg);
2578 		trace_kvm_cr_write(cr, val);
2579 		switch (cr) {
2580 		case 0:
2581 			if (!check_selective_cr0_intercepted(vcpu, val))
2582 				err = kvm_set_cr0(vcpu, val);
2583 			else
2584 				return 1;
2585 
2586 			break;
2587 		case 3:
2588 			err = kvm_set_cr3(vcpu, val);
2589 			break;
2590 		case 4:
2591 			err = kvm_set_cr4(vcpu, val);
2592 			break;
2593 		case 8:
2594 			err = kvm_set_cr8(vcpu, val);
2595 			break;
2596 		default:
2597 			WARN(1, "unhandled write to CR%d", cr);
2598 			kvm_queue_exception(vcpu, UD_VECTOR);
2599 			return 1;
2600 		}
2601 	} else { /* mov from cr */
2602 		switch (cr) {
2603 		case 0:
2604 			val = kvm_read_cr0(vcpu);
2605 			break;
2606 		case 2:
2607 			val = vcpu->arch.cr2;
2608 			break;
2609 		case 3:
2610 			val = kvm_read_cr3(vcpu);
2611 			break;
2612 		case 4:
2613 			val = kvm_read_cr4(vcpu);
2614 			break;
2615 		case 8:
2616 			val = kvm_get_cr8(vcpu);
2617 			break;
2618 		default:
2619 			WARN(1, "unhandled read from CR%d", cr);
2620 			kvm_queue_exception(vcpu, UD_VECTOR);
2621 			return 1;
2622 		}
2623 		kvm_register_write(vcpu, reg, val);
2624 		trace_kvm_cr_read(cr, val);
2625 	}
2626 	return kvm_complete_insn_gp(vcpu, err);
2627 }
2628 
2629 static int cr_trap(struct kvm_vcpu *vcpu)
2630 {
2631 	struct vcpu_svm *svm = to_svm(vcpu);
2632 	unsigned long old_value, new_value;
2633 	unsigned int cr;
2634 	int ret = 0;
2635 
2636 	new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2637 
2638 	cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2639 	switch (cr) {
2640 	case 0:
2641 		old_value = kvm_read_cr0(vcpu);
2642 		svm_set_cr0(vcpu, new_value);
2643 
2644 		kvm_post_set_cr0(vcpu, old_value, new_value);
2645 		break;
2646 	case 4:
2647 		old_value = kvm_read_cr4(vcpu);
2648 		svm_set_cr4(vcpu, new_value);
2649 
2650 		kvm_post_set_cr4(vcpu, old_value, new_value);
2651 		break;
2652 	case 8:
2653 		ret = kvm_set_cr8(vcpu, new_value);
2654 		break;
2655 	default:
2656 		WARN(1, "unhandled CR%d write trap", cr);
2657 		kvm_queue_exception(vcpu, UD_VECTOR);
2658 		return 1;
2659 	}
2660 
2661 	return kvm_complete_insn_gp(vcpu, ret);
2662 }
2663 
2664 static int dr_interception(struct kvm_vcpu *vcpu)
2665 {
2666 	struct vcpu_svm *svm = to_svm(vcpu);
2667 	int reg, dr;
2668 	int err = 0;
2669 
2670 	/*
2671 	 * SEV-ES intercepts DR7 only to disable guest debugging; the guest issues a VMGEXIT
2672 	 * for DR7 writes only.  KVM cannot change DR7 (always swapped as type 'A'), so return early.
2673 	 */
2674 	if (is_sev_es_guest(vcpu))
2675 		return 1;
2676 
2677 	if (vcpu->guest_debug == 0) {
2678 		/*
2679 		 * No more DR vmexits; force a reload of the debug registers
2680 		 * and reenter on this instruction.  The next vmexit will
2681 		 * retrieve the full state of the debug registers.
2682 		 */
2683 		clr_dr_intercepts(svm);
2684 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2685 		return 1;
2686 	}
2687 
2688 	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2689 		return emulate_on_interception(vcpu);
2690 
2691 	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2692 	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2693 	if (dr >= 16) { /* mov to DRn  */
2694 		dr -= 16;
2695 		err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
2696 	} else {
2697 		kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
2698 	}
2699 
2700 	return kvm_complete_insn_gp(vcpu, err);
2701 }
2702 
2703 static int cr8_write_interception(struct kvm_vcpu *vcpu)
2704 {
2705 	u8 cr8_prev = kvm_get_cr8(vcpu);
2706 	int r;
2707 
2708 	WARN_ON_ONCE(kvm_vcpu_apicv_active(vcpu));
2709 
2710 	/* instruction emulation calls kvm_set_cr8() */
2711 	r = cr_interception(vcpu);
2712 	if (lapic_in_kernel(vcpu))
2713 		return r;
2714 	if (cr8_prev <= kvm_get_cr8(vcpu))
2715 		return r;
2716 	vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2717 	return 0;
2718 }
2719 
2720 static int efer_trap(struct kvm_vcpu *vcpu)
2721 {
2722 	struct msr_data msr_info;
2723 	int ret;
2724 
2725 	/*
2726 	 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
2727 	 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2728 	 * whether the guest has X86_FEATURE_SVM - this avoids a failure if
2729 	 * the guest doesn't have X86_FEATURE_SVM.
2730 	 */
2731 	msr_info.host_initiated = false;
2732 	msr_info.index = MSR_EFER;
2733 	msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2734 	ret = kvm_set_msr_common(vcpu, &msr_info);
2735 
2736 	return kvm_complete_insn_gp(vcpu, ret);
2737 }
2738 
2739 static int svm_get_feature_msr(u32 msr, u64 *data)
2740 {
2741 	*data = 0;
2742 
2743 	switch (msr) {
2744 	case MSR_AMD64_DE_CFG:
2745 		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
2746 			*data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
2747 		break;
2748 	default:
2749 		return KVM_MSR_RET_UNSUPPORTED;
2750 	}
2751 
2752 	return 0;
2753 }
2754 
2755 static u64 *svm_vmcb_lbr(struct vcpu_svm *svm, u32 msr)
2756 {
2757 	switch (msr) {
2758 	case MSR_IA32_LASTBRANCHFROMIP:
2759 		return &svm->vmcb->save.br_from;
2760 	case MSR_IA32_LASTBRANCHTOIP:
2761 		return &svm->vmcb->save.br_to;
2762 	case MSR_IA32_LASTINTFROMIP:
2763 		return &svm->vmcb->save.last_excp_from;
2764 	case MSR_IA32_LASTINTTOIP:
2765 		return &svm->vmcb->save.last_excp_to;
2766 	default:
2767 		break;
2768 	}
2769 	KVM_BUG_ON(1, svm->vcpu.kvm);
2770 	return &svm->vmcb->save.br_from;
2771 }
2772 
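/*
 * Once an SEV-ES guest's register state is protected, MSRs that live in
 * the encrypted VMSA are inaccessible to the host; only MSR_IA32_XSS and
 * MSRs whose writes KVM still intercepts (and thus emulates) remain
 * reachable.
 */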
2773 static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu,
2774 				      struct msr_data *msr_info)
2775 {
2776 	return is_sev_es_guest(vcpu) && vcpu->arch.guest_state_protected &&
2777 	       msr_info->index != MSR_IA32_XSS &&
2778 	       !msr_write_intercepted(vcpu, msr_info->index);
2779 }
2780 
2781 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2782 {
2783 	struct vcpu_svm *svm = to_svm(vcpu);
2784 
2785 	if (sev_es_prevent_msr_access(vcpu, msr_info)) {
2786 		msr_info->data = 0;
2787 		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
2788 	}
2789 
2790 	switch (msr_info->index) {
2791 	case MSR_AMD64_TSC_RATIO:
2792 		if (!msr_info->host_initiated &&
2793 		    !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR))
2794 			return 1;
2795 		msr_info->data = svm->tsc_ratio_msr;
2796 		break;
2797 	case MSR_STAR:
2798 		msr_info->data = svm->vmcb01.ptr->save.star;
2799 		break;
2800 #ifdef CONFIG_X86_64
2801 	case MSR_LSTAR:
2802 		msr_info->data = svm->vmcb01.ptr->save.lstar;
2803 		break;
2804 	case MSR_CSTAR:
2805 		msr_info->data = svm->vmcb01.ptr->save.cstar;
2806 		break;
2807 	case MSR_GS_BASE:
2808 		msr_info->data = svm->vmcb01.ptr->save.gs.base;
2809 		break;
2810 	case MSR_FS_BASE:
2811 		msr_info->data = svm->vmcb01.ptr->save.fs.base;
2812 		break;
2813 	case MSR_KERNEL_GS_BASE:
2814 		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
2815 		break;
2816 	case MSR_SYSCALL_MASK:
2817 		msr_info->data = svm->vmcb01.ptr->save.sfmask;
2818 		break;
2819 #endif
2820 	case MSR_IA32_SYSENTER_CS:
2821 		msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
2822 		break;
2823 	case MSR_IA32_SYSENTER_EIP:
2824 		msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2825 		if (guest_cpuid_is_intel_compatible(vcpu))
2826 			msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
2827 		break;
2828 	case MSR_IA32_SYSENTER_ESP:
2829 		msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2830 		if (guest_cpuid_is_intel_compatible(vcpu))
2831 			msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
2832 		break;
2833 	case MSR_IA32_S_CET:
2834 		msr_info->data = svm->vmcb->save.s_cet;
2835 		break;
2836 	case MSR_IA32_INT_SSP_TAB:
2837 		msr_info->data = svm->vmcb->save.isst_addr;
2838 		break;
2839 	case MSR_KVM_INTERNAL_GUEST_SSP:
2840 		msr_info->data = svm->vmcb->save.ssp;
2841 		break;
2842 	case MSR_TSC_AUX:
2843 		msr_info->data = svm->tsc_aux;
2844 		break;
2845 	case MSR_IA32_DEBUGCTLMSR:
2846 		msr_info->data = lbrv ? svm->vmcb->save.dbgctl : 0;
2847 		break;
2848 	case MSR_IA32_LASTBRANCHFROMIP:
2849 	case MSR_IA32_LASTBRANCHTOIP:
2850 	case MSR_IA32_LASTINTFROMIP:
2851 	case MSR_IA32_LASTINTTOIP:
2852 		msr_info->data = lbrv ? *svm_vmcb_lbr(svm, msr_info->index) : 0;
2853 		break;
2854 	case MSR_VM_HSAVE_PA:
2855 		msr_info->data = svm->nested.hsave_msr;
2856 		break;
2857 	case MSR_VM_CR:
2858 		msr_info->data = svm->nested.vm_cr_msr;
2859 		break;
2860 	case MSR_IA32_SPEC_CTRL:
2861 		if (!msr_info->host_initiated &&
2862 		    !guest_has_spec_ctrl_msr(vcpu))
2863 			return 1;
2864 
2865 		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2866 			msr_info->data = svm->vmcb->save.spec_ctrl;
2867 		else
2868 			msr_info->data = svm->spec_ctrl;
2869 		break;
2870 	case MSR_AMD64_VIRT_SPEC_CTRL:
2871 		if (!msr_info->host_initiated &&
2872 		    !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
2873 			return 1;
2874 
2875 		msr_info->data = svm->virt_spec_ctrl;
2876 		break;
2877 	case MSR_F15H_IC_CFG: {
2878 
2879 		int family, model;
2880 
2881 		family = guest_cpuid_family(vcpu);
2882 		model  = guest_cpuid_model(vcpu);
2883 
2884 		if (family < 0 || model < 0)
2885 			return kvm_get_msr_common(vcpu, msr_info);
2886 
2887 		msr_info->data = 0;
2888 
2889 		if (family == 0x15 &&
2890 		    (model >= 0x2 && model < 0x20))
2891 			msr_info->data = 0x1E;
2892 		}
2893 		break;
2894 	case MSR_AMD64_DE_CFG:
2895 		msr_info->data = svm->msr_decfg;
2896 		break;
2897 	default:
2898 		return kvm_get_msr_common(vcpu, msr_info);
2899 	}
2900 	return 0;
2901 }
2902 
2903 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2904 {
2905 	struct vcpu_svm *svm = to_svm(vcpu);
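
	/*
	 * For SEV-ES guests the MSR access arrived via VMGEXIT, so report a
	 * failure back through the GHCB protocol instead of injecting #GP
	 * directly.
	 */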
2906 	if (!err || !is_sev_es_guest(vcpu) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2907 		return kvm_complete_insn_gp(vcpu, err);
2908 
2909 	svm_vmgexit_inject_exception(svm, X86_TRAP_GP);
2910 	return 1;
2911 }
2912 
2913 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2914 {
2915 	struct vcpu_svm *svm = to_svm(vcpu);
2916 	int svm_dis, chg_mask;
2917 
2918 	if (data & ~SVM_VM_CR_VALID_MASK)
2919 		return 1;
2920 
2921 	chg_mask = SVM_VM_CR_VALID_MASK;
2922 
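	/*
	 * Once SVMDIS is set, the SVM lock and disable bits become
	 * read-only; drop them from the writable mask.
	 */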
2923 	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2924 		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2925 
2926 	svm->nested.vm_cr_msr &= ~chg_mask;
2927 	svm->nested.vm_cr_msr |= (data & chg_mask);
2928 
2929 	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2930 
2931 	/* Reject setting SVMDIS while EFER.SVME is set */
2932 	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2933 		return 1;
2934 
2935 	return 0;
2936 }
2937 
2938 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2939 {
2940 	struct vcpu_svm *svm = to_svm(vcpu);
2941 	int ret = 0;
2942 
2943 	u32 ecx = msr->index;
2944 	u64 data = msr->data;
2945 
2946 	if (sev_es_prevent_msr_access(vcpu, msr))
2947 		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
2948 
2949 	switch (ecx) {
2950 	case MSR_AMD64_TSC_RATIO:
2951 
2952 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) {
2953 
2954 			if (!msr->host_initiated)
2955 				return 1;
2956 			/*
2957 			 * In case TSC scaling is not enabled, always
2958 			 * leave this MSR at the default value.
2959 			 *
2960 			 * Due to a bug in QEMU 6.2.0, it tries to set
2961 			 * this MSR to 0 even when TSC scaling is not
2962 			 * enabled.  Ignore that value as well.
2963 			 */
2964 			if (data != 0 && data != svm->tsc_ratio_msr)
2965 				return 1;
2966 			break;
2967 		}
2968 
2969 		if (data & SVM_TSC_RATIO_RSVD)
2970 			return 1;
2971 
2972 		svm->tsc_ratio_msr = data;
2973 
2974 		if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
2975 		    is_guest_mode(vcpu))
2976 			nested_svm_update_tsc_ratio_msr(vcpu);
2977 
2978 		break;
2979 	case MSR_IA32_CR_PAT:
2980 		ret = kvm_set_msr_common(vcpu, msr);
2981 		if (ret)
2982 			break;
2983 
2984 		svm->vmcb01.ptr->save.g_pat = data;
2985 		if (is_guest_mode(vcpu))
2986 			nested_vmcb02_compute_g_pat(svm);
2987 		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2988 		break;
2989 	case MSR_IA32_SPEC_CTRL:
2990 		if (!msr->host_initiated &&
2991 		    !guest_has_spec_ctrl_msr(vcpu))
2992 			return 1;
2993 
2994 		if (kvm_spec_ctrl_test_value(data))
2995 			return 1;
2996 
2997 		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2998 			svm->vmcb->save.spec_ctrl = data;
2999 		else
3000 			svm->spec_ctrl = data;
3001 		if (!data)
3002 			break;
3003 
3004 		/*
3005 		 * For non-nested:
3006 		 * When it's written (to non-zero) for the first time, pass
3007 		 * it through.
3008 		 *
3009 		 * For nested:
3010 		 * The handling of the MSR bitmap for L2 guests is done in
3011 		 * nested_svm_merge_msrpm().
3012 		 * We update the L1 MSR bit as well since it will end up
3013 		 * touching the MSR anyway now.
3014 		 */
3015 		svm_disable_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
3016 		break;
3017 	case MSR_AMD64_VIRT_SPEC_CTRL:
3018 		if (!msr->host_initiated &&
3019 		    !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
3020 			return 1;
3021 
3022 		if (data & ~SPEC_CTRL_SSBD)
3023 			return 1;
3024 
3025 		svm->virt_spec_ctrl = data;
3026 		break;
3027 	case MSR_STAR:
3028 		svm->vmcb01.ptr->save.star = data;
3029 		break;
3030 #ifdef CONFIG_X86_64
3031 	case MSR_LSTAR:
3032 		svm->vmcb01.ptr->save.lstar = data;
3033 		break;
3034 	case MSR_CSTAR:
3035 		svm->vmcb01.ptr->save.cstar = data;
3036 		break;
3037 	case MSR_GS_BASE:
3038 		svm->vmcb01.ptr->save.gs.base = data;
3039 		break;
3040 	case MSR_FS_BASE:
3041 		svm->vmcb01.ptr->save.fs.base = data;
3042 		break;
3043 	case MSR_KERNEL_GS_BASE:
3044 		svm->vmcb01.ptr->save.kernel_gs_base = data;
3045 		break;
3046 	case MSR_SYSCALL_MASK:
3047 		svm->vmcb01.ptr->save.sfmask = data;
3048 		break;
3049 #endif
3050 	case MSR_IA32_SYSENTER_CS:
3051 		svm->vmcb01.ptr->save.sysenter_cs = data;
3052 		break;
3053 	case MSR_IA32_SYSENTER_EIP:
3054 		svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
3055 		/*
3056 		 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs
3057 		 * when we spoof an Intel vendor ID (for cross vendor migration).
3058 		 * In this case we use this intercept to track the high
3059 		 * 32 bit part of these msrs to support Intel's
3060 		 * implementation of SYSENTER/SYSEXIT.
3061 		 */
3062 		svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
3063 		break;
3064 	case MSR_IA32_SYSENTER_ESP:
3065 		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
3066 		svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
3067 		break;
3068 	case MSR_IA32_S_CET:
3069 		svm->vmcb->save.s_cet = data;
3070 		vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
3071 		break;
3072 	case MSR_IA32_INT_SSP_TAB:
3073 		svm->vmcb->save.isst_addr = data;
3074 		vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
3075 		break;
3076 	case MSR_KVM_INTERNAL_GUEST_SSP:
3077 		svm->vmcb->save.ssp = data;
3078 		vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
3079 		break;
3080 	case MSR_TSC_AUX:
3081 		/*
3082 		 * TSC_AUX is always virtualized for SEV-ES guests when the
3083 		 * feature is available. The user return MSR support is not
3084 		 * required in this case because TSC_AUX is restored on #VMEXIT
3085 		 * from the host save area.
3086 		 */
3087 		if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && is_sev_es_guest(vcpu))
3088 			break;
3089 
3090 		/*
3091 		 * TSC_AUX is usually changed only during boot and never read
3092 		 * directly.  Intercept TSC_AUX and switch it via user return.
3093 		 */
3094 		preempt_disable();
3095 		ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
3096 		preempt_enable();
3097 		if (ret)
3098 			break;
3099 
3100 		svm->tsc_aux = data;
3101 		break;
3102 	case MSR_IA32_DEBUGCTLMSR:
3103 		if (!lbrv) {
3104 			kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3105 			break;
3106 		}
3107 
3108 		/*
3109 		 * Suppress BTF as KVM doesn't virtualize BTF, but there's no
3110 		 * way to communicate lack of support to the guest.
3111 		 */
3112 		if (data & DEBUGCTLMSR_BTF) {
3113 			kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
3114 			data &= ~DEBUGCTLMSR_BTF;
3115 		}
3116 
3117 		if (data & DEBUGCTL_RESERVED_BITS)
3118 			return 1;
3119 
3120 		if (svm->vmcb->save.dbgctl == data)
3121 			break;
3122 
3123 		svm->vmcb->save.dbgctl = data;
3124 		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
3125 		svm_update_lbrv(vcpu);
3126 		break;
3127 	case MSR_IA32_LASTBRANCHFROMIP:
3128 	case MSR_IA32_LASTBRANCHTOIP:
3129 	case MSR_IA32_LASTINTFROMIP:
3130 	case MSR_IA32_LASTINTTOIP:
3131 		if (!lbrv)
3132 			return KVM_MSR_RET_UNSUPPORTED;
3133 		if (!msr->host_initiated)
3134 			return 1;
3135 		*svm_vmcb_lbr(svm, ecx) = data;
3136 		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
3137 		break;
3138 	case MSR_VM_HSAVE_PA:
3139 		/*
3140 		 * Old kernels did not validate the value written to
3141 		 * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
3142 		 * value to allow live migrating buggy or malicious guests
3143 		 * originating from those kernels.
3144 		 */
3145 		if (!msr->host_initiated && !page_address_valid(vcpu, data))
3146 			return 1;
3147 
3148 		svm->nested.hsave_msr = data & PAGE_MASK;
3149 		break;
3150 	case MSR_VM_CR:
3151 		return svm_set_vm_cr(vcpu, data);
3152 	case MSR_VM_IGNNE:
3153 		kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3154 		break;
3155 	case MSR_AMD64_DE_CFG: {
3156 		u64 supported_de_cfg;
3157 
3158 		if (svm_get_feature_msr(ecx, &supported_de_cfg))
3159 			return 1;
3160 
3161 		if (data & ~supported_de_cfg)
3162 			return 1;
3163 
3164 		svm->msr_decfg = data;
3165 		break;
3166 	}
3167 	default:
3168 		return kvm_set_msr_common(vcpu, msr);
3169 	}
3170 	return ret;
3171 }
3172 
3173 static int msr_interception(struct kvm_vcpu *vcpu)
3174 {
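	/* exit_info_1 is non-zero for WRMSR and zero for RDMSR. */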
3175 	if (to_svm(vcpu)->vmcb->control.exit_info_1)
3176 		return kvm_emulate_wrmsr(vcpu);
3177 	else
3178 		return kvm_emulate_rdmsr(vcpu);
3179 }
3180 
3181 static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3182 {
3183 	kvm_make_request(KVM_REQ_EVENT, vcpu);
3184 	svm_clear_vintr(to_svm(vcpu));
3185 
3186 	++vcpu->stat.irq_window_exits;
3187 	return 1;
3188 }
3189 
3190 static int pause_interception(struct kvm_vcpu *vcpu)
3191 {
3192 	bool in_kernel;
3193 	/*
3194 	 * CPL is not made available for an SEV-ES guest, therefore
3195 	 * vcpu->arch.preempted_in_kernel can never be true.  Just
3196 	 * set in_kernel to false as well.
3197 	 */
3198 	in_kernel = !is_sev_es_guest(vcpu) && svm_get_cpl(vcpu) == 0;
3199 
3200 	grow_ple_window(vcpu);
3201 
3202 	kvm_vcpu_on_spin(vcpu, in_kernel);
3203 	return kvm_skip_emulated_instruction(vcpu);
3204 }
3205 
3206 static int invpcid_interception(struct kvm_vcpu *vcpu)
3207 {
3208 	struct vcpu_svm *svm = to_svm(vcpu);
3209 	unsigned long type;
3210 	gva_t gva;
3211 
3212 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) {
3213 		kvm_queue_exception(vcpu, UD_VECTOR);
3214 		return 1;
3215 	}
3216 
3217 	/*
3218 	 * For an INVPCID intercept:
3219 	 * EXITINFO1 provides the linear address of the memory operand.
3220 	 * EXITINFO2 provides the contents of the register operand.
3221 	 */
3222 	type = svm->vmcb->control.exit_info_2;
3223 	gva = svm->vmcb->control.exit_info_1;
3224 
3225 	/*
3226 	 * FIXME: Perform segment checks for 32-bit mode, and inject #SS if the
3227 	 *        stack segment is used.  The intercept takes priority over all
3228 	 *        #GP checks except CPL>0, but somehow still generates a linear
3229 	 *        address?  The APM is sorely lacking.
3230 	 */
3231 	if (is_noncanonical_address(gva, vcpu, 0)) {
3232 		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3233 		return 1;
3234 	}
3235 
3236 	return kvm_handle_invpcid(vcpu, type, gva);
3237 }
3238 
3239 static inline int complete_userspace_buslock(struct kvm_vcpu *vcpu)
3240 {
3241 	struct vcpu_svm *svm = to_svm(vcpu);
3242 
3243 	/*
3244 	 * If userspace has NOT changed RIP, then KVM's ABI is to let the guest
3245 	 * execute the bus-locking instruction.  Set the bus lock counter to '1'
3246 	 * to effectively step past the bus lock.
3247 	 */
3248 	if (kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))
3249 		svm->vmcb->control.bus_lock_counter = 1;
3250 
3251 	return 1;
3252 }
3253 
3254 static int bus_lock_exit(struct kvm_vcpu *vcpu)
3255 {
3256 	struct vcpu_svm *svm = to_svm(vcpu);
3257 
3258 	vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
3259 	vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
3260 
3261 	vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
3262 	vcpu->arch.complete_userspace_io = complete_userspace_buslock;
3263 
3264 	if (is_guest_mode(vcpu))
3265 		svm->nested.last_bus_lock_rip = vcpu->arch.cui_linear_rip;
3266 
3267 	return 0;
3268 }
3269 
3270 static int vmmcall_interception(struct kvm_vcpu *vcpu)
3271 {
3272 	/*
3273 	 * Inject a #UD if L2 is active and the VMMCALL isn't a Hyper-V TLB
3274 	 * hypercall, as VMMCALL #UDs if it's not intercepted, and this path is
3275 	 * reachable if and only if L1 doesn't want to intercept VMMCALL or has
3276 	 * enabled L0 (KVM) handling of Hyper-V L2 TLB flush hypercalls.
3277 	 */
3278 	if (is_guest_mode(vcpu) && !nested_svm_is_l2_tlb_flush_hcall(vcpu)) {
3279 		kvm_queue_exception(vcpu, UD_VECTOR);
3280 		return 1;
3281 	}
3282 
3283 	return kvm_emulate_hypercall(vcpu);
3284 }
3285 
3286 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3287 	[SVM_EXIT_READ_CR0]			= cr_interception,
3288 	[SVM_EXIT_READ_CR3]			= cr_interception,
3289 	[SVM_EXIT_READ_CR4]			= cr_interception,
3290 	[SVM_EXIT_READ_CR8]			= cr_interception,
3291 	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
3292 	[SVM_EXIT_WRITE_CR0]			= cr_interception,
3293 	[SVM_EXIT_WRITE_CR3]			= cr_interception,
3294 	[SVM_EXIT_WRITE_CR4]			= cr_interception,
3295 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
3296 	[SVM_EXIT_READ_DR0]			= dr_interception,
3297 	[SVM_EXIT_READ_DR1]			= dr_interception,
3298 	[SVM_EXIT_READ_DR2]			= dr_interception,
3299 	[SVM_EXIT_READ_DR3]			= dr_interception,
3300 	[SVM_EXIT_READ_DR4]			= dr_interception,
3301 	[SVM_EXIT_READ_DR5]			= dr_interception,
3302 	[SVM_EXIT_READ_DR6]			= dr_interception,
3303 	[SVM_EXIT_READ_DR7]			= dr_interception,
3304 	[SVM_EXIT_WRITE_DR0]			= dr_interception,
3305 	[SVM_EXIT_WRITE_DR1]			= dr_interception,
3306 	[SVM_EXIT_WRITE_DR2]			= dr_interception,
3307 	[SVM_EXIT_WRITE_DR3]			= dr_interception,
3308 	[SVM_EXIT_WRITE_DR4]			= dr_interception,
3309 	[SVM_EXIT_WRITE_DR5]			= dr_interception,
3310 	[SVM_EXIT_WRITE_DR6]			= dr_interception,
3311 	[SVM_EXIT_WRITE_DR7]			= dr_interception,
3312 	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
3313 	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
3314 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
3315 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
3316 	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
3317 	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
3318 	[SVM_EXIT_EXCP_BASE + GP_VECTOR]	= gp_interception,
3319 	[SVM_EXIT_INTR]				= intr_interception,
3320 	[SVM_EXIT_NMI]				= nmi_interception,
3321 	[SVM_EXIT_SMI]				= smi_interception,
3322 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
3323 	[SVM_EXIT_RDPMC]			= kvm_emulate_rdpmc,
3324 	[SVM_EXIT_CPUID]			= kvm_emulate_cpuid,
3325 	[SVM_EXIT_IRET]                         = iret_interception,
3326 	[SVM_EXIT_INVD]                         = kvm_emulate_invd,
3327 	[SVM_EXIT_PAUSE]			= pause_interception,
3328 	[SVM_EXIT_HLT]				= kvm_emulate_halt,
3329 	[SVM_EXIT_INVLPG]			= invlpg_interception,
3330 	[SVM_EXIT_INVLPGA]			= invlpga_interception,
3331 	[SVM_EXIT_IOIO]				= io_interception,
3332 	[SVM_EXIT_MSR]				= msr_interception,
3333 	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
3334 	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
3335 	[SVM_EXIT_VMRUN]			= vmrun_interception,
3336 	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
3337 	[SVM_EXIT_VMLOAD]			= vmload_interception,
3338 	[SVM_EXIT_VMSAVE]			= vmsave_interception,
3339 	[SVM_EXIT_STGI]				= stgi_interception,
3340 	[SVM_EXIT_CLGI]				= clgi_interception,
3341 	[SVM_EXIT_SKINIT]			= skinit_interception,
3342 	[SVM_EXIT_RDTSCP]			= kvm_handle_invalid_op,
3343 	[SVM_EXIT_WBINVD]                       = kvm_emulate_wbinvd,
3344 	[SVM_EXIT_MONITOR]			= kvm_emulate_monitor,
3345 	[SVM_EXIT_MWAIT]			= kvm_emulate_mwait,
3346 	[SVM_EXIT_XSETBV]			= kvm_emulate_xsetbv,
3347 	[SVM_EXIT_RDPRU]			= kvm_handle_invalid_op,
3348 	[SVM_EXIT_EFER_WRITE_TRAP]		= efer_trap,
3349 	[SVM_EXIT_CR0_WRITE_TRAP]		= cr_trap,
3350 	[SVM_EXIT_CR4_WRITE_TRAP]		= cr_trap,
3351 	[SVM_EXIT_CR8_WRITE_TRAP]		= cr_trap,
3352 	[SVM_EXIT_INVPCID]                      = invpcid_interception,
3353 	[SVM_EXIT_IDLE_HLT]			= kvm_emulate_halt,
3354 	[SVM_EXIT_NPF]				= npf_interception,
3355 	[SVM_EXIT_BUS_LOCK]			= bus_lock_exit,
3356 	[SVM_EXIT_RSM]                          = rsm_interception,
3357 	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
3358 	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
3359 #ifdef CONFIG_KVM_AMD_SEV
3360 	[SVM_EXIT_VMGEXIT]			= sev_handle_vmgexit,
3361 #endif
3362 };
3363 
3364 static void dump_vmcb(struct kvm_vcpu *vcpu)
3365 {
3366 	struct vcpu_svm *svm = to_svm(vcpu);
3367 	struct vmcb_control_area *control = &svm->vmcb->control;
3368 	struct vmcb_save_area *save = &svm->vmcb->save;
3369 	struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
3370 	char *vm_type;
3371 
3372 	if (!dump_invalid_vmcb) {
3373 		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3374 		return;
3375 	}
3376 
3377 	guard(mutex)(&vmcb_dump_mutex);
3378 
3379 	vm_type = is_sev_snp_guest(vcpu) ? "SEV-SNP" :
3380 		  is_sev_es_guest(vcpu) ? "SEV-ES" :
3381 		  is_sev_guest(vcpu) ? "SEV" : "SVM";
3382 
3383 	pr_err("%s vCPU%u VMCB %p, last attempted VMRUN on CPU %d\n",
3384 	       vm_type, vcpu->vcpu_id, svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
3385 	pr_err("VMCB Control Area:\n");
3386 	pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3387 	pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
3388 	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3389 	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
3390 	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
3391 	pr_err("%-20s%08x %08x\n", "intercepts:",
3392 	       control->intercepts[INTERCEPT_WORD3],
3393 	       control->intercepts[INTERCEPT_WORD4]);
3394 	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3395 	pr_err("%-20s%d\n", "pause filter threshold:",
3396 	       control->pause_filter_thresh);
3397 	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3398 	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3399 	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3400 	pr_err("%-20s%d\n", "asid:", control->asid);
3401 	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3402 	pr_err("%-20s%d\n", "erap_ctl:", control->erap_ctl);
3403 	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3404 	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3405 	pr_err("%-20s%08x\n", "int_state:", control->int_state);
3406 	pr_err("%-20s%016llx\n", "exit_code:", control->exit_code);
3407 	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3408 	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3409 	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3410 	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3411 	pr_err("%-20s%lld\n", "misc_ctl:", control->misc_ctl);
3412 	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3413 	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
3414 	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
3415 	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3416 	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3417 	pr_err("%-20s%lld\n", "misc_ctl2:", control->misc_ctl2);
3418 	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3419 	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3420 	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3421 	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3422 	pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
3423 	pr_err("%-20s%016llx\n", "allowed_sev_features:", control->allowed_sev_features);
3424 	pr_err("%-20s%016llx\n", "guest_sev_features:", control->guest_sev_features);
3425 
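	/*
	 * For SEV-ES guests, the architectural register state lives in the
	 * encrypted VMSA.  Dump from a decrypted copy if one can be obtained,
	 * and skip the save area entirely if decryption fails.
	 */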
3426 	if (is_sev_es_guest(vcpu)) {
3427 		save = sev_decrypt_vmsa(vcpu);
3428 		if (!save)
3429 			goto no_vmsa;
3430 
3431 		save01 = save;
3432 	}
3433 
3434 	pr_err("VMCB State Save Area:\n");
3435 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3436 	       "es:",
3437 	       save->es.selector, save->es.attrib,
3438 	       save->es.limit, save->es.base);
3439 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3440 	       "cs:",
3441 	       save->cs.selector, save->cs.attrib,
3442 	       save->cs.limit, save->cs.base);
3443 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3444 	       "ss:",
3445 	       save->ss.selector, save->ss.attrib,
3446 	       save->ss.limit, save->ss.base);
3447 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3448 	       "ds:",
3449 	       save->ds.selector, save->ds.attrib,
3450 	       save->ds.limit, save->ds.base);
3451 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3452 	       "fs:",
3453 	       save01->fs.selector, save01->fs.attrib,
3454 	       save01->fs.limit, save01->fs.base);
3455 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3456 	       "gs:",
3457 	       save01->gs.selector, save01->gs.attrib,
3458 	       save01->gs.limit, save01->gs.base);
3459 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3460 	       "gdtr:",
3461 	       save->gdtr.selector, save->gdtr.attrib,
3462 	       save->gdtr.limit, save->gdtr.base);
3463 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3464 	       "ldtr:",
3465 	       save01->ldtr.selector, save01->ldtr.attrib,
3466 	       save01->ldtr.limit, save01->ldtr.base);
3467 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3468 	       "idtr:",
3469 	       save->idtr.selector, save->idtr.attrib,
3470 	       save->idtr.limit, save->idtr.base);
3471 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3472 	       "tr:",
3473 	       save01->tr.selector, save01->tr.attrib,
3474 	       save01->tr.limit, save01->tr.base);
3475 	pr_err("vmpl: %d   cpl:  %d               efer:          %016llx\n",
3476 	       save->vmpl, save->cpl, save->efer);
3477 	pr_err("%-15s %016llx %-13s %016llx\n",
3478 	       "cr0:", save->cr0, "cr2:", save->cr2);
3479 	pr_err("%-15s %016llx %-13s %016llx\n",
3480 	       "cr3:", save->cr3, "cr4:", save->cr4);
3481 	pr_err("%-15s %016llx %-13s %016llx\n",
3482 	       "dr6:", save->dr6, "dr7:", save->dr7);
3483 	pr_err("%-15s %016llx %-13s %016llx\n",
3484 	       "rip:", save->rip, "rflags:", save->rflags);
3485 	pr_err("%-15s %016llx %-13s %016llx\n",
3486 	       "rsp:", save->rsp, "rax:", save->rax);
3487 	pr_err("%-15s %016llx %-13s %016llx\n",
3488 	       "s_cet:", save->s_cet, "ssp:", save->ssp);
3489 	pr_err("%-15s %016llx\n",
3490 	       "isst_addr:", save->isst_addr);
3491 	pr_err("%-15s %016llx %-13s %016llx\n",
3492 	       "star:", save01->star, "lstar:", save01->lstar);
3493 	pr_err("%-15s %016llx %-13s %016llx\n",
3494 	       "cstar:", save01->cstar, "sfmask:", save01->sfmask);
3495 	pr_err("%-15s %016llx %-13s %016llx\n",
3496 	       "kernel_gs_base:", save01->kernel_gs_base,
3497 	       "sysenter_cs:", save01->sysenter_cs);
3498 	pr_err("%-15s %016llx %-13s %016llx\n",
3499 	       "sysenter_esp:", save01->sysenter_esp,
3500 	       "sysenter_eip:", save01->sysenter_eip);
3501 	pr_err("%-15s %016llx %-13s %016llx\n",
3502 	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3503 	pr_err("%-15s %016llx %-13s %016llx\n",
3504 	       "br_from:", save->br_from, "br_to:", save->br_to);
3505 	pr_err("%-15s %016llx %-13s %016llx\n",
3506 	       "excp_from:", save->last_excp_from,
3507 	       "excp_to:", save->last_excp_to);
3508 
3509 	if (is_sev_es_guest(vcpu)) {
3510 		struct sev_es_save_area *vmsa = (struct sev_es_save_area *)save;
3511 
3512 		pr_err("%-15s %016llx\n",
3513 		       "sev_features", vmsa->sev_features);
3514 
3515 		pr_err("%-15s %016llx %-13s %016llx\n",
3516 		       "pl0_ssp:", vmsa->pl0_ssp, "pl1_ssp:", vmsa->pl1_ssp);
3517 		pr_err("%-15s %016llx %-13s %016llx\n",
3518 		       "pl2_ssp:", vmsa->pl2_ssp, "pl3_ssp:", vmsa->pl3_ssp);
3519 		pr_err("%-15s %016llx\n",
3520 		       "u_cet:", vmsa->u_cet);
3521 
3522 		pr_err("%-15s %016llx %-13s %016llx\n",
3523 		       "rax:", vmsa->rax, "rbx:", vmsa->rbx);
3524 		pr_err("%-15s %016llx %-13s %016llx\n",
3525 		       "rcx:", vmsa->rcx, "rdx:", vmsa->rdx);
3526 		pr_err("%-15s %016llx %-13s %016llx\n",
3527 		       "rsi:", vmsa->rsi, "rdi:", vmsa->rdi);
3528 		pr_err("%-15s %016llx %-13s %016llx\n",
3529 		       "rbp:", vmsa->rbp, "rsp:", vmsa->rsp);
3530 		pr_err("%-15s %016llx %-13s %016llx\n",
3531 		       "r8:", vmsa->r8, "r9:", vmsa->r9);
3532 		pr_err("%-15s %016llx %-13s %016llx\n",
3533 		       "r10:", vmsa->r10, "r11:", vmsa->r11);
3534 		pr_err("%-15s %016llx %-13s %016llx\n",
3535 		       "r12:", vmsa->r12, "r13:", vmsa->r13);
3536 		pr_err("%-15s %016llx %-13s %016llx\n",
3537 		       "r14:", vmsa->r14, "r15:", vmsa->r15);
3538 		pr_err("%-15s %016llx %-13s %016llx\n",
3539 		       "xcr0:", vmsa->xcr0, "xss:", vmsa->xss);
3540 	} else {
3541 		pr_err("%-15s %016llx %-13s %016lx\n",
3542 		       "rax:", save->rax, "rbx:",
3543 		       vcpu->arch.regs[VCPU_REGS_RBX]);
3544 		pr_err("%-15s %016lx %-13s %016lx\n",
3545 		       "rcx:", vcpu->arch.regs[VCPU_REGS_RCX],
3546 		       "rdx:", vcpu->arch.regs[VCPU_REGS_RDX]);
3547 		pr_err("%-15s %016lx %-13s %016lx\n",
3548 		       "rsi:", vcpu->arch.regs[VCPU_REGS_RSI],
3549 		       "rdi:", vcpu->arch.regs[VCPU_REGS_RDI]);
3550 		pr_err("%-15s %016lx %-13s %016llx\n",
3551 		       "rbp:", vcpu->arch.regs[VCPU_REGS_RBP],
3552 		       "rsp:", save->rsp);
3553 #ifdef CONFIG_X86_64
3554 		pr_err("%-15s %016lx %-13s %016lx\n",
3555 		       "r8:", vcpu->arch.regs[VCPU_REGS_R8],
3556 		       "r9:", vcpu->arch.regs[VCPU_REGS_R9]);
3557 		pr_err("%-15s %016lx %-13s %016lx\n",
3558 		       "r10:", vcpu->arch.regs[VCPU_REGS_R10],
3559 		       "r11:", vcpu->arch.regs[VCPU_REGS_R11]);
3560 		pr_err("%-15s %016lx %-13s %016lx\n",
3561 		       "r12:", vcpu->arch.regs[VCPU_REGS_R12],
3562 		       "r13:", vcpu->arch.regs[VCPU_REGS_R13]);
3563 		pr_err("%-15s %016lx %-13s %016lx\n",
3564 		       "r14:", vcpu->arch.regs[VCPU_REGS_R14],
3565 		       "r15:", vcpu->arch.regs[VCPU_REGS_R15]);
3566 #endif
3567 	}
3568 
3569 no_vmsa:
3570 	if (is_sev_es_guest(vcpu))
3571 		sev_free_decrypted_vmsa(vcpu, save);
3572 }
3573 
3574 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 __exit_code)
3575 {
3576 	u32 exit_code = __exit_code;
3577 
3578 	/*
3579 	 * SVM uses negative values, i.e. 64-bit values, to indicate that VMRUN
3580 	 * failed.  Report all such errors to userspace (note, VMEXIT_INVALID,
3581 	 * a.k.a. SVM_EXIT_ERR, is special cased by svm_handle_exit()).  Skip
3582 	 * the check when running as a VM, as KVM has historically left garbage
3583 	 * in bits 63:32, i.e. running KVM-on-KVM would hit false positives if
3584 	 * the underlying kernel is buggy.
3585 	 */
3586 	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR) &&
3587 	    (u64)exit_code != __exit_code)
3588 		goto unexpected_vmexit;
3589 
3590 #ifdef CONFIG_MITIGATION_RETPOLINE
3591 	if (exit_code == SVM_EXIT_MSR)
3592 		return msr_interception(vcpu);
3593 	else if (exit_code == SVM_EXIT_VINTR)
3594 		return interrupt_window_interception(vcpu);
3595 	else if (exit_code == SVM_EXIT_INTR)
3596 		return intr_interception(vcpu);
3597 	else if (exit_code == SVM_EXIT_HLT || exit_code == SVM_EXIT_IDLE_HLT)
3598 		return kvm_emulate_halt(vcpu);
3599 	else if (exit_code == SVM_EXIT_NPF)
3600 		return npf_interception(vcpu);
3601 #ifdef CONFIG_KVM_AMD_SEV
3602 	else if (exit_code == SVM_EXIT_VMGEXIT)
3603 		return sev_handle_vmgexit(vcpu);
3604 #endif
3605 #endif
3606 	if (exit_code >= ARRAY_SIZE(svm_exit_handlers))
3607 		goto unexpected_vmexit;
3608 
3609 	exit_code = array_index_nospec(exit_code, ARRAY_SIZE(svm_exit_handlers));
3610 	if (!svm_exit_handlers[exit_code])
3611 		goto unexpected_vmexit;
3612 
3613 	return svm_exit_handlers[exit_code](vcpu);
3614 
3615 unexpected_vmexit:
3616 	dump_vmcb(vcpu);
3617 	kvm_prepare_unexpected_reason_exit(vcpu, __exit_code);
3618 	return 0;
3619 }
3620 
3621 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3622 			      u64 *info1, u64 *info2,
3623 			      u32 *intr_info, u32 *error_code)
3624 {
3625 	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3626 
3627 	*reason = control->exit_code;
3628 	*info1 = control->exit_info_1;
3629 	*info2 = control->exit_info_2;
3630 	*intr_info = control->exit_int_info;
3631 	if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3632 	    (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3633 		*error_code = control->exit_int_info_err;
3634 	else
3635 		*error_code = 0;
3636 }
3637 
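/*
 * Note, EVENTINJ uses the same layout as EXITINTINFO, which is why the
 * SVM_EXITINTINFO_* masks can be applied to event_inj below.
 */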
3638 static void svm_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info,
3639 			       u32 *error_code)
3640 {
3641 	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3642 
3643 	*intr_info = control->event_inj;
3644 
3645 	if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3646 	    (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3647 		*error_code = control->event_inj_err;
3648 	else
3649 		*error_code = 0;
3650 
3651 }
3652 
3653 static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3654 {
3655 	struct vcpu_svm *svm = to_svm(vcpu);
3656 	struct kvm_run *kvm_run = vcpu->run;
3657 
3658 	/* SEV-ES guests must use the CR write traps to track CR registers. */
3659 	if (!is_sev_es_guest(vcpu)) {
3660 		if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3661 			vcpu->arch.cr0 = svm->vmcb->save.cr0;
3662 		if (npt_enabled)
3663 			vcpu->arch.cr3 = svm->vmcb->save.cr3;
3664 	}
3665 
3666 	if (is_guest_mode(vcpu)) {
3667 		int vmexit;
3668 
3669 		trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
3670 
3671 		vmexit = nested_svm_exit_special(svm);
3672 
3673 		if (vmexit == NESTED_EXIT_CONTINUE)
3674 			vmexit = nested_svm_exit_handled(svm);
3675 
3676 		if (vmexit == NESTED_EXIT_DONE)
3677 			return 1;
3678 	}
3679 
3680 	if (svm_is_vmrun_failure(svm->vmcb->control.exit_code)) {
3681 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3682 		kvm_run->fail_entry.hardware_entry_failure_reason
3683 			= svm->vmcb->control.exit_code;
3684 		kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3685 		dump_vmcb(vcpu);
3686 		return 0;
3687 	}
3688 
3689 	if (exit_fastpath != EXIT_FASTPATH_NONE)
3690 		return 1;
3691 
3692 	return svm_invoke_exit_handler(vcpu, svm->vmcb->control.exit_code);
3693 }
3694 
3695 static void svm_set_nested_run_soft_int_state(struct kvm_vcpu *vcpu)
3696 {
3697 	struct vcpu_svm *svm = to_svm(vcpu);
3698 
3699 	svm->soft_int_csbase = svm->vmcb->save.cs.base;
3700 	svm->soft_int_old_rip = kvm_rip_read(vcpu);
3701 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
3702 		svm->soft_int_next_rip = kvm_rip_read(vcpu);
3703 }
3704 
3705 static int pre_svm_run(struct kvm_vcpu *vcpu)
3706 {
3707 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
3708 	struct vcpu_svm *svm = to_svm(vcpu);
3709 
3710 	/*
3711 	 * If the previous vmrun of the vmcb occurred on a different physical
3712 	 * cpu, then mark the vmcb dirty and assign a new asid.  Hardware's
3713 	 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3714 	 */
3715 	if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
3716 		svm->current_vmcb->asid_generation = 0;
3717 		vmcb_mark_all_dirty(svm->vmcb);
3718 		svm->current_vmcb->cpu = vcpu->cpu;
3719 	}
3720 
3721 	if (is_sev_guest(vcpu))
3722 		return pre_sev_run(svm, vcpu->cpu);
3723 
3724 	/* FIXME: handle wraparound of asid_generation */
3725 	if (svm->current_vmcb->asid_generation != sd->asid_generation)
3726 		new_asid(svm, sd);
3727 
3728 	return 0;
3729 }
3730 
3731 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3732 {
3733 	struct vcpu_svm *svm = to_svm(vcpu);
3734 
3735 	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3736 
3737 	if (svm->nmi_l1_to_l2)
3738 		return;
3739 
3740 	/*
3741 	 * No need to manually track NMI masking when vNMI is enabled, hardware
3742 	 * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
3743 	 * case where software directly injects an NMI.
3744 	 */
3745 	if (!is_vnmi_enabled(svm)) {
3746 		svm->nmi_masked = true;
3747 		svm_set_iret_intercept(svm);
3748 	}
3749 	++vcpu->stat.nmi_injections;
3750 }
3751 
3752 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
3753 {
3754 	struct vcpu_svm *svm = to_svm(vcpu);
3755 
3756 	if (!is_vnmi_enabled(svm))
3757 		return false;
3758 
3759 	return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
3760 }
3761 
3762 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
3763 {
3764 	struct vcpu_svm *svm = to_svm(vcpu);
3765 
3766 	if (!is_vnmi_enabled(svm))
3767 		return false;
3768 
3769 	if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
3770 		return false;
3771 
3772 	svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
3773 	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
3774 
3775 	/*
3776 	 * Because the pending NMI is serviced by hardware, KVM can't know when
3777 	 * the NMI is "injected", but for all intents and purposes, passing the
3778 	 * NMI off to hardware counts as injection.
3779 	 */
3780 	++vcpu->stat.nmi_injections;
3781 
3782 	return true;
3783 }
3784 
3785 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
3786 {
3787 	struct kvm_queued_interrupt *intr = &vcpu->arch.interrupt;
3788 	struct vcpu_svm *svm = to_svm(vcpu);
3789 	u32 type;
3790 
3791 	if (intr->soft) {
3792 		if (svm_update_soft_interrupt_rip(vcpu, intr->nr))
3793 			return;
3794 
3795 		type = SVM_EVTINJ_TYPE_SOFT;
3796 	} else {
3797 		type = SVM_EVTINJ_TYPE_INTR;
3798 	}
3799 
3800 	/*
3801 	 * If AVIC was inhibited in order to detect an IRQ window, and there's
3802 	 * no other injectable interrupts pending or L2 is active (see below),
3803 	 * then drop the inhibit as the window has served its purpose.
3804 	 *
3805 	 * If L2 is active, this path is reachable if L1 is not intercepting
3806 	 * IRQs, i.e. if KVM is injecting L1 IRQs into L2.  AVIC is locally
3807 	 * inhibited while L2 is active; drop the VM-wide inhibit to optimize
3808 	 * the case in which the interrupt window was requested while L1 was
3809 	 * active (the vCPU was not running nested).
3810 	 */
3811 	if (svm->avic_irq_window &&
3812 	    (!kvm_cpu_has_injectable_intr(vcpu) || is_guest_mode(vcpu))) {
3813 		svm->avic_irq_window = false;
3814 		kvm_dec_apicv_irq_window_req(svm->vcpu.kvm);
3815 	}
3816 
3817 	trace_kvm_inj_virq(intr->nr, intr->soft, reinjected);
3818 	++vcpu->stat.irq_injections;
3819 
3820 	svm->vmcb->control.event_inj = intr->nr | SVM_EVTINJ_VALID | type;
3821 }
3822 
3823 static void svm_fixup_nested_rips(struct kvm_vcpu *vcpu)
3824 {
3825 	struct vcpu_svm *svm = to_svm(vcpu);
3826 
3827 	if (!is_guest_mode(vcpu) || !vcpu->arch.nested_run_pending)
3828 		return;
3829 
3830 	/*
3831 	 * If nrips is supported in hardware but not exposed to L1, stuff the
3832 	 * actual L2 RIP to emulate what a nrips=0 CPU would do (L1 is
3833 	 * responsible for advancing RIP prior to injecting the event). Once L2
3834 	 * runs after L1 executes VMRUN, NextRIP is updated by the CPU and/or
3835 	 * KVM, and this is no longer needed.
3836 	 *
3837 	 * This is done here (as opposed to when preparing vmcb02) to use the
3838 	 * most up-to-date value of RIP regardless of the order of restoring
3839 	 * registers and nested state in the vCPU save+restore path.
3840 	 */
3841 	if (boot_cpu_has(X86_FEATURE_NRIPS) &&
3842 	    !guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
3843 		svm->vmcb->control.next_rip = kvm_rip_read(vcpu);
3844 
3845 	/*
3846 	 * Similarly, initialize the soft int metadata here to use the most
3847 	 * up-to-date values of RIP and CS base, regardless of restore order.
3848 	 */
3849 	if (svm->soft_int_injected)
3850 		svm_set_nested_run_soft_int_state(vcpu);
3851 }
3852 
3853 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
3854 				     int trig_mode, int vector)
3855 {
3856 	/*
3857 	 * apic->apicv_active must be read after vcpu->mode.
3858 	 * Pairs with smp_store_release in vcpu_enter_guest.
3859 	 */
3860 	bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3861 
3862 	/* Note, this is called iff the local APIC is in-kernel. */
3863 	if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3864 		/* Process the interrupt via kvm_check_and_inject_events(). */
3865 		kvm_make_request(KVM_REQ_EVENT, vcpu);
3866 		kvm_vcpu_kick(vcpu);
3867 		return;
3868 	}
3869 
3870 	trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3871 	if (in_guest_mode) {
3872 		/*
3873 		 * Signal the doorbell to tell hardware to inject the IRQ.  If
3874 		 * the vCPU exits the guest before the doorbell chimes, hardware
3875 		 * will automatically process AVIC interrupts at the next VMRUN.
3876 		 */
3877 		avic_ring_doorbell(vcpu);
3878 	} else {
3879 		/*
3880 		 * Wake the vCPU if it was blocking.  KVM will then detect the
3881 		 * pending IRQ when checking if the vCPU has a wake event.
3882 		 */
3883 		kvm_vcpu_wake_up(vcpu);
3884 	}
3885 }
3886 
3887 static void svm_deliver_interrupt(struct kvm_lapic *apic,  int delivery_mode,
3888 				  int trig_mode, int vector)
3889 {
3890 	kvm_lapic_set_irr(vector, apic);
3891 
3892 	/*
3893 	 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
3894 	 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
3895 	 * the read of guest_mode.  This guarantees that either VMRUN will see
3896 	 * and process the new vIRR entry, or that svm_complete_interrupt_delivery
3897 	 * will signal the doorbell if the CPU has already entered the guest.
3898 	 */
3899 	smp_mb__after_atomic();
3900 	svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
3901 }
3902 
3903 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3904 {
3905 	struct vcpu_svm *svm = to_svm(vcpu);
3906 
3907 	/*
3908 	 * SEV-ES guests must always keep the CR intercepts cleared. CR
3909 	 * tracking is done using the CR write traps.
3910 	 */
3911 	if (is_sev_es_guest(vcpu))
3912 		return;
3913 
3914 	if (nested_svm_virtualize_tpr(vcpu))
3915 		return;
3916 
3917 	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3918 
3919 	if (irr == -1)
3920 		return;
3921 
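	/*
	 * Re-enable the CR8 write intercept only if the highest pending IRQ
	 * is blocked by the current TPR, i.e. only if KVM needs to be
	 * notified when the guest lowers its TPR enough to unblock delivery.
	 */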
3922 	if (tpr >= irr)
3923 		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3924 }
3925 
3926 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3927 {
3928 	struct vcpu_svm *svm = to_svm(vcpu);
3929 
3930 	if (is_vnmi_enabled(svm))
3931 		return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
3932 	else
3933 		return svm->nmi_masked;
3934 }
3935 
3936 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3937 {
3938 	struct vcpu_svm *svm = to_svm(vcpu);
3939 
3940 	if (is_vnmi_enabled(svm)) {
3941 		if (masked)
3942 			svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
3943 		else
3944 			svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
3945 
3946 	} else {
3947 		svm->nmi_masked = masked;
3948 		if (masked)
3949 			svm_set_iret_intercept(svm);
3950 		else
3951 			svm_clr_iret_intercept(svm);
3952 	}
3953 }
3954 
3955 bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3956 {
3957 	struct vcpu_svm *svm = to_svm(vcpu);
3958 	struct vmcb *vmcb = svm->vmcb;
3959 
3960 	if (!gif_set(svm))
3961 		return true;
3962 
3963 	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3964 		return false;
3965 
3966 	if (svm_get_nmi_mask(vcpu))
3967 		return true;
3968 
3969 	return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3970 }
3971 
3972 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3973 {
3974 	struct vcpu_svm *svm = to_svm(vcpu);
3975 	if (vcpu->arch.nested_run_pending)
3976 		return -EBUSY;
3977 
3978 	if (svm_nmi_blocked(vcpu))
3979 		return 0;
3980 
3981 	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
3982 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3983 		return -EBUSY;
3984 	return 1;
3985 }
3986 
3987 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3988 {
3989 	struct vcpu_svm *svm = to_svm(vcpu);
3990 	struct vmcb *vmcb = svm->vmcb;
3991 
3992 	if (!gif_set(svm))
3993 		return true;
3994 
3995 	if (is_guest_mode(vcpu)) {
3996 		/* As long as interrupts are being delivered...  */
3997 		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3998 		    ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3999 		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
4000 			return true;
4001 
4002 		/* ... vmexits aren't blocked by the interrupt shadow  */
4003 		if (nested_exit_on_intr(svm))
4004 			return false;
4005 	} else {
4006 		if (!svm_get_if_flag(vcpu))
4007 			return true;
4008 	}
4009 
4010 	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
4011 }
4012 
4013 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4014 {
4015 	struct vcpu_svm *svm = to_svm(vcpu);
4016 
4017 	if (vcpu->arch.nested_run_pending)
4018 		return -EBUSY;
4019 
4020 	if (svm_interrupt_blocked(vcpu))
4021 		return 0;
4022 
4023 	/*
4024 	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
4025 	 * e.g. if the IRQ arrived asynchronously after checking nested events.
4026 	 */
4027 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
4028 		return -EBUSY;
4029 
4030 	return 1;
4031 }
4032 
4033 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
4034 {
4035 	struct vcpu_svm *svm = to_svm(vcpu);
4036 
4037 	/*
4038 	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
4039 	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
4040 	 * get that intercept, this function will be called again though and
4041 	 * we'll get the vintr intercept. However, if the vGIF feature is
4042 	 * enabled, the STGI interception will not occur. Enable the irq
4043 	 * window under the assumption that the hardware will set the GIF.
4044 	 */
4045 	if (vgif || gif_set(svm)) {
4046 		/*
4047 		 * KVM only enables IRQ windows when AVIC is enabled if there's
4048 		 * pending ExtINT since it cannot be injected via AVIC (ExtINT
4049 		 * bypasses the local APIC).  V_IRQ is ignored by hardware when
4050 		 * AVIC is enabled, and so KVM needs to temporarily disable
4051 		 * AVIC in order to detect when it's ok to inject the ExtINT.
4052 		 *
4053 		 * If running nested, AVIC is already locally inhibited on this
4054 		 * vCPU (L2 vCPUs use a different MMU that never maps the AVIC
4055 		 * backing page), therefore there is no need to increment the
4056 		 * VM-wide AVIC inhibit.  KVM will re-evaluate events when the
4057 		 * vCPU exits to L1 and enable an IRQ window if the ExtINT is
4058 		 * still pending.
4059 		 *
4060 		 * Note, the IRQ window inhibit needs to be updated even if
4061 		 * AVIC is inhibited for a different reason, as KVM needs to
4062 		 * keep AVIC inhibited if the other reason is cleared and there
4063 		 * is still an injectable interrupt pending.
4064 		 */
4065 		if (enable_apicv && !svm->avic_irq_window && !is_guest_mode(vcpu)) {
4066 			svm->avic_irq_window = true;
4067 			kvm_inc_apicv_irq_window_req(vcpu->kvm);
4068 		}
4069 
4070 		svm_set_vintr(svm);
4071 	}
4072 }
4073 
4074 static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
4075 {
4076 	struct vcpu_svm *svm = to_svm(vcpu);
4077 
4078 	/*
4079 	 * If NMIs are outright masked, i.e. the vCPU is already handling an
4080 	 * NMI, and KVM has not yet intercepted an IRET, then there is nothing
4081 	 * more to do at this time as KVM has already enabled IRET intercepts.
4082 	 * If KVM has already intercepted IRET, then single-step over the IRET,
4083 	 * as NMIs aren't architecturally unmasked until the IRET completes.
4084 	 *
4085 	 * If vNMI is enabled, KVM should never request an NMI window if NMIs
4086 	 * are masked, as KVM allows at most one to-be-injected NMI and one
4087 	 * pending NMI.  If two NMIs arrive simultaneously, KVM will inject one
4088 	 * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
4089 	 * unmasked.  KVM _will_ request an NMI window in some situations, e.g.
4090 	 * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
4091 	 * inject the NMI.  In those situations, KVM needs to single-step over
4092 	 * the STI shadow or intercept STGI.
4093 	 */
4094 	if (svm_get_nmi_mask(vcpu)) {
4095 		WARN_ON_ONCE(is_vnmi_enabled(svm));
4096 
4097 		if (!svm->awaiting_iret_completion)
4098 			return; /* IRET will cause a vm exit */
4099 	}
4100 
4101 	/*
4102 	 * SEV-ES guests are responsible for signaling when a vCPU is ready to
4103 	 * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
4104 	 * KVM can't intercept and single-step IRET to detect when NMIs are
4105 	 * unblocked (architecturally speaking).  See SVM_VMGEXIT_NMI_COMPLETE.
4106 	 *
4107 	 * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
4108 	 * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
4109 	 * supported NAEs in the GHCB protocol.
4110 	 */
4111 	if (is_sev_es_guest(vcpu))
4112 		return;
4113 
4114 	if (!gif_set(svm)) {
4115 		if (vgif)
4116 			svm_set_intercept(svm, INTERCEPT_STGI);
4117 		return; /* STGI will cause a vm exit */
4118 	}
4119 
4120 	/*
4121 	 * Something is preventing the NMI from being injected.  Single-step
4122 	 * over the blocker (IRET, exception injection, or interrupt shadow).
4123 	 */
4124 	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
4125 	svm->nmi_singlestep = true;
4126 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
4127 }
4128 
4129 static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
4130 {
4131 	struct vcpu_svm *svm = to_svm(vcpu);
4132 
4133 	/*
4134 	 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
4135 	 * A TLB flush for the current ASID flushes both "host" and "guest" TLB
4136 	 * entries, and thus is a superset of Hyper-V's fine grained flushing.
4137 	 */
4138 	kvm_hv_vcpu_purge_flush_tlb(vcpu);
4139 
4140 	/*
4141 	 * Flush only the current ASID even if the TLB flush was invoked via
4142 	 * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
4143 	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
4144 	 * unconditionally does a TLB flush on both nested VM-Enter and nested
4145 	 * VM-Exit (via kvm_mmu_reset_context()).
4146 	 */
4147 	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
4148 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4149 	else
4150 		svm->current_vmcb->asid_generation--;
4151 }
4152 
4153 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
4154 {
4155 	hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
4156 
4157 	/*
4158 	 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
4159 	 * flush the NPT mappings via hypercall as flushing the ASID only
4160 	 * affects virtual to physical mappings, it does not invalidate guest
4161 	 * physical to host physical mappings.
4162 	 */
4163 	if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
4164 		hyperv_flush_guest_mapping(root_tdp);
4165 
4166 	svm_flush_tlb_asid(vcpu);
4167 }
4168 
4169 static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
4170 {
4171 	/*
4172 	 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
4173 	 * flushes should be routed to hv_flush_remote_tlbs() without requesting
4174 	 * a "regular" remote flush.  Reaching this point means either there's
4175 	 * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
4176 	 * which might be fatal to the guest.  Yell, but try to recover.
4177 	 */
4178 	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
4179 		hv_flush_remote_tlbs(vcpu->kvm);
4180 
4181 	svm_flush_tlb_asid(vcpu);
4182 }
4183 
4184 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
4185 {
4186 	struct vcpu_svm *svm = to_svm(vcpu);
4187 
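	/* INVLPGA invalidates the GVA's mappings, but only for the given ASID. */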
4188 	invlpga(gva, svm->vmcb->control.asid);
4189 }
4190 
4191 static void svm_flush_tlb_guest(struct kvm_vcpu *vcpu)
4192 {
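	/*
	 * Mark ERAPS dirty so that the next VMRUN also clears the guest's
	 * return address predictor (see the ERAP_CONTROL_CLEAR_RAP handling
	 * in svm_vcpu_run()).
	 */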
4193 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_ERAPS);
4194 
4195 	svm_flush_tlb_asid(vcpu);
4196 }
4197 
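/*
 * V_TPR (bits 3:0 of int_ctl) mirrors the guest's CR8/TPR.  Keep it and the
 * in-kernel local APIC in sync, in both directions, when CR8 writes aren't
 * being intercepted.
 */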
4198 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
4199 {
4200 	struct vcpu_svm *svm = to_svm(vcpu);
4201 
4202 	if (nested_svm_virtualize_tpr(vcpu))
4203 		return;
4204 
4205 	if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
4206 		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
4207 		kvm_set_cr8(vcpu, cr8);
4208 	}
4209 }
4210 
4211 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
4212 {
4213 	struct vcpu_svm *svm = to_svm(vcpu);
4214 	u64 cr8;
4215 
4216 	if (nested_svm_virtualize_tpr(vcpu))
4217 		return;
4218 
4219 	cr8 = kvm_get_cr8(vcpu);
4220 	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
4221 	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
4222 }
4223 
4224 static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
4225 					int type)
4226 {
4227 	bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
4228 	bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
4229 	struct vcpu_svm *svm = to_svm(vcpu);
4230 
4231 	/*
4232 	 * Initialize the soft int fields *before* reading them below if KVM
4233 	 * aborted entry to the guest with a nested VMRUN pending.  To ensure
4234 	 * KVM uses up-to-date values for RIP and CS base across save/restore,
4235 	 * regardless of restore order, KVM waits to set the soft int fields
4236 	 * until VMRUN is imminent.  But when canceling injection, KVM requeues
4237 	 * the soft int and will reinject it via the standard injection flow,
4238 	 * and so KVM needs to grab the state from the pending nested VMRUN.
4239 	 */
4240 	if (is_guest_mode(vcpu) && vcpu->arch.nested_run_pending)
4241 		svm_set_nested_run_soft_int_state(vcpu);
4242 
4243 	/*
4244 	 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
4245 	 * associated with the original soft exception/interrupt.  next_rip is
4246 	 * cleared on all exits that can occur while vectoring an event, so KVM
4247 	 * needs to manually set next_rip for re-injection.  Unlike the !nrips
4248 	 * case below, this needs to be done if and only if KVM is re-injecting
4249 	 * the same event, i.e. if the event is a soft exception/interrupt,
4250 	 * otherwise next_rip is unused on VMRUN.
4251 	 */
4252 	if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
4253 	    kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
4254 		svm->vmcb->control.next_rip = svm->soft_int_next_rip;
4255 	/*
4256 	 * If NRIPS isn't enabled, KVM must manually advance RIP prior to
4257 	 * injecting the soft exception/interrupt.  That advancement needs to
4258 	 * be unwound if vectoring didn't complete.  Note, the new event may
4259 	 * not be the injected event, e.g. if KVM injected an INTn, the INTn
4260 	 * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
4261 	 * be the reported vectored event, but RIP still needs to be unwound.
4262 	 */
4263 	else if (!nrips && (is_soft || is_exception) &&
4264 		 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
4265 		kvm_rip_write(vcpu, svm->soft_int_old_rip);
4266 }
4267 
4268 static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
4269 {
4270 	struct vcpu_svm *svm = to_svm(vcpu);
4271 	u8 vector;
4272 	int type;
4273 	u32 exitintinfo = svm->vmcb->control.exit_int_info;
4274 	bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
4275 	bool soft_int_injected = svm->soft_int_injected;
4276 
4277 	svm->nmi_l1_to_l2 = false;
4278 	svm->soft_int_injected = false;
4279 
4280 	/*
4281 	 * If we've made progress since setting awaiting_iret_completion, we've
4282 	 * executed an IRET and can allow NMI injection.
4283 	 */
4284 	if (svm->awaiting_iret_completion &&
4285 	    kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
4286 		svm->awaiting_iret_completion = false;
4287 		svm->nmi_masked = false;
4288 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4289 	}
4290 
4291 	vcpu->arch.nmi_injected = false;
4292 	kvm_clear_exception_queue(vcpu);
4293 	kvm_clear_interrupt_queue(vcpu);
4294 
4295 	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
4296 		return;
4297 
4298 	kvm_make_request(KVM_REQ_EVENT, vcpu);
4299 
4300 	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
4301 	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
4302 
4303 	if (soft_int_injected)
4304 		svm_complete_soft_interrupt(vcpu, vector, type);
4305 
4306 	switch (type) {
4307 	case SVM_EXITINTINFO_TYPE_NMI:
4308 		vcpu->arch.nmi_injected = true;
4309 		svm->nmi_l1_to_l2 = nmi_l1_to_l2;
4310 		break;
4311 	case SVM_EXITINTINFO_TYPE_EXEPT: {
4312 		u32 error_code = 0;
4313 
4314 		/*
4315 		 * Never re-inject a #VC exception.
4316 		 */
4317 		if (vector == X86_TRAP_VC)
4318 			break;
4319 
4320 		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR)
4321 			error_code = svm->vmcb->control.exit_int_info_err;
4322 
4323 		kvm_requeue_exception(vcpu, vector,
4324 				      exitintinfo & SVM_EXITINTINFO_VALID_ERR,
4325 				      error_code);
4326 		break;
4327 	}
4328 	case SVM_EXITINTINFO_TYPE_INTR:
4329 		kvm_queue_interrupt(vcpu, vector, false);
4330 		break;
4331 	case SVM_EXITINTINFO_TYPE_SOFT:
4332 		kvm_queue_interrupt(vcpu, vector, true);
4333 		break;
4334 	default:
4335 		break;
4336 	}
4337 
4338 }
4339 
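/*
 * Cancel a pending injection by stuffing the to-be-injected event into
 * exit_int_info, which svm_complete_interrupts() handles exactly as if the
 * CPU had taken a VM-Exit while vectoring the event, i.e. the event is
 * requeued and will be re-injected via the standard flow.
 */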
4340 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
4341 {
4342 	struct vcpu_svm *svm = to_svm(vcpu);
4343 	struct vmcb_control_area *control = &svm->vmcb->control;
4344 
4345 	control->exit_int_info = control->event_inj;
4346 	control->exit_int_info_err = control->event_inj_err;
4347 	control->event_inj = 0;
4348 	svm_complete_interrupts(vcpu);
4349 }
4350 
4351 static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
4352 {
4353 #ifdef CONFIG_KVM_AMD_SEV
4354 	if (to_kvm_sev_info(vcpu->kvm)->need_init)
4355 		return -EINVAL;
4356 #endif
4357 
4358 	return 1;
4359 }
4360 
4361 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
4362 {
4363 	struct vcpu_svm *svm = to_svm(vcpu);
4364 	struct vmcb_control_area *control = &svm->vmcb->control;
4365 
4366 	/*
4367 	 * Next RIP must be provided as IRQs are disabled, and accessing guest
4368 	 * memory to decode the instruction might fault, i.e. might sleep.
4369 	 */
4370 	if (!nrips || !control->next_rip)
4371 		return EXIT_FASTPATH_NONE;
4372 
4373 	if (is_guest_mode(vcpu))
4374 		return EXIT_FASTPATH_NONE;
4375 
4376 	switch (control->exit_code) {
4377 	case SVM_EXIT_MSR:
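		/*
		 * exit_info_1 is '1' for WRMSR and '0' for RDMSR; only writes
		 * have a fastpath.
		 */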
4378 		if (!control->exit_info_1)
4379 			break;
4380 		return handle_fastpath_wrmsr(vcpu);
4381 	case SVM_EXIT_HLT:
4382 		return handle_fastpath_hlt(vcpu);
4383 	case SVM_EXIT_INVD:
4384 		return handle_fastpath_invd(vcpu);
4385 	default:
4386 		break;
4387 	}
4388 
4389 	return EXIT_FASTPATH_NONE;
4390 }
4391 
4392 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
4393 {
4394 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
4395 	struct vcpu_svm *svm = to_svm(vcpu);
4396 
4397 	guest_state_enter_irqoff();
4398 
4399 	/*
4400 	 * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
4401 	 * VMRUN controls whether or not physical IRQs are masked (KVM always
4402 	 * runs with V_INTR_MASKING_MASK).  Toggle RFLAGS.IF here to avoid the
4403 	 * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
4404 	 * into guest state if delivery of an event during VMRUN triggers a
4405 	 * #VMEXIT, and the guest_state transitions already tell lockdep that
4406 	 * IRQs are being enabled/disabled.  Note!  GIF=0 for the entirety of
4407 	 * this path, so IRQs aren't actually unmasked while running host code.
4408 	 */
4409 	raw_local_irq_enable();
4410 
4411 	amd_clear_divider();
4412 
4413 	if (is_sev_es_guest(vcpu))
4414 		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
4415 				      sev_es_host_save_area(sd));
4416 	else
4417 		__svm_vcpu_run(svm, spec_ctrl_intercepted);
4418 
4419 	raw_local_irq_disable();
4420 
4421 	guest_state_exit_irqoff();
4422 }
4423 
4424 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
4425 {
4426 	bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
4427 	struct vcpu_svm *svm = to_svm(vcpu);
4428 	bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
4429 
4430 	trace_kvm_entry(vcpu, force_immediate_exit);
4431 
4432 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4433 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4434 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4435 
4436 	/*
4437 	 * Disable singlestep if we're injecting an interrupt/exception.
4438 	 * We don't want our modified rflags to be pushed on the stack where
4439 	 * we might not be able to easily reset them if we disabled NMI
4440 	 * singlestep later.
4441 	 */
4442 	if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
4443 		/*
4444 		 * Event injection happens before external interrupts cause a
4445 		 * vmexit and interrupts are disabled here, so smp_send_reschedule
4446 		 * is enough to force an immediate vmexit.
4447 		 */
4448 		disable_nmi_singlestep(svm);
4449 		force_immediate_exit = true;
4450 	}
4451 
4452 	if (force_immediate_exit)
4453 		smp_send_reschedule(vcpu->cpu);
4454 
4455 	if (pre_svm_run(vcpu)) {
4456 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4457 		vcpu->run->fail_entry.hardware_entry_failure_reason = SVM_EXIT_ERR;
4458 		vcpu->run->fail_entry.cpu = vcpu->cpu;
4459 		return EXIT_FASTPATH_EXIT_USERSPACE;
4460 	}
4461 
4462 	sync_lapic_to_cr8(vcpu);
4463 
4464 	if (unlikely(svm->asid != svm->vmcb->control.asid)) {
4465 		svm->vmcb->control.asid = svm->asid;
4466 		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
4467 	}
4468 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
4469 
4470 	if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS) &&
4471 	    kvm_register_is_dirty(vcpu, VCPU_EXREG_ERAPS))
4472 		svm->vmcb->control.erap_ctl |= ERAP_CONTROL_CLEAR_RAP;
4473 
4474 	svm_fixup_nested_rips(vcpu);
4475 
4476 	svm_hv_update_vp_id(svm->vmcb, vcpu);
4477 
4478 	/*
4479 	 * Run with all-zero DR6 unless the guest can write DR6 freely, so that
4480 	 * KVM can get the exact cause of a #DB.  Note, loading guest DR6 from
4481 	 * KVM's snapshot is only necessary when DR accesses won't exit.
4482 	 */
4483 	if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6))
4484 		svm_set_dr6(vcpu, vcpu->arch.dr6);
4485 	else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
4486 		svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
4487 
4488 	clgi();
4489 
4490 	/*
4491 	 * Hardware only context switches DEBUGCTL if LBR virtualization is
4492 	 * enabled.  Manually load DEBUGCTL if necessary (and restore it after
4493 	 * VM-Exit), as running with the host's DEBUGCTL can negatively affect
4494 	 * guest state and can even be fatal, e.g. due to Bus Lock Detect.
4495 	 */
4496 	if (!(svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR) &&
4497 	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
4498 		update_debugctlmsr(svm->vmcb->save.dbgctl);
4499 
4500 	kvm_wait_lapic_expire(vcpu);
4501 
4502 	/*
4503 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
4504 	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
4505 	 * is no need to worry about the conditional branch over the wrmsr
4506 	 * being speculatively taken.
4507 	 */
4508 	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4509 		x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);
4510 
4511 	svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
4512 
4513 	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4514 		x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
4515 
4516 	if (!is_sev_es_guest(vcpu)) {
4517 		vcpu->arch.cr2 = svm->vmcb->save.cr2;
4518 		vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4519 		vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4520 		vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4521 	}
4522 	vcpu->arch.regs_dirty = 0;
4523 
4524 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4525 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
4526 
4527 	if (!(svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR) &&
4528 	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
4529 		update_debugctlmsr(vcpu->arch.host_debugctl);
4530 
4531 	stgi();
4532 
4533 	/* Any pending NMI will happen here */
4534 
4535 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4536 		kvm_after_interrupt(vcpu);
4537 
4538 	sync_cr8_to_lapic(vcpu);
4539 
4540 	svm->next_rip = 0;
4541 	if (is_guest_mode(vcpu)) {
4542 		nested_sync_control_from_vmcb02(svm);
4543 
4544 		/* Track VMRUNs that have made past consistency checking */
4545 		if (vcpu->arch.nested_run_pending &&
4546 		    !svm_is_vmrun_failure(svm->vmcb->control.exit_code))
4547 			++vcpu->stat.nested_run;
4548 
4549 		vcpu->arch.nested_run_pending = 0;
4550 	}
4551 
4552 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4553 
4554 	/*
4555 	 * Unconditionally mask off the CLEAR_RAP bit, the AND is just as cheap
4556 	 * as the TEST+Jcc to avoid it.
4557 	 */
4558 	if (cpu_feature_enabled(X86_FEATURE_ERAPS))
4559 		svm->vmcb->control.erap_ctl &= ~ERAP_CONTROL_CLEAR_RAP;
4560 
4561 	vmcb_mark_all_clean(svm->vmcb);
4562 
4563 	/* if exit due to PF check for async PF */
4564 	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4565 		vcpu->arch.apf.host_apf_flags =
4566 			kvm_read_and_reset_apf_flags();
4567 
4568 	vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
4569 
4570 	if (!msr_write_intercepted(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_CTL))
4571 		rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, vcpu_to_pmu(vcpu)->global_ctrl);
4572 
4573 	trace_kvm_exit(vcpu, KVM_ISA_SVM);
4574 
4575 	svm_complete_interrupts(vcpu);
4576 
4577 	/*
4578 	 * Update the cache after completing interrupts to get an accurate
4579 	 * NextRIP, e.g. when re-injecting a soft interrupt.
4580 	 *
4581 	 * FIXME: Rework svm_get_nested_state() to not pull data from the
4582 	 *        cache (except for maybe int_ctl).
4583 	 */
4584 	if (is_guest_mode(vcpu))
4585 		svm->nested.ctl.next_rip = svm->vmcb->control.next_rip;
4586 
4587 	return svm_exit_handlers_fastpath(vcpu);
4588 }
4589 
4590 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
4591 			     int root_level)
4592 {
4593 	struct vcpu_svm *svm = to_svm(vcpu);
4594 	unsigned long cr3;
4595 
4596 	if (npt_enabled) {
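		/*
		 * With NPT, the root is programmed into nested_cr3 (nCR3);
		 * __sme_set() tags the physical address with the encryption
		 * bit when SME/SEV is active.
		 */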
4597 		svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
4598 		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
4599 
4600 		hv_track_root_tdp(vcpu, root_hpa);
4601 
4602 		cr3 = vcpu->arch.cr3;
4603 	} else if (root_level >= PT64_ROOT_4LEVEL) {
4604 		cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
4605 	} else {
4606 		/* PCID in the guest should be impossible with a 32-bit MMU. */
4607 		WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
4608 		cr3 = root_hpa;
4609 	}
4610 
4611 	svm->vmcb->save.cr3 = cr3;
4612 	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
4613 }
4614 
4615 static void
4616 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4617 {
4618 	/*
4619 	 * Patch in the VMMCALL instruction:
4620 	 */
4621 	hypercall[0] = 0x0f;
4622 	hypercall[1] = 0x01;
4623 	hypercall[2] = 0xd9;
4624 }
4625 
4626 /*
4627  * The kvm parameter can be NULL (module initialization, or invocation before
4628  * VM creation). Be sure to check the kvm parameter before using it.
4629  */
4630 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
4631 {
4632 	switch (index) {
4633 	case MSR_IA32_MCG_EXT_CTL:
4634 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
4635 		return false;
4636 	case MSR_IA32_SMBASE:
4637 		if (!IS_ENABLED(CONFIG_KVM_SMM))
4638 			return false;
4639 
4640 #ifdef CONFIG_KVM_AMD_SEV
4641 		/*
4642 		 * KVM can't access register state to emulate SMM for SEV-ES
4643 		 * guests.  Consuming stale data here is "fine", as KVM only
4644 		 * checks for MSR_IA32_SMBASE support without a vCPU when
4645 		 * userspace is querying KVM_CAP_X86_SMM.
4646 		 */
4647 		if (kvm && ____sev_es_guest(kvm))
4648 			return false;
4649 #endif
4650 		break;
4651 	default:
4652 		break;
4653 	}
4654 
4655 	return true;
4656 }
4657 
4658 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
4659 {
4660 	struct vcpu_svm *svm = to_svm(vcpu);
4661 
4662 	/*
4663 	 * SVM doesn't provide a way to disable just XSAVES in the guest; KVM
4664 	 * can only disable all variants by disallowing CR4.OSXSAVE from
4665 	 * being set.  As a result, if the host has XSAVE and XSAVES, and the
4666 	 * guest has XSAVE enabled, the guest can execute XSAVES without
4667 	 * faulting.  Treat XSAVES as enabled in this case regardless of
4668 	 * whether it's advertised to the guest so that KVM context switches
4669 	 * XSS on VM-Enter/VM-Exit.  Failure to do so would effectively give
4670 	 * the guest read/write access to the host's XSS.
4671 	 */
4672 	guest_cpu_cap_change(vcpu, X86_FEATURE_XSAVES,
4673 			     boot_cpu_has(X86_FEATURE_XSAVES) &&
4674 			     guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE));
4675 
4676 	/*
4677 	 * Intercept VMLOAD if the vCPU model is Intel in order to emulate that
4678 	 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
4679 	 * SVM on Intel is bonkers and extremely unlikely to work).
4680 	 */
4681 	if (guest_cpuid_is_intel_compatible(vcpu))
4682 		guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
4683 
4684 	if (is_sev_guest(vcpu))
4685 		sev_vcpu_after_set_cpuid(svm);
4686 }
4687 
4688 static bool svm_has_wbinvd_exit(void)
4689 {
4690 	return true;
4691 }
4692 
4693 #define PRE_EX(exit)  { .exit_code = (exit), \
4694 			.stage = X86_ICPT_PRE_EXCEPT, }
4695 #define POST_EX(exit) { .exit_code = (exit), \
4696 			.stage = X86_ICPT_POST_EXCEPT, }
4697 #define POST_MEM(exit) { .exit_code = (exit), \
4698 			.stage = X86_ICPT_POST_MEMACCESS, }
4699 
4700 static const struct __x86_intercept {
4701 	u32 exit_code;
4702 	enum x86_intercept_stage stage;
4703 } x86_intercept_map[] = {
4704 	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
4705 	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
4706 	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
4707 	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
4708 	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
4709 	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
4710 	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
4711 	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
4712 	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
4713 	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
4714 	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
4715 	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
4716 	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
4717 	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
4718 	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
4719 	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
4720 	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
4721 	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
4722 	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
4723 	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
4724 	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
4725 	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
4726 	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
4727 	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
4728 	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
4729 	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
4730 	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
4731 	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
4732 	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
4733 	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
4734 	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
4735 	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
4736 	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
4737 	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
4738 	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
4739 	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
4740 	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
4741 	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
4742 	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
4743 	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
4744 	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
4745 	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
4746 	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
4747 	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
4748 	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
4749 	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
4750 	[x86_intercept_xsetbv]		= PRE_EX(SVM_EXIT_XSETBV),
4751 };
4752 
4753 #undef PRE_EX
4754 #undef POST_EX
4755 #undef POST_MEM
4756 
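/*
 * Invoked by the instruction emulator when emulating on behalf of L2 to
 * determine whether the instruction should instead trigger a nested VM-Exit
 * to L1.  The map above translates emulator intercepts into SVM exit codes
 * and the emulation stage at which each check fires.
 */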
4757 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4758 			       struct x86_instruction_info *info,
4759 			       enum x86_intercept_stage stage,
4760 			       struct x86_exception *exception)
4761 {
4762 	struct vcpu_svm *svm = to_svm(vcpu);
4763 	int vmexit, ret = X86EMUL_CONTINUE;
4764 	struct __x86_intercept icpt_info;
4765 	struct vmcb *vmcb = svm->vmcb;
4766 
4767 	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4768 		goto out;
4769 
4770 	icpt_info = x86_intercept_map[info->intercept];
4771 
4772 	if (stage != icpt_info.stage)
4773 		goto out;
4774 
4775 	switch (icpt_info.exit_code) {
4776 	case SVM_EXIT_READ_CR0:
4777 		if (info->intercept == x86_intercept_cr_read)
4778 			icpt_info.exit_code += info->modrm_reg;
4779 		break;
4780 	case SVM_EXIT_WRITE_CR0: {
4781 		unsigned long cr0, val;
4782 
4783 		/*
4784 		 * Adjust the exit code accordingly if a CR other than CR0 is
4785 		 * being written, and skip straight to the common handling as
4786 		 * only CR0 has an additional selective intercept.
4787 		 */
4788 		if (info->intercept == x86_intercept_cr_write && info->modrm_reg) {
4789 			icpt_info.exit_code += info->modrm_reg;
4790 			break;
4791 		}
4792 
4793 		/*
4794 		 * Convert the exit_code to SVM_EXIT_CR0_SEL_WRITE if a
4795 		 * selective CR0 intercept is triggered (the common logic will
4796 		 * treat the selective intercept as being enabled).  Note, the
4797 		 * unconditional intercept has higher priority, i.e. this is
4798 		 * only relevant if *only* the selective intercept is enabled.
4799 		 */
4800 		if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_CR0_WRITE) ||
4801 		    !(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))
4802 			break;
4803 
4804 		/* CLTS never triggers INTERCEPT_SELECTIVE_CR0 */
4805 		if (info->intercept == x86_intercept_clts)
4806 			break;
4807 
4808 		/* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */
4809 		if (info->intercept == x86_intercept_lmsw) {
4810 			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4811 			break;
4812 		}
4813 
4814 		/*
4815 		 * MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit
4816 		 * other than SVM_CR0_SELECTIVE_MASK is changed.
4817 		 */
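		/*
		 * E.g., assuming SVM_CR0_SELECTIVE_MASK is CR0.TS | CR0.MP
		 * per its definition in svm.h: a MOV that only toggles CR0.TS
		 * masks down to cr0 == val and keeps the unconditional exit
		 * code, while one that flips CR0.CD yields cr0 ^ val != 0 and
		 * is converted to SVM_EXIT_CR0_SEL_WRITE below.
		 */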
4818 		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4819 		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
4820 		if (cr0 ^ val)
4821 			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4822 		break;
4823 	}
4824 	case SVM_EXIT_READ_DR0:
4825 	case SVM_EXIT_WRITE_DR0:
4826 		icpt_info.exit_code += info->modrm_reg;
4827 		break;
4828 	case SVM_EXIT_MSR:
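		/*
		 * Per the APM, exit_info_1 for SVM_EXIT_MSR encodes the
		 * access direction: 1 = WRMSR, 0 = RDMSR.
		 */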
4829 		if (info->intercept == x86_intercept_wrmsr)
4830 			vmcb->control.exit_info_1 = 1;
4831 		else
4832 			vmcb->control.exit_info_1 = 0;
4833 		break;
4834 	case SVM_EXIT_PAUSE:
4835 		/*
4836 	 * The emulator sees PAUSE as NOP (PAUSE is REP NOP), so this
4837 	 * fires for plain NOP too; require the REPE prefix here.
4838 		 */
4839 		if (info->rep_prefix != REPE_PREFIX)
4840 			goto out;
4841 		break;
4842 	case SVM_EXIT_IOIO: {
4843 		u64 exit_info;
4844 		u32 bytes;
4845 
4846 		if (info->intercept == x86_intercept_in ||
4847 		    info->intercept == x86_intercept_ins) {
4848 			exit_info = ((info->src_val & 0xffff) << 16) |
4849 				SVM_IOIO_TYPE_MASK;
4850 			bytes = info->dst_bytes;
4851 		} else {
4852 			exit_info = (info->dst_val & 0xffff) << 16;
4853 			bytes = info->src_bytes;
4854 		}
4855 
4856 		if (info->intercept == x86_intercept_outs ||
4857 		    info->intercept == x86_intercept_ins)
4858 			exit_info |= SVM_IOIO_STR_MASK;
4859 
4860 		if (info->rep_prefix)
4861 			exit_info |= SVM_IOIO_REP_MASK;
4862 
4863 		bytes = min(bytes, 4u);
4864 
4865 		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4866 
4867 		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4868 
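		/*
		 * Sketch of the assembled exit_info_1, assuming the
		 * SVM_IOIO_* definitions in svm.h match the APM layout:
		 *
		 *   [31:16] port   [9:7] A16/A32/A64   [6:4] SZ8/SZ16/SZ32
		 *   [3] REP        [2] STR             [0] TYPE (1 = IN)
		 *
		 * ad_bytes is 2/4/8, so the shift by (SVM_IOIO_ASIZE_SHIFT - 1)
		 * sets exactly one of bits 7/8/9, e.g. ad_bytes == 4 sets
		 * bit 8 (32-bit address size).
		 */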
4869 		vmcb->control.exit_info_1 = exit_info;
4870 		vmcb->control.exit_info_2 = info->next_rip;
4871 
4872 		break;
4873 	}
4874 	default:
4875 		break;
4876 	}
4877 
4878 	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4879 	if (static_cpu_has(X86_FEATURE_NRIPS))
4880 		vmcb->control.next_rip  = info->next_rip;
4881 	vmcb->control.exit_code = icpt_info.exit_code;
4882 	vmexit = nested_svm_exit_handled(svm);
4883 
4884 	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4885 					   : X86EMUL_CONTINUE;
4886 
4887 out:
4888 	return ret;
4889 }
4890 
4891 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
4892 {
4893 	switch (to_svm(vcpu)->vmcb->control.exit_code) {
4894 	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
4895 		svm_handle_mce(vcpu);
4896 		break;
4897 	case SVM_EXIT_INTR:
4898 		vcpu->arch.at_instruction_boundary = true;
4899 		break;
4900 	default:
4901 		break;
4902 	}
4903 }
4904 
4905 static void svm_setup_mce(struct kvm_vcpu *vcpu)
4906 {
4907 	/* [63:9] are reserved. */
4908 	vcpu->arch.mcg_cap &= 0x1ff;
4909 }
4910 
4911 #ifdef CONFIG_KVM_SMM
4912 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
4913 {
4914 	struct vcpu_svm *svm = to_svm(vcpu);
4915 
4916 	/* Per APM Vol.2 15.22.2 "Response to SMI" */
4917 	if (!gif_set(svm))
4918 		return true;
4919 
4920 	return is_smm(vcpu);
4921 }
4922 
4923 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4924 {
4925 	struct vcpu_svm *svm = to_svm(vcpu);
4926 	if (vcpu->arch.nested_run_pending)
4927 		return -EBUSY;
4928 
4929 	if (svm_smi_blocked(vcpu))
4930 		return 0;
4931 
4932 	/* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
4933 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
4934 		return -EBUSY;
4935 
4936 	return 1;
4937 }
4938 
4939 static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
4940 {
4941 	struct vcpu_svm *svm = to_svm(vcpu);
4942 	struct kvm_host_map map_save;
4943 
4944 	if (!is_guest_mode(vcpu))
4945 		return 0;
4946 
4947 	/*
4948 	 * 32-bit SMRAM format doesn't preserve EFER and SVM state.  Userspace is
4949 	 * responsible for ensuring nested SVM and SMIs are mutually exclusive.
4950 	 */
4951 
4952 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
4953 		return 1;
4954 
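	/*
	 * Stash the "SMI arrived while in guest mode" flag and the VMCB12
	 * GPA in the 64-bit SMRAM image; svm_leave_smm() consults both on
	 * RSM to decide whether to re-enter L2 and which VMCB12 to reload.
	 */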
4955 	smram->smram64.svm_guest_flag = 1;
4956 	smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
4957 
4958 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4959 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4960 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4961 
4962 	nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
4963 
4964 	/*
4965 	 * KVM uses VMCB01 to store L1 host state while L2 runs but
4966 	 * VMCB01 is going to be used during SMM and thus the state will
4967 	 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
4968 	 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
4969 	 * format of the area is identical to the guest save area, offset
4970 	 * by 0x400 (matches the offset of 'struct vmcb_save_area'
4971 	 * within 'struct vmcb'). Note: HSAVE area may also be used by
4972 	 * L1 hypervisor to save additional host context (e.g. KVM does
4973 	 * that, see svm_prepare_switch_to_guest()) which must be
4974 	 * preserved.
4975 	 */
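	/*
	 * I.e. host state lands at map_save.hva + offsetof(struct vmcb,
	 * save); the BUILD_BUG_ON below pins that offset at 0x400.
	 */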
4976 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4977 		return 1;
4978 
4979 	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
4980 
4981 	svm_copy_vmrun_state(map_save.hva + 0x400,
4982 			     &svm->vmcb01.ptr->save);
4983 
4984 	kvm_vcpu_unmap(vcpu, &map_save);
4985 	return 0;
4986 }
4987 
4988 static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
4989 {
4990 	struct vcpu_svm *svm = to_svm(vcpu);
4991 	struct kvm_host_map map, map_save;
4992 	struct vmcb *vmcb12;
4993 	int ret;
4994 
4995 	const struct kvm_smram_state_64 *smram64 = &smram->smram64;
4996 
4997 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
4998 		return 0;
4999 
5000 	/* Non-zero if SMI arrived while vCPU was in guest mode. */
5001 	if (!smram64->svm_guest_flag)
5002 		return 0;
5003 
5004 	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
5005 		return 1;
5006 
5007 	if (!(smram64->efer & EFER_SVME))
5008 		return 1;
5009 
5010 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
5011 		return 1;
5012 
5013 	ret = 1;
5014 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
5015 		goto unmap_map;
5016 
5017 	if (svm_allocate_nested(svm))
5018 		goto unmap_save;
5019 
5020 	/*
5021 	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
5022 	 * used during SMM (see svm_enter_smm())
5023 	 */
5024 
5025 	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
5026 
5027 	/*
5028 	 * Enter the nested guest now
5029 	 */
5030 
5031 	vmcb_mark_all_dirty(svm->vmcb01.ptr);
5032 
5033 	vmcb12 = map.hva;
5034 	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
5035 	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
5036 
5037 	if (nested_svm_check_cached_vmcb12(vcpu) < 0)
5038 		goto unmap_save;
5039 
5040 	if (enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, false) != 0)
5041 		goto unmap_save;
5042 
5043 	ret = 0;
5044 	vcpu->arch.nested_run_pending = 1;
5045 
5046 unmap_save:
5047 	kvm_vcpu_unmap(vcpu, &map_save);
5048 unmap_map:
5049 	kvm_vcpu_unmap(vcpu, &map);
5050 	return ret;
5051 }
5052 
5053 static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
5054 {
5055 	struct vcpu_svm *svm = to_svm(vcpu);
5056 
5057 	if (!gif_set(svm)) {
5058 		if (vgif)
5059 			svm_set_intercept(svm, INTERCEPT_STGI);
5060 		/* STGI will cause a vm exit */
5061 	} else {
5062 		/* We must be in SMM; RSM will cause a vmexit anyway.  */
5063 	}
5064 }
5065 #endif
5066 
5067 static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
5068 					 void *insn, int insn_len)
5069 {
5070 	struct vcpu_svm *svm = to_svm(vcpu);
5071 	bool smep, smap, is_user;
5072 	u64 error_code;
5073 
5074 	/* Check that emulation is possible during event vectoring */
5075 	if ((svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK) &&
5076 	    !kvm_can_emulate_event_vectoring(emul_type))
5077 		return X86EMUL_UNHANDLEABLE_VECTORING;
5078 
5079 	/* Emulation is always possible when KVM has access to all guest state. */
5080 	if (!is_sev_guest(vcpu))
5081 		return X86EMUL_CONTINUE;
5082 
5083 	/* #UD and #GP should never be intercepted for SEV guests. */
5084 	WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
5085 				  EMULTYPE_TRAP_UD_FORCED |
5086 				  EMULTYPE_VMWARE_GP));
5087 
5088 	/*
5089 	 * Emulation is impossible for SEV-ES guests as KVM doesn't have access
5090 	 * to guest register state.
5091 	 */
5092 	if (is_sev_es_guest(vcpu))
5093 		return X86EMUL_RETRY_INSTR;
5094 
5095 	/*
5096 	 * Emulation is possible if the instruction is already decoded, e.g.
5097 	 * when completing I/O after returning from userspace.
5098 	 */
5099 	if (emul_type & EMULTYPE_NO_DECODE)
5100 		return X86EMUL_CONTINUE;
5101 
5102 	/*
5103 	 * Emulation is possible for SEV guests if and only if a prefilled
5104 	 * buffer containing the bytes of the intercepted instruction is
5105 	 * available. SEV guest memory is encrypted with a guest specific key
5106 	 * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
5107 	 * decode garbage.
5108 	 *
5109 	 * If KVM is NOT trying to simply skip an instruction, inject #UD if
5110 	 * KVM reached this point without an instruction buffer.  In practice,
5111 	 * this path should never be hit by a well-behaved guest, e.g. KVM
5112 	 * doesn't intercept #UD or #GP for SEV guests, but this path is still
5113 	 * theoretically reachable, e.g. via unaccelerated fault-like AVIC
5114 	 * access, and needs to be handled by KVM to avoid putting the guest
5115 	 * into an infinite loop.  Injecting #UD is somewhat arbitrary, but
5116 	 * it's the least awful option given lack of insight into the guest.
5117 	 *
5118 	 * If KVM is trying to skip an instruction, simply resume the guest.
5119 	 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
5120 	 * will attempt to re-inject the INT3/INTO and skip the instruction.
5121 	 * In that scenario, retrying the INT3/INTO and hoping the guest will
5122 	 * make forward progress is the only option that has a chance of
5123 	 * success (and in practice it will work the vast majority of the time).
5124 	 */
5125 	if (unlikely(!insn)) {
5126 		if (emul_type & EMULTYPE_SKIP)
5127 			return X86EMUL_UNHANDLEABLE;
5128 
5129 		kvm_queue_exception(vcpu, UD_VECTOR);
5130 		return X86EMUL_PROPAGATE_FAULT;
5131 	}
5132 
5133 	/*
5134 	 * Emulate for SEV guests if the insn buffer is not empty.  The buffer
5135 	 * will be empty if the DecodeAssist microcode cannot fetch bytes for
5136 	 * the faulting instruction because the code fetch itself faulted, e.g.
5137 	 * the guest attempted to fetch from emulated MMIO or a guest page
5138 	 * table used to translate CS:RIP resides in emulated MMIO.
5139 	 */
5140 	if (likely(insn_len))
5141 		return X86EMUL_CONTINUE;
5142 
5143 	/*
5144 	 * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
5145 	 *
5146 	 * Errata:
5147 	 * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
5148 	 * possible that CPU microcode implementing DecodeAssist will fail to
5149 	 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
5150 	 * be '0'.  This happens because microcode reads CS:RIP using a _data_
5151 	 * load uop with CPL=0 privileges.  If the load hits a SMAP #PF, ucode
5152 	 * gives up and does not fill the instruction bytes buffer.
5153 	 *
5154 	 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
5155 	 * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
5156 	 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
5157 	 * GuestIntrBytes field of the VMCB.
5158 	 *
5159 	 * This does _not_ mean that the erratum has been encountered, as the
5160 	 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
5161 	 * #PF, e.g. if the guest attempted to execute from emulated MMIO and
5162 	 * encountered a reserved/not-present #PF.
5163 	 *
5164 	 * To hit the erratum, the following conditions must be true:
5165 	 *    1. CR4.SMAP=1 (obviously).
5166 	 *    2. CR4.SMEP=0 || CPL=3.  If SMEP=1 and CPL<3, the erratum cannot
5167 	 *       have been hit as the guest would have encountered a SMEP
5168 	 *       violation #PF, not a #NPF.
5169 	 *    3. The #NPF is not due to a code fetch, in which case failure to
5170 	 *       retrieve the instruction bytes is legitimate (see above).
5171 	 *
5172 	 * In addition, don't apply the erratum workaround if the #NPF occurred
5173 	 * while translating guest page tables (see below).
5174 	 */
5175 	error_code = svm->vmcb->control.exit_info_1;
5176 	if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
5177 		goto resume_guest;
5178 
5179 	smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
5180 	smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
5181 	is_user = svm_get_cpl(vcpu) == 3;
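	/*
	 * Conditions (1) and (2) above as a sketch of the check below:
	 *
	 *   SMAP  SMEP  CPL   erratum possible?
	 *    0     x     x    no  (SMAP not enabled)
	 *    1     0     x    yes
	 *    1     1     3    yes
	 *    1     1    <3    no  (would be a SMEP #PF, not a #NPF)
	 */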
5182 	if (smap && (!smep || is_user)) {
5183 		pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
5184 
5185 		/*
5186 		 * If the fault occurred in userspace, arbitrarily inject #GP
5187 		 * to avoid killing the guest and to hopefully avoid confusing
5188 		 * the guest kernel too much, e.g. injecting #PF would not be
5189 		 * coherent with respect to the guest's page tables.  Request
5190 		 * triple fault if the fault occurred in the kernel as there's
5191 		 * no fault that KVM can inject without confusing the guest.
5192 		 * In practice, the triple fault is moot as no sane SEV kernel
5193 		 * will execute from user memory while also running with SMAP=1.
5194 		 */
5195 		if (is_user)
5196 			kvm_inject_gp(vcpu, 0);
5197 		else
5198 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5199 		return X86EMUL_PROPAGATE_FAULT;
5200 	}
5201 
5202 resume_guest:
5203 	/*
5204 	 * If the erratum was not hit, simply resume the guest and let it fault
5205 	 * again.  While awful, e.g. the vCPU may get stuck in an infinite loop
5206 	 * if the fault is at CPL=0, it's the lesser of all evils.  Exiting to
5207 	 * userspace will kill the guest, and letting the emulator read garbage
5208 	 * will yield random behavior and potentially corrupt the guest.
5209 	 *
5210 	 * Simply resuming the guest is technically not a violation of the SEV
5211 	 * architecture.  AMD's APM states that all code fetches and page table
5212 	 * accesses for SEV guest are encrypted, regardless of the C-Bit.  The
5213 	 * APM also states that encrypted accesses to MMIO are "ignored", but
5214 	 * doesn't explicitly define "ignored", i.e. doing nothing and letting
5215 	 * the guest spin is technically "ignoring" the access.
5216 	 */
5217 	return X86EMUL_RETRY_INSTR;
5218 }
5219 
5220 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
5221 {
5222 	struct vcpu_svm *svm = to_svm(vcpu);
5223 
5224 	return !gif_set(svm);
5225 }
5226 
5227 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
5228 {
5229 	if (!is_sev_es_guest(vcpu))
5230 		return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
5231 
5232 	sev_vcpu_deliver_sipi_vector(vcpu, vector);
5233 }
5234 
5235 static void svm_vm_destroy(struct kvm *kvm)
5236 {
5237 	avic_vm_destroy(kvm);
5238 	sev_vm_destroy(kvm);
5239 
5240 	svm_srso_vm_destroy();
5241 }
5242 
5243 static int svm_vm_init(struct kvm *kvm)
5244 {
5245 	sev_vm_init(kvm);
5246 
5247 	if (!pause_filter_count || !pause_filter_thresh)
5248 		kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
5249 
5250 	if (enable_apicv) {
5251 		int ret = avic_vm_init(kvm);
5252 		if (ret)
5253 			return ret;
5254 	}
5255 
5256 	svm_srso_vm_init();
5257 	return 0;
5258 }
5259 
5260 static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
5261 {
5262 	struct page *page = snp_safe_alloc_page();
5263 
5264 	if (!page)
5265 		return NULL;
5266 
5267 	return page_address(page);
5268 }
5269 
5270 struct kvm_x86_ops svm_x86_ops __initdata = {
5271 	.name = KBUILD_MODNAME,
5272 
5273 	.check_processor_compatibility = svm_check_processor_compat,
5274 
5275 	.hardware_unsetup = svm_hardware_unsetup,
5276 	.enable_virtualization_cpu = svm_enable_virtualization_cpu,
5277 	.disable_virtualization_cpu = svm_disable_virtualization_cpu,
5278 	.emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu,
5279 	.has_emulated_msr = svm_has_emulated_msr,
5280 
5281 	.vcpu_precreate = svm_vcpu_precreate,
5282 	.vcpu_create = svm_vcpu_create,
5283 	.vcpu_free = svm_vcpu_free,
5284 	.vcpu_reset = svm_vcpu_reset,
5285 
5286 	.vm_size = sizeof(struct kvm_svm),
5287 	.vm_init = svm_vm_init,
5288 	.vm_destroy = svm_vm_destroy,
5289 
5290 	.prepare_switch_to_guest = svm_prepare_switch_to_guest,
5291 	.vcpu_load = svm_vcpu_load,
5292 	.vcpu_put = svm_vcpu_put,
5293 	.vcpu_blocking = avic_vcpu_blocking,
5294 	.vcpu_unblocking = avic_vcpu_unblocking,
5295 
5296 	.update_exception_bitmap = svm_update_exception_bitmap,
5297 	.get_feature_msr = svm_get_feature_msr,
5298 	.get_msr = svm_get_msr,
5299 	.set_msr = svm_set_msr,
5300 	.get_segment_base = svm_get_segment_base,
5301 	.get_segment = svm_get_segment,
5302 	.set_segment = svm_set_segment,
5303 	.get_cpl = svm_get_cpl,
5304 	.get_cpl_no_cache = svm_get_cpl,
5305 	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
5306 	.is_valid_cr0 = svm_is_valid_cr0,
5307 	.set_cr0 = svm_set_cr0,
5308 	.post_set_cr3 = sev_post_set_cr3,
5309 	.is_valid_cr4 = svm_is_valid_cr4,
5310 	.set_cr4 = svm_set_cr4,
5311 	.set_efer = svm_set_efer,
5312 	.get_idt = svm_get_idt,
5313 	.set_idt = svm_set_idt,
5314 	.get_gdt = svm_get_gdt,
5315 	.set_gdt = svm_set_gdt,
5316 	.set_dr7 = svm_set_dr7,
5317 	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
5318 	.cache_reg = svm_cache_reg,
5319 	.get_rflags = svm_get_rflags,
5320 	.set_rflags = svm_set_rflags,
5321 	.get_if_flag = svm_get_if_flag,
5322 
5323 	.flush_tlb_all = svm_flush_tlb_all,
5324 	.flush_tlb_current = svm_flush_tlb_current,
5325 	.flush_tlb_gva = svm_flush_tlb_gva,
5326 	.flush_tlb_guest = svm_flush_tlb_guest,
5327 
5328 	.vcpu_pre_run = svm_vcpu_pre_run,
5329 	.vcpu_run = svm_vcpu_run,
5330 	.handle_exit = svm_handle_exit,
5331 	.skip_emulated_instruction = svm_skip_emulated_instruction,
5332 	.update_emulated_instruction = NULL,
5333 	.set_interrupt_shadow = svm_set_interrupt_shadow,
5334 	.get_interrupt_shadow = svm_get_interrupt_shadow,
5335 	.patch_hypercall = svm_patch_hypercall,
5336 	.inject_irq = svm_inject_irq,
5337 	.inject_nmi = svm_inject_nmi,
5338 	.is_vnmi_pending = svm_is_vnmi_pending,
5339 	.set_vnmi_pending = svm_set_vnmi_pending,
5340 	.inject_exception = svm_inject_exception,
5341 	.cancel_injection = svm_cancel_injection,
5342 	.interrupt_allowed = svm_interrupt_allowed,
5343 	.nmi_allowed = svm_nmi_allowed,
5344 	.get_nmi_mask = svm_get_nmi_mask,
5345 	.set_nmi_mask = svm_set_nmi_mask,
5346 	.enable_nmi_window = svm_enable_nmi_window,
5347 	.enable_irq_window = svm_enable_irq_window,
5348 	.update_cr8_intercept = svm_update_cr8_intercept,
5349 
5350 	.x2apic_icr_is_split = true,
5351 	.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
5352 	.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
5353 	.apicv_post_state_restore = avic_apicv_post_state_restore,
5354 	.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
5355 
5356 	.get_exit_info = svm_get_exit_info,
5357 	.get_entry_info = svm_get_entry_info,
5358 
5359 	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
5360 
5361 	.has_wbinvd_exit = svm_has_wbinvd_exit,
5362 
5363 	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
5364 	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
5365 	.write_tsc_offset = svm_write_tsc_offset,
5366 	.write_tsc_multiplier = svm_write_tsc_multiplier,
5367 
5368 	.load_mmu_pgd = svm_load_mmu_pgd,
5369 
5370 	.check_intercept = svm_check_intercept,
5371 	.handle_exit_irqoff = svm_handle_exit_irqoff,
5372 
5373 	.nested_ops = &svm_nested_ops,
5374 
5375 	.deliver_interrupt = svm_deliver_interrupt,
5376 	.pi_update_irte = avic_pi_update_irte,
5377 	.setup_mce = svm_setup_mce,
5378 
5379 #ifdef CONFIG_KVM_SMM
5380 	.smi_allowed = svm_smi_allowed,
5381 	.enter_smm = svm_enter_smm,
5382 	.leave_smm = svm_leave_smm,
5383 	.enable_smi_window = svm_enable_smi_window,
5384 #endif
5385 
5386 #ifdef CONFIG_KVM_AMD_SEV
5387 	.dev_get_attr = sev_dev_get_attr,
5388 	.mem_enc_ioctl = sev_mem_enc_ioctl,
5389 	.mem_enc_register_region = sev_mem_enc_register_region,
5390 	.mem_enc_unregister_region = sev_mem_enc_unregister_region,
5391 	.guest_memory_reclaimed = sev_guest_memory_reclaimed,
5392 
5393 	.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
5394 	.vm_move_enc_context_from = sev_vm_move_enc_context_from,
5395 #endif
5396 	.check_emulate_instruction = svm_check_emulate_instruction,
5397 
5398 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
5399 
5400 	.recalc_intercepts = svm_recalc_intercepts,
5401 	.complete_emulated_msr = svm_complete_emulated_msr,
5402 
5403 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
5404 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
5405 	.alloc_apic_backing_page = svm_alloc_apic_backing_page,
5406 
5407 	.gmem_prepare = sev_gmem_prepare,
5408 	.gmem_invalidate = sev_gmem_invalidate,
5409 	.gmem_max_mapping_level = sev_gmem_max_mapping_level,
5410 };
5411 
5412 /*
5413  * The default MMIO mask is a single bit (excluding the present bit),
5414  * which could conflict with the memory encryption bit. Check for
5415  * memory encryption support and override the default MMIO mask if
5416  * memory encryption is enabled.
5417  */
5418 static __init void svm_adjust_mmio_mask(void)
5419 {
5420 	unsigned int enc_bit, mask_bit;
5421 	u64 msr, mask;
5422 
5423 	/* If there is no memory encryption support, use existing mask */
5424 	if (cpuid_eax(0x80000000) < 0x8000001f)
5425 		return;
5426 
5427 	/* If memory encryption is not enabled, use existing mask */
5428 	rdmsrq(MSR_AMD64_SYSCFG, msr);
5429 	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
5430 		return;
5431 
5432 	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
5433 	mask_bit = boot_cpu_data.x86_phys_bits;
5434 
5435 	/* Increment the mask bit if it is the same as the encryption bit */
5436 	if (enc_bit == mask_bit)
5437 		mask_bit++;
5438 
5439 	/*
5440 	 * If the mask bit location is below 52, then some bits above the
5441 	 * physical addressing limit will always be reserved, so use the
5442 	 * rsvd_bits() function to generate the mask. This mask, along with
5443 	 * the present bit, will be used to generate a page fault with
5444 	 * PFER.RSV = 1.
5445 	 *
5446 	 * If the mask bit location is 52 (or above), then clear the mask.
5447 	 */
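	/*
	 * E.g., a hypothetical part with the C-bit at position 47 and
	 * boot_cpu_data.x86_phys_bits == 48: mask_bit stays 48, so MMIO
	 * SPTEs set rsvd_bits(48, 51) plus the present bit and are
	 * guaranteed to fault with the reserved-bit error code set.  If
	 * instead enc_bit == mask_bit == 48, the mask bit is bumped to 49
	 * so the generated mask never aliases the encryption bit.
	 */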
5448 	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
5449 
5450 	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
5451 }
5452 
5453 static __init void svm_set_cpu_caps(void)
5454 {
5455 	kvm_initialize_cpu_caps();
5456 
5457 	kvm_caps.supported_perf_cap = 0;
5458 
5459 	kvm_cpu_cap_clear(X86_FEATURE_IBT);
5460 
5461 	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
5462 	if (nested) {
5463 		kvm_cpu_cap_set(X86_FEATURE_SVM);
5464 		kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
5465 
5466 		/*
5467 		 * KVM currently flushes TLBs on *every* nested SVM transition,
5468 		 * and so for all intents and purposes KVM supports flushing by
5469 		 * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
5470 		 */
5471 		kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID);
5472 
5473 		if (nrips)
5474 			kvm_cpu_cap_set(X86_FEATURE_NRIPS);
5475 
5476 		if (npt_enabled)
5477 			kvm_cpu_cap_set(X86_FEATURE_NPT);
5478 
5479 		if (tsc_scaling)
5480 			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
5481 
5482 		if (vls)
5483 			kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
5484 		if (lbrv)
5485 			kvm_cpu_cap_set(X86_FEATURE_LBRV);
5486 
5487 		if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
5488 			kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);
5489 
5490 		if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
5491 			kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
5492 
5493 		if (vgif)
5494 			kvm_cpu_cap_set(X86_FEATURE_VGIF);
5495 
5496 		if (vnmi)
5497 			kvm_cpu_cap_set(X86_FEATURE_VNMI);
5498 
5499 		/* Nested VM can receive #VMEXIT instead of triggering #GP */
5500 		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
5501 	}
5502 
5503 	if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD))
5504 		kvm_caps.has_bus_lock_exit = true;
5505 
5506 	/* CPUID 0x80000008 */
5507 	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5508 	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
5509 		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
5510 
5511 	if (enable_pmu) {
5512 		/*
5513 		 * Enumerate support for PERFCTR_CORE if and only if KVM has
5514 		 * access to enough counters to virtualize "core" support,
5515 		 * otherwise limit vPMU support to the legacy number of counters.
5516 		 */
5517 		if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
5518 			kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
5519 							  kvm_pmu_cap.num_counters_gp);
5520 		else
5521 			kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
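		/*
		 * E.g., assuming the usual AMD64_NUM_COUNTERS == 4 and
		 * AMD64_NUM_COUNTERS_CORE == 6 from asm/perf_event.h: with
		 * only the four legacy counters enumerated, the clamp above
		 * keeps num_counters_gp at 4 and PERFCTR_CORE stays
		 * unadvertised.
		 */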
5522 
5523 		if (kvm_pmu_cap.version != 2 ||
5524 		    !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
5525 			kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
5526 	}
5527 
5528 	/* CPUID 0x8000001F (SME/SEV features) */
5529 	sev_set_cpu_caps();
5530 
5531 	/*
5532 	 * Clear capabilities that are automatically configured by common code,
5533 	 * but that require explicit SVM support (that isn't yet implemented).
5534 	 */
5535 	kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
5536 	kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM);
5537 
5538 	kvm_setup_xss_caps();
5539 	kvm_finalize_cpu_caps();
5540 }
5541 
5542 static __init int svm_hardware_setup(void)
5543 {
5544 	void *iopm_va;
5545 	int cpu, r;
5546 
5547 	/*
5548 	 * NX is required for shadow paging and for NPT if the NX huge pages
5549 	 * mitigation is enabled.
5550 	 */
5551 	if (!boot_cpu_has(X86_FEATURE_NX)) {
5552 		pr_err_ratelimited("NX (Execute Disable) not supported\n");
5553 		return -EOPNOTSUPP;
5554 	}
5555 
5556 	kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
5557 				     XFEATURE_MASK_BNDCSR);
5558 
5559 	if (tsc_scaling) {
5560 		if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
5561 			tsc_scaling = false;
5562 		} else {
5563 			pr_info("TSC scaling supported\n");
5564 			kvm_caps.has_tsc_control = true;
5565 		}
5566 	}
5567 	kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
5568 	kvm_caps.tsc_scaling_ratio_frac_bits = 32;
5569 
5570 	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
5571 
5572 	/* Check for pause filtering support */
5573 	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
5574 		pause_filter_count = 0;
5575 		pause_filter_thresh = 0;
5576 	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
5577 		pause_filter_thresh = 0;
5578 	}
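	/*
	 * Net effect of the fallback chain above: without PAUSEFILTER, PLE
	 * is disabled entirely (count == 0); with PAUSEFILTER but without
	 * PFTHRESHOLD, only simple filtering is used (thresh == 0).
	 */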
5579 
5580 	if (nested) {
5581 		pr_info("Nested Virtualization enabled\n");
5582 		kvm_enable_efer_bits(EFER_SVME);
5583 		if (!boot_cpu_has(X86_FEATURE_EFER_LMSLE_MBZ))
5584 			kvm_enable_efer_bits(EFER_LMSLE);
5585 
5586 		r = nested_svm_init_msrpm_merge_offsets();
5587 		if (r)
5588 			return r;
5589 	}
5590 
5591 	/*
5592 	 * KVM's MMU doesn't support using 2-level paging for itself, and thus
5593 	 * NPT isn't supported if the host is using 2-level paging since host
5594 	 * CR4 is unchanged on VMRUN.
5595 	 */
5596 	if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
5597 		npt_enabled = false;
5598 
5599 	if (!boot_cpu_has(X86_FEATURE_NPT))
5600 		npt_enabled = false;
5601 
5602 	/* Force VM NPT level equal to the host's paging level */
5603 	kvm_configure_mmu(npt_enabled, get_npt_level(),
5604 			  get_npt_level(), PG_LEVEL_1G);
5605 	pr_info("Nested Paging %s\n", str_enabled_disabled(npt_enabled));
5606 
5607 	/*
5608 	 * It seems that on AMD processors the PTE's accessed bit is
5609 	 * set by the CPU hardware before the NPF vmexit.
5610 	 * This is not the expected behaviour and our tests fail
5611 	 * because of it.
5612 	 * A workaround here is to disable support for
5613 	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
5614 	 * In this case userspace can query the
5615 	 * KVM_CAP_SMALLER_MAXPHYADDR extension to detect this and decide
5616 	 * how to handle it.
5617 	 * If future AMD CPU models change the behaviour described above,
5618 	 * this variable can be changed accordingly.
5619 	 */
5620 	allow_smaller_maxphyaddr = !npt_enabled;
5621 
5622 	/* Setup shadow_me_value and shadow_me_mask */
5623 	kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
5624 
5625 	svm_adjust_mmio_mask();
5626 
5627 	nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
5628 
5629 	if (lbrv) {
5630 		if (!boot_cpu_has(X86_FEATURE_LBRV))
5631 			lbrv = false;
5632 		else
5633 			pr_info("LBR virtualization supported\n");
5634 	}
5635 
5636 	iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL);
5637 	if (!iopm_va)
5638 		return -ENOMEM;
5639 
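	/*
	 * __sme_set() tags the physical address with the SME C-bit; it is
	 * a no-op when SME is inactive.
	 */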
5640 	iopm_base = __sme_set(__pa(iopm_va));
5641 
5642 	/*
5643 	 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
5644 	 * may be modified by svm_adjust_mmio_mask()), as well as nrips.
5645 	 */
5646 	sev_hardware_setup();
5647 
5648 	svm_hv_hardware_setup();
5649 
5650 	enable_apicv = avic_hardware_setup();
5651 	if (!enable_apicv) {
5652 		enable_ipiv = false;
5653 		svm_x86_ops.vcpu_blocking = NULL;
5654 		svm_x86_ops.vcpu_unblocking = NULL;
5655 		svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
5656 	}
5657 
5658 	if (vls) {
5659 		if (!npt_enabled ||
5660 		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
5661 		    !IS_ENABLED(CONFIG_X86_64)) {
5662 			vls = false;
5663 		} else {
5664 			pr_info("Virtual VMLOAD VMSAVE supported\n");
5665 		}
5666 	}
5667 
5668 	if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
5669 		svm_gp_erratum_intercept = false;
5670 
5671 	if (vgif) {
5672 		if (!boot_cpu_has(X86_FEATURE_VGIF))
5673 			vgif = false;
5674 		else
5675 			pr_info("Virtual GIF supported\n");
5676 	}
5677 
5678 	vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
5679 	if (vnmi)
5680 		pr_info("Virtual NMI enabled\n");
5681 
5682 	if (!vnmi) {
5683 		svm_x86_ops.is_vnmi_pending = NULL;
5684 		svm_x86_ops.set_vnmi_pending = NULL;
5685 	}
5686 
5687 	if (!enable_pmu)
5688 		pr_info("PMU virtualization is disabled\n");
5689 
5690 	svm_set_cpu_caps();
5691 
5692 	kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED;
5693 
5694 	for_each_possible_cpu(cpu) {
5695 		r = svm_cpu_init(cpu);
5696 		if (r)
5697 			goto err;
5698 	}
5699 
5700 	return 0;
5701 
5702 err:
5703 	svm_hardware_unsetup();
5704 	return r;
5705 }
5706 
5707 
5708 static struct kvm_x86_init_ops svm_init_ops __initdata = {
5709 	.hardware_setup = svm_hardware_setup,
5710 
5711 	.runtime_ops = &svm_x86_ops,
5712 	.pmu_ops = &amd_pmu_ops,
5713 };
5714 
5715 static void __svm_exit(void)
5716 {
5717 	kvm_x86_vendor_exit();
5718 }
5719 
5720 static int __init svm_init(void)
5721 {
5722 	int r;
5723 
5724 	KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_svm);
5725 
5728 	if (!kvm_is_svm_supported())
5729 		return -EOPNOTSUPP;
5730 
5731 	r = kvm_x86_vendor_init(&svm_init_ops);
5732 	if (r)
5733 		return r;
5734 
5735 	/*
5736 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
5737 	 * exposed to userspace!
5738 	 */
5739 	r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
5740 		     THIS_MODULE);
5741 	if (r)
5742 		goto err_kvm_init;
5743 
5744 	return 0;
5745 
5746 err_kvm_init:
5747 	__svm_exit();
5748 	return r;
5749 }
5750 
5751 static void __exit svm_exit(void)
5752 {
5753 	kvm_exit();
5754 	__svm_exit();
5755 }
5756 
5757 module_init(svm_init)
5758 module_exit(svm_exit)
5759