1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3 #include <linux/kvm_host.h>
4
5 #include "irq.h"
6 #include "mmu.h"
7 #include "kvm_cache_regs.h"
8 #include "x86.h"
9 #include "smm.h"
10 #include "cpuid.h"
11 #include "pmu.h"
12
13 #include <linux/module.h>
14 #include <linux/mod_devicetable.h>
15 #include <linux/kernel.h>
16 #include <linux/vmalloc.h>
17 #include <linux/highmem.h>
18 #include <linux/amd-iommu.h>
19 #include <linux/sched.h>
20 #include <linux/trace_events.h>
21 #include <linux/slab.h>
22 #include <linux/hashtable.h>
23 #include <linux/objtool.h>
24 #include <linux/psp-sev.h>
25 #include <linux/file.h>
26 #include <linux/pagemap.h>
27 #include <linux/swap.h>
28 #include <linux/rwsem.h>
29 #include <linux/cc_platform.h>
30 #include <linux/smp.h>
31 #include <linux/string_choices.h>
32 #include <linux/mutex.h>
33
34 #include <asm/apic.h>
35 #include <asm/msr.h>
36 #include <asm/perf_event.h>
37 #include <asm/tlbflush.h>
38 #include <asm/desc.h>
39 #include <asm/debugreg.h>
40 #include <asm/kvm_para.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/spec-ctrl.h>
43 #include <asm/cpu_device_id.h>
44 #include <asm/traps.h>
45 #include <asm/reboot.h>
46 #include <asm/fpu/api.h>
47 #include <asm/virt.h>
48
49 #include <trace/events/ipi.h>
50
51 #include "trace.h"
52
53 #include "svm.h"
54 #include "svm_ops.h"
55
56 #include "hyperv.h"
57 #include "kvm_onhyperv.h"
58 #include "svm_onhyperv.h"
59
60 MODULE_AUTHOR("Qumranet");
61 MODULE_DESCRIPTION("KVM support for SVM (AMD-V) extensions");
62 MODULE_LICENSE("GPL");
63
64 #ifdef MODULE
65 static const struct x86_cpu_id svm_cpu_id[] = {
66 X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
67 {}
68 };
69 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
70 #endif
71
72 #define SEG_TYPE_LDT 2
73 #define SEG_TYPE_BUSY_TSS16 3
74
75 static bool erratum_383_found __read_mostly;
76
77 /*
78  * Set osvw_len to a higher value when updated Revision Guides
79 * are published and we know what the new status bits are
80 */
81 static uint64_t osvw_len = 4, osvw_status;
82 static DEFINE_SPINLOCK(osvw_lock);
83
84 static DEFINE_PER_CPU(u64, current_tsc_ratio);
85
86 /*
87  * These two parameters are used to configure the controls for Pause-Loop Exiting:
88  * pause_filter_count: On processors that support Pause filtering (indicated
89 * by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
90 * count value. On VMRUN this value is loaded into an internal counter.
91 * Each time a pause instruction is executed, this counter is decremented
92 * until it reaches zero at which time a #VMEXIT is generated if pause
93 * intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
94 * Intercept Filtering for more details.
95  *	This also indicates whether PLE logic is enabled.
96 *
97 * pause_filter_thresh: In addition, some processor families support advanced
98  *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an upper
99  *	bound on the amount of time a guest is allowed to execute in a pause loop.
100 * In this mode, a 16-bit pause filter threshold field is added in the
101 * VMCB. The threshold value is a cycle count that is used to reset the
102 * pause counter. As with simple pause filtering, VMRUN loads the pause
103 * count value from VMCB into an internal counter. Then, on each pause
104 * instruction the hardware checks the elapsed number of cycles since
105 * the most recent pause instruction against the pause filter threshold.
106 * If the elapsed cycle count is greater than the pause filter threshold,
107 * then the internal pause count is reloaded from the VMCB and execution
108 * continues. If the elapsed cycle count is less than the pause filter
109 * threshold, then the internal pause count is decremented. If the count
110 * value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
111 * triggered. If advanced pause filtering is supported and pause filter
112 * threshold field is set to zero, the filter will operate in the simpler,
113 * count only mode.
114 */
115
116 static unsigned short __ro_after_init pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
117 module_param(pause_filter_thresh, ushort, 0444);
118
119 static unsigned short __ro_after_init pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
120 module_param(pause_filter_count, ushort, 0444);
121
122 /* Default doubles per-vcpu window every exit. */
123 static unsigned short __ro_after_init pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
124 module_param(pause_filter_count_grow, ushort, 0444);
125
126 /* Default resets per-vcpu window every exit to pause_filter_count. */
127 static unsigned short __ro_after_init pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
128 module_param(pause_filter_count_shrink, ushort, 0444);
129
130 /* Default is to compute the maximum so we can never overflow. */
131 static unsigned short __ro_after_init pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
132 module_param(pause_filter_count_max, ushort, 0444);
133
134 /*
135 * Use nested page tables by default. Note, NPT may get forced off by
136 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
137 */
138 bool __ro_after_init npt_enabled = true;
139 module_param_named(npt, npt_enabled, bool, 0444);
140
141 /* allow nested virtualization in KVM/SVM */
142 static int __ro_after_init nested = true;
143 module_param(nested, int, 0444);
144
145 /* enable/disable Next RIP Save */
146 int __ro_after_init nrips = true;
147 module_param(nrips, int, 0444);
148
149 /* enable/disable Virtual VMLOAD VMSAVE */
150 static int __ro_after_init vls = true;
151 module_param(vls, int, 0444);
152
153 /* enable/disable Virtual GIF */
154 int __ro_after_init vgif = true;
155 module_param(vgif, int, 0444);
156
157 /* enable/disable LBR virtualization */
158 int __ro_after_init lbrv = true;
159 module_param(lbrv, int, 0444);
160
161 static int __ro_after_init tsc_scaling = true;
162 module_param(tsc_scaling, int, 0444);
163
164 module_param(enable_device_posted_irqs, bool, 0444);
165
166 bool __read_mostly dump_invalid_vmcb;
167 module_param(dump_invalid_vmcb, bool, 0644);
168
169
170 bool __ro_after_init intercept_smi = true;
171 module_param(intercept_smi, bool, 0444);
172
173 bool __ro_after_init vnmi = true;
174 module_param(vnmi, bool, 0444);
175
176 module_param(enable_mediated_pmu, bool, 0444);
177
178 static bool __ro_after_init svm_gp_erratum_intercept = true;
179
180 static u8 rsm_ins_bytes[] = "\x0f\xaa";
181
182 static unsigned long __read_mostly iopm_base;
183
184 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
185
186 static DEFINE_MUTEX(vmcb_dump_mutex);
187
188 /*
189 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
190 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
191 *
192 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
193 * defer the restoration of TSC_AUX until the CPU returns to userspace.
194 */
195 int tsc_aux_uret_slot __ro_after_init = -1;
196
197 static int get_npt_level(void)
198 {
199 #ifdef CONFIG_X86_64
200 return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
201 #else
202 return PT32E_ROOT_LEVEL;
203 #endif
204 }
205
206 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
207 {
208 struct vcpu_svm *svm = to_svm(vcpu);
209 u64 old_efer = vcpu->arch.efer;
210 vcpu->arch.efer = efer;
211
212 if (!npt_enabled) {
213 /* Shadow paging assumes NX to be available. */
214 efer |= EFER_NX;
215
216 if (!(efer & EFER_LMA))
217 efer &= ~EFER_LME;
218 }
219
220 if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
221 if (!(efer & EFER_SVME)) {
222 /*
223 * Architecturally, clearing EFER.SVME while a guest is
224 * running yields undefined behavior, i.e. KVM can do
225 * literally anything. Force the vCPU back into L1 as
226 * that is the safest option for KVM, but synthesize a
227 * triple fault (for L1!) so that KVM at least doesn't
228 * run random L2 code in the context of L1. Do so if
229 * and only if the vCPU is actively running, e.g. to
230  * avoid false positives if userspace is stuffing state.
231 */
232 if (is_guest_mode(vcpu) && vcpu->wants_to_run)
233 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
234
235 svm_leave_nested(vcpu);
236 /* #GP intercept is still needed for vmware backdoor */
237 if (!enable_vmware_backdoor)
238 clr_exception_intercept(svm, GP_VECTOR);
239
240 /*
241 * Free the nested guest state, unless we are in SMM.
242 * In this case we will return to the nested guest
243 * as soon as we leave SMM.
244 */
245 if (!is_smm(vcpu))
246 svm_free_nested(svm);
247
248 } else {
249 int ret = svm_allocate_nested(svm);
250
251 if (ret) {
252 vcpu->arch.efer = old_efer;
253 return ret;
254 }
255
256 /*
257 * Never intercept #GP for SEV guests, KVM can't
258 * decrypt guest memory to workaround the erratum.
259 */
260 if (svm_gp_erratum_intercept && !is_sev_guest(vcpu))
261 set_exception_intercept(svm, GP_VECTOR);
262 }
263
264 kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
265 }
266
267 svm->vmcb->save.efer = efer | EFER_SVME;
268 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
269 return 0;
270 }
271
272 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
273 {
274 struct vcpu_svm *svm = to_svm(vcpu);
275 u32 ret = 0;
276
277 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
278 ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
279 return ret;
280 }
281
282 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
283 {
284 struct vcpu_svm *svm = to_svm(vcpu);
285
286 if (mask == 0)
287 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
288 else
289 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
290
291 }
292
293 static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
294 int emul_type,
295 bool commit_side_effects)
296 {
297 struct vcpu_svm *svm = to_svm(vcpu);
298 unsigned long old_rflags;
299
300 /*
301 * SEV-ES does not expose the next RIP. The RIP update is controlled by
302 * the type of exit and the #VC handler in the guest.
303 */
304 if (is_sev_es_guest(vcpu))
305 goto done;
306
307 if (nrips && svm->vmcb->control.next_rip != 0) {
308 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
309 svm->next_rip = svm->vmcb->control.next_rip;
310 }
311
312 if (!svm->next_rip) {
313 if (unlikely(!commit_side_effects))
314 old_rflags = svm->vmcb->save.rflags;
315
316 if (!kvm_emulate_instruction(vcpu, emul_type))
317 return 0;
318
319 if (unlikely(!commit_side_effects))
320 svm->vmcb->save.rflags = old_rflags;
321 } else {
322 kvm_rip_write(vcpu, svm->next_rip);
323 }
324
325 done:
326 if (likely(commit_side_effects))
327 svm_set_interrupt_shadow(vcpu, 0);
328
329 return 1;
330 }
331
332 static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
333 {
334 return __svm_skip_emulated_instruction(vcpu, EMULTYPE_SKIP, true);
335 }
336
337 static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu, u8 vector)
338 {
339 const int emul_type = EMULTYPE_SKIP | EMULTYPE_SKIP_SOFT_INT |
340 EMULTYPE_SET_SOFT_INT_VECTOR(vector);
341 unsigned long rip, old_rip = kvm_rip_read(vcpu);
342 struct vcpu_svm *svm = to_svm(vcpu);
343
344 /*
345 * Due to architectural shortcomings, the CPU doesn't always provide
346 * NextRIP, e.g. if KVM intercepted an exception that occurred while
347 * the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip
348 * the instruction even if NextRIP is supported to acquire the next
349 * RIP so that it can be shoved into the NextRIP field, otherwise
350 * hardware will fail to advance guest RIP during event injection.
351 * Drop the exception/interrupt if emulation fails and effectively
352 * retry the instruction, it's the least awful option. If NRIPS is
353 * in use, the skip must not commit any side effects such as clearing
354 * the interrupt shadow or RFLAGS.RF.
355 */
356 if (!__svm_skip_emulated_instruction(vcpu, emul_type, !nrips))
357 return -EIO;
358
359 rip = kvm_rip_read(vcpu);
360
361 /*
362 * Save the injection information, even when using next_rip, as the
363 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
364 * doesn't complete due to a VM-Exit occurring while the CPU is
365 * vectoring the event. Decoding the instruction isn't guaranteed to
366 * work as there may be no backing instruction, e.g. if the event is
367 * being injected by L1 for L2, or if the guest is patching INT3 into
368 * a different instruction.
369 */
370 svm->soft_int_injected = true;
371 svm->soft_int_csbase = svm->vmcb->save.cs.base;
372 svm->soft_int_old_rip = old_rip;
373 svm->soft_int_next_rip = rip;
374
375 if (nrips)
376 kvm_rip_write(vcpu, old_rip);
377
378 if (static_cpu_has(X86_FEATURE_NRIPS))
379 svm->vmcb->control.next_rip = rip;
380
381 return 0;
382 }
383
384 static void svm_inject_exception(struct kvm_vcpu *vcpu)
385 {
386 struct kvm_queued_exception *ex = &vcpu->arch.exception;
387 struct vcpu_svm *svm = to_svm(vcpu);
388
389 kvm_deliver_exception_payload(vcpu, ex);
390
391 if (kvm_exception_is_soft(ex->vector) &&
392 svm_update_soft_interrupt_rip(vcpu, ex->vector))
393 return;
394
395 svm->vmcb->control.event_inj = ex->vector
396 | SVM_EVTINJ_VALID
397 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
398 | SVM_EVTINJ_TYPE_EXEPT;
399 svm->vmcb->control.event_inj_err = ex->error_code;
400 }
401
402 static void svm_init_erratum_383(void)
403 {
404 u64 val;
405
406 if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
407 return;
408
409 /* Use _safe variants to not break nested virtualization */
410 if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
411 return;
412
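/*
 * Bit 47 of DC_CFG is the workaround bit called out in AMD's Family 10h
 * revision guide for erratum 383 (assumption based on the erratum_383_found
 * handling here; the revision guide itself is not quoted in this file).
 */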
413 val |= (1ULL << 47);
414
415 native_write_msr_safe(MSR_AMD64_DC_CFG, val);
416
417 erratum_383_found = true;
418 }
419
420 static void svm_init_osvw(struct kvm_vcpu *vcpu)
421 {
422 /*
423 * Guests should see errata 400 and 415 as fixed (assuming that
424 * HLT and IO instructions are intercepted).
425 */
426 vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
427 vcpu->arch.osvw.status = osvw_status & ~(6ULL);
428
429 /*
430 * By increasing VCPU's osvw.length to 3 we are telling the guest that
431 * all osvw.status bits inside that length, including bit 0 (which is
432 * reserved for erratum 298), are valid. However, if host processor's
433 * osvw_len is 0 then osvw_status[0] carries no information. We need to
434 * be conservative here and therefore we tell the guest that erratum 298
435 * is present (because we really don't know).
436 */
437 if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
438 vcpu->arch.osvw.status |= 1;
439 }
440
441 static void svm_init_os_visible_workarounds(void)
442 {
443 u64 len, status;
444
445 /*
446 * Get OS-Visible Workarounds (OSVW) bits.
447 *
448 * Note that it is possible to have a system with mixed processor
449 * revisions and therefore different OSVW bits. If bits are not the same
450 * on different processors then choose the worst case (i.e. if erratum
451 * is present on one processor and not on another then assume that the
452 * erratum is present everywhere).
453 *
454  * Note #2! The OSVW MSRs are used to communicate that an erratum is
455  * NOT present! Software must assume an erratum is present if its bit is
456  * set in OSVW_STATUS *or* the bit number exceeds OSVW_ID_LENGTH. If
457 * either RDMSR fails, simply zero out the length to treat all errata
458 * as being present. Similarly, use the *minimum* length across all
459 * CPUs, not the maximum length.
460 *
461  * If the length is zero, then KVM is already treating all errata as
462 * being present and there's nothing left to do.
463 */
464 if (!osvw_len)
465 return;
466
467 if (!this_cpu_has(X86_FEATURE_OSVW) ||
468 native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len) ||
469 native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status))
470 len = status = 0;
471
472 if (status == READ_ONCE(osvw_status) && len >= READ_ONCE(osvw_len))
473 return;
474
475 guard(spinlock)(&osvw_lock);
476
477 if (len < osvw_len)
478 osvw_len = len;
479 osvw_status |= status;
480 osvw_status &= (1ULL << osvw_len) - 1;
481 }
482
483 static bool __kvm_is_svm_supported(void)
484 {
485 int cpu = smp_processor_id();
486 struct cpuinfo_x86 *c = &cpu_data(cpu);
487
488 if (c->x86_vendor != X86_VENDOR_AMD &&
489 c->x86_vendor != X86_VENDOR_HYGON) {
490 pr_err("CPU %d isn't AMD or Hygon\n", cpu);
491 return false;
492 }
493
494 if (!cpu_has(c, X86_FEATURE_SVM)) {
495 pr_err("SVM not supported by CPU %d\n", cpu);
496 return false;
497 }
498
499 if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
500 pr_info("KVM is unsupported when running as an SEV guest\n");
501 return false;
502 }
503
504 return true;
505 }
506
507 static bool kvm_is_svm_supported(void)
508 {
509 bool supported;
510
511 migrate_disable();
512 supported = __kvm_is_svm_supported();
513 migrate_enable();
514
515 return supported;
516 }
517
518 static int svm_check_processor_compat(void)
519 {
520 if (!__kvm_is_svm_supported())
521 return -EIO;
522
523 return 0;
524 }
525
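/*
 * Write the TSC ratio MSR only when the value actually changes;
 * current_tsc_ratio caches the last value written on this CPU so that
 * redundant WRMSRs are skipped.
 */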
526 static void __svm_write_tsc_multiplier(u64 multiplier)
527 {
528 if (multiplier == __this_cpu_read(current_tsc_ratio))
529 return;
530
531 wrmsrq(MSR_AMD64_TSC_RATIO, multiplier);
532 __this_cpu_write(current_tsc_ratio, multiplier);
533 }
534
535 static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
536 {
537 return &sd->save_area->host_sev_es_save;
538 }
539
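/*
 * Minimal teardown for the emergency path (e.g. crash/kexec): only drop the
 * host save area pointer, skipping the full cleanup done by
 * svm_disable_virtualization_cpu() below.
 */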
540 static void svm_emergency_disable_virtualization_cpu(void)
541 {
542 wrmsrq(MSR_VM_HSAVE_PA, 0);
543 }
544
545 static void svm_disable_virtualization_cpu(void)
546 {
547 /* Make sure we clean up behind us */
548 if (tsc_scaling)
549 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
550
551 x86_virt_put_ref(X86_FEATURE_SVM);
552 wrmsrq(MSR_VM_HSAVE_PA, 0);
553
554 amd_pmu_disable_virt();
555 }
556
557 static int svm_enable_virtualization_cpu(void)
558 {
559
560 struct svm_cpu_data *sd;
561 int me = raw_smp_processor_id();
562 int r;
563
564 r = x86_virt_get_ref(X86_FEATURE_SVM);
565 if (r)
566 return r;
567
568 sd = per_cpu_ptr(&svm_data, me);
569 sd->asid_generation = 1;
570 sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
571 sd->next_asid = sd->max_asid + 1;
572 sd->min_asid = max_sev_asid + 1;
573
574 wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
575
576 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
577 /*
578  * Set the default value, even if we don't use TSC scaling,
579  * to avoid having a stale value in the MSR.
580 */
581 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
582 }
583
584 svm_init_os_visible_workarounds();
585
586 svm_init_erratum_383();
587
588 amd_pmu_enable_virt();
589
590 return 0;
591 }
592
593 static void svm_cpu_uninit(int cpu)
594 {
595 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
596
597 if (!sd->save_area)
598 return;
599
600 kfree(sd->sev_vmcbs);
601 __free_page(__sme_pa_to_page(sd->save_area_pa));
602 sd->save_area_pa = 0;
603 sd->save_area = NULL;
604 }
605
606 static int svm_cpu_init(int cpu)
607 {
608 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
609 struct page *save_area_page;
610 int ret = -ENOMEM;
611
612 memset(sd, 0, sizeof(struct svm_cpu_data));
613 save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
614 if (!save_area_page)
615 return ret;
616
617 ret = sev_cpu_init(sd);
618 if (ret)
619 goto free_save_area;
620
621 sd->save_area = page_address(save_area_page);
622 sd->save_area_pa = __sme_page_pa(save_area_page);
623 return 0;
624
625 free_save_area:
626 __free_page(save_area_page);
627 return ret;
628
629 }
630
631 static void set_dr_intercepts(struct vcpu_svm *svm)
632 {
633 struct vmcb *vmcb = svm->vmcb01.ptr;
634
635 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
636 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
637 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
638 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
639 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
640 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
641 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
642 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
643 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
644 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
645 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
646 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
647 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
648 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
649 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
650 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
651
652 svm_mark_intercepts_dirty(svm);
653 }
654
655 static void clr_dr_intercepts(struct vcpu_svm *svm)
656 {
657 struct vmcb *vmcb = svm->vmcb01.ptr;
658
659 vmcb->control.intercepts[INTERCEPT_DR] = 0;
660
661 svm_mark_intercepts_dirty(svm);
662 }
663
664 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
665 {
666 /*
667 * For non-nested case:
668 * If the L01 MSR bitmap does not intercept the MSR, then we need to
669 * save it.
670 *
671 * For nested case:
672 * If the L02 MSR bitmap does not intercept the MSR, then we need to
673 * save it.
674 */
675 void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
676 to_svm(vcpu)->msrpm;
677
678 return svm_test_msr_bitmap_write(msrpm, msr);
679 }
680
681 void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
682 {
683 struct vcpu_svm *svm = to_svm(vcpu);
684 void *msrpm = svm->msrpm;
685
686 /* Don't disable interception for MSRs userspace wants to handle. */
687 if (type & MSR_TYPE_R) {
688 if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
689 svm_clear_msr_bitmap_read(msrpm, msr);
690 else
691 svm_set_msr_bitmap_read(msrpm, msr);
692 }
693
694 if (type & MSR_TYPE_W) {
695 if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
696 svm_clear_msr_bitmap_write(msrpm, msr);
697 else
698 svm_set_msr_bitmap_write(msrpm, msr);
699 }
700
701 svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
702 svm->nested.force_msr_bitmap_recalc = true;
703 }
704
705 void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
706 {
707 unsigned int order = get_order(size);
708 struct page *pages = alloc_pages(gfp_mask, order);
709 void *pm;
710
711 if (!pages)
712 return NULL;
713
714 /*
715 * Set all bits in the permissions map so that all MSR and I/O accesses
716 * are intercepted by default.
717 */
718 pm = page_address(pages);
719 memset(pm, 0xff, PAGE_SIZE * (1 << order));
720
721 return pm;
722 }
723
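/*
 * Intercept the LBR MSRs unless LBR virtualization is enabled, in which case
 * hardware context switches the MSRs and they can be passed through.
 */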
724 static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
725 {
726 struct vcpu_svm *svm = to_svm(vcpu);
727 bool intercept = !(svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR);
728
729 if (intercept == svm->lbr_msrs_intercepted)
730 return;
731
732 svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
733 svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
734 svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTFROMIP, MSR_TYPE_RW, intercept);
735 svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTTOIP, MSR_TYPE_RW, intercept);
736
737 if (is_sev_es_guest(vcpu))
738 svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
739
740 svm->lbr_msrs_intercepted = intercept;
741 }
742
743 void svm_vcpu_free_msrpm(void *msrpm)
744 {
745 __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
746 }
747
748 static void svm_recalc_pmu_msr_intercepts(struct kvm_vcpu *vcpu)
749 {
750 bool intercept = !kvm_vcpu_has_mediated_pmu(vcpu);
751 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
752 int i;
753
754 if (!enable_mediated_pmu)
755 return;
756
757 /* Legacy counters are always available for AMD CPUs with a PMU. */
758 for (i = 0; i < min(pmu->nr_arch_gp_counters, AMD64_NUM_COUNTERS); i++)
759 svm_set_intercept_for_msr(vcpu, MSR_K7_PERFCTR0 + i,
760 MSR_TYPE_RW, intercept);
761
762 intercept |= !guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE);
763 for (i = 0; i < pmu->nr_arch_gp_counters; i++)
764 svm_set_intercept_for_msr(vcpu, MSR_F15H_PERF_CTR + 2 * i,
765 MSR_TYPE_RW, intercept);
766
767 for ( ; i < kvm_pmu_cap.num_counters_gp; i++)
768 svm_enable_intercept_for_msr(vcpu, MSR_F15H_PERF_CTR + 2 * i,
769 MSR_TYPE_RW);
770
771 intercept = kvm_need_perf_global_ctrl_intercept(vcpu);
772 svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
773 MSR_TYPE_RW, intercept);
774 svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
775 MSR_TYPE_RW, intercept);
776 svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
777 MSR_TYPE_RW, intercept);
778 svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
779 MSR_TYPE_RW, intercept);
780 }
781
782 static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
783 {
784 struct vcpu_svm *svm = to_svm(vcpu);
785
786 svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
787 svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
788
789 #ifdef CONFIG_X86_64
790 svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
791 svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
792 svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
793 svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
794 svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
795 svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
796 #endif
797
798 if (lbrv)
799 svm_recalc_lbr_msr_intercepts(vcpu);
800
801 if (cpu_feature_enabled(X86_FEATURE_IBPB))
802 svm_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
803 !guest_has_pred_cmd_msr(vcpu));
804
805 if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D))
806 svm_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
807 !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
808
809 /*
810 * Disable interception of SPEC_CTRL if KVM doesn't need to manually
811 * context switch the MSR (SPEC_CTRL is virtualized by the CPU), or if
812 * the guest has a non-zero SPEC_CTRL value, i.e. is likely actively
813 * using SPEC_CTRL.
814 */
815 if (cpu_feature_enabled(X86_FEATURE_V_SPEC_CTRL))
816 svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
817 !guest_has_spec_ctrl_msr(vcpu));
818 else
819 svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
820 !svm->spec_ctrl);
821
822 /*
823 * Intercept SYSENTER_EIP and SYSENTER_ESP when emulating an Intel CPU,
824  * as AMD hardware only stores 32 bits, whereas Intel CPUs track 64 bits.
825 */
826 svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW,
827 guest_cpuid_is_intel_compatible(vcpu));
828 svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW,
829 guest_cpuid_is_intel_compatible(vcpu));
830
831 if (kvm_aperfmperf_in_guest(vcpu->kvm)) {
832 svm_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, MSR_TYPE_R);
833 svm_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
834 }
835
836 if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
837 bool shstk_enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
838
839 svm_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, !shstk_enabled);
840 svm_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, !shstk_enabled);
841 svm_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, !shstk_enabled);
842 svm_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, !shstk_enabled);
843 svm_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, !shstk_enabled);
844 svm_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, !shstk_enabled);
845 }
846
847 if (is_sev_es_guest(vcpu))
848 sev_es_recalc_msr_intercepts(vcpu);
849
850 svm_recalc_pmu_msr_intercepts(vcpu);
851
852 /*
853 * x2APIC intercepts are modified on-demand and cannot be filtered by
854 * userspace.
855 */
856 }
857
858 static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
859 {
860 to_svm(vcpu)->vmcb->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_LBR;
861 }
862
863 void svm_enable_lbrv(struct kvm_vcpu *vcpu)
864 {
865 __svm_enable_lbrv(vcpu);
866 svm_recalc_lbr_msr_intercepts(vcpu);
867 }
868
869 static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
870 {
871 KVM_BUG_ON(is_sev_es_guest(vcpu), vcpu->kvm);
872 to_svm(vcpu)->vmcb->control.misc_ctl2 &= ~SVM_MISC2_ENABLE_V_LBR;
873 }
874
875 void svm_update_lbrv(struct kvm_vcpu *vcpu)
876 {
877 struct vcpu_svm *svm = to_svm(vcpu);
878 bool current_enable_lbrv = svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR;
879 bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
880 (is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
881 (svm->nested.ctl.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR));
882
883 if (enable_lbrv && !current_enable_lbrv)
884 __svm_enable_lbrv(vcpu);
885 else if (!enable_lbrv && current_enable_lbrv)
886 __svm_disable_lbrv(vcpu);
887
888 /*
889 * During nested transitions, it is possible that the current VMCB has
890  * LBR_CTL set, but the previous VMCB had it cleared (or vice versa).
891 * In this case, even though LBR_CTL does not need an update, intercepts
892 * do, so always recalculate the intercepts here.
893 */
894 svm_recalc_lbr_msr_intercepts(vcpu);
895 }
896
897 void disable_nmi_singlestep(struct vcpu_svm *svm)
898 {
899 svm->nmi_singlestep = false;
900
901 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
902 /* Clear our flags if they were not set by the guest */
903 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
904 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
905 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
906 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
907 }
908 }
909
910 static void grow_ple_window(struct kvm_vcpu *vcpu)
911 {
912 struct vcpu_svm *svm = to_svm(vcpu);
913 struct vmcb_control_area *control = &svm->vmcb->control;
914 int old = control->pause_filter_count;
915
916 if (kvm_pause_in_guest(vcpu->kvm))
917 return;
918
919 control->pause_filter_count = __grow_ple_window(old,
920 pause_filter_count,
921 pause_filter_count_grow,
922 pause_filter_count_max);
923
924 if (control->pause_filter_count != old) {
925 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
926 trace_kvm_ple_window_update(vcpu->vcpu_id,
927 control->pause_filter_count, old);
928 }
929 }
930
931 static void shrink_ple_window(struct kvm_vcpu *vcpu)
932 {
933 struct vcpu_svm *svm = to_svm(vcpu);
934 struct vmcb_control_area *control = &svm->vmcb->control;
935 int old = control->pause_filter_count;
936
937 if (kvm_pause_in_guest(vcpu->kvm))
938 return;
939
940 control->pause_filter_count =
941 __shrink_ple_window(old,
942 pause_filter_count,
943 pause_filter_count_shrink,
944 pause_filter_count);
945 if (control->pause_filter_count != old) {
946 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
947 trace_kvm_ple_window_update(vcpu->vcpu_id,
948 control->pause_filter_count, old);
949 }
950 }
951
952 static void svm_hardware_unsetup(void)
953 {
954 int cpu;
955
956 avic_hardware_unsetup();
957
958 sev_hardware_unsetup();
959
960 for_each_possible_cpu(cpu)
961 svm_cpu_uninit(cpu);
962
963 __free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
964 iopm_base = 0;
965 }
966
967 static void init_seg(struct vmcb_seg *seg)
968 {
969 seg->selector = 0;
970 seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
971 SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
972 seg->limit = 0xffff;
973 seg->base = 0;
974 }
975
976 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
977 {
978 seg->selector = 0;
979 seg->attrib = SVM_SELECTOR_P_MASK | type;
980 seg->limit = 0xffff;
981 seg->base = 0;
982 }
983
984 static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
985 {
986 struct vcpu_svm *svm = to_svm(vcpu);
987
988 return svm->nested.ctl.tsc_offset;
989 }
990
991 static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
992 {
993 struct vcpu_svm *svm = to_svm(vcpu);
994
995 return svm->tsc_ratio_msr;
996 }
997
998 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
999 {
1000 struct vcpu_svm *svm = to_svm(vcpu);
1001
1002 svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
1003 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
1004 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1005 }
1006
1007 void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
1008 {
1009 preempt_disable();
1010 if (to_svm(vcpu)->guest_state_loaded)
1011 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1012 preempt_enable();
1013 }
1014
1015 static bool svm_has_pending_gif_event(struct vcpu_svm *svm)
1016 {
1017 return svm->vcpu.arch.smi_pending ||
1018 svm->vcpu.arch.nmi_pending ||
1019 kvm_cpu_has_injectable_intr(&svm->vcpu) ||
1020 kvm_apic_has_pending_init_or_sipi(&svm->vcpu);
1021 }
1022
1023 /* Evaluate instruction intercepts that depend on guest CPUID features. */
1024 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
1025 {
1026 struct vcpu_svm *svm = to_svm(vcpu);
1027
1028 /*
1029 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
1030 * roots, or if INVPCID is disabled in the guest to inject #UD.
1031 */
1032 if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
1033 if (!npt_enabled ||
1034 !guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_INVPCID))
1035 svm_set_intercept(svm, INTERCEPT_INVPCID);
1036 else
1037 svm_clr_intercept(svm, INTERCEPT_INVPCID);
1038 }
1039
1040 if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
1041 if (guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP))
1042 svm_clr_intercept(svm, INTERCEPT_RDTSCP);
1043 else
1044 svm_set_intercept(svm, INTERCEPT_RDTSCP);
1045 }
1046
1047 /*
1048 * Intercept instructions that #UD if EFER.SVME=0, as SVME must be set
1049 * even when running the guest, i.e. hardware will only ever see
1050 * EFER.SVME=1.
1051 *
1052 * No need to toggle any of the vgif/vls/etc. enable bits here, as they
1053 * are set when the VMCB is initialized and never cleared (if the
1054 * relevant intercepts are set, the enablements are meaningless anyway).
1055 *
1056 * FIXME: When #GP is not intercepted, a #GP on these instructions (e.g.
1057 * due to CPL > 0) could be injected by hardware before the instruction
1058 * is intercepted, leading to #GP taking precedence over #UD from the
1059 * guest's perspective.
1060 */
1061 if (!(vcpu->arch.efer & EFER_SVME)) {
1062 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1063 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1064 svm_set_intercept(svm, INTERCEPT_CLGI);
1065 svm_set_intercept(svm, INTERCEPT_STGI);
1066 } else {
1067 /*
1068 * If hardware supports Virtual VMLOAD VMSAVE then enable it
1069 * in VMCB and clear intercepts to avoid #VMEXIT.
1070 */
1071 if (guest_cpuid_is_intel_compatible(vcpu)) {
1072 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1073 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1074 } else if (vls) {
1075 svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1076 svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1077 }
1078
1079 /*
1080 * Process pending events when clearing STGI/CLGI intercepts if
1081 * there's at least one pending event that is masked by GIF, so
1082 * that KVM re-evaluates if the intercept needs to be set again
1083 * to track when GIF is re-enabled (e.g. for NMI injection).
1084 */
1085 if (vgif) {
1086 svm_clr_intercept(svm, INTERCEPT_CLGI);
1087 svm_clr_intercept(svm, INTERCEPT_STGI);
1088
1089 if (svm_has_pending_gif_event(svm))
1090 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1091 }
1092 }
1093
1094 if (kvm_need_rdpmc_intercept(vcpu))
1095 svm_set_intercept(svm, INTERCEPT_RDPMC);
1096 else
1097 svm_clr_intercept(svm, INTERCEPT_RDPMC);
1098 }
1099
1100 static void svm_recalc_intercepts(struct kvm_vcpu *vcpu)
1101 {
1102 svm_recalc_instruction_intercepts(vcpu);
1103 svm_recalc_msr_intercepts(vcpu);
1104 }
1105
1106 static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
1107 {
1108 struct vcpu_svm *svm = to_svm(vcpu);
1109 struct vmcb *vmcb = svm->vmcb01.ptr;
1110 struct vmcb_control_area *control = &vmcb->control;
1111 struct vmcb_save_area *save = &vmcb->save;
1112
1113 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1114 svm_set_intercept(svm, INTERCEPT_CR3_READ);
1115 svm_set_intercept(svm, INTERCEPT_CR4_READ);
1116 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1117 svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
1118 svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
1119 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
1120
1121 set_dr_intercepts(svm);
1122
1123 set_exception_intercept(svm, PF_VECTOR);
1124 set_exception_intercept(svm, UD_VECTOR);
1125 set_exception_intercept(svm, MC_VECTOR);
1126 set_exception_intercept(svm, AC_VECTOR);
1127 set_exception_intercept(svm, DB_VECTOR);
1128 /*
1129 * Guest access to VMware backdoor ports could legitimately
1130 * trigger #GP because of TSS I/O permission bitmap.
1131 * We intercept those #GP and allow access to them anyway
1132 * as VMware does.
1133 */
1134 if (enable_vmware_backdoor)
1135 set_exception_intercept(svm, GP_VECTOR);
1136
1137 svm_set_intercept(svm, INTERCEPT_INTR);
1138 svm_set_intercept(svm, INTERCEPT_NMI);
1139
1140 if (intercept_smi)
1141 svm_set_intercept(svm, INTERCEPT_SMI);
1142
1143 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1144 svm_set_intercept(svm, INTERCEPT_RDPMC);
1145 svm_set_intercept(svm, INTERCEPT_CPUID);
1146 svm_set_intercept(svm, INTERCEPT_INVD);
1147 svm_set_intercept(svm, INTERCEPT_INVLPG);
1148 svm_set_intercept(svm, INTERCEPT_INVLPGA);
1149 svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
1150 svm_set_intercept(svm, INTERCEPT_MSR_PROT);
1151 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
1152 svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
1153 svm_set_intercept(svm, INTERCEPT_VMRUN);
1154 svm_set_intercept(svm, INTERCEPT_VMMCALL);
1155 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1156 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1157 svm_set_intercept(svm, INTERCEPT_STGI);
1158 svm_set_intercept(svm, INTERCEPT_CLGI);
1159 svm_set_intercept(svm, INTERCEPT_SKINIT);
1160 svm_set_intercept(svm, INTERCEPT_WBINVD);
1161 svm_set_intercept(svm, INTERCEPT_XSETBV);
1162 svm_set_intercept(svm, INTERCEPT_RDPRU);
1163 svm_set_intercept(svm, INTERCEPT_RSM);
1164
1165 if (!kvm_mwait_in_guest(vcpu->kvm)) {
1166 svm_set_intercept(svm, INTERCEPT_MONITOR);
1167 svm_set_intercept(svm, INTERCEPT_MWAIT);
1168 }
1169
1170 if (!kvm_hlt_in_guest(vcpu->kvm)) {
1171 if (cpu_feature_enabled(X86_FEATURE_IDLE_HLT))
1172 svm_set_intercept(svm, INTERCEPT_IDLE_HLT);
1173 else
1174 svm_set_intercept(svm, INTERCEPT_HLT);
1175 }
1176
1177 control->iopm_base_pa = iopm_base;
1178 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1179 control->int_ctl = V_INTR_MASKING_MASK;
1180
1181 init_seg(&save->es);
1182 init_seg(&save->ss);
1183 init_seg(&save->ds);
1184 init_seg(&save->fs);
1185 init_seg(&save->gs);
1186
1187 save->cs.selector = 0xf000;
1188 save->cs.base = 0xffff0000;
1189 /* Executable/Readable Code Segment */
1190 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1191 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1192 save->cs.limit = 0xffff;
1193
1194 save->gdtr.base = 0;
1195 save->gdtr.limit = 0xffff;
1196 save->idtr.base = 0;
1197 save->idtr.limit = 0xffff;
1198
1199 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1200 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1201
1202 if (npt_enabled) {
1203 /* Setup VMCB for Nested Paging */
1204 control->misc_ctl |= SVM_MISC_ENABLE_NP;
1205 svm_clr_intercept(svm, INTERCEPT_INVLPG);
1206 clr_exception_intercept(svm, PF_VECTOR);
1207 svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1208 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
1209 save->g_pat = vcpu->arch.pat;
1210 save->cr3 = 0;
1211 }
1212 svm->current_vmcb->asid_generation = 0;
1213 svm->asid = 0;
1214
1215 svm->nested.vmcb12_gpa = INVALID_GPA;
1216 svm->nested.last_vmcb12_gpa = INVALID_GPA;
1217
1218 if (!kvm_pause_in_guest(vcpu->kvm)) {
1219 control->pause_filter_count = pause_filter_count;
1220 if (pause_filter_thresh)
1221 control->pause_filter_thresh = pause_filter_thresh;
1222 svm_set_intercept(svm, INTERCEPT_PAUSE);
1223 } else {
1224 svm_clr_intercept(svm, INTERCEPT_PAUSE);
1225 }
1226
1227 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
1228 svm->vmcb->control.erap_ctl |= ERAP_CONTROL_ALLOW_LARGER_RAP;
1229
1230 if (enable_apicv && irqchip_in_kernel(vcpu->kvm))
1231 avic_init_vmcb(svm, vmcb);
1232
1233 if (vnmi)
1234 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;
1235
1236 if (vgif)
1237 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1238
1239 if (vls)
1240 svm->vmcb->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
1241
1242 if (vcpu->kvm->arch.bus_lock_detection_enabled)
1243 svm_set_intercept(svm, INTERCEPT_BUSLOCK);
1244
1245 if (is_sev_guest(vcpu))
1246 sev_init_vmcb(svm, init_event);
1247
1248 svm_hv_init_vmcb(vmcb);
1249
1250 kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
1251
1252 vmcb_mark_all_dirty(vmcb);
1253
1254 enable_gif(svm);
1255 }
1256
1257 static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
1258 {
1259 struct vcpu_svm *svm = to_svm(vcpu);
1260
1261 svm_init_osvw(vcpu);
1262
1263 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
1264 vcpu->arch.microcode_version = 0x01000065;
1265 svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
1266
1267 svm->nmi_masked = false;
1268 svm->awaiting_iret_completion = false;
1269 }
1270
1271 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1272 {
1273 struct vcpu_svm *svm = to_svm(vcpu);
1274
1275 svm->spec_ctrl = 0;
1276 svm->virt_spec_ctrl = 0;
1277
1278 init_vmcb(vcpu, init_event);
1279
1280 if (!init_event)
1281 __svm_vcpu_reset(vcpu);
1282 }
1283
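/* Make @target_vmcb the active VMCB, e.g. when switching between vmcb01 (L1) and vmcb02 (L2). */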
1284 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
1285 {
1286 svm->current_vmcb = target_vmcb;
1287 svm->vmcb = target_vmcb->ptr;
1288 }
1289
1290 static int svm_vcpu_precreate(struct kvm *kvm)
1291 {
1292 return avic_alloc_physical_id_table(kvm);
1293 }
1294
1295 static int svm_vcpu_create(struct kvm_vcpu *vcpu)
1296 {
1297 struct vcpu_svm *svm;
1298 struct page *vmcb01_page;
1299 int err;
1300
1301 BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1302 svm = to_svm(vcpu);
1303
1304 err = -ENOMEM;
1305 vmcb01_page = snp_safe_alloc_page();
1306 if (!vmcb01_page)
1307 goto out;
1308
1309 err = sev_vcpu_create(vcpu);
1310 if (err)
1311 goto error_free_vmcb_page;
1312
1313 err = avic_init_vcpu(svm);
1314 if (err)
1315 goto error_free_sev;
1316
1317 svm->msrpm = svm_vcpu_alloc_msrpm();
1318 if (!svm->msrpm) {
1319 err = -ENOMEM;
1320 goto error_free_sev;
1321 }
1322
1323 svm->x2avic_msrs_intercepted = true;
1324 svm->lbr_msrs_intercepted = true;
1325
1326 svm->vmcb01.ptr = page_address(vmcb01_page);
1327 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
1328 svm_switch_vmcb(svm, &svm->vmcb01);
1329
1330 svm->guest_state_loaded = false;
1331
1332 return 0;
1333
1334 error_free_sev:
1335 sev_free_vcpu(vcpu);
1336 error_free_vmcb_page:
1337 __free_page(vmcb01_page);
1338 out:
1339 return err;
1340 }
1341
1342 static void svm_vcpu_free(struct kvm_vcpu *vcpu)
1343 {
1344 struct vcpu_svm *svm = to_svm(vcpu);
1345
1346 WARN_ON_ONCE(!list_empty(&svm->ir_list));
1347
1348 svm_leave_nested(vcpu);
1349 svm_free_nested(svm);
1350
1351 sev_free_vcpu(vcpu);
1352
1353 __free_page(__sme_pa_to_page(svm->vmcb01.pa));
1354 svm_vcpu_free_msrpm(svm->msrpm);
1355 }
1356
1357 #ifdef CONFIG_CPU_MITIGATIONS
1358 static DEFINE_SPINLOCK(srso_lock);
1359 static atomic_t srso_nr_vms;
1360
1361 static void svm_srso_clear_bp_spec_reduce(void *ign)
1362 {
1363 struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
1364
1365 if (!sd->bp_spec_reduce_set)
1366 return;
1367
1368 msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
1369 sd->bp_spec_reduce_set = false;
1370 }
1371
1372 static void svm_srso_vm_destroy(void)
1373 {
1374 if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
1375 return;
1376
1377 if (atomic_dec_return(&srso_nr_vms))
1378 return;
1379
1380 guard(spinlock)(&srso_lock);
1381
1382 /*
1383 * Verify a new VM didn't come along, acquire the lock, and increment
1384 * the count before this task acquired the lock.
1385 */
1386 if (atomic_read(&srso_nr_vms))
1387 return;
1388
1389 on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
1390 }
1391
1392 static void svm_srso_vm_init(void)
1393 {
1394 if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
1395 return;
1396
1397 /*
1398 * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
1399 * transition, i.e. destroying the last VM, is fully complete, e.g. so
1400 * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
1401 */
1402 if (atomic_inc_not_zero(&srso_nr_vms))
1403 return;
1404
1405 guard(spinlock)(&srso_lock);
1406
1407 atomic_inc(&srso_nr_vms);
1408 }
1409 #else
1410 static void svm_srso_vm_init(void) { }
1411 static void svm_srso_vm_destroy(void) { }
1412 #endif
1413
1414 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1415 {
1416 struct vcpu_svm *svm = to_svm(vcpu);
1417 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
1418
1419 if (is_sev_es_guest(vcpu))
1420 sev_es_unmap_ghcb(svm);
1421
1422 if (svm->guest_state_loaded)
1423 return;
1424
1425 /*
1426 * Save additional host state that will be restored on VMEXIT (sev-es)
1427 * or subsequent vmload of host save area.
1428 */
1429 vmsave(sd->save_area_pa);
1430 if (is_sev_es_guest(vcpu))
1431 sev_es_prepare_switch_to_guest(svm, sev_es_host_save_area(sd));
1432
1433 if (tsc_scaling)
1434 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1435
1436 /*
1437 * TSC_AUX is always virtualized (context switched by hardware) for
1438 * SEV-ES guests when the feature is available. For non-SEV-ES guests,
1439 * context switch TSC_AUX via the user_return MSR infrastructure (not
1440 * all CPUs support TSC_AUX virtualization).
1441 */
1442 if (likely(tsc_aux_uret_slot >= 0) &&
1443 (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !is_sev_es_guest(vcpu)))
1444 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
1445
1446 if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
1447 !sd->bp_spec_reduce_set) {
1448 sd->bp_spec_reduce_set = true;
1449 msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
1450 }
1451 svm->guest_state_loaded = true;
1452 }
1453
1454 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
1455 {
1456 to_svm(vcpu)->guest_state_loaded = false;
1457 }
1458
1459 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1460 {
1461 if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
1462 shrink_ple_window(vcpu);
1463
1464 if (kvm_vcpu_apicv_active(vcpu))
1465 avic_vcpu_load(vcpu, cpu);
1466 }
1467
1468 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1469 {
1470 if (kvm_vcpu_apicv_active(vcpu))
1471 avic_vcpu_put(vcpu);
1472
1473 svm_prepare_host_switch(vcpu);
1474
1475 ++vcpu->stat.host_state_reload;
1476 }
1477
1478 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1479 {
1480 struct vcpu_svm *svm = to_svm(vcpu);
1481 unsigned long rflags = svm->vmcb->save.rflags;
1482
1483 if (svm->nmi_singlestep) {
1484 /* Hide our flags if they were not set by the guest */
1485 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1486 rflags &= ~X86_EFLAGS_TF;
1487 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1488 rflags &= ~X86_EFLAGS_RF;
1489 }
1490 return rflags;
1491 }
1492
1493 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1494 {
1495 if (to_svm(vcpu)->nmi_singlestep)
1496 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1497
1498 /*
1499 * Any change of EFLAGS.VM is accompanied by a reload of SS
1500 * (caused by either a task switch or an inter-privilege IRET),
1501 * so we do not need to update the CPL here.
1502 */
1503 to_svm(vcpu)->vmcb->save.rflags = rflags;
1504 }
1505
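/*
 * For SEV-ES guests the saved RFLAGS is encrypted, so rely on the interrupt
 * mask bit that hardware reports in int_state instead of RFLAGS.IF.
 */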
1506 static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
1507 {
1508 struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1509
1510 return is_sev_es_guest(vcpu)
1511 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
1512 : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
1513 }
1514
1515 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1516 {
1517 kvm_register_mark_available(vcpu, reg);
1518
1519 switch (reg) {
1520 case VCPU_EXREG_PDPTR:
1521 /*
1522 * When !npt_enabled, mmu->pdptrs[] is already available since
1523 * it is always updated per SDM when moving to CRs.
1524 */
1525 if (npt_enabled)
1526 load_pdptrs(vcpu, kvm_read_cr3(vcpu));
1527 break;
1528 default:
1529 KVM_BUG_ON(1, vcpu->kvm);
1530 }
1531 }
1532
1533 static void svm_set_vintr(struct vcpu_svm *svm)
1534 {
1535 struct vmcb_control_area *control;
1536
1537 /*
1538 * The following fields are ignored when AVIC is enabled
1539 */
1540 WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
1541
1542 svm_set_intercept(svm, INTERCEPT_VINTR);
1543
1544 /*
1545 * Recalculating intercepts may have cleared the VINTR intercept. If
1546 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
1547 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
1548 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
1549 * interrupts will never be unblocked while L2 is running.
1550 */
1551 if (!svm_is_intercept(svm, INTERCEPT_VINTR))
1552 return;
1553
1554 /*
1555 * This is just a dummy VINTR to actually cause a vmexit to happen.
1556 * Actual injection of virtual interrupts happens through EVENTINJ.
1557 */
1558 control = &svm->vmcb->control;
1559 control->int_vector = 0x0;
1560 control->int_ctl &= ~V_INTR_PRIO_MASK;
1561 control->int_ctl |= V_IRQ_MASK |
1562 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1563 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1564 }
1565
1566 static void svm_clear_vintr(struct vcpu_svm *svm)
1567 {
1568 svm_clr_intercept(svm, INTERCEPT_VINTR);
1569
1570 /* Drop int_ctl fields related to VINTR injection. */
1571 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1572 if (is_guest_mode(&svm->vcpu)) {
1573 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1574
1575 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1576 (svm->nested.ctl.int_ctl & V_TPR_MASK));
1577
1578 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1579 V_IRQ_INJECTION_BITS_MASK;
1580
1581 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1582 }
1583
1584 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1585 }
1586
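/*
 * Return the VMCB segment register for @seg. FS, GS, TR and LDTR are always
 * taken from vmcb01, as they are switched via VMLOAD/VMSAVE rather than being
 * tracked per-VMCB (an assumption based on the save01 usage below).
 */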
1587 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1588 {
1589 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1590 struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
1591
1592 switch (seg) {
1593 case VCPU_SREG_CS: return &save->cs;
1594 case VCPU_SREG_DS: return &save->ds;
1595 case VCPU_SREG_ES: return &save->es;
1596 case VCPU_SREG_FS: return &save01->fs;
1597 case VCPU_SREG_GS: return &save01->gs;
1598 case VCPU_SREG_SS: return &save->ss;
1599 case VCPU_SREG_TR: return &save01->tr;
1600 case VCPU_SREG_LDTR: return &save01->ldtr;
1601 }
1602 BUG();
1603 return NULL;
1604 }
1605
1606 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1607 {
1608 struct vmcb_seg *s = svm_seg(vcpu, seg);
1609
1610 return s->base;
1611 }
1612
1613 static void svm_get_segment(struct kvm_vcpu *vcpu,
1614 struct kvm_segment *var, int seg)
1615 {
1616 struct vmcb_seg *s = svm_seg(vcpu, seg);
1617
1618 var->base = s->base;
1619 var->limit = s->limit;
1620 var->selector = s->selector;
1621 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1622 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1623 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1624 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1625 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1626 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1627 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1628
1629 /*
1630 * AMD CPUs circa 2014 track the G bit for all segments except CS.
1631 * However, the SVM spec states that the G bit is not observed by the
1632 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1633  * So let's synthesize a legal G bit for all segments; this helps
1634 * running KVM nested. It also helps cross-vendor migration, because
1635 * Intel's vmentry has a check on the 'G' bit.
1636 */
1637 var->g = s->limit > 0xfffff;
1638
1639 /*
1640 * AMD's VMCB does not have an explicit unusable field, so emulate it
1641  * for cross-vendor migration purposes by deriving it from "not present".
1642 */
1643 var->unusable = !var->present;
1644
1645 switch (seg) {
1646 case VCPU_SREG_TR:
1647 /*
1648 * Work around a bug where the busy flag in the tr selector
1649 * isn't exposed
1650 */
1651 var->type |= 0x2;
1652 break;
1653 case VCPU_SREG_DS:
1654 case VCPU_SREG_ES:
1655 case VCPU_SREG_FS:
1656 case VCPU_SREG_GS:
1657 /*
1658  * The accessed bit must always be set in the segment
1659  * descriptor cache; although it can be cleared in the
1660  * descriptor itself, the cached bit always remains 1. Since
1661 * Intel has a check on this, set it here to support
1662 * cross-vendor migration.
1663 */
1664 if (!var->unusable)
1665 var->type |= 0x1;
1666 break;
1667 case VCPU_SREG_SS:
1668 /*
1669 * On AMD CPUs sometimes the DB bit in the segment
1670 * descriptor is left as 1, although the whole segment has
1671 * been made unusable. Clear it here to pass an Intel VMX
1672 * entry check when cross vendor migrating.
1673 */
1674 if (var->unusable)
1675 var->db = 0;
1676 /* This is symmetric with svm_set_segment() */
1677 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1678 break;
1679 }
1680 }
1681
1682 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1683 {
1684 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1685
1686 return save->cpl;
1687 }
1688
1689 static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1690 {
1691 struct kvm_segment cs;
1692
1693 svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
1694 *db = cs.db;
1695 *l = cs.l;
1696 }
1697
1698 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1699 {
1700 struct vcpu_svm *svm = to_svm(vcpu);
1701
1702 dt->size = svm->vmcb->save.idtr.limit;
1703 dt->address = svm->vmcb->save.idtr.base;
1704 }
1705
1706 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1707 {
1708 struct vcpu_svm *svm = to_svm(vcpu);
1709
1710 svm->vmcb->save.idtr.limit = dt->size;
1711 svm->vmcb->save.idtr.base = dt->address;
1712 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1713 }
1714
1715 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1716 {
1717 struct vcpu_svm *svm = to_svm(vcpu);
1718
1719 dt->size = svm->vmcb->save.gdtr.limit;
1720 dt->address = svm->vmcb->save.gdtr.base;
1721 }
1722
1723 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1724 {
1725 struct vcpu_svm *svm = to_svm(vcpu);
1726
1727 svm->vmcb->save.gdtr.limit = dt->size;
1728 svm->vmcb->save.gdtr.base = dt->address;
1729 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1730 }
1731
1732 static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1733 {
1734 struct vcpu_svm *svm = to_svm(vcpu);
1735
1736 /*
1737 * For guests that don't set guest_state_protected, the cr3 update is
1738 * handled via kvm_mmu_load() while entering the guest. For guests
1739 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
1740 * VMCB save area now, since the save area will become the initial
1741 * contents of the VMSA, and future VMCB save area updates won't be
1742 * seen.
1743 */
1744 if (is_sev_es_guest(vcpu)) {
1745 svm->vmcb->save.cr3 = cr3;
1746 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1747 }
1748 }
1749
1750 static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1751 {
1752 return true;
1753 }
1754
1755 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1756 {
1757 struct vcpu_svm *svm = to_svm(vcpu);
1758 u64 hcr0 = cr0;
1759 bool old_paging = is_paging(vcpu);
1760
1761 #ifdef CONFIG_X86_64
1762 if (vcpu->arch.efer & EFER_LME) {
1763 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1764 vcpu->arch.efer |= EFER_LMA;
1765 if (!vcpu->arch.guest_state_protected)
1766 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1767 }
1768
1769 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1770 vcpu->arch.efer &= ~EFER_LMA;
1771 if (!vcpu->arch.guest_state_protected)
1772 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1773 }
1774 }
1775 #endif
1776 vcpu->arch.cr0 = cr0;
1777
1778 if (!npt_enabled) {
1779 hcr0 |= X86_CR0_PG | X86_CR0_WP;
1780 if (old_paging != is_paging(vcpu))
1781 svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
1782 }
1783
1784 /*
1785 * Re-enable caching here because the QEMU BIOS
1786 * does not do it; this avoids a noticeable delay
1787 * at reboot.
1788 */
1789 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1790 hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1791
1792 svm->vmcb->save.cr0 = hcr0;
1793 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1794
1795 /*
1796 * SEV-ES guests must always keep the CR intercepts cleared. CR
1797 * tracking is done using the CR write traps.
1798 */
1799 if (is_sev_es_guest(vcpu))
1800 return;
1801
1802 if (hcr0 == cr0) {
1803 /* Selective CR0 write remains on. */
1804 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1805 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1806 } else {
1807 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1808 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1809 }
1810 }
1811
1812 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1813 {
1814 return true;
1815 }
1816
1817 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1818 {
1819 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1820 unsigned long old_cr4 = vcpu->arch.cr4;
1821
1822 vcpu->arch.cr4 = cr4;
1823 if (!npt_enabled) {
1824 cr4 |= X86_CR4_PAE;
1825
1826 if (!is_paging(vcpu))
1827 cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
1828 }
1829 cr4 |= host_cr4_mce;
1830 to_svm(vcpu)->vmcb->save.cr4 = cr4;
1831 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1832
1833 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1834 vcpu->arch.cpuid_dynamic_bits_dirty = true;
1835 }
1836
1837 static void svm_set_segment(struct kvm_vcpu *vcpu,
1838 struct kvm_segment *var, int seg)
1839 {
1840 struct vcpu_svm *svm = to_svm(vcpu);
1841 struct vmcb_seg *s = svm_seg(vcpu, seg);
1842
1843 s->base = var->base;
1844 s->limit = var->limit;
1845 s->selector = var->selector;
1846 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1847 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1848 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1849 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1850 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1851 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1852 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1853 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1854
1855 /*
1856 * This is always accurate, except if SYSRET returned to a segment
1857 * with SS.DPL != 3. Intel does not have this quirk, and always
1858 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1859 * would entail passing the CPL to userspace and back.
1860 */
1861 if (seg == VCPU_SREG_SS)
1862 /* This is symmetric with svm_get_segment() */
1863 svm->vmcb->save.cpl = (var->dpl & 3);
1864
1865 vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1866 }
1867
1868 static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
1869 {
1870 struct vcpu_svm *svm = to_svm(vcpu);
1871
1872 clr_exception_intercept(svm, BP_VECTOR);
1873
1874 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1875 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1876 set_exception_intercept(svm, BP_VECTOR);
1877 }
1878 }
1879
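/*
 * Allocate a fresh ASID for the vCPU from the per-CPU pool.  When the pool
 * is exhausted, start a new generation: restart at min_asid and request a
 * flush of all ASIDs so that translations tagged with ASIDs from the
 * previous generation cannot be reused.
 */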
1880 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1881 {
1882 if (sd->next_asid > sd->max_asid) {
1883 ++sd->asid_generation;
1884 sd->next_asid = sd->min_asid;
1885 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1886 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1887 }
1888
1889 svm->current_vmcb->asid_generation = sd->asid_generation;
1890 svm->asid = sd->next_asid++;
1891 }
1892
1893 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1894 {
1895 struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1896
1897 if (vcpu->arch.guest_state_protected)
1898 return;
1899
1900 if (unlikely(value != vmcb->save.dr6)) {
1901 vmcb->save.dr6 = value;
1902 vmcb_mark_dirty(vmcb, VMCB_DR);
1903 }
1904 }
1905
1906 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1907 {
1908 struct vcpu_svm *svm = to_svm(vcpu);
1909
1910 if (WARN_ON_ONCE(is_sev_es_guest(vcpu)))
1911 return;
1912
1913 get_debugreg(vcpu->arch.db[0], 0);
1914 get_debugreg(vcpu->arch.db[1], 1);
1915 get_debugreg(vcpu->arch.db[2], 2);
1916 get_debugreg(vcpu->arch.db[3], 3);
1917 /*
1918 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
1919 * because db_interception might need it. We can do it before vmentry.
1920 */
1921 vcpu->arch.dr6 = svm->vmcb->save.dr6;
1922 vcpu->arch.dr7 = svm->vmcb->save.dr7;
1923 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1924 set_dr_intercepts(svm);
1925 }
1926
1927 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1928 {
1929 struct vcpu_svm *svm = to_svm(vcpu);
1930
1931 if (vcpu->arch.guest_state_protected)
1932 return;
1933
1934 svm->vmcb->save.dr7 = value;
1935 vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1936 }
1937
1938 static int pf_interception(struct kvm_vcpu *vcpu)
1939 {
1940 struct vcpu_svm *svm = to_svm(vcpu);
1941
1942 u64 fault_address = svm->vmcb->control.exit_info_2;
1943 u64 error_code = svm->vmcb->control.exit_info_1;
1944
1945 return kvm_handle_page_fault(vcpu, error_code, fault_address,
1946 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1947 svm->vmcb->control.insn_bytes : NULL,
1948 svm->vmcb->control.insn_len);
1949 }
1950
1951 static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1952 void *insn, int insn_len);
1953
1954 static int npf_interception(struct kvm_vcpu *vcpu)
1955 {
1956 struct vcpu_svm *svm = to_svm(vcpu);
1957 int rc;
1958
1959 u64 error_code = svm->vmcb->control.exit_info_1;
1960 gpa_t gpa = svm->vmcb->control.exit_info_2;
1961
1962 /*
1963 * WARN if hardware generates a fault with an error code that collides
1964 * with KVM-defined synthetic flags. Clear the flags and continue on,
1965 * i.e. don't terminate the VM, as KVM can't possibly be relying on a
1966 * flag that KVM doesn't know about.
1967 */
1968 if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK))
1969 error_code &= ~PFERR_SYNTHETIC_MASK;
1970
1971 /*
1972 * Expedite fast MMIO handling only if the next RIP is known and KVM is
1973 * allowed to emulate a page fault; skipping the current instruction is wrong
1974 * if the #NPF occurred while vectoring an event.
1975 */
1976 if ((error_code & PFERR_RSVD_MASK) && !is_guest_mode(vcpu)) {
1977 const int emul_type = EMULTYPE_PF | EMULTYPE_NO_DECODE;
1978
1979 if (svm_check_emulate_instruction(vcpu, emul_type, NULL, 0))
1980 return 1;
1981
1982 if (nrips && svm->vmcb->control.next_rip &&
1983 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
1984 trace_kvm_fast_mmio(gpa);
1985 return kvm_skip_emulated_instruction(vcpu);
1986 }
1987 }
1988
1989 if (is_sev_snp_guest(vcpu) && (error_code & PFERR_GUEST_ENC_MASK))
1990 error_code |= PFERR_PRIVATE_ACCESS;
1991
1992 trace_kvm_page_fault(vcpu, gpa, error_code);
1993 rc = kvm_mmu_page_fault(vcpu, gpa, error_code,
1994 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1995 svm->vmcb->control.insn_bytes : NULL,
1996 svm->vmcb->control.insn_len);
1997
1998 if (rc > 0 && error_code & PFERR_GUEST_RMP_MASK)
1999 sev_handle_rmp_fault(vcpu, gpa, error_code);
2000
2001 return rc;
2002 }
2003
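/*
 * #DB intercept: if the #DB is not due to host-side debugging (single-step
 * or hardware breakpoints requested by userspace) and not KVM's own NMI
 * single-stepping, reflect the exception into the guest with a DR6 payload;
 * otherwise exit to userspace with KVM_EXIT_DEBUG.
 */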
2004 static int db_interception(struct kvm_vcpu *vcpu)
2005 {
2006 struct kvm_run *kvm_run = vcpu->run;
2007 struct vcpu_svm *svm = to_svm(vcpu);
2008
2009 if (!(vcpu->guest_debug &
2010 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2011 !svm->nmi_singlestep) {
2012 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
2013 kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
2014 return 1;
2015 }
2016
2017 if (svm->nmi_singlestep) {
2018 disable_nmi_singlestep(svm);
2019 /* Make sure we check for pending NMIs upon entry */
2020 kvm_make_request(KVM_REQ_EVENT, vcpu);
2021 }
2022
2023 if (vcpu->guest_debug &
2024 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2025 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2026 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2027 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
2028 kvm_run->debug.arch.pc =
2029 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2030 kvm_run->debug.arch.exception = DB_VECTOR;
2031 return 0;
2032 }
2033
2034 return 1;
2035 }
2036
2037 static int bp_interception(struct kvm_vcpu *vcpu)
2038 {
2039 struct vcpu_svm *svm = to_svm(vcpu);
2040 struct kvm_run *kvm_run = vcpu->run;
2041
2042 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2043 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2044 kvm_run->debug.arch.exception = BP_VECTOR;
2045 return 0;
2046 }
2047
2048 static int ud_interception(struct kvm_vcpu *vcpu)
2049 {
2050 return handle_ud(vcpu);
2051 }
2052
2053 static int ac_interception(struct kvm_vcpu *vcpu)
2054 {
2055 kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
2056 return 1;
2057 }
2058
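/*
 * Check whether an #MC matches the signature of AMD erratum 383 (a specific
 * MC0_STATUS value).  If so, clear the MCi_STATUS registers and flush the
 * TLB to evict multi-match entries before reporting the erratum.
 */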
2059 static bool is_erratum_383(void)
2060 {
2061 int i;
2062 u64 value;
2063
2064 if (!erratum_383_found)
2065 return false;
2066
2067 if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
2068 return false;
2069
2070 /* Bit 62 may or may not be set for this mce */
2071 value &= ~(1ULL << 62);
2072
2073 if (value != 0xb600000000010015ULL)
2074 return false;
2075
2076 /* Clear MCi_STATUS registers */
2077 for (i = 0; i < 6; ++i)
2078 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
2079
2080 if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
2081 value &= ~(1ULL << 2);
2082 native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
2083 }
2084
2085 /* Flush tlb to evict multi-match entries */
2086 __flush_tlb_all();
2087
2088 return true;
2089 }
2090
2091 static void svm_handle_mce(struct kvm_vcpu *vcpu)
2092 {
2093 if (is_erratum_383()) {
2094 /*
2095 * Erratum 383 triggered. Guest state is corrupt so kill the
2096 * guest.
2097 */
2098 pr_err("Guest triggered AMD Erratum 383\n");
2099
2100 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2101
2102 return;
2103 }
2104
2105 /*
2106 * On an #MC intercept the MCE handler is not called automatically in
2107 * the host. So do it by hand here.
2108 */
2109 kvm_machine_check();
2110 }
2111
2112 static int mc_interception(struct kvm_vcpu *vcpu)
2113 {
2114 return 1;
2115 }
2116
2117 static int shutdown_interception(struct kvm_vcpu *vcpu)
2118 {
2119 struct kvm_run *kvm_run = vcpu->run;
2120 struct vcpu_svm *svm = to_svm(vcpu);
2121
2122
2123 /*
2124 * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put
2125 * the VMCB in a known good state. Unfortunately, KVM doesn't have
2126 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2127 * userspace. At a platform view, INIT is acceptable behavior as
2128 * there exist bare metal platforms that automatically INIT the CPU
2129 * in response to shutdown.
2130 *
2131 * The VM save area for SEV-ES guests has already been encrypted so it
2132 * cannot be reinitialized, i.e. synthesizing INIT is futile.
2133 */
2134 if (!is_sev_es_guest(vcpu)) {
2135 clear_page(svm->vmcb);
2136 #ifdef CONFIG_KVM_SMM
2137 if (is_smm(vcpu))
2138 kvm_smm_changed(vcpu, false);
2139 #endif
2140 kvm_vcpu_reset(vcpu, true);
2141 }
2142
2143 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2144 return 0;
2145 }
2146
2147 static int io_interception(struct kvm_vcpu *vcpu)
2148 {
2149 struct vcpu_svm *svm = to_svm(vcpu);
2150 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2151 int size, in, string;
2152 unsigned port;
2153
2154 ++vcpu->stat.io_exits;
2155 string = (io_info & SVM_IOIO_STR_MASK) != 0;
2156 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2157 port = io_info >> 16;
2158 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2159
2160 if (string) {
2161 if (is_sev_es_guest(vcpu))
2162 return sev_es_string_io(svm, size, port, in);
2163 else
2164 return kvm_emulate_instruction(vcpu, 0);
2165 }
2166
2167 svm->next_rip = svm->vmcb->control.exit_info_2;
2168
2169 return kvm_fast_pio(vcpu, size, port, in);
2170 }
2171
2172 static int nmi_interception(struct kvm_vcpu *vcpu)
2173 {
2174 return 1;
2175 }
2176
2177 static int smi_interception(struct kvm_vcpu *vcpu)
2178 {
2179 return 1;
2180 }
2181
2182 static int intr_interception(struct kvm_vcpu *vcpu)
2183 {
2184 ++vcpu->stat.irq_exits;
2185 return 1;
2186 }
2187
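/*
 * Emulate VMLOAD/VMSAVE executed by L1.  The VMLOAD/VMSAVE-managed state is
 * copied between the guest's VMCB (GPA in RAX) and KVM's vmcb01, the VMCB
 * on which KVM actually performs VMLOAD/VMSAVE.
 */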
2188 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2189 {
2190 u64 vmcb12_gpa = kvm_register_read(vcpu, VCPU_REGS_RAX);
2191 struct vcpu_svm *svm = to_svm(vcpu);
2192 struct vmcb *vmcb12;
2193 struct kvm_host_map map;
2194 int ret;
2195
2196 if (nested_svm_check_permissions(vcpu))
2197 return 1;
2198
2199 if (!page_address_valid(vcpu, vmcb12_gpa)) {
2200 kvm_inject_gp(vcpu, 0);
2201 return 1;
2202 }
2203
2204 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map))
2205 return kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
2206
2207 vmcb12 = map.hva;
2208
2209 ret = kvm_skip_emulated_instruction(vcpu);
2210
2211 /* KVM always performs VMLOAD/VMSAVE on VMCB01 (see __svm_vcpu_run()) */
2212 if (vmload) {
2213 svm_copy_vmloadsave_state(svm->vmcb01.ptr, vmcb12);
2214 svm->sysenter_eip_hi = 0;
2215 svm->sysenter_esp_hi = 0;
2216 } else {
2217 svm_copy_vmloadsave_state(vmcb12, svm->vmcb01.ptr);
2218 }
2219
2220 kvm_vcpu_unmap(vcpu, &map);
2221
2222 return ret;
2223 }
2224
2225 static int vmload_interception(struct kvm_vcpu *vcpu)
2226 {
2227 return vmload_vmsave_interception(vcpu, true);
2228 }
2229
2230 static int vmsave_interception(struct kvm_vcpu *vcpu)
2231 {
2232 return vmload_vmsave_interception(vcpu, false);
2233 }
2234
2235 static int vmrun_interception(struct kvm_vcpu *vcpu)
2236 {
2237 if (nested_svm_check_permissions(vcpu))
2238 return 1;
2239
2240 return nested_svm_vmrun(vcpu);
2241 }
2242
2243 /* Return 0 if not SVM instr, otherwise return associated exit_code */
2244 static u64 svm_get_decoded_instr_exit_code(struct kvm_vcpu *vcpu)
2245 {
2246 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2247
2248 if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2249 return 0;
2250
2251 BUILD_BUG_ON(!SVM_EXIT_VMRUN || !SVM_EXIT_VMLOAD || !SVM_EXIT_VMSAVE);
2252
2253 switch (ctxt->modrm) {
2254 case 0xd8: /* VMRUN */
2255 return SVM_EXIT_VMRUN;
2256 case 0xda: /* VMLOAD */
2257 return SVM_EXIT_VMLOAD;
2258 case 0xdb: /* VMSAVE */
2259 return SVM_EXIT_VMSAVE;
2260 default:
2261 break;
2262 }
2263
2264 return 0;
2265 }
2266
2267 /*
2268 * #GP handling code. Note that #GP can be triggered under the following two
2269 * cases:
2270 * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2271 * some AMD CPUs when the EAX operand of these instructions is in a reserved
2272 * memory region (e.g. SMM memory on the host).
2273 * 2) VMware backdoor
2274 */
2275 static int gp_interception(struct kvm_vcpu *vcpu)
2276 {
2277 struct vcpu_svm *svm = to_svm(vcpu);
2278 u32 error_code = svm->vmcb->control.exit_info_1;
2279 u64 svm_exit_code;
2280
2281 /* Both #GP cases have zero error_code */
2282 if (error_code)
2283 goto reinject;
2284
2285 /* Decode the instruction for usage later */
2286 if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2287 goto reinject;
2288
2289 /* FIXME: Handle SVM instructions through the emulator */
2290 svm_exit_code = svm_get_decoded_instr_exit_code(vcpu);
2291 if (svm_exit_code) {
2292 if (!is_guest_mode(vcpu))
2293 return svm_invoke_exit_handler(vcpu, svm_exit_code);
2294
2295 if (nested_svm_check_permissions(vcpu))
2296 return 1;
2297
2298 if (!page_address_valid(vcpu, kvm_register_read(vcpu, VCPU_REGS_RAX)))
2299 goto reinject;
2300
2301 /*
2302 * FIXME: Only synthesize a #VMEXIT if L1 sets the intercept,
2303 * but only after the VMLOAD/VMSAVE exit handlers can properly
2304 * handle VMLOAD/VMSAVE from L2 with VLS enabled in L1 (i.e.
2305 * RAX is an L2 GPA that needs translation through L1's NPT).
2306 */
2307 nested_svm_simple_vmexit(svm, svm_exit_code);
2308 return 1;
2309 }
2310
2311 /*
2312 * VMware backdoor emulation on #GP interception only handles
2313 * IN{S}, OUT{S}, and RDPMC, and only for L1.
2314 */
2315 if (!enable_vmware_backdoor || is_guest_mode(vcpu))
2316 goto reinject;
2317
2318 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2319
2320 reinject:
2321 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2322 return 1;
2323 }
2324
2325 void svm_set_gif(struct vcpu_svm *svm, bool value)
2326 {
2327 if (value) {
2328 /*
2329 * If VGIF is enabled, the STGI intercept is only added to
2330 * detect the opening of the SMI/NMI window; remove it now.
2331 * Likewise, clear the VINTR intercept; we will set it
2332 * again while processing KVM_REQ_EVENT if needed.
2333 */
2334 if (vgif)
2335 svm_clr_intercept(svm, INTERCEPT_STGI);
2336 if (svm_is_intercept(svm, INTERCEPT_VINTR))
2337 svm_clear_vintr(svm);
2338
2339 enable_gif(svm);
2340 if (svm_has_pending_gif_event(svm))
2341 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2342 } else {
2343 disable_gif(svm);
2344
2345 /*
2346 * After a CLGI no interrupts should come. But if vGIF is
2347 * in use, we still rely on the VINTR intercept (rather than
2348 * STGI) to detect an open interrupt window.
2349 */
2350 if (!vgif)
2351 svm_clear_vintr(svm);
2352 }
2353 }
2354
2355 static int stgi_interception(struct kvm_vcpu *vcpu)
2356 {
2357 int ret;
2358
2359 if (nested_svm_check_permissions(vcpu))
2360 return 1;
2361
2362 ret = kvm_skip_emulated_instruction(vcpu);
2363 svm_set_gif(to_svm(vcpu), true);
2364 return ret;
2365 }
2366
2367 static int clgi_interception(struct kvm_vcpu *vcpu)
2368 {
2369 int ret;
2370
2371 if (nested_svm_check_permissions(vcpu))
2372 return 1;
2373
2374 ret = kvm_skip_emulated_instruction(vcpu);
2375 svm_set_gif(to_svm(vcpu), false);
2376 return ret;
2377 }
2378
2379 static int invlpga_interception(struct kvm_vcpu *vcpu)
2380 {
2381 gva_t gva = kvm_rax_read(vcpu);
2382 u32 asid = kvm_rcx_read(vcpu);
2383
2384 if (nested_svm_check_permissions(vcpu))
2385 return 1;
2386
2387 /* FIXME: Handle an address size prefix. */
2388 if (!is_long_mode(vcpu))
2389 gva = (u32)gva;
2390
2391 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2392
2393 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2394 kvm_mmu_invlpg(vcpu, gva);
2395
2396 return kvm_skip_emulated_instruction(vcpu);
2397 }
2398
2399 static int skinit_interception(struct kvm_vcpu *vcpu)
2400 {
2401 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2402
2403 kvm_queue_exception(vcpu, UD_VECTOR);
2404 return 1;
2405 }
2406
2407 static int task_switch_interception(struct kvm_vcpu *vcpu)
2408 {
2409 struct vcpu_svm *svm = to_svm(vcpu);
2410 u16 tss_selector;
2411 int reason;
2412 int int_type = svm->vmcb->control.exit_int_info &
2413 SVM_EXITINTINFO_TYPE_MASK;
2414 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2415 uint32_t type =
2416 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2417 uint32_t idt_v =
2418 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2419 bool has_error_code = false;
2420 u32 error_code = 0;
2421
2422 tss_selector = (u16)svm->vmcb->control.exit_info_1;
2423
2424 if (svm->vmcb->control.exit_info_2 &
2425 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2426 reason = TASK_SWITCH_IRET;
2427 else if (svm->vmcb->control.exit_info_2 &
2428 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2429 reason = TASK_SWITCH_JMP;
2430 else if (idt_v)
2431 reason = TASK_SWITCH_GATE;
2432 else
2433 reason = TASK_SWITCH_CALL;
2434
2435 if (reason == TASK_SWITCH_GATE) {
2436 switch (type) {
2437 case SVM_EXITINTINFO_TYPE_NMI:
2438 vcpu->arch.nmi_injected = false;
2439 break;
2440 case SVM_EXITINTINFO_TYPE_EXEPT:
2441 if (svm->vmcb->control.exit_info_2 &
2442 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2443 has_error_code = true;
2444 error_code =
2445 (u32)svm->vmcb->control.exit_info_2;
2446 }
2447 kvm_clear_exception_queue(vcpu);
2448 break;
2449 case SVM_EXITINTINFO_TYPE_INTR:
2450 case SVM_EXITINTINFO_TYPE_SOFT:
2451 kvm_clear_interrupt_queue(vcpu);
2452 break;
2453 default:
2454 break;
2455 }
2456 }
2457
2458 if (reason != TASK_SWITCH_GATE ||
2459 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2460 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2461 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2462 if (!svm_skip_emulated_instruction(vcpu))
2463 return 0;
2464 }
2465
2466 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2467 int_vec = -1;
2468
2469 return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
2470 has_error_code, error_code);
2471 }
2472
2473 static void svm_clr_iret_intercept(struct vcpu_svm *svm)
2474 {
2475 if (!is_sev_es_guest(&svm->vcpu))
2476 svm_clr_intercept(svm, INTERCEPT_IRET);
2477 }
2478
2479 static void svm_set_iret_intercept(struct vcpu_svm *svm)
2480 {
2481 if (!is_sev_es_guest(&svm->vcpu))
2482 svm_set_intercept(svm, INTERCEPT_IRET);
2483 }
2484
2485 static int iret_interception(struct kvm_vcpu *vcpu)
2486 {
2487 struct vcpu_svm *svm = to_svm(vcpu);
2488
2489 WARN_ON_ONCE(is_sev_es_guest(vcpu));
2490
2491 ++vcpu->stat.nmi_window_exits;
2492 svm->awaiting_iret_completion = true;
2493
2494 svm_clr_iret_intercept(svm);
2495 svm->nmi_iret_rip = kvm_rip_read(vcpu);
2496
2497 kvm_make_request(KVM_REQ_EVENT, vcpu);
2498 return 1;
2499 }
2500
2501 static int invlpg_interception(struct kvm_vcpu *vcpu)
2502 {
2503 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2504 return kvm_emulate_instruction(vcpu, 0);
2505
2506 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2507 return kvm_skip_emulated_instruction(vcpu);
2508 }
2509
2510 static int emulate_on_interception(struct kvm_vcpu *vcpu)
2511 {
2512 return kvm_emulate_instruction(vcpu, 0);
2513 }
2514
2515 static int rsm_interception(struct kvm_vcpu *vcpu)
2516 {
2517 return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
2518 }
2519
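/*
 * For a nested guest, determine whether a CR0 write that changes bits other
 * than TS and MP (SVM_CR0_SELECTIVE_MASK) must be reflected to L1 as an
 * SVM_EXIT_CR0_SEL_WRITE #VMEXIT because L1 enabled the selective CR0 write
 * intercept.
 */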
2520 static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
2521 unsigned long val)
2522 {
2523 struct vcpu_svm *svm = to_svm(vcpu);
2524 unsigned long cr0 = vcpu->arch.cr0;
2525 bool ret = false;
2526
2527 if (!is_guest_mode(vcpu) ||
2528 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2529 return false;
2530
2531 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2532 val &= ~SVM_CR0_SELECTIVE_MASK;
2533
2534 if (cr0 ^ val) {
2535 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2536 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2537 }
2538
2539 return ret;
2540 }
2541
2542 #define CR_VALID (1ULL << 63)
2543
2544 static int cr_interception(struct kvm_vcpu *vcpu)
2545 {
2546 struct vcpu_svm *svm = to_svm(vcpu);
2547 int reg, cr;
2548 unsigned long val;
2549 int err;
2550
2551 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2552 return emulate_on_interception(vcpu);
2553
2554 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2555 return emulate_on_interception(vcpu);
2556
2557 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2558 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2559 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2560 else
2561 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2562
2563 err = 0;
2564 if (cr >= 16) { /* mov to cr */
2565 cr -= 16;
2566 val = kvm_register_read(vcpu, reg);
2567 trace_kvm_cr_write(cr, val);
2568 switch (cr) {
2569 case 0:
2570 if (!check_selective_cr0_intercepted(vcpu, val))
2571 err = kvm_set_cr0(vcpu, val);
2572 else
2573 return 1;
2574
2575 break;
2576 case 3:
2577 err = kvm_set_cr3(vcpu, val);
2578 break;
2579 case 4:
2580 err = kvm_set_cr4(vcpu, val);
2581 break;
2582 case 8:
2583 err = kvm_set_cr8(vcpu, val);
2584 break;
2585 default:
2586 WARN(1, "unhandled write to CR%d", cr);
2587 kvm_queue_exception(vcpu, UD_VECTOR);
2588 return 1;
2589 }
2590 } else { /* mov from cr */
2591 switch (cr) {
2592 case 0:
2593 val = kvm_read_cr0(vcpu);
2594 break;
2595 case 2:
2596 val = vcpu->arch.cr2;
2597 break;
2598 case 3:
2599 val = kvm_read_cr3(vcpu);
2600 break;
2601 case 4:
2602 val = kvm_read_cr4(vcpu);
2603 break;
2604 case 8:
2605 val = kvm_get_cr8(vcpu);
2606 break;
2607 default:
2608 WARN(1, "unhandled read from CR%d", cr);
2609 kvm_queue_exception(vcpu, UD_VECTOR);
2610 return 1;
2611 }
2612 kvm_register_write(vcpu, reg, val);
2613 trace_kvm_cr_read(cr, val);
2614 }
2615 return kvm_complete_insn_gp(vcpu, err);
2616 }
2617
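/*
 * Handle CR0/CR4/CR8 write traps.  The new value arrives in EXITINFO1 and
 * the register itself lives in protected guest state (CR tracking for
 * SEV-ES guests is done via write traps), so only KVM's software tracking
 * of the register is updated here.
 */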
2618 static int cr_trap(struct kvm_vcpu *vcpu)
2619 {
2620 struct vcpu_svm *svm = to_svm(vcpu);
2621 unsigned long old_value, new_value;
2622 unsigned int cr;
2623 int ret = 0;
2624
2625 new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2626
2627 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2628 switch (cr) {
2629 case 0:
2630 old_value = kvm_read_cr0(vcpu);
2631 svm_set_cr0(vcpu, new_value);
2632
2633 kvm_post_set_cr0(vcpu, old_value, new_value);
2634 break;
2635 case 4:
2636 old_value = kvm_read_cr4(vcpu);
2637 svm_set_cr4(vcpu, new_value);
2638
2639 kvm_post_set_cr4(vcpu, old_value, new_value);
2640 break;
2641 case 8:
2642 ret = kvm_set_cr8(vcpu, new_value);
2643 break;
2644 default:
2645 WARN(1, "unhandled CR%d write trap", cr);
2646 kvm_queue_exception(vcpu, UD_VECTOR);
2647 return 1;
2648 }
2649
2650 return kvm_complete_insn_gp(vcpu, ret);
2651 }
2652
2653 static int dr_interception(struct kvm_vcpu *vcpu)
2654 {
2655 struct vcpu_svm *svm = to_svm(vcpu);
2656 int reg, dr;
2657 int err = 0;
2658
2659 /*
2660 * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT
2661 * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early.
2662 */
2663 if (is_sev_es_guest(vcpu))
2664 return 1;
2665
2666 if (vcpu->guest_debug == 0) {
2667 /*
2668 * No more DR vmexits; force a reload of the debug registers
2669 * and reenter on this instruction. The next vmexit will
2670 * retrieve the full state of the debug registers.
2671 */
2672 clr_dr_intercepts(svm);
2673 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2674 return 1;
2675 }
2676
2677 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2678 return emulate_on_interception(vcpu);
2679
2680 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2681 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2682 if (dr >= 16) { /* mov to DRn */
2683 dr -= 16;
2684 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
2685 } else {
2686 kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
2687 }
2688
2689 return kvm_complete_insn_gp(vcpu, err);
2690 }
2691
2692 static int cr8_write_interception(struct kvm_vcpu *vcpu)
2693 {
2694 u8 cr8_prev = kvm_get_cr8(vcpu);
2695 int r;
2696
2697 WARN_ON_ONCE(kvm_vcpu_apicv_active(vcpu));
2698
2699 /* instruction emulation calls kvm_set_cr8() */
2700 r = cr_interception(vcpu);
2701 if (lapic_in_kernel(vcpu))
2702 return r;
2703 if (cr8_prev <= kvm_get_cr8(vcpu))
2704 return r;
2705 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2706 return 0;
2707 }
2708
2709 static int efer_trap(struct kvm_vcpu *vcpu)
2710 {
2711 struct msr_data msr_info;
2712 int ret;
2713
2714 /*
2715 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
2716 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2717 * whether the guest has X86_FEATURE_SVM - this avoids a failure if
2718 * the guest doesn't have X86_FEATURE_SVM.
2719 */
2720 msr_info.host_initiated = false;
2721 msr_info.index = MSR_EFER;
2722 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2723 ret = kvm_set_msr_common(vcpu, &msr_info);
2724
2725 return kvm_complete_insn_gp(vcpu, ret);
2726 }
2727
2728 static int svm_get_feature_msr(u32 msr, u64 *data)
2729 {
2730 *data = 0;
2731
2732 switch (msr) {
2733 case MSR_AMD64_DE_CFG:
2734 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
2735 *data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
2736 break;
2737 default:
2738 return KVM_MSR_RET_UNSUPPORTED;
2739 }
2740
2741 return 0;
2742 }
2743
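/* Map an LBR-related MSR to its backing field in the current VMCB save area. */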
2744 static u64 *svm_vmcb_lbr(struct vcpu_svm *svm, u32 msr)
2745 {
2746 switch (msr) {
2747 case MSR_IA32_LASTBRANCHFROMIP:
2748 return &svm->vmcb->save.br_from;
2749 case MSR_IA32_LASTBRANCHTOIP:
2750 return &svm->vmcb->save.br_to;
2751 case MSR_IA32_LASTINTFROMIP:
2752 return &svm->vmcb->save.last_excp_from;
2753 case MSR_IA32_LASTINTTOIP:
2754 return &svm->vmcb->save.last_excp_to;
2755 default:
2756 break;
2757 }
2758 KVM_BUG_ON(1, svm->vcpu.kvm);
2759 return &svm->vmcb->save.br_from;
2760 }
2761
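/*
 * For an SEV-ES guest whose register state is already protected, MSRs that
 * are not intercepted live in the encrypted VMSA and cannot be emulated by
 * KVM; MSR_IA32_XSS is excepted because KVM tracks its value separately.
 */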
2762 static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu,
2763 struct msr_data *msr_info)
2764 {
2765 return is_sev_es_guest(vcpu) && vcpu->arch.guest_state_protected &&
2766 msr_info->index != MSR_IA32_XSS &&
2767 !msr_write_intercepted(vcpu, msr_info->index);
2768 }
2769
2770 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2771 {
2772 struct vcpu_svm *svm = to_svm(vcpu);
2773
2774 if (sev_es_prevent_msr_access(vcpu, msr_info)) {
2775 msr_info->data = 0;
2776 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
2777 }
2778
2779 switch (msr_info->index) {
2780 case MSR_AMD64_TSC_RATIO:
2781 if (!msr_info->host_initiated &&
2782 !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR))
2783 return 1;
2784 msr_info->data = svm->tsc_ratio_msr;
2785 break;
2786 case MSR_STAR:
2787 msr_info->data = svm->vmcb01.ptr->save.star;
2788 break;
2789 #ifdef CONFIG_X86_64
2790 case MSR_LSTAR:
2791 msr_info->data = svm->vmcb01.ptr->save.lstar;
2792 break;
2793 case MSR_CSTAR:
2794 msr_info->data = svm->vmcb01.ptr->save.cstar;
2795 break;
2796 case MSR_GS_BASE:
2797 msr_info->data = svm->vmcb01.ptr->save.gs.base;
2798 break;
2799 case MSR_FS_BASE:
2800 msr_info->data = svm->vmcb01.ptr->save.fs.base;
2801 break;
2802 case MSR_KERNEL_GS_BASE:
2803 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
2804 break;
2805 case MSR_SYSCALL_MASK:
2806 msr_info->data = svm->vmcb01.ptr->save.sfmask;
2807 break;
2808 #endif
2809 case MSR_IA32_SYSENTER_CS:
2810 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
2811 break;
2812 case MSR_IA32_SYSENTER_EIP:
2813 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2814 if (guest_cpuid_is_intel_compatible(vcpu))
2815 msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
2816 break;
2817 case MSR_IA32_SYSENTER_ESP:
2818 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2819 if (guest_cpuid_is_intel_compatible(vcpu))
2820 msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
2821 break;
2822 case MSR_IA32_S_CET:
2823 msr_info->data = svm->vmcb->save.s_cet;
2824 break;
2825 case MSR_IA32_INT_SSP_TAB:
2826 msr_info->data = svm->vmcb->save.isst_addr;
2827 break;
2828 case MSR_KVM_INTERNAL_GUEST_SSP:
2829 msr_info->data = svm->vmcb->save.ssp;
2830 break;
2831 case MSR_TSC_AUX:
2832 msr_info->data = svm->tsc_aux;
2833 break;
2834 case MSR_IA32_DEBUGCTLMSR:
2835 msr_info->data = lbrv ? svm->vmcb->save.dbgctl : 0;
2836 break;
2837 case MSR_IA32_LASTBRANCHFROMIP:
2838 case MSR_IA32_LASTBRANCHTOIP:
2839 case MSR_IA32_LASTINTFROMIP:
2840 case MSR_IA32_LASTINTTOIP:
2841 msr_info->data = lbrv ? *svm_vmcb_lbr(svm, msr_info->index) : 0;
2842 break;
2843 case MSR_VM_HSAVE_PA:
2844 msr_info->data = svm->nested.hsave_msr;
2845 break;
2846 case MSR_VM_CR:
2847 msr_info->data = svm->nested.vm_cr_msr;
2848 break;
2849 case MSR_IA32_SPEC_CTRL:
2850 if (!msr_info->host_initiated &&
2851 !guest_has_spec_ctrl_msr(vcpu))
2852 return 1;
2853
2854 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2855 msr_info->data = svm->vmcb->save.spec_ctrl;
2856 else
2857 msr_info->data = svm->spec_ctrl;
2858 break;
2859 case MSR_AMD64_VIRT_SPEC_CTRL:
2860 if (!msr_info->host_initiated &&
2861 !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
2862 return 1;
2863
2864 msr_info->data = svm->virt_spec_ctrl;
2865 break;
2866 case MSR_F15H_IC_CFG: {
2867
2868 int family, model;
2869
2870 family = guest_cpuid_family(vcpu);
2871 model = guest_cpuid_model(vcpu);
2872
2873 if (family < 0 || model < 0)
2874 return kvm_get_msr_common(vcpu, msr_info);
2875
2876 msr_info->data = 0;
2877
2878 if (family == 0x15 &&
2879 (model >= 0x2 && model < 0x20))
2880 msr_info->data = 0x1E;
2881 }
2882 break;
2883 case MSR_AMD64_DE_CFG:
2884 msr_info->data = svm->msr_decfg;
2885 break;
2886 default:
2887 return kvm_get_msr_common(vcpu, msr_info);
2888 }
2889 return 0;
2890 }
2891
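/*
 * Completion of an emulated MSR access.  For an SEV-ES guest a failure
 * cannot be reported by injecting #GP directly; it is propagated back to
 * the guest through the GHCB as a #GP exception event instead.
 */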
2892 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2893 {
2894 struct vcpu_svm *svm = to_svm(vcpu);
2895 if (!err || !is_sev_es_guest(vcpu) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2896 return kvm_complete_insn_gp(vcpu, err);
2897
2898 svm_vmgexit_inject_exception(svm, X86_TRAP_GP);
2899 return 1;
2900 }
2901
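/*
 * Emulate writes to MSR_VM_CR: only bits in SVM_VM_CR_VALID_MASK are
 * writable, once SVMDIS is set the LOCK and SVMDIS bits become read-only,
 * and setting SVMDIS is rejected while EFER.SVME is enabled.
 */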
2902 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2903 {
2904 struct vcpu_svm *svm = to_svm(vcpu);
2905 int svm_dis, chg_mask;
2906
2907 if (data & ~SVM_VM_CR_VALID_MASK)
2908 return 1;
2909
2910 chg_mask = SVM_VM_CR_VALID_MASK;
2911
2912 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2913 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2914
2915 svm->nested.vm_cr_msr &= ~chg_mask;
2916 svm->nested.vm_cr_msr |= (data & chg_mask);
2917
2918 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2919
2920 /* check for svm_disable while efer.svme is set */
2921 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2922 return 1;
2923
2924 return 0;
2925 }
2926
2927 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2928 {
2929 struct vcpu_svm *svm = to_svm(vcpu);
2930 int ret = 0;
2931
2932 u32 ecx = msr->index;
2933 u64 data = msr->data;
2934
2935 if (sev_es_prevent_msr_access(vcpu, msr))
2936 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
2937
2938 switch (ecx) {
2939 case MSR_AMD64_TSC_RATIO:
2940
2941 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) {
2942
2943 if (!msr->host_initiated)
2944 return 1;
2945 /*
2946 * In case TSC scaling is not enabled, always
2947 * leave this MSR at the default value.
2948 *
2949 * Due to a bug in qemu 6.2.0, it tries to set
2950 * this MSR to 0 if TSC scaling is not enabled.
2951 * Ignore that value as well.
2952 */
2953 if (data != 0 && data != svm->tsc_ratio_msr)
2954 return 1;
2955 break;
2956 }
2957
2958 if (data & SVM_TSC_RATIO_RSVD)
2959 return 1;
2960
2961 svm->tsc_ratio_msr = data;
2962
2963 if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
2964 is_guest_mode(vcpu))
2965 nested_svm_update_tsc_ratio_msr(vcpu);
2966
2967 break;
2968 case MSR_IA32_CR_PAT:
2969 ret = kvm_set_msr_common(vcpu, msr);
2970 if (ret)
2971 break;
2972
2973 svm->vmcb01.ptr->save.g_pat = data;
2974 if (is_guest_mode(vcpu))
2975 nested_vmcb02_compute_g_pat(svm);
2976 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2977 break;
2978 case MSR_IA32_SPEC_CTRL:
2979 if (!msr->host_initiated &&
2980 !guest_has_spec_ctrl_msr(vcpu))
2981 return 1;
2982
2983 if (kvm_spec_ctrl_test_value(data))
2984 return 1;
2985
2986 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2987 svm->vmcb->save.spec_ctrl = data;
2988 else
2989 svm->spec_ctrl = data;
2990 if (!data)
2991 break;
2992
2993 /*
2994 * For non-nested:
2995 * When it's written (to non-zero) for the first time, pass
2996 * it through.
2997 *
2998 * For nested:
2999 * The handling of the MSR bitmap for L2 guests is done in
3000 * nested_svm_merge_msrpm().
3001 * We update the L1 MSR bit as well since it will end up
3002 * touching the MSR anyway now.
3003 */
3004 svm_disable_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
3005 break;
3006 case MSR_AMD64_VIRT_SPEC_CTRL:
3007 if (!msr->host_initiated &&
3008 !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
3009 return 1;
3010
3011 if (data & ~SPEC_CTRL_SSBD)
3012 return 1;
3013
3014 svm->virt_spec_ctrl = data;
3015 break;
3016 case MSR_STAR:
3017 svm->vmcb01.ptr->save.star = data;
3018 break;
3019 #ifdef CONFIG_X86_64
3020 case MSR_LSTAR:
3021 svm->vmcb01.ptr->save.lstar = data;
3022 break;
3023 case MSR_CSTAR:
3024 svm->vmcb01.ptr->save.cstar = data;
3025 break;
3026 case MSR_GS_BASE:
3027 svm->vmcb01.ptr->save.gs.base = data;
3028 break;
3029 case MSR_FS_BASE:
3030 svm->vmcb01.ptr->save.fs.base = data;
3031 break;
3032 case MSR_KERNEL_GS_BASE:
3033 svm->vmcb01.ptr->save.kernel_gs_base = data;
3034 break;
3035 case MSR_SYSCALL_MASK:
3036 svm->vmcb01.ptr->save.sfmask = data;
3037 break;
3038 #endif
3039 case MSR_IA32_SYSENTER_CS:
3040 svm->vmcb01.ptr->save.sysenter_cs = data;
3041 break;
3042 case MSR_IA32_SYSENTER_EIP:
3043 svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
3044 /*
3045 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs
3046 * when we spoof an Intel vendor ID (for cross vendor migration).
3047 * In this case we use this intercept to track the high
3048 * 32 bit part of these msrs to support Intel's
3049 * implementation of SYSENTER/SYSEXIT.
3050 */
3051 svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
3052 break;
3053 case MSR_IA32_SYSENTER_ESP:
3054 svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
3055 svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
3056 break;
3057 case MSR_IA32_S_CET:
3058 svm->vmcb->save.s_cet = data;
3059 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
3060 break;
3061 case MSR_IA32_INT_SSP_TAB:
3062 svm->vmcb->save.isst_addr = data;
3063 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
3064 break;
3065 case MSR_KVM_INTERNAL_GUEST_SSP:
3066 svm->vmcb->save.ssp = data;
3067 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
3068 break;
3069 case MSR_TSC_AUX:
3070 /*
3071 * TSC_AUX is always virtualized for SEV-ES guests when the
3072 * feature is available. The user return MSR support is not
3073 * required in this case because TSC_AUX is restored on #VMEXIT
3074 * from the host save area.
3075 */
3076 if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && is_sev_es_guest(vcpu))
3077 break;
3078
3079 /*
3080 * TSC_AUX is usually changed only during boot and never read
3081 * directly. Intercept TSC_AUX and switch it via user return.
3082 */
3083 preempt_disable();
3084 ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
3085 preempt_enable();
3086 if (ret)
3087 break;
3088
3089 svm->tsc_aux = data;
3090 break;
3091 case MSR_IA32_DEBUGCTLMSR:
3092 if (!lbrv) {
3093 kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3094 break;
3095 }
3096
3097 /*
3098 * Suppress BTF as KVM doesn't virtualize BTF, but there's no
3099 * way to communicate lack of support to the guest.
3100 */
3101 if (data & DEBUGCTLMSR_BTF) {
3102 kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
3103 data &= ~DEBUGCTLMSR_BTF;
3104 }
3105
3106 if (data & DEBUGCTL_RESERVED_BITS)
3107 return 1;
3108
3109 if (svm->vmcb->save.dbgctl == data)
3110 break;
3111
3112 svm->vmcb->save.dbgctl = data;
3113 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
3114 svm_update_lbrv(vcpu);
3115 break;
3116 case MSR_IA32_LASTBRANCHFROMIP:
3117 case MSR_IA32_LASTBRANCHTOIP:
3118 case MSR_IA32_LASTINTFROMIP:
3119 case MSR_IA32_LASTINTTOIP:
3120 if (!lbrv)
3121 return KVM_MSR_RET_UNSUPPORTED;
3122 if (!msr->host_initiated)
3123 return 1;
3124 *svm_vmcb_lbr(svm, ecx) = data;
3125 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
3126 break;
3127 case MSR_VM_HSAVE_PA:
3128 /*
3129 * Old kernels did not validate the value written to
3130 * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid
3131 * value to allow live migrating buggy or malicious guests
3132 * originating from those kernels.
3133 */
3134 if (!msr->host_initiated && !page_address_valid(vcpu, data))
3135 return 1;
3136
3137 svm->nested.hsave_msr = data & PAGE_MASK;
3138 break;
3139 case MSR_VM_CR:
3140 return svm_set_vm_cr(vcpu, data);
3141 case MSR_VM_IGNNE:
3142 kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3143 break;
3144 case MSR_AMD64_DE_CFG: {
3145 u64 supported_de_cfg;
3146
3147 if (svm_get_feature_msr(ecx, &supported_de_cfg))
3148 return 1;
3149
3150 if (data & ~supported_de_cfg)
3151 return 1;
3152
3153 svm->msr_decfg = data;
3154 break;
3155 }
3156 default:
3157 return kvm_set_msr_common(vcpu, msr);
3158 }
3159 return ret;
3160 }
3161
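/* On an MSR intercept, EXITINFO1 distinguishes WRMSR (1) from RDMSR (0). */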
3162 static int msr_interception(struct kvm_vcpu *vcpu)
3163 {
3164 if (to_svm(vcpu)->vmcb->control.exit_info_1)
3165 return kvm_emulate_wrmsr(vcpu);
3166 else
3167 return kvm_emulate_rdmsr(vcpu);
3168 }
3169
3170 static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3171 {
3172 kvm_make_request(KVM_REQ_EVENT, vcpu);
3173 svm_clear_vintr(to_svm(vcpu));
3174
3175 ++vcpu->stat.irq_window_exits;
3176 return 1;
3177 }
3178
3179 static int pause_interception(struct kvm_vcpu *vcpu)
3180 {
3181 bool in_kernel;
3182 /*
3183 * CPL is not made available for an SEV-ES guest, therefore
3184 * vcpu->arch.preempted_in_kernel can never be true. Just
3185 * set in_kernel to false as well.
3186 */
3187 in_kernel = !is_sev_es_guest(vcpu) && svm_get_cpl(vcpu) == 0;
3188
3189 grow_ple_window(vcpu);
3190
3191 kvm_vcpu_on_spin(vcpu, in_kernel);
3192 return kvm_skip_emulated_instruction(vcpu);
3193 }
3194
3195 static int invpcid_interception(struct kvm_vcpu *vcpu)
3196 {
3197 struct vcpu_svm *svm = to_svm(vcpu);
3198 unsigned long type;
3199 gva_t gva;
3200
3201 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) {
3202 kvm_queue_exception(vcpu, UD_VECTOR);
3203 return 1;
3204 }
3205
3206 /*
3207 * For an INVPCID intercept:
3208 * EXITINFO1 provides the linear address of the memory operand.
3209 * EXITINFO2 provides the contents of the register operand.
3210 */
3211 type = svm->vmcb->control.exit_info_2;
3212 gva = svm->vmcb->control.exit_info_1;
3213
3214 /*
3215 * FIXME: Perform segment checks for 32-bit mode, and inject #SS if the
3216 * stack segment is used. The intercept takes priority over all
3217 * #GP checks except CPL>0, but somehow still generates a linear
3218 * address? The APM is sorely lacking.
3219 */
3220 if (is_noncanonical_address(gva, vcpu, 0)) {
3221 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3222 return 1;
3223 }
3224
3225 return kvm_handle_invpcid(vcpu, type, gva);
3226 }
3227
3228 static inline int complete_userspace_buslock(struct kvm_vcpu *vcpu)
3229 {
3230 struct vcpu_svm *svm = to_svm(vcpu);
3231
3232 /*
3233 * If userspace has NOT changed RIP, then KVM's ABI is to let the guest
3234 * execute the bus-locking instruction. Set the bus lock counter to '1'
3235 * to effectively step past the bus lock.
3236 */
3237 if (kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))
3238 svm->vmcb->control.bus_lock_counter = 1;
3239
3240 return 1;
3241 }
3242
3243 static int bus_lock_exit(struct kvm_vcpu *vcpu)
3244 {
3245 struct vcpu_svm *svm = to_svm(vcpu);
3246
3247 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
3248 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
3249
3250 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
3251 vcpu->arch.complete_userspace_io = complete_userspace_buslock;
3252
3253 if (is_guest_mode(vcpu))
3254 svm->nested.last_bus_lock_rip = vcpu->arch.cui_linear_rip;
3255
3256 return 0;
3257 }
3258
3259 static int vmmcall_interception(struct kvm_vcpu *vcpu)
3260 {
3261 /*
3262 * Inject a #UD if L2 is active and the VMMCALL isn't a Hyper-V TLB
3263 * hypercall, as VMMCALL #UDs if it's not intercepted, and this path is
3264 * reachable if and only if L1 doesn't want to intercept VMMCALL or has
3265 * enabled L0 (KVM) handling of Hyper-V L2 TLB flush hypercalls.
3266 */
3267 if (is_guest_mode(vcpu) && !nested_svm_is_l2_tlb_flush_hcall(vcpu)) {
3268 kvm_queue_exception(vcpu, UD_VECTOR);
3269 return 1;
3270 }
3271
3272 return kvm_emulate_hypercall(vcpu);
3273 }
3274
3275 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3276 [SVM_EXIT_READ_CR0] = cr_interception,
3277 [SVM_EXIT_READ_CR3] = cr_interception,
3278 [SVM_EXIT_READ_CR4] = cr_interception,
3279 [SVM_EXIT_READ_CR8] = cr_interception,
3280 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
3281 [SVM_EXIT_WRITE_CR0] = cr_interception,
3282 [SVM_EXIT_WRITE_CR3] = cr_interception,
3283 [SVM_EXIT_WRITE_CR4] = cr_interception,
3284 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
3285 [SVM_EXIT_READ_DR0] = dr_interception,
3286 [SVM_EXIT_READ_DR1] = dr_interception,
3287 [SVM_EXIT_READ_DR2] = dr_interception,
3288 [SVM_EXIT_READ_DR3] = dr_interception,
3289 [SVM_EXIT_READ_DR4] = dr_interception,
3290 [SVM_EXIT_READ_DR5] = dr_interception,
3291 [SVM_EXIT_READ_DR6] = dr_interception,
3292 [SVM_EXIT_READ_DR7] = dr_interception,
3293 [SVM_EXIT_WRITE_DR0] = dr_interception,
3294 [SVM_EXIT_WRITE_DR1] = dr_interception,
3295 [SVM_EXIT_WRITE_DR2] = dr_interception,
3296 [SVM_EXIT_WRITE_DR3] = dr_interception,
3297 [SVM_EXIT_WRITE_DR4] = dr_interception,
3298 [SVM_EXIT_WRITE_DR5] = dr_interception,
3299 [SVM_EXIT_WRITE_DR6] = dr_interception,
3300 [SVM_EXIT_WRITE_DR7] = dr_interception,
3301 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3302 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
3303 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
3304 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3305 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3306 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
3307 [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
3308 [SVM_EXIT_INTR] = intr_interception,
3309 [SVM_EXIT_NMI] = nmi_interception,
3310 [SVM_EXIT_SMI] = smi_interception,
3311 [SVM_EXIT_VINTR] = interrupt_window_interception,
3312 [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc,
3313 [SVM_EXIT_CPUID] = kvm_emulate_cpuid,
3314 [SVM_EXIT_IRET] = iret_interception,
3315 [SVM_EXIT_INVD] = kvm_emulate_invd,
3316 [SVM_EXIT_PAUSE] = pause_interception,
3317 [SVM_EXIT_HLT] = kvm_emulate_halt,
3318 [SVM_EXIT_INVLPG] = invlpg_interception,
3319 [SVM_EXIT_INVLPGA] = invlpga_interception,
3320 [SVM_EXIT_IOIO] = io_interception,
3321 [SVM_EXIT_MSR] = msr_interception,
3322 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
3323 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
3324 [SVM_EXIT_VMRUN] = vmrun_interception,
3325 [SVM_EXIT_VMMCALL] = vmmcall_interception,
3326 [SVM_EXIT_VMLOAD] = vmload_interception,
3327 [SVM_EXIT_VMSAVE] = vmsave_interception,
3328 [SVM_EXIT_STGI] = stgi_interception,
3329 [SVM_EXIT_CLGI] = clgi_interception,
3330 [SVM_EXIT_SKINIT] = skinit_interception,
3331 [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op,
3332 [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd,
3333 [SVM_EXIT_MONITOR] = kvm_emulate_monitor,
3334 [SVM_EXIT_MWAIT] = kvm_emulate_mwait,
3335 [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv,
3336 [SVM_EXIT_RDPRU] = kvm_handle_invalid_op,
3337 [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap,
3338 [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap,
3339 [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap,
3340 [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap,
3341 [SVM_EXIT_INVPCID] = invpcid_interception,
3342 [SVM_EXIT_IDLE_HLT] = kvm_emulate_halt,
3343 [SVM_EXIT_NPF] = npf_interception,
3344 [SVM_EXIT_BUS_LOCK] = bus_lock_exit,
3345 [SVM_EXIT_RSM] = rsm_interception,
3346 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
3347 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
3348 #ifdef CONFIG_KVM_AMD_SEV
3349 [SVM_EXIT_VMGEXIT] = sev_handle_vmgexit,
3350 #endif
3351 };
3352
3353 static void dump_vmcb(struct kvm_vcpu *vcpu)
3354 {
3355 struct vcpu_svm *svm = to_svm(vcpu);
3356 struct vmcb_control_area *control = &svm->vmcb->control;
3357 struct vmcb_save_area *save = &svm->vmcb->save;
3358 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
3359 char *vm_type;
3360
3361 if (!dump_invalid_vmcb) {
3362 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3363 return;
3364 }
3365
3366 guard(mutex)(&vmcb_dump_mutex);
3367
3368 vm_type = is_sev_snp_guest(vcpu) ? "SEV-SNP" :
3369 is_sev_es_guest(vcpu) ? "SEV-ES" :
3370 is_sev_guest(vcpu) ? "SEV" : "SVM";
3371
3372 pr_err("%s vCPU%u VMCB %p, last attempted VMRUN on CPU %d\n",
3373 vm_type, vcpu->vcpu_id, svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
3374 pr_err("VMCB Control Area:\n");
3375 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3376 pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
3377 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3378 pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
3379 pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
3380 pr_err("%-20s%08x %08x\n", "intercepts:",
3381 control->intercepts[INTERCEPT_WORD3],
3382 control->intercepts[INTERCEPT_WORD4]);
3383 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3384 pr_err("%-20s%d\n", "pause filter threshold:",
3385 control->pause_filter_thresh);
3386 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3387 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3388 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3389 pr_err("%-20s%d\n", "asid:", control->asid);
3390 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3391 pr_err("%-20s%d\n", "erap_ctl:", control->erap_ctl);
3392 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3393 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3394 pr_err("%-20s%08x\n", "int_state:", control->int_state);
3395 pr_err("%-20s%016llx\n", "exit_code:", control->exit_code);
3396 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3397 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3398 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3399 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3400 pr_err("%-20s%lld\n", "misc_ctl:", control->misc_ctl);
3401 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3402 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
3403 pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
3404 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3405 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3406 pr_err("%-20s%lld\n", "misc_ctl2:", control->misc_ctl2);
3407 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3408 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3409 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3410 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3411 pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
3412 pr_err("%-20s%016llx\n", "allowed_sev_features:", control->allowed_sev_features);
3413 pr_err("%-20s%016llx\n", "guest_sev_features:", control->guest_sev_features);
3414
3415 if (is_sev_es_guest(vcpu)) {
3416 save = sev_decrypt_vmsa(vcpu);
3417 if (!save)
3418 goto no_vmsa;
3419
3420 save01 = save;
3421 }
3422
3423 pr_err("VMCB State Save Area:\n");
3424 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3425 "es:",
3426 save->es.selector, save->es.attrib,
3427 save->es.limit, save->es.base);
3428 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3429 "cs:",
3430 save->cs.selector, save->cs.attrib,
3431 save->cs.limit, save->cs.base);
3432 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3433 "ss:",
3434 save->ss.selector, save->ss.attrib,
3435 save->ss.limit, save->ss.base);
3436 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3437 "ds:",
3438 save->ds.selector, save->ds.attrib,
3439 save->ds.limit, save->ds.base);
3440 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3441 "fs:",
3442 save01->fs.selector, save01->fs.attrib,
3443 save01->fs.limit, save01->fs.base);
3444 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3445 "gs:",
3446 save01->gs.selector, save01->gs.attrib,
3447 save01->gs.limit, save01->gs.base);
3448 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3449 "gdtr:",
3450 save->gdtr.selector, save->gdtr.attrib,
3451 save->gdtr.limit, save->gdtr.base);
3452 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3453 "ldtr:",
3454 save01->ldtr.selector, save01->ldtr.attrib,
3455 save01->ldtr.limit, save01->ldtr.base);
3456 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3457 "idtr:",
3458 save->idtr.selector, save->idtr.attrib,
3459 save->idtr.limit, save->idtr.base);
3460 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3461 "tr:",
3462 save01->tr.selector, save01->tr.attrib,
3463 save01->tr.limit, save01->tr.base);
3464 pr_err("vmpl: %d cpl: %d efer: %016llx\n",
3465 save->vmpl, save->cpl, save->efer);
3466 pr_err("%-15s %016llx %-13s %016llx\n",
3467 "cr0:", save->cr0, "cr2:", save->cr2);
3468 pr_err("%-15s %016llx %-13s %016llx\n",
3469 "cr3:", save->cr3, "cr4:", save->cr4);
3470 pr_err("%-15s %016llx %-13s %016llx\n",
3471 "dr6:", save->dr6, "dr7:", save->dr7);
3472 pr_err("%-15s %016llx %-13s %016llx\n",
3473 "rip:", save->rip, "rflags:", save->rflags);
3474 pr_err("%-15s %016llx %-13s %016llx\n",
3475 "rsp:", save->rsp, "rax:", save->rax);
3476 pr_err("%-15s %016llx %-13s %016llx\n",
3477 "s_cet:", save->s_cet, "ssp:", save->ssp);
3478 pr_err("%-15s %016llx\n",
3479 "isst_addr:", save->isst_addr);
3480 pr_err("%-15s %016llx %-13s %016llx\n",
3481 "star:", save01->star, "lstar:", save01->lstar);
3482 pr_err("%-15s %016llx %-13s %016llx\n",
3483 "cstar:", save01->cstar, "sfmask:", save01->sfmask);
3484 pr_err("%-15s %016llx %-13s %016llx\n",
3485 "kernel_gs_base:", save01->kernel_gs_base,
3486 "sysenter_cs:", save01->sysenter_cs);
3487 pr_err("%-15s %016llx %-13s %016llx\n",
3488 "sysenter_esp:", save01->sysenter_esp,
3489 "sysenter_eip:", save01->sysenter_eip);
3490 pr_err("%-15s %016llx %-13s %016llx\n",
3491 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3492 pr_err("%-15s %016llx %-13s %016llx\n",
3493 "br_from:", save->br_from, "br_to:", save->br_to);
3494 pr_err("%-15s %016llx %-13s %016llx\n",
3495 "excp_from:", save->last_excp_from,
3496 "excp_to:", save->last_excp_to);
3497
3498 if (is_sev_es_guest(vcpu)) {
3499 struct sev_es_save_area *vmsa = (struct sev_es_save_area *)save;
3500
3501 pr_err("%-15s %016llx\n",
3502 "sev_features", vmsa->sev_features);
3503
3504 pr_err("%-15s %016llx %-13s %016llx\n",
3505 "pl0_ssp:", vmsa->pl0_ssp, "pl1_ssp:", vmsa->pl1_ssp);
3506 pr_err("%-15s %016llx %-13s %016llx\n",
3507 "pl2_ssp:", vmsa->pl2_ssp, "pl3_ssp:", vmsa->pl3_ssp);
3508 pr_err("%-15s %016llx\n",
3509 "u_cet:", vmsa->u_cet);
3510
3511 pr_err("%-15s %016llx %-13s %016llx\n",
3512 "rax:", vmsa->rax, "rbx:", vmsa->rbx);
3513 pr_err("%-15s %016llx %-13s %016llx\n",
3514 "rcx:", vmsa->rcx, "rdx:", vmsa->rdx);
3515 pr_err("%-15s %016llx %-13s %016llx\n",
3516 "rsi:", vmsa->rsi, "rdi:", vmsa->rdi);
3517 pr_err("%-15s %016llx %-13s %016llx\n",
3518 "rbp:", vmsa->rbp, "rsp:", vmsa->rsp);
3519 pr_err("%-15s %016llx %-13s %016llx\n",
3520 "r8:", vmsa->r8, "r9:", vmsa->r9);
3521 pr_err("%-15s %016llx %-13s %016llx\n",
3522 "r10:", vmsa->r10, "r11:", vmsa->r11);
3523 pr_err("%-15s %016llx %-13s %016llx\n",
3524 "r12:", vmsa->r12, "r13:", vmsa->r13);
3525 pr_err("%-15s %016llx %-13s %016llx\n",
3526 "r14:", vmsa->r14, "r15:", vmsa->r15);
3527 pr_err("%-15s %016llx %-13s %016llx\n",
3528 "xcr0:", vmsa->xcr0, "xss:", vmsa->xss);
3529 } else {
3530 pr_err("%-15s %016llx %-13s %016lx\n",
3531 "rax:", save->rax, "rbx:",
3532 vcpu->arch.regs[VCPU_REGS_RBX]);
3533 pr_err("%-15s %016lx %-13s %016lx\n",
3534 "rcx:", vcpu->arch.regs[VCPU_REGS_RCX],
3535 "rdx:", vcpu->arch.regs[VCPU_REGS_RDX]);
3536 pr_err("%-15s %016lx %-13s %016lx\n",
3537 "rsi:", vcpu->arch.regs[VCPU_REGS_RSI],
3538 "rdi:", vcpu->arch.regs[VCPU_REGS_RDI]);
3539 pr_err("%-15s %016lx %-13s %016llx\n",
3540 "rbp:", vcpu->arch.regs[VCPU_REGS_RBP],
3541 "rsp:", save->rsp);
3542 #ifdef CONFIG_X86_64
3543 pr_err("%-15s %016lx %-13s %016lx\n",
3544 "r8:", vcpu->arch.regs[VCPU_REGS_R8],
3545 "r9:", vcpu->arch.regs[VCPU_REGS_R9]);
3546 pr_err("%-15s %016lx %-13s %016lx\n",
3547 "r10:", vcpu->arch.regs[VCPU_REGS_R10],
3548 "r11:", vcpu->arch.regs[VCPU_REGS_R11]);
3549 pr_err("%-15s %016lx %-13s %016lx\n",
3550 "r12:", vcpu->arch.regs[VCPU_REGS_R12],
3551 "r13:", vcpu->arch.regs[VCPU_REGS_R13]);
3552 pr_err("%-15s %016lx %-13s %016lx\n",
3553 "r14:", vcpu->arch.regs[VCPU_REGS_R14],
3554 "r15:", vcpu->arch.regs[VCPU_REGS_R15]);
3555 #endif
3556 }
3557
3558 no_vmsa:
3559 if (is_sev_es_guest(vcpu))
3560 sev_free_decrypted_vmsa(vcpu, save);
3561 }
3562
3563 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 __exit_code)
3564 {
3565 u32 exit_code = __exit_code;
3566
3567 /*
3568 * SVM uses negative values, i.e. 64-bit values, to indicate that VMRUN
3569 * failed. Report all such errors to userspace (note, VMEXIT_INVALID,
3570 * a.k.a. SVM_EXIT_ERR, is special cased by svm_handle_exit()). Skip
3571 * the check when running as a VM, as KVM has historically left garbage
3572 * in bits 63:32, i.e. running KVM-on-KVM would hit false positives if
3573 * the underlying kernel is buggy.
3574 */
3575 if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR) &&
3576 (u64)exit_code != __exit_code)
3577 goto unexpected_vmexit;
3578
3579 #ifdef CONFIG_MITIGATION_RETPOLINE
3580 if (exit_code == SVM_EXIT_MSR)
3581 return msr_interception(vcpu);
3582 else if (exit_code == SVM_EXIT_VINTR)
3583 return interrupt_window_interception(vcpu);
3584 else if (exit_code == SVM_EXIT_INTR)
3585 return intr_interception(vcpu);
3586 else if (exit_code == SVM_EXIT_HLT || exit_code == SVM_EXIT_IDLE_HLT)
3587 return kvm_emulate_halt(vcpu);
3588 else if (exit_code == SVM_EXIT_NPF)
3589 return npf_interception(vcpu);
3590 #ifdef CONFIG_KVM_AMD_SEV
3591 else if (exit_code == SVM_EXIT_VMGEXIT)
3592 return sev_handle_vmgexit(vcpu);
3593 #endif
3594 #endif
3595 if (exit_code >= ARRAY_SIZE(svm_exit_handlers))
3596 goto unexpected_vmexit;
3597
3598 exit_code = array_index_nospec(exit_code, ARRAY_SIZE(svm_exit_handlers));
3599 if (!svm_exit_handlers[exit_code])
3600 goto unexpected_vmexit;
3601
3602 return svm_exit_handlers[exit_code](vcpu);
3603
3604 unexpected_vmexit:
3605 dump_vmcb(vcpu);
3606 kvm_prepare_unexpected_reason_exit(vcpu, __exit_code);
3607 return 0;
3608 }
3609
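/*
 * Extract the exit reason, exit info fields, and any vectoring event info
 * (with its error code, if valid) from the current VMCB.
 */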
3610 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3611 u64 *info1, u64 *info2,
3612 u32 *intr_info, u32 *error_code)
3613 {
3614 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3615
3616 *reason = control->exit_code;
3617 *info1 = control->exit_info_1;
3618 *info2 = control->exit_info_2;
3619 *intr_info = control->exit_int_info;
3620 if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3621 (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3622 *error_code = control->exit_int_info_err;
3623 else
3624 *error_code = 0;
3625 }
3626
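/*
 * Extract the pending event injection (and its error code, if valid) from
 * the current VMCB.
 */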
3627 static void svm_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info,
3628 u32 *error_code)
3629 {
3630 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3631
3632 *intr_info = control->event_inj;
3633
3634 if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3635 (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3636 *error_code = control->event_inj_err;
3637 else
3638 *error_code = 0;
3639
3640 }
3641
3642 static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3643 {
3644 struct vcpu_svm *svm = to_svm(vcpu);
3645 struct kvm_run *kvm_run = vcpu->run;
3646
3647 /* SEV-ES guests must use the CR write traps to track CR registers. */
3648 if (!is_sev_es_guest(vcpu)) {
3649 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3650 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3651 if (npt_enabled)
3652 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3653 }
3654
3655 if (is_guest_mode(vcpu)) {
3656 int vmexit;
3657
3658 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
3659
3660 vmexit = nested_svm_exit_special(svm);
3661
3662 if (vmexit == NESTED_EXIT_CONTINUE)
3663 vmexit = nested_svm_exit_handled(svm);
3664
3665 if (vmexit == NESTED_EXIT_DONE)
3666 return 1;
3667 }
3668
3669 if (svm_is_vmrun_failure(svm->vmcb->control.exit_code)) {
3670 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3671 kvm_run->fail_entry.hardware_entry_failure_reason
3672 = svm->vmcb->control.exit_code;
3673 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3674 dump_vmcb(vcpu);
3675 return 0;
3676 }
3677
3678 if (exit_fastpath != EXIT_FASTPATH_NONE)
3679 return 1;
3680
3681 return svm_invoke_exit_handler(vcpu, svm->vmcb->control.exit_code);
3682 }
3683
3684 static void svm_set_nested_run_soft_int_state(struct kvm_vcpu *vcpu)
3685 {
3686 struct vcpu_svm *svm = to_svm(vcpu);
3687
3688 svm->soft_int_csbase = svm->vmcb->save.cs.base;
3689 svm->soft_int_old_rip = kvm_rip_read(vcpu);
3690 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
3691 svm->soft_int_next_rip = kvm_rip_read(vcpu);
3692 }
3693
3694 static int pre_svm_run(struct kvm_vcpu *vcpu)
3695 {
3696 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
3697 struct vcpu_svm *svm = to_svm(vcpu);
3698
3699 /*
3700 * If the previous vmrun of the vmcb occurred on a different physical
3701 * cpu, then mark the vmcb dirty and assign a new asid. Hardware's
3702 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3703 */
3704 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
3705 svm->current_vmcb->asid_generation = 0;
3706 vmcb_mark_all_dirty(svm->vmcb);
3707 svm->current_vmcb->cpu = vcpu->cpu;
3708 }
3709
3710 if (is_sev_guest(vcpu))
3711 return pre_sev_run(svm, vcpu->cpu);
3712
3713 /* FIXME: handle wraparound of asid_generation */
3714 if (svm->current_vmcb->asid_generation != sd->asid_generation)
3715 new_asid(svm, sd);
3716
3717 return 0;
3718 }
3719
3720 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3721 {
3722 struct vcpu_svm *svm = to_svm(vcpu);
3723
3724 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3725
3726 if (svm->nmi_l1_to_l2)
3727 return;
3728
3729 /*
3730 * No need to manually track NMI masking when vNMI is enabled, hardware
3731 * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
3732 * case where software directly injects an NMI.
3733 */
3734 if (!is_vnmi_enabled(svm)) {
3735 svm->nmi_masked = true;
3736 svm_set_iret_intercept(svm);
3737 }
3738 ++vcpu->stat.nmi_injections;
3739 }
3740
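/* With vNMI, hardware tracks pending NMIs via V_NMI_PENDING in the VMCB. */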
3741 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
3742 {
3743 struct vcpu_svm *svm = to_svm(vcpu);
3744
3745 if (!is_vnmi_enabled(svm))
3746 return false;
3747
3748 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
3749 }
3750
3751 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
3752 {
3753 struct vcpu_svm *svm = to_svm(vcpu);
3754
3755 if (!is_vnmi_enabled(svm))
3756 return false;
3757
3758 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
3759 return false;
3760
3761 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
3762 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
3763
3764 /*
3765 * Because the pending NMI is serviced by hardware, KVM can't know when
3766 * the NMI is "injected", but for all intents and purposes, passing the
3767 * NMI off to hardware counts as injection.
3768 */
3769 ++vcpu->stat.nmi_injections;
3770
3771 return true;
3772 }
3773
3774 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
3775 {
3776 struct kvm_queued_interrupt *intr = &vcpu->arch.interrupt;
3777 struct vcpu_svm *svm = to_svm(vcpu);
3778 u32 type;
3779
3780 if (intr->soft) {
3781 if (svm_update_soft_interrupt_rip(vcpu, intr->nr))
3782 return;
3783
3784 type = SVM_EVTINJ_TYPE_SOFT;
3785 } else {
3786 type = SVM_EVTINJ_TYPE_INTR;
3787 }
3788
3789 /*
3790 * If AVIC was inhibited in order to detect an IRQ window, and there's
3791 * no other injectable interrupts pending or L2 is active (see below),
3792 * then drop the inhibit as the window has served its purpose.
3793 *
3794 * If L2 is active, this path is reachable if L1 is not intercepting
3795 * IRQs, i.e. if KVM is injecting L1 IRQs into L2. AVIC is locally
3796 * inhibited while L2 is active; drop the VM-wide inhibit to optimize
3797 * the case in which the interrupt window was requested while L1 was
3798 * active (the vCPU was not running nested).
3799 */
3800 if (svm->avic_irq_window &&
3801 (!kvm_cpu_has_injectable_intr(vcpu) || is_guest_mode(vcpu))) {
3802 svm->avic_irq_window = false;
3803 kvm_dec_apicv_irq_window_req(svm->vcpu.kvm);
3804 }
3805
3806 trace_kvm_inj_virq(intr->nr, intr->soft, reinjected);
3807 ++vcpu->stat.irq_injections;
3808
3809 svm->vmcb->control.event_inj = intr->nr | SVM_EVTINJ_VALID | type;
3810 }
3811
3812 static void svm_fixup_nested_rips(struct kvm_vcpu *vcpu)
3813 {
3814 struct vcpu_svm *svm = to_svm(vcpu);
3815
3816 if (!is_guest_mode(vcpu) || !vcpu->arch.nested_run_pending)
3817 return;
3818
3819 /*
3820 * If nrips is supported in hardware but not exposed to L1, stuff the
3821 * actual L2 RIP to emulate what a nrips=0 CPU would do (L1 is
3822 * responsible for advancing RIP prior to injecting the event). Once L2
3823 * runs after L1 executes VMRUN, NextRIP is updated by the CPU and/or
3824 * KVM, and this is no longer needed.
3825 *
3826 * This is done here (as opposed to when preparing vmcb02) to use the
3827 * most up-to-date value of RIP regardless of the order of restoring
3828 * registers and nested state in the vCPU save+restore path.
3829 */
3830 if (boot_cpu_has(X86_FEATURE_NRIPS) &&
3831 !guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
3832 svm->vmcb->control.next_rip = kvm_rip_read(vcpu);
3833
3834 /*
3835 	 * Similarly, initialize the soft int metadata here to use the most
3836 * up-to-date values of RIP and CS base, regardless of restore order.
3837 */
3838 if (svm->soft_int_injected)
3839 svm_set_nested_run_soft_int_state(vcpu);
3840 }
3841
3842 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
3843 int trig_mode, int vector)
3844 {
3845 /*
3846 * apic->apicv_active must be read after vcpu->mode.
3847 * Pairs with smp_store_release in vcpu_enter_guest.
3848 */
3849 bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3850
3851 /* Note, this is called iff the local APIC is in-kernel. */
3852 if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3853 /* Process the interrupt via kvm_check_and_inject_events(). */
3854 kvm_make_request(KVM_REQ_EVENT, vcpu);
3855 kvm_vcpu_kick(vcpu);
3856 return;
3857 }
3858
3859 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3860 if (in_guest_mode) {
3861 /*
3862 * Signal the doorbell to tell hardware to inject the IRQ. If
3863 * the vCPU exits the guest before the doorbell chimes, hardware
3864 * will automatically process AVIC interrupts at the next VMRUN.
3865 */
3866 avic_ring_doorbell(vcpu);
3867 } else {
3868 /*
3869 * Wake the vCPU if it was blocking. KVM will then detect the
3870 * pending IRQ when checking if the vCPU has a wake event.
3871 */
3872 kvm_vcpu_wake_up(vcpu);
3873 }
3874 }
3875
3876 static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
3877 int trig_mode, int vector)
3878 {
3879 kvm_lapic_set_irr(vector, apic);
3880
3881 /*
3882 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
3883 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
3884 * the read of guest_mode. This guarantees that either VMRUN will see
3885 * and process the new vIRR entry, or that svm_complete_interrupt_delivery
3886 * will signal the doorbell if the CPU has already entered the guest.
3887 */
3888 smp_mb__after_atomic();
3889 svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
3890 }
3891
3892 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3893 {
3894 struct vcpu_svm *svm = to_svm(vcpu);
3895
3896 /*
3897 * SEV-ES guests must always keep the CR intercepts cleared. CR
3898 * tracking is done using the CR write traps.
3899 */
3900 if (is_sev_es_guest(vcpu))
3901 return;
3902
3903 if (nested_svm_virtualize_tpr(vcpu))
3904 return;
3905
3906 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3907
3908 if (irr == -1)
3909 return;
3910
3911 if (tpr >= irr)
3912 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3913 }
3914
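/* NMI blocking is tracked by hardware when vNMI is enabled, else by KVM. */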
3915 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3916 {
3917 struct vcpu_svm *svm = to_svm(vcpu);
3918
3919 if (is_vnmi_enabled(svm))
3920 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
3921 else
3922 return svm->nmi_masked;
3923 }
3924
3925 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3926 {
3927 struct vcpu_svm *svm = to_svm(vcpu);
3928
3929 if (is_vnmi_enabled(svm)) {
3930 if (masked)
3931 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
3932 else
3933 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
3934
3935 } else {
3936 svm->nmi_masked = masked;
3937 if (masked)
3938 svm_set_iret_intercept(svm);
3939 else
3940 svm_clr_iret_intercept(svm);
3941 }
3942 }
3943
3944 bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3945 {
3946 struct vcpu_svm *svm = to_svm(vcpu);
3947 struct vmcb *vmcb = svm->vmcb;
3948
3949 if (!gif_set(svm))
3950 return true;
3951
3952 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3953 return false;
3954
3955 if (svm_get_nmi_mask(vcpu))
3956 return true;
3957
3958 return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3959 }
3960
3961 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3962 {
3963 struct vcpu_svm *svm = to_svm(vcpu);
3964 if (vcpu->arch.nested_run_pending)
3965 return -EBUSY;
3966
3967 if (svm_nmi_blocked(vcpu))
3968 return 0;
3969
3970 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
3971 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3972 return -EBUSY;
3973 return 1;
3974 }
3975
3976 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3977 {
3978 struct vcpu_svm *svm = to_svm(vcpu);
3979 struct vmcb *vmcb = svm->vmcb;
3980
3981 if (!gif_set(svm))
3982 return true;
3983
3984 if (is_guest_mode(vcpu)) {
3985 /* As long as interrupts are being delivered... */
3986 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3987 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3988 : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3989 return true;
3990
3991 /* ... vmexits aren't blocked by the interrupt shadow */
3992 if (nested_exit_on_intr(svm))
3993 return false;
3994 } else {
3995 if (!svm_get_if_flag(vcpu))
3996 return true;
3997 }
3998
3999 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
4000 }
4001
4002 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4003 {
4004 struct vcpu_svm *svm = to_svm(vcpu);
4005
4006 if (vcpu->arch.nested_run_pending)
4007 return -EBUSY;
4008
4009 if (svm_interrupt_blocked(vcpu))
4010 return 0;
4011
4012 /*
4013 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
4014 * e.g. if the IRQ arrived asynchronously after checking nested events.
4015 */
4016 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
4017 return -EBUSY;
4018
4019 return 1;
4020 }
4021
4022 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
4023 {
4024 struct vcpu_svm *svm = to_svm(vcpu);
4025
4026 /*
4027 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
4028 * 1, because that's a separate STGI/VMRUN intercept. The next time we
4029 * get that intercept, this function will be called again though and
4030 * we'll get the vintr intercept. However, if the vGIF feature is
4031 * enabled, the STGI interception will not occur. Enable the irq
4032 * window under the assumption that the hardware will set the GIF.
4033 */
4034 if (vgif || gif_set(svm)) {
4035 /*
4036 * KVM only enables IRQ windows when AVIC is enabled if there's
4037 * pending ExtINT since it cannot be injected via AVIC (ExtINT
4038 * bypasses the local APIC). V_IRQ is ignored by hardware when
4039 * AVIC is enabled, and so KVM needs to temporarily disable
4040 * AVIC in order to detect when it's ok to inject the ExtINT.
4041 *
4042 * If running nested, AVIC is already locally inhibited on this
4043 * vCPU (L2 vCPUs use a different MMU that never maps the AVIC
4044 * backing page), therefore there is no need to increment the
4045 * VM-wide AVIC inhibit. KVM will re-evaluate events when the
4046 * vCPU exits to L1 and enable an IRQ window if the ExtINT is
4047 * still pending.
4048 *
4049 * Note, the IRQ window inhibit needs to be updated even if
4050 * AVIC is inhibited for a different reason, as KVM needs to
4051 * keep AVIC inhibited if the other reason is cleared and there
4052 * is still an injectable interrupt pending.
4053 */
4054 if (enable_apicv && !svm->avic_irq_window && !is_guest_mode(vcpu)) {
4055 svm->avic_irq_window = true;
4056 kvm_inc_apicv_irq_window_req(vcpu->kvm);
4057 }
4058
4059 svm_set_vintr(svm);
4060 }
4061 }
4062
4063 static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
4064 {
4065 struct vcpu_svm *svm = to_svm(vcpu);
4066
4067 /*
4068 * If NMIs are outright masked, i.e. the vCPU is already handling an
4069 * NMI, and KVM has not yet intercepted an IRET, then there is nothing
4070 * more to do at this time as KVM has already enabled IRET intercepts.
4071 * If KVM has already intercepted IRET, then single-step over the IRET,
4072 * as NMIs aren't architecturally unmasked until the IRET completes.
4073 *
4074 * If vNMI is enabled, KVM should never request an NMI window if NMIs
4075 * are masked, as KVM allows at most one to-be-injected NMI and one
4076 * pending NMI. If two NMIs arrive simultaneously, KVM will inject one
4077 * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
4078 * unmasked. KVM _will_ request an NMI window in some situations, e.g.
4079 * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
4080 * inject the NMI. In those situations, KVM needs to single-step over
4081 * the STI shadow or intercept STGI.
4082 */
4083 if (svm_get_nmi_mask(vcpu)) {
4084 WARN_ON_ONCE(is_vnmi_enabled(svm));
4085
4086 if (!svm->awaiting_iret_completion)
4087 return; /* IRET will cause a vm exit */
4088 }
4089
4090 /*
4091 * SEV-ES guests are responsible for signaling when a vCPU is ready to
4092 * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
4093 * KVM can't intercept and single-step IRET to detect when NMIs are
4094 * unblocked (architecturally speaking). See SVM_VMGEXIT_NMI_COMPLETE.
4095 *
4096 * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
4097 * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
4098 * supported NAEs in the GHCB protocol.
4099 */
4100 if (is_sev_es_guest(vcpu))
4101 return;
4102
4103 if (!gif_set(svm)) {
4104 if (vgif)
4105 svm_set_intercept(svm, INTERCEPT_STGI);
4106 return; /* STGI will cause a vm exit */
4107 }
4108
4109 /*
4110 	 * Something prevents the NMI from being injected. Single-step over the
4111 	 * blocking condition (IRET, exception injection, or interrupt shadow).
4112 */
4113 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
4114 svm->nmi_singlestep = true;
4115 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
4116 }
4117
4118 static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
4119 {
4120 struct vcpu_svm *svm = to_svm(vcpu);
4121
4122 /*
4123 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
4124 * A TLB flush for the current ASID flushes both "host" and "guest" TLB
4125 * entries, and thus is a superset of Hyper-V's fine grained flushing.
4126 */
4127 kvm_hv_vcpu_purge_flush_tlb(vcpu);
4128
4129 /*
4130 * Flush only the current ASID even if the TLB flush was invoked via
4131 * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all
4132 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
4133 * unconditionally does a TLB flush on both nested VM-Enter and nested
4134 * VM-Exit (via kvm_mmu_reset_context()).
4135 */
4136 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
4137 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4138 else
4139 svm->current_vmcb->asid_generation--;
4140 }
4141
4142 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
4143 {
4144 hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
4145
4146 /*
4147 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
4148 * flush the NPT mappings via hypercall as flushing the ASID only
4149 * affects virtual to physical mappings, it does not invalidate guest
4150 * physical to host physical mappings.
4151 */
4152 if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
4153 hyperv_flush_guest_mapping(root_tdp);
4154
4155 svm_flush_tlb_asid(vcpu);
4156 }
4157
4158 static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
4159 {
4160 /*
4161 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
4162 * flushes should be routed to hv_flush_remote_tlbs() without requesting
4163 * a "regular" remote flush. Reaching this point means either there's
4164 * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
4165 * which might be fatal to the guest. Yell, but try to recover.
4166 */
4167 if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
4168 hv_flush_remote_tlbs(vcpu->kvm);
4169
4170 svm_flush_tlb_asid(vcpu);
4171 }
4172
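/* INVLPGA flushes only the given linear address for the guest's ASID. */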
4173 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
4174 {
4175 struct vcpu_svm *svm = to_svm(vcpu);
4176
4177 invlpga(gva, svm->vmcb->control.asid);
4178 }
4179
4180 static void svm_flush_tlb_guest(struct kvm_vcpu *vcpu)
4181 {
4182 kvm_register_mark_dirty(vcpu, VCPU_EXREG_ERAPS);
4183
4184 svm_flush_tlb_asid(vcpu);
4185 }
4186
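/* Mirror the guest's V_TPR back into the local APIC's TPR (CR8) when needed. */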
4187 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
4188 {
4189 struct vcpu_svm *svm = to_svm(vcpu);
4190
4191 if (nested_svm_virtualize_tpr(vcpu))
4192 return;
4193
4194 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
4195 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
4196 kvm_set_cr8(vcpu, cr8);
4197 }
4198 }
4199
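/* Mirror the local APIC's TPR (CR8) into V_TPR prior to entering the guest. */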
4200 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
4201 {
4202 struct vcpu_svm *svm = to_svm(vcpu);
4203 u64 cr8;
4204
4205 if (nested_svm_virtualize_tpr(vcpu))
4206 return;
4207
4208 cr8 = kvm_get_cr8(vcpu);
4209 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
4210 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
4211 }
4212
4213 static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
4214 int type)
4215 {
4216 bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
4217 bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
4218 struct vcpu_svm *svm = to_svm(vcpu);
4219
4220 /*
4221 * Initialize the soft int fields *before* reading them below if KVM
4222 * aborted entry to the guest with a nested VMRUN pending. To ensure
4223 * KVM uses up-to-date values for RIP and CS base across save/restore,
4224 * regardless of restore order, KVM waits to set the soft int fields
4225 * until VMRUN is imminent. But when canceling injection, KVM requeues
4226 * the soft int and will reinject it via the standard injection flow,
4227 * and so KVM needs to grab the state from the pending nested VMRUN.
4228 */
4229 if (is_guest_mode(vcpu) && vcpu->arch.nested_run_pending)
4230 svm_set_nested_run_soft_int_state(vcpu);
4231
4232 /*
4233 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
4234 * associated with the original soft exception/interrupt. next_rip is
4235 * cleared on all exits that can occur while vectoring an event, so KVM
4236 * needs to manually set next_rip for re-injection. Unlike the !nrips
4237 * case below, this needs to be done if and only if KVM is re-injecting
4238 * the same event, i.e. if the event is a soft exception/interrupt,
4239 * otherwise next_rip is unused on VMRUN.
4240 */
4241 if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
4242 kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
4243 svm->vmcb->control.next_rip = svm->soft_int_next_rip;
4244 /*
4245 * If NRIPS isn't enabled, KVM must manually advance RIP prior to
4246 * injecting the soft exception/interrupt. That advancement needs to
4247 * be unwound if vectoring didn't complete. Note, the new event may
4248 * not be the injected event, e.g. if KVM injected an INTn, the INTn
4249 * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
4250 * be the reported vectored event, but RIP still needs to be unwound.
4251 */
4252 else if (!nrips && (is_soft || is_exception) &&
4253 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
4254 kvm_rip_write(vcpu, svm->soft_int_old_rip);
4255 }
4256
4257 static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
4258 {
4259 struct vcpu_svm *svm = to_svm(vcpu);
4260 u8 vector;
4261 int type;
4262 u32 exitintinfo = svm->vmcb->control.exit_int_info;
4263 bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
4264 bool soft_int_injected = svm->soft_int_injected;
4265
4266 svm->nmi_l1_to_l2 = false;
4267 svm->soft_int_injected = false;
4268
4269 /*
4270 * If we've made progress since setting awaiting_iret_completion, we've
4271 * executed an IRET and can allow NMI injection.
4272 */
4273 if (svm->awaiting_iret_completion &&
4274 kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
4275 svm->awaiting_iret_completion = false;
4276 svm->nmi_masked = false;
4277 kvm_make_request(KVM_REQ_EVENT, vcpu);
4278 }
4279
4280 vcpu->arch.nmi_injected = false;
4281 kvm_clear_exception_queue(vcpu);
4282 kvm_clear_interrupt_queue(vcpu);
4283
4284 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
4285 return;
4286
4287 kvm_make_request(KVM_REQ_EVENT, vcpu);
4288
4289 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
4290 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
4291
4292 if (soft_int_injected)
4293 svm_complete_soft_interrupt(vcpu, vector, type);
4294
4295 switch (type) {
4296 case SVM_EXITINTINFO_TYPE_NMI:
4297 vcpu->arch.nmi_injected = true;
4298 svm->nmi_l1_to_l2 = nmi_l1_to_l2;
4299 break;
4300 case SVM_EXITINTINFO_TYPE_EXEPT: {
4301 u32 error_code = 0;
4302
4303 /*
4304 * Never re-inject a #VC exception.
4305 */
4306 if (vector == X86_TRAP_VC)
4307 break;
4308
4309 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR)
4310 error_code = svm->vmcb->control.exit_int_info_err;
4311
4312 kvm_requeue_exception(vcpu, vector,
4313 exitintinfo & SVM_EXITINTINFO_VALID_ERR,
4314 error_code);
4315 break;
4316 }
4317 case SVM_EXITINTINFO_TYPE_INTR:
4318 kvm_queue_interrupt(vcpu, vector, false);
4319 break;
4320 case SVM_EXITINTINFO_TYPE_SOFT:
4321 kvm_queue_interrupt(vcpu, vector, true);
4322 break;
4323 default:
4324 break;
4325 }
4326
4327 }
4328
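/*
 * Cancel an injection that was programmed into the VMCB but never delivered,
 * stuffing it into exit_int_info so svm_complete_interrupts() requeues it.
 */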
4329 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
4330 {
4331 struct vcpu_svm *svm = to_svm(vcpu);
4332 struct vmcb_control_area *control = &svm->vmcb->control;
4333
4334 control->exit_int_info = control->event_inj;
4335 control->exit_int_info_err = control->event_inj_err;
4336 control->event_inj = 0;
4337 svm_complete_interrupts(vcpu);
4338 }
4339
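/* Don't run the vCPU if the owning SEV VM still needs to be initialized. */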
4340 static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
4341 {
4342 #ifdef CONFIG_KVM_AMD_SEV
4343 if (to_kvm_sev_info(vcpu->kvm)->need_init)
4344 return -EINVAL;
4345 #endif
4346
4347 return 1;
4348 }
4349
4350 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
4351 {
4352 struct vcpu_svm *svm = to_svm(vcpu);
4353 struct vmcb_control_area *control = &svm->vmcb->control;
4354
4355 /*
4356 * Next RIP must be provided as IRQs are disabled, and accessing guest
4357 * memory to decode the instruction might fault, i.e. might sleep.
4358 */
4359 if (!nrips || !control->next_rip)
4360 return EXIT_FASTPATH_NONE;
4361
4362 if (is_guest_mode(vcpu))
4363 return EXIT_FASTPATH_NONE;
4364
4365 switch (control->exit_code) {
4366 case SVM_EXIT_MSR:
4367 if (!control->exit_info_1)
4368 break;
4369 return handle_fastpath_wrmsr(vcpu);
4370 case SVM_EXIT_HLT:
4371 return handle_fastpath_hlt(vcpu);
4372 case SVM_EXIT_INVD:
4373 return handle_fastpath_invd(vcpu);
4374 default:
4375 break;
4376 }
4377
4378 return EXIT_FASTPATH_NONE;
4379 }
4380
4381 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
4382 {
4383 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
4384 struct vcpu_svm *svm = to_svm(vcpu);
4385
4386 guest_state_enter_irqoff();
4387
4388 /*
4389 * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
4390 * VMRUN controls whether or not physical IRQs are masked (KVM always
4391 * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the
4392 * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
4393 * into guest state if delivery of an event during VMRUN triggers a
4394 * #VMEXIT, and the guest_state transitions already tell lockdep that
4395 * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of
4396 * this path, so IRQs aren't actually unmasked while running host code.
4397 */
4398 raw_local_irq_enable();
4399
4400 amd_clear_divider();
4401
4402 if (is_sev_es_guest(vcpu))
4403 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
4404 sev_es_host_save_area(sd));
4405 else
4406 __svm_vcpu_run(svm, spec_ctrl_intercepted);
4407
4408 raw_local_irq_disable();
4409
4410 guest_state_exit_irqoff();
4411 }
4412
4413 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
4414 {
4415 bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
4416 struct vcpu_svm *svm = to_svm(vcpu);
4417 bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
4418
4419 trace_kvm_entry(vcpu, force_immediate_exit);
4420
4421 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4422 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4423 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4424
4425 /*
4426 * Disable singlestep if we're injecting an interrupt/exception.
4427 * We don't want our modified rflags to be pushed on the stack where
4428 * we might not be able to easily reset them if we disabled NMI
4429 * singlestep later.
4430 */
4431 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
4432 /*
4433 * Event injection happens before external interrupts cause a
4434 * vmexit and interrupts are disabled here, so smp_send_reschedule
4435 * is enough to force an immediate vmexit.
4436 */
4437 disable_nmi_singlestep(svm);
4438 force_immediate_exit = true;
4439 }
4440
4441 if (force_immediate_exit)
4442 smp_send_reschedule(vcpu->cpu);
4443
4444 if (pre_svm_run(vcpu)) {
4445 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4446 vcpu->run->fail_entry.hardware_entry_failure_reason = SVM_EXIT_ERR;
4447 vcpu->run->fail_entry.cpu = vcpu->cpu;
4448 return EXIT_FASTPATH_EXIT_USERSPACE;
4449 }
4450
4451 sync_lapic_to_cr8(vcpu);
4452
4453 if (unlikely(svm->asid != svm->vmcb->control.asid)) {
4454 svm->vmcb->control.asid = svm->asid;
4455 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
4456 }
4457 svm->vmcb->save.cr2 = vcpu->arch.cr2;
4458
4459 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS) &&
4460 kvm_register_is_dirty(vcpu, VCPU_EXREG_ERAPS))
4461 svm->vmcb->control.erap_ctl |= ERAP_CONTROL_CLEAR_RAP;
4462
4463 svm_fixup_nested_rips(vcpu);
4464
4465 svm_hv_update_vp_id(svm->vmcb, vcpu);
4466
4467 /*
4468 * Run with all-zero DR6 unless the guest can write DR6 freely, so that
4469 * KVM can get the exact cause of a #DB. Note, loading guest DR6 from
4470 * KVM's snapshot is only necessary when DR accesses won't exit.
4471 */
4472 if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6))
4473 svm_set_dr6(vcpu, vcpu->arch.dr6);
4474 else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
4475 svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
4476
4477 clgi();
4478
4479 /*
4480 * Hardware only context switches DEBUGCTL if LBR virtualization is
4481 * enabled. Manually load DEBUGCTL if necessary (and restore it after
4482 * VM-Exit), as running with the host's DEBUGCTL can negatively affect
4483 * guest state and can even be fatal, e.g. due to Bus Lock Detect.
4484 */
4485 if (!(svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR) &&
4486 vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
4487 update_debugctlmsr(svm->vmcb->save.dbgctl);
4488
4489 kvm_wait_lapic_expire(vcpu);
4490
4491 /*
4492 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
4493 * it's non-zero. Since vmentry is serialising on affected CPUs, there
4494 * is no need to worry about the conditional branch over the wrmsr
4495 * being speculatively taken.
4496 */
4497 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4498 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);
4499
4500 svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
4501
4502 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4503 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
4504
4505 if (!is_sev_es_guest(vcpu)) {
4506 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4507 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4508 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4509 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4510 }
4511 vcpu->arch.regs_dirty = 0;
4512
4513 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4514 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
4515
4516 if (!(svm->vmcb->control.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR) &&
4517 vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
4518 update_debugctlmsr(vcpu->arch.host_debugctl);
4519
4520 stgi();
4521
4522 /* Any pending NMI will happen here */
4523
4524 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4525 kvm_after_interrupt(vcpu);
4526
4527 sync_cr8_to_lapic(vcpu);
4528
4529 svm->next_rip = 0;
4530 if (is_guest_mode(vcpu)) {
4531 nested_sync_control_from_vmcb02(svm);
4532
4533 		/* Track VMRUNs that have made it past consistency checking */
4534 if (vcpu->arch.nested_run_pending &&
4535 !svm_is_vmrun_failure(svm->vmcb->control.exit_code))
4536 ++vcpu->stat.nested_run;
4537
4538 vcpu->arch.nested_run_pending = 0;
4539 }
4540
4541 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4542
4543 /*
4544 * Unconditionally mask off the CLEAR_RAP bit, the AND is just as cheap
4545 * as the TEST+Jcc to avoid it.
4546 */
4547 if (cpu_feature_enabled(X86_FEATURE_ERAPS))
4548 svm->vmcb->control.erap_ctl &= ~ERAP_CONTROL_CLEAR_RAP;
4549
4550 vmcb_mark_all_clean(svm->vmcb);
4551
4552 /* if exit due to PF check for async PF */
4553 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4554 vcpu->arch.apf.host_apf_flags =
4555 kvm_read_and_reset_apf_flags();
4556
4557 vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
4558
4559 if (!msr_write_intercepted(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_CTL))
4560 rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, vcpu_to_pmu(vcpu)->global_ctrl);
4561
4562 trace_kvm_exit(vcpu, KVM_ISA_SVM);
4563
4564 svm_complete_interrupts(vcpu);
4565
4566 /*
4567 * Update the cache after completing interrupts to get an accurate
4568 * NextRIP, e.g. when re-injecting a soft interrupt.
4569 *
4570 * FIXME: Rework svm_get_nested_state() to not pull data from the
4571 * cache (except for maybe int_ctl).
4572 */
4573 if (is_guest_mode(vcpu))
4574 svm->nested.ctl.next_rip = svm->vmcb->control.next_rip;
4575
4576 return svm_exit_handlers_fastpath(vcpu);
4577 }
4578
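/*
 * Load the new MMU root: nCR3 when NPT is enabled, otherwise the shadow
 * paging root goes directly into the guest's CR3 field.
 */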
4579 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
4580 int root_level)
4581 {
4582 struct vcpu_svm *svm = to_svm(vcpu);
4583 unsigned long cr3;
4584
4585 if (npt_enabled) {
4586 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
4587 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
4588
4589 hv_track_root_tdp(vcpu, root_hpa);
4590
4591 cr3 = vcpu->arch.cr3;
4592 } else if (root_level >= PT64_ROOT_4LEVEL) {
4593 cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
4594 } else {
4595 /* PCID in the guest should be impossible with a 32-bit MMU. */
4596 WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
4597 cr3 = root_hpa;
4598 }
4599
4600 svm->vmcb->save.cr3 = cr3;
4601 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
4602 }
4603
4604 static void
4605 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4606 {
4607 /*
4608 * Patch in the VMMCALL instruction:
4609 */
4610 hypercall[0] = 0x0f;
4611 hypercall[1] = 0x01;
4612 hypercall[2] = 0xd9;
4613 }
4614
4615 /*
4616 * The kvm parameter can be NULL (module initialization, or invocation before
4617 * VM creation). Be sure to check the kvm parameter before using it.
4618 */
4619 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
4620 {
4621 switch (index) {
4622 case MSR_IA32_MCG_EXT_CTL:
4623 case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
4624 return false;
4625 case MSR_IA32_SMBASE:
4626 if (!IS_ENABLED(CONFIG_KVM_SMM))
4627 return false;
4628
4629 #ifdef CONFIG_KVM_AMD_SEV
4630 /*
4631 * KVM can't access register state to emulate SMM for SEV-ES
4632 		 * guests. Consuming stale data here is "fine", as KVM only
4633 * checks for MSR_IA32_SMBASE support without a vCPU when
4634 * userspace is querying KVM_CAP_X86_SMM.
4635 */
4636 if (kvm && ____sev_es_guest(kvm))
4637 return false;
4638 #endif
4639 break;
4640 default:
4641 break;
4642 }
4643
4644 return true;
4645 }
4646
4647 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
4648 {
4649 struct vcpu_svm *svm = to_svm(vcpu);
4650
4651 /*
4652 * SVM doesn't provide a way to disable just XSAVES in the guest, KVM
4653 	 * can only disable all variants of XSAVE by disallowing CR4.OSXSAVE from
4654 * being set. As a result, if the host has XSAVE and XSAVES, and the
4655 * guest has XSAVE enabled, the guest can execute XSAVES without
4656 * faulting. Treat XSAVES as enabled in this case regardless of
4657 * whether it's advertised to the guest so that KVM context switches
4658 * XSS on VM-Enter/VM-Exit. Failure to do so would effectively give
4659 * the guest read/write access to the host's XSS.
4660 */
4661 guest_cpu_cap_change(vcpu, X86_FEATURE_XSAVES,
4662 boot_cpu_has(X86_FEATURE_XSAVES) &&
4663 guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE));
4664
4665 /*
4666 * Intercept VMLOAD if the vCPU model is Intel in order to emulate that
4667 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
4668 * SVM on Intel is bonkers and extremely unlikely to work).
4669 */
4670 if (guest_cpuid_is_intel_compatible(vcpu))
4671 guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
4672
4673 if (is_sev_guest(vcpu))
4674 sev_vcpu_after_set_cpuid(svm);
4675 }
4676
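/* WBINVD is always intercepted by KVM for SVM guests. */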
4677 static bool svm_has_wbinvd_exit(void)
4678 {
4679 return true;
4680 }
4681
4682 #define PRE_EX(exit) { .exit_code = (exit), \
4683 .stage = X86_ICPT_PRE_EXCEPT, }
4684 #define POST_EX(exit) { .exit_code = (exit), \
4685 .stage = X86_ICPT_POST_EXCEPT, }
4686 #define POST_MEM(exit) { .exit_code = (exit), \
4687 .stage = X86_ICPT_POST_MEMACCESS, }
4688
4689 static const struct __x86_intercept {
4690 u32 exit_code;
4691 enum x86_intercept_stage stage;
4692 } x86_intercept_map[] = {
4693 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
4694 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
4695 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
4696 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
4697 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
4698 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
4699 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
4700 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
4701 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
4702 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
4703 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
4704 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
4705 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
4706 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
4707 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
4708 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
4709 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
4710 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
4711 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
4712 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
4713 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
4714 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
4715 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
4716 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
4717 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
4718 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
4719 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
4720 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
4721 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
4722 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
4723 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
4724 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
4725 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
4726 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
4727 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
4728 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
4729 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
4730 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
4731 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
4732 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
4733 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
4734 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
4735 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
4736 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
4737 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
4738 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
4739 [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV),
4740 };
4741
4742 #undef PRE_EX
4743 #undef POST_EX
4744 #undef POST_MEM
4745
4746 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4747 struct x86_instruction_info *info,
4748 enum x86_intercept_stage stage,
4749 struct x86_exception *exception)
4750 {
4751 struct vcpu_svm *svm = to_svm(vcpu);
4752 int vmexit, ret = X86EMUL_CONTINUE;
4753 struct __x86_intercept icpt_info;
4754 struct vmcb *vmcb = svm->vmcb;
4755
4756 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4757 goto out;
4758
4759 icpt_info = x86_intercept_map[info->intercept];
4760
4761 if (stage != icpt_info.stage)
4762 goto out;
4763
4764 switch (icpt_info.exit_code) {
4765 case SVM_EXIT_READ_CR0:
4766 if (info->intercept == x86_intercept_cr_read)
4767 icpt_info.exit_code += info->modrm_reg;
4768 break;
4769 case SVM_EXIT_WRITE_CR0: {
4770 unsigned long cr0, val;
4771
4772 /*
4773 * Adjust the exit code accordingly if a CR other than CR0 is
4774 * being written, and skip straight to the common handling as
4775 * only CR0 has an additional selective intercept.
4776 */
4777 if (info->intercept == x86_intercept_cr_write && info->modrm_reg) {
4778 icpt_info.exit_code += info->modrm_reg;
4779 break;
4780 }
4781
4782 /*
4783 * Convert the exit_code to SVM_EXIT_CR0_SEL_WRITE if a
4784 * selective CR0 intercept is triggered (the common logic will
4785 * treat the selective intercept as being enabled). Note, the
4786 * unconditional intercept has higher priority, i.e. this is
4787 * only relevant if *only* the selective intercept is enabled.
4788 */
4789 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_CR0_WRITE) ||
4790 !(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))
4791 break;
4792
4793 /* CLTS never triggers INTERCEPT_SELECTIVE_CR0 */
4794 if (info->intercept == x86_intercept_clts)
4795 break;
4796
4797 /* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */
4798 if (info->intercept == x86_intercept_lmsw) {
4799 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4800 break;
4801 }
4802
4803 /*
4804 * MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit
4805 * other than SVM_CR0_SELECTIVE_MASK is changed.
4806 */
4807 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4808 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
4809 if (cr0 ^ val)
4810 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4811 break;
4812 }
4813 case SVM_EXIT_READ_DR0:
4814 case SVM_EXIT_WRITE_DR0:
4815 icpt_info.exit_code += info->modrm_reg;
4816 break;
4817 case SVM_EXIT_MSR:
4818 if (info->intercept == x86_intercept_wrmsr)
4819 vmcb->control.exit_info_1 = 1;
4820 else
4821 vmcb->control.exit_info_1 = 0;
4822 break;
4823 case SVM_EXIT_PAUSE:
4824 		/*
4825 		 * The emulator flags this intercept for NOP, but PAUSE is
4826 		 * REP NOP, so only treat it as PAUSE if REP is present.
4827 		 */
4828 if (info->rep_prefix != REPE_PREFIX)
4829 goto out;
4830 break;
4831 case SVM_EXIT_IOIO: {
4832 u64 exit_info;
4833 u32 bytes;
4834
4835 if (info->intercept == x86_intercept_in ||
4836 info->intercept == x86_intercept_ins) {
4837 exit_info = ((info->src_val & 0xffff) << 16) |
4838 SVM_IOIO_TYPE_MASK;
4839 bytes = info->dst_bytes;
4840 } else {
4841 exit_info = (info->dst_val & 0xffff) << 16;
4842 bytes = info->src_bytes;
4843 }
4844
4845 if (info->intercept == x86_intercept_outs ||
4846 info->intercept == x86_intercept_ins)
4847 exit_info |= SVM_IOIO_STR_MASK;
4848
4849 if (info->rep_prefix)
4850 exit_info |= SVM_IOIO_REP_MASK;
4851
4852 bytes = min(bytes, 4u);
4853
4854 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4855
4856 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4857
4858 vmcb->control.exit_info_1 = exit_info;
4859 vmcb->control.exit_info_2 = info->next_rip;
4860
4861 break;
4862 }
4863 default:
4864 break;
4865 }
4866
4867 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4868 if (static_cpu_has(X86_FEATURE_NRIPS))
4869 vmcb->control.next_rip = info->next_rip;
4870 vmcb->control.exit_code = icpt_info.exit_code;
4871 vmexit = nested_svm_exit_handled(svm);
4872
4873 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4874 : X86EMUL_CONTINUE;
4875
4876 out:
4877 return ret;
4878 }
4879
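/* Handle exits that must be processed while host IRQs are still disabled. */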
4880 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
4881 {
4882 switch (to_svm(vcpu)->vmcb->control.exit_code) {
4883 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
4884 svm_handle_mce(vcpu);
4885 break;
4886 case SVM_EXIT_INTR:
4887 vcpu->arch.at_instruction_boundary = true;
4888 break;
4889 default:
4890 break;
4891 }
4892 }
4893
4894 static void svm_setup_mce(struct kvm_vcpu *vcpu)
4895 {
4896 /* [63:9] are reserved. */
4897 vcpu->arch.mcg_cap &= 0x1ff;
4898 }
4899
4900 #ifdef CONFIG_KVM_SMM
4901 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
4902 {
4903 struct vcpu_svm *svm = to_svm(vcpu);
4904
4905 /* Per APM Vol.2 15.22.2 "Response to SMI" */
4906 if (!gif_set(svm))
4907 return true;
4908
4909 return is_smm(vcpu);
4910 }
4911
4912 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4913 {
4914 struct vcpu_svm *svm = to_svm(vcpu);
4915 if (vcpu->arch.nested_run_pending)
4916 return -EBUSY;
4917
4918 if (svm_smi_blocked(vcpu))
4919 return 0;
4920
4921 /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
4922 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
4923 return -EBUSY;
4924
4925 return 1;
4926 }
4927
4928 static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
4929 {
4930 struct vcpu_svm *svm = to_svm(vcpu);
4931 struct kvm_host_map map_save;
4932
4933 if (!is_guest_mode(vcpu))
4934 return 0;
4935
4936 /*
4937 * 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is
4938 * responsible for ensuring nested SVM and SMIs are mutually exclusive.
4939 */
4940
4941 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
4942 return 1;
4943
4944 smram->smram64.svm_guest_flag = 1;
4945 smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
4946
4947 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4948 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4949 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4950
4951 nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
4952
4953 /*
4954 * KVM uses VMCB01 to store L1 host state while L2 runs but
4955 * VMCB01 is going to be used during SMM and thus the state will
4956 	 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
4957 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
4958 	 * format of the area is identical to the guest save area, offset
4959 * by 0x400 (matches the offset of 'struct vmcb_save_area'
4960 * within 'struct vmcb'). Note: HSAVE area may also be used by
4961 * L1 hypervisor to save additional host context (e.g. KVM does
4962 * that, see svm_prepare_switch_to_guest()) which must be
4963 * preserved.
4964 */
4965 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4966 return 1;
4967
4968 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
4969
4970 svm_copy_vmrun_state(map_save.hva + 0x400,
4971 &svm->vmcb01.ptr->save);
4972
4973 kvm_vcpu_unmap(vcpu, &map_save);
4974 return 0;
4975 }
4976
4977 static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
4978 {
4979 struct vcpu_svm *svm = to_svm(vcpu);
4980 struct kvm_host_map map, map_save;
4981 struct vmcb *vmcb12;
4982 int ret;
4983
4984 const struct kvm_smram_state_64 *smram64 = &smram->smram64;
4985
4986 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
4987 return 0;
4988
4989 /* Non-zero if SMI arrived while vCPU was in guest mode. */
4990 if (!smram64->svm_guest_flag)
4991 return 0;
4992
4993 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
4994 return 1;
4995
4996 if (!(smram64->efer & EFER_SVME))
4997 return 1;
4998
4999 if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
5000 return 1;
5001
5002 ret = 1;
5003 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
5004 goto unmap_map;
5005
5006 if (svm_allocate_nested(svm))
5007 goto unmap_save;
5008
5009 /*
5010 * Restore L1 host state from L1 HSAVE area as VMCB01 was
5011 * used during SMM (see svm_enter_smm())
5012 */
5013
5014 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
5015
5016 /*
5017 * Enter the nested guest now
5018 */
5019
5020 vmcb_mark_all_dirty(svm->vmcb01.ptr);
5021
5022 vmcb12 = map.hva;
5023 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
5024 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
5025
5026 if (nested_svm_check_cached_vmcb12(vcpu) < 0)
5027 goto unmap_save;
5028
5029 if (enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, false) != 0)
5030 goto unmap_save;
5031
5032 ret = 0;
5033 vcpu->arch.nested_run_pending = KVM_NESTED_RUN_PENDING;
5034
5035 unmap_save:
5036 kvm_vcpu_unmap(vcpu, &map_save);
5037 unmap_map:
5038 kvm_vcpu_unmap(vcpu, &map);
5039 return ret;
5040 }
5041
5042 static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
5043 {
5044 struct vcpu_svm *svm = to_svm(vcpu);
5045
5046 if (!gif_set(svm)) {
5047 if (vgif)
5048 svm_set_intercept(svm, INTERCEPT_STGI);
5049 /* STGI will cause a vm exit */
5050 } else {
5051 /* We must be in SMM; RSM will cause a vmexit anyway. */
5052 }
5053 }
5054 #endif
5055
5056 static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
5057 void *insn, int insn_len)
5058 {
5059 struct vcpu_svm *svm = to_svm(vcpu);
5060 bool smep, smap, is_user;
5061 u64 error_code;
5062
5063 /* Check that emulation is possible during event vectoring */
5064 if ((svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK) &&
5065 !kvm_can_emulate_event_vectoring(emul_type))
5066 return X86EMUL_UNHANDLEABLE_VECTORING;
5067
5068 /* Emulation is always possible when KVM has access to all guest state. */
5069 if (!is_sev_guest(vcpu))
5070 return X86EMUL_CONTINUE;
5071
5072 /* #UD and #GP should never be intercepted for SEV guests. */
5073 WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
5074 EMULTYPE_TRAP_UD_FORCED |
5075 EMULTYPE_VMWARE_GP));
5076
5077 /*
5078 * Emulation is impossible for SEV-ES guests as KVM doesn't have access
5079 * to guest register state.
5080 */
5081 if (is_sev_es_guest(vcpu))
5082 return X86EMUL_RETRY_INSTR;
5083
5084 /*
5085 * Emulation is possible if the instruction is already decoded, e.g.
5086 * when completing I/O after returning from userspace.
5087 */
5088 if (emul_type & EMULTYPE_NO_DECODE)
5089 return X86EMUL_CONTINUE;
5090
5091 /*
5092 * Emulation is possible for SEV guests if and only if a prefilled
5093 * buffer containing the bytes of the intercepted instruction is
5094 * available. SEV guest memory is encrypted with a guest specific key
5095 * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
5096 * decode garbage.
5097 *
5098 * If KVM is NOT trying to simply skip an instruction, inject #UD if
5099 * KVM reached this point without an instruction buffer. In practice,
5100 * this path should never be hit by a well-behaved guest, e.g. KVM
5101 * doesn't intercept #UD or #GP for SEV guests, but this path is still
5102 * theoretically reachable, e.g. via unaccelerated fault-like AVIC
5103 * access, and needs to be handled by KVM to avoid putting the guest
5104 * into an infinite loop. Injecting #UD is somewhat arbitrary, but
5105	 * it's the least awful option given lack of insight into the guest.
5106 *
5107 * If KVM is trying to skip an instruction, simply resume the guest.
5108 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
5109 * will attempt to re-inject the INT3/INTO and skip the instruction.
5110 * In that scenario, retrying the INT3/INTO and hoping the guest will
5111 * make forward progress is the only option that has a chance of
5112 * success (and in practice it will work the vast majority of the time).
5113 */
5114 if (unlikely(!insn)) {
5115 if (emul_type & EMULTYPE_SKIP)
5116 return X86EMUL_UNHANDLEABLE;
5117
5118 kvm_queue_exception(vcpu, UD_VECTOR);
5119 return X86EMUL_PROPAGATE_FAULT;
5120 }
5121
5122 /*
5123 * Emulate for SEV guests if the insn buffer is not empty. The buffer
5124 * will be empty if the DecodeAssist microcode cannot fetch bytes for
5125 * the faulting instruction because the code fetch itself faulted, e.g.
5126 * the guest attempted to fetch from emulated MMIO or a guest page
5127 * table used to translate CS:RIP resides in emulated MMIO.
5128 */
5129 if (likely(insn_len))
5130 return X86EMUL_CONTINUE;
5131
5132 /*
5133 * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
5134 *
5135 * Errata:
5136 * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
5137 * possible that CPU microcode implementing DecodeAssist will fail to
5138 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
5139 * be '0'. This happens because microcode reads CS:RIP using a _data_
5140	 * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode
5141 * gives up and does not fill the instruction bytes buffer.
5142 *
5143 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
5144 * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
5145 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
5146 * GuestIntrBytes field of the VMCB.
5147 *
5148 * This does _not_ mean that the erratum has been encountered, as the
5149 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
5150	 * #PF, e.g. if the guest attempted to execute from emulated MMIO and
5151 * encountered a reserved/not-present #PF.
5152 *
5153 * To hit the erratum, the following conditions must be true:
5154 * 1. CR4.SMAP=1 (obviously).
5155 * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot
5156 * have been hit as the guest would have encountered a SMEP
5157 * violation #PF, not a #NPF.
5158 * 3. The #NPF is not due to a code fetch, in which case failure to
5159	 *    retrieve the instruction bytes is legitimate (see above).
5160 *
5161 * In addition, don't apply the erratum workaround if the #NPF occurred
5162 * while translating guest page tables (see below).
5163 */
5164 error_code = svm->vmcb->control.exit_info_1;
5165 if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
5166 goto resume_guest;
5167
5168 smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
5169 smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
5170 is_user = svm_get_cpl(vcpu) == 3;
5171 if (smap && (!smep || is_user)) {
5172 pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
5173
5174 /*
5175 * If the fault occurred in userspace, arbitrarily inject #GP
5176 * to avoid killing the guest and to hopefully avoid confusing
5177 * the guest kernel too much, e.g. injecting #PF would not be
5178 * coherent with respect to the guest's page tables. Request
5179 * triple fault if the fault occurred in the kernel as there's
5180 * no fault that KVM can inject without confusing the guest.
5181 * In practice, the triple fault is moot as no sane SEV kernel
5182 * will execute from user memory while also running with SMAP=1.
5183 */
5184 if (is_user)
5185 kvm_inject_gp(vcpu, 0);
5186 else
5187 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5188 return X86EMUL_PROPAGATE_FAULT;
5189 }
5190
5191 resume_guest:
5192 /*
5193 * If the erratum was not hit, simply resume the guest and let it fault
5194 * again. While awful, e.g. the vCPU may get stuck in an infinite loop
5195 * if the fault is at CPL=0, it's the lesser of all evils. Exiting to
5196 * userspace will kill the guest, and letting the emulator read garbage
5197 * will yield random behavior and potentially corrupt the guest.
5198 *
5199 * Simply resuming the guest is technically not a violation of the SEV
5200 * architecture. AMD's APM states that all code fetches and page table
5201	 * accesses for SEV guests are encrypted, regardless of the C-Bit. The
5202 * APM also states that encrypted accesses to MMIO are "ignored", but
5203 * doesn't explicitly define "ignored", i.e. doing nothing and letting
5204 * the guest spin is technically "ignoring" the access.
5205 */
5206 return X86EMUL_RETRY_INSTR;
5207 }
5208
5209 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
5210 {
5211 struct vcpu_svm *svm = to_svm(vcpu);
5212
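	/*
	 * Per the APM, INIT (like NMI and SMI) is held pending while GIF is
	 * clear, so treat an INIT signal as blocked whenever GIF=0, e.g.
	 * after CLGI or while vGIF reports the flag as cleared.
	 */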
5213 return !gif_set(svm);
5214 }
5215
5216 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
5217 {
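	/*
	 * SEV-ES guest register state is encrypted and can't be rewritten by
	 * KVM, so SIPI can't be emulated by stuffing the vCPU's registers.
	 * The SEV code instead releases the vCPU from its AP "reset hold" so
	 * the guest brings the AP up at the requested vector itself (see
	 * sev_vcpu_deliver_sipi_vector()).
	 */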
5218 if (!is_sev_es_guest(vcpu))
5219 return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
5220
5221 sev_vcpu_deliver_sipi_vector(vcpu, vector);
5222 }
5223
5224 static void svm_vm_destroy(struct kvm *kvm)
5225 {
5226 avic_vm_destroy(kvm);
5227 sev_vm_destroy(kvm);
5228
5229 svm_srso_vm_destroy();
5230 }
5231
5232 static int svm_vm_init(struct kvm *kvm)
5233 {
5234 sev_vm_init(kvm);
5235
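	/*
	 * If Pause-Loop Exiting is disabled via module params, don't
	 * intercept PAUSE for this VM at all.
	 */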
5236 if (!pause_filter_count || !pause_filter_thresh)
5237 kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
5238
5239 if (enable_apicv) {
5240 int ret = avic_vm_init(kvm);
5241 if (ret)
5242 return ret;
5243 }
5244
5245 svm_srso_vm_init();
5246 return 0;
5247 }
5248
5249 static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
5250 {
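	/*
	 * Use an SNP-safe allocation to work around the SNP erratum where the
	 * CPU can incorrectly signal an RMP violation #PF if a 2MB/1GB
	 * hugepage collides with the RMP entry of an in-use page such as the
	 * AVIC backing page.
	 */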
5251 struct page *page = snp_safe_alloc_page();
5252
5253 if (!page)
5254 return NULL;
5255
5256 return page_address(page);
5257 }
5258
5259 struct kvm_x86_ops svm_x86_ops __initdata = {
5260 .name = KBUILD_MODNAME,
5261
5262 .check_processor_compatibility = svm_check_processor_compat,
5263
5264 .hardware_unsetup = svm_hardware_unsetup,
5265 .enable_virtualization_cpu = svm_enable_virtualization_cpu,
5266 .disable_virtualization_cpu = svm_disable_virtualization_cpu,
5267 .emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu,
5268 .has_emulated_msr = svm_has_emulated_msr,
5269
5270 .vcpu_precreate = svm_vcpu_precreate,
5271 .vcpu_create = svm_vcpu_create,
5272 .vcpu_free = svm_vcpu_free,
5273 .vcpu_reset = svm_vcpu_reset,
5274
5275 .vm_size = sizeof(struct kvm_svm),
5276 .vm_init = svm_vm_init,
5277 .vm_destroy = svm_vm_destroy,
5278
5279 .prepare_switch_to_guest = svm_prepare_switch_to_guest,
5280 .vcpu_load = svm_vcpu_load,
5281 .vcpu_put = svm_vcpu_put,
5282 .vcpu_blocking = avic_vcpu_blocking,
5283 .vcpu_unblocking = avic_vcpu_unblocking,
5284
5285 .update_exception_bitmap = svm_update_exception_bitmap,
5286 .get_feature_msr = svm_get_feature_msr,
5287 .get_msr = svm_get_msr,
5288 .set_msr = svm_set_msr,
5289 .get_segment_base = svm_get_segment_base,
5290 .get_segment = svm_get_segment,
5291 .set_segment = svm_set_segment,
5292 .get_cpl = svm_get_cpl,
5293 .get_cpl_no_cache = svm_get_cpl,
5294 .get_cs_db_l_bits = svm_get_cs_db_l_bits,
5295 .is_valid_cr0 = svm_is_valid_cr0,
5296 .set_cr0 = svm_set_cr0,
5297 .post_set_cr3 = sev_post_set_cr3,
5298 .is_valid_cr4 = svm_is_valid_cr4,
5299 .set_cr4 = svm_set_cr4,
5300 .set_efer = svm_set_efer,
5301 .get_idt = svm_get_idt,
5302 .set_idt = svm_set_idt,
5303 .get_gdt = svm_get_gdt,
5304 .set_gdt = svm_set_gdt,
5305 .set_dr7 = svm_set_dr7,
5306 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
5307 .cache_reg = svm_cache_reg,
5308 .get_rflags = svm_get_rflags,
5309 .set_rflags = svm_set_rflags,
5310 .get_if_flag = svm_get_if_flag,
5311
5312 .flush_tlb_all = svm_flush_tlb_all,
5313 .flush_tlb_current = svm_flush_tlb_current,
5314 .flush_tlb_gva = svm_flush_tlb_gva,
5315 .flush_tlb_guest = svm_flush_tlb_guest,
5316
5317 .vcpu_pre_run = svm_vcpu_pre_run,
5318 .vcpu_run = svm_vcpu_run,
5319 .handle_exit = svm_handle_exit,
5320 .skip_emulated_instruction = svm_skip_emulated_instruction,
5321 .update_emulated_instruction = NULL,
5322 .set_interrupt_shadow = svm_set_interrupt_shadow,
5323 .get_interrupt_shadow = svm_get_interrupt_shadow,
5324 .patch_hypercall = svm_patch_hypercall,
5325 .inject_irq = svm_inject_irq,
5326 .inject_nmi = svm_inject_nmi,
5327 .is_vnmi_pending = svm_is_vnmi_pending,
5328 .set_vnmi_pending = svm_set_vnmi_pending,
5329 .inject_exception = svm_inject_exception,
5330 .cancel_injection = svm_cancel_injection,
5331 .interrupt_allowed = svm_interrupt_allowed,
5332 .nmi_allowed = svm_nmi_allowed,
5333 .get_nmi_mask = svm_get_nmi_mask,
5334 .set_nmi_mask = svm_set_nmi_mask,
5335 .enable_nmi_window = svm_enable_nmi_window,
5336 .enable_irq_window = svm_enable_irq_window,
5337 .update_cr8_intercept = svm_update_cr8_intercept,
5338
5339 .x2apic_icr_is_split = true,
5340 .set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
5341 .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
5342 .apicv_post_state_restore = avic_apicv_post_state_restore,
5343 .required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
5344
5345 .get_exit_info = svm_get_exit_info,
5346 .get_entry_info = svm_get_entry_info,
5347
5348 .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
5349
5350 .has_wbinvd_exit = svm_has_wbinvd_exit,
5351
5352 .get_l2_tsc_offset = svm_get_l2_tsc_offset,
5353 .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
5354 .write_tsc_offset = svm_write_tsc_offset,
5355 .write_tsc_multiplier = svm_write_tsc_multiplier,
5356
5357 .load_mmu_pgd = svm_load_mmu_pgd,
5358
5359 .check_intercept = svm_check_intercept,
5360 .handle_exit_irqoff = svm_handle_exit_irqoff,
5361
5362 .nested_ops = &svm_nested_ops,
5363
5364 .deliver_interrupt = svm_deliver_interrupt,
5365 .pi_update_irte = avic_pi_update_irte,
5366 .setup_mce = svm_setup_mce,
5367
5368 #ifdef CONFIG_KVM_SMM
5369 .smi_allowed = svm_smi_allowed,
5370 .enter_smm = svm_enter_smm,
5371 .leave_smm = svm_leave_smm,
5372 .enable_smi_window = svm_enable_smi_window,
5373 #endif
5374
5375 #ifdef CONFIG_KVM_AMD_SEV
5376 .dev_get_attr = sev_dev_get_attr,
5377 .mem_enc_ioctl = sev_mem_enc_ioctl,
5378 .mem_enc_register_region = sev_mem_enc_register_region,
5379 .mem_enc_unregister_region = sev_mem_enc_unregister_region,
5380 .guest_memory_reclaimed = sev_guest_memory_reclaimed,
5381
5382 .vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
5383 .vm_move_enc_context_from = sev_vm_move_enc_context_from,
5384 #endif
5385 .check_emulate_instruction = svm_check_emulate_instruction,
5386
5387 .apic_init_signal_blocked = svm_apic_init_signal_blocked,
5388
5389 .recalc_intercepts = svm_recalc_intercepts,
5390 .complete_emulated_msr = svm_complete_emulated_msr,
5391
5392 .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
5393 .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
5394 .alloc_apic_backing_page = svm_alloc_apic_backing_page,
5395
5396 .gmem_prepare = sev_gmem_prepare,
5397 .gmem_invalidate = sev_gmem_invalidate,
5398 .gmem_max_mapping_level = sev_gmem_max_mapping_level,
5399 };
5400
5401 /*
5402 * The default MMIO mask is a single bit (excluding the present bit),
5403 * which could conflict with the memory encryption bit. Check for
5404 * memory encryption support and override the default MMIO mask if
5405 * memory encryption is enabled.
5406 */
5407 static __init void svm_adjust_mmio_mask(void)
5408 {
5409 unsigned int enc_bit, mask_bit;
5410 u64 msr, mask;
5411
5412 /* If there is no memory encryption support, use existing mask */
5413 if (cpuid_eax(0x80000000) < 0x8000001f)
5414 return;
5415
5416 /* If memory encryption is not enabled, use existing mask */
5417 rdmsrq(MSR_AMD64_SYSCFG, msr);
5418 if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
5419 return;
5420
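	/*
	 * CPUID Fn8000_001F_EBX[5:0] enumerates the position of the memory
	 * encryption bit (the "C-bit") in the physical address.
	 */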
5421 enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
5422 mask_bit = boot_cpu_data.x86_phys_bits;
5423
5424 /* Increment the mask bit if it is the same as the encryption bit */
5425 if (enc_bit == mask_bit)
5426 mask_bit++;
5427
5428 /*
5429 * If the mask bit location is below 52, then some bits above the
5430 * physical addressing limit will always be reserved, so use the
5431 * rsvd_bits() function to generate the mask. This mask, along with
5432 * the present bit, will be used to generate a page fault with
5433 * PFER.RSV = 1.
5434 *
5435 * If the mask bit location is 52 (or above), then clear the mask.
5436 */
5437 mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
5438
5439 kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
5440 }
5441
5442 static __init void svm_set_cpu_caps(void)
5443 {
5444 kvm_initialize_cpu_caps();
5445
5446 kvm_caps.supported_perf_cap = 0;
5447
5448 kvm_cpu_cap_clear(X86_FEATURE_IBT);
5449
5450 /* CPUID 0x80000001 and 0x8000000A (SVM features) */
5451 if (nested) {
5452 kvm_cpu_cap_set(X86_FEATURE_SVM);
5453 kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
5454
5455 /*
5456 * KVM currently flushes TLBs on *every* nested SVM transition,
5457 * and so for all intents and purposes KVM supports flushing by
5458 * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
5459 */
5460 kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID);
5461
5462 if (nrips)
5463 kvm_cpu_cap_set(X86_FEATURE_NRIPS);
5464
5465 if (npt_enabled)
5466 kvm_cpu_cap_set(X86_FEATURE_NPT);
5467
5468 if (tsc_scaling)
5469 kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
5470
5471 if (vls)
5472 kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
5473 if (lbrv)
5474 kvm_cpu_cap_set(X86_FEATURE_LBRV);
5475
5476 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
5477 kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);
5478
5479 if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
5480 kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
5481
5482 if (vgif)
5483 kvm_cpu_cap_set(X86_FEATURE_VGIF);
5484
5485 if (vnmi)
5486 kvm_cpu_cap_set(X86_FEATURE_VNMI);
5487
5488 /* Nested VM can receive #VMEXIT instead of triggering #GP */
5489 kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
5490 }
5491
5492 if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD))
5493 kvm_caps.has_bus_lock_exit = true;
5494
5495 /* CPUID 0x80000008 */
5496 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5497 boot_cpu_has(X86_FEATURE_AMD_SSBD))
5498 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
5499
5500 if (enable_pmu) {
5501 /*
5502 * Enumerate support for PERFCTR_CORE if and only if KVM has
5503 * access to enough counters to virtualize "core" support,
5504 * otherwise limit vPMU support to the legacy number of counters.
5505 */
5506 if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
5507 kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
5508 kvm_pmu_cap.num_counters_gp);
5509 else
5510 kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
5511
5512 if (kvm_pmu_cap.version != 2 ||
5513 !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
5514 kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
5515 }
5516
5517 /* CPUID 0x8000001F (SME/SEV features) */
5518 sev_set_cpu_caps();
5519
5520 /*
5521 * Clear capabilities that are automatically configured by common code,
5522 * but that require explicit SVM support (that isn't yet implemented).
5523 */
5524 kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
5525 kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM);
5526
5527 kvm_setup_xss_caps();
5528 kvm_finalize_cpu_caps();
5529 }
5530
5531 static __init int svm_hardware_setup(void)
5532 {
5533 void *iopm_va;
5534 int cpu, r;
5535
5536 /*
5537 * NX is required for shadow paging and for NPT if the NX huge pages
5538 * mitigation is enabled.
5539 */
5540 if (!boot_cpu_has(X86_FEATURE_NX)) {
5541 pr_err_ratelimited("NX (Execute Disable) not supported\n");
5542 return -EOPNOTSUPP;
5543 }
5544
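	/* AMD CPUs don't implement MPX; mask off its XSAVE state bits. */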
5545 kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
5546 XFEATURE_MASK_BNDCSR);
5547
5548 if (tsc_scaling) {
5549 if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
5550 tsc_scaling = false;
5551 } else {
5552 pr_info("TSC scaling supported\n");
5553 kvm_caps.has_tsc_control = true;
5554 }
5555 }
5556 kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
5557 kvm_caps.tsc_scaling_ratio_frac_bits = 32;
5558
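	/*
	 * Register TSC_AUX as a "user return" MSR so that the host value is
	 * restored lazily, on return to userspace, instead of on every exit.
	 */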
5559 tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
5560
5561 /* Check for pause filtering support */
5562 if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
5563 pause_filter_count = 0;
5564 pause_filter_thresh = 0;
5565 } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
5566 pause_filter_thresh = 0;
5567 }
5568
5569 if (nested) {
5570 pr_info("Nested Virtualization enabled\n");
5571 kvm_enable_efer_bits(EFER_SVME);
5572 if (!boot_cpu_has(X86_FEATURE_EFER_LMSLE_MBZ))
5573 kvm_enable_efer_bits(EFER_LMSLE);
5574
5575 r = nested_svm_init_msrpm_merge_offsets();
5576 if (r)
5577 return r;
5578 }
5579
5580 /*
5581 * KVM's MMU doesn't support using 2-level paging for itself, and thus
5582 * NPT isn't supported if the host is using 2-level paging since host
5583 * CR4 is unchanged on VMRUN.
5584 */
5585 if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
5586 npt_enabled = false;
5587
5588 if (!boot_cpu_has(X86_FEATURE_NPT))
5589 npt_enabled = false;
5590
5591 /* Force VM NPT level equal to the host's paging level */
5592 kvm_configure_mmu(npt_enabled, get_npt_level(),
5593 get_npt_level(), PG_LEVEL_1G);
5594 pr_info("Nested Paging %s\n", str_enabled_disabled(npt_enabled));
5595
5596 /*
5597 * It seems that on AMD processors PTE's accessed bit is
5598 * being set by the CPU hardware before the NPF vmexit.
5599 * This is not expected behaviour and our tests fail because
5600 * of it.
5601 * A workaround here is to disable support for
5602 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
5603	 * In this case userspace can know if there is support using the
5604	 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
5605	 * it.
5606	 * If future AMD CPU models change the behaviour described above,
5607	 * this variable can be changed accordingly.
5608 */
5609 allow_smaller_maxphyaddr = !npt_enabled;
5610
5611 /* Setup shadow_me_value and shadow_me_mask */
5612 kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
5613
5614 svm_adjust_mmio_mask();
5615
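	/*
	 * Without hardware NextRIP support, KVM has to decode/emulate an
	 * instruction in order to skip it, so drop nrips if the CPU lacks it.
	 */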
5616 nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
5617
5618 if (lbrv) {
5619 if (!boot_cpu_has(X86_FEATURE_LBRV))
5620 lbrv = false;
5621 else
5622 pr_info("LBR virtualization supported\n");
5623 }
5624
5625 iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL);
5626 if (!iopm_va)
5627 return -ENOMEM;
5628
5629 iopm_base = __sme_set(__pa(iopm_va));
5630
5631 /*
5632 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
5633 * may be modified by svm_adjust_mmio_mask()), as well as nrips.
5634 */
5635 sev_hardware_setup();
5636
5637 svm_hv_hardware_setup();
5638
5639 enable_apicv = avic_hardware_setup();
5640 if (!enable_apicv) {
5641 enable_ipiv = false;
5642 svm_x86_ops.vcpu_blocking = NULL;
5643 svm_x86_ops.vcpu_unblocking = NULL;
5644 svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
5645 }
5646
5647 if (vls) {
5648 if (!npt_enabled ||
5649 !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
5650 !IS_ENABLED(CONFIG_X86_64)) {
5651 vls = false;
5652 } else {
5653 pr_info("Virtual VMLOAD VMSAVE supported\n");
5654 }
5655 }
5656
5657 if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
5658 svm_gp_erratum_intercept = false;
5659
5660 if (vgif) {
5661 if (!boot_cpu_has(X86_FEATURE_VGIF))
5662 vgif = false;
5663 else
5664 pr_info("Virtual GIF supported\n");
5665 }
5666
5667 vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
5668 if (vnmi)
5669 pr_info("Virtual NMI enabled\n");
5670
5671 if (!vnmi) {
5672 svm_x86_ops.is_vnmi_pending = NULL;
5673 svm_x86_ops.set_vnmi_pending = NULL;
5674 }
5675
5676 if (!enable_pmu)
5677 pr_info("PMU virtualization is disabled\n");
5678
5679 svm_set_cpu_caps();
5680
5681 kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED;
5682
5683 for_each_possible_cpu(cpu) {
5684 r = svm_cpu_init(cpu);
5685 if (r)
5686 goto err;
5687 }
5688
5689 return 0;
5690
5691 err:
5692 svm_hardware_unsetup();
5693 return r;
5694 }
5695
5696
5697 static struct kvm_x86_init_ops svm_init_ops __initdata = {
5698 .hardware_setup = svm_hardware_setup,
5699
5700 .runtime_ops = &svm_x86_ops,
5701 .pmu_ops = &amd_pmu_ops,
5702 };
5703
5704 static void __svm_exit(void)
5705 {
5706 kvm_x86_vendor_exit();
5707 }
5708
5709 static int __init svm_init(void)
5710 {
5711 int r;
5712
5713 KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_svm);
5714
5715 __unused_size_checks();
5716
5717 if (!kvm_is_svm_supported())
5718 return -EOPNOTSUPP;
5719
5720 r = kvm_x86_vendor_init(&svm_init_ops);
5721 if (r)
5722 return r;
5723
5724 /*
5725 * Common KVM initialization _must_ come last, after this, /dev/kvm is
5726 * exposed to userspace!
5727 */
5728 r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
5729 THIS_MODULE);
5730 if (r)
5731 goto err_kvm_init;
5732
5733 return 0;
5734
5735 err_kvm_init:
5736 __svm_exit();
5737 return r;
5738 }
5739
5740 static void __exit svm_exit(void)
5741 {
5742 kvm_exit();
5743 __svm_exit();
5744 }
5745
5746 module_init(svm_init)
5747 module_exit(svm_exit)
5748