#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>
#include <linux/string_choices.h>
#include <linux/mutex.h>

#include <asm/apic.h>
#include <asm/msr.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/reboot.h>
#include <asm/fpu/api.h>

#include <trace/events/ipi.h>

#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_DESCRIPTION("KVM support for SVM (AMD-V) extensions");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

static bool erratum_383_found __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);

/*
 * These 2 parameters are used to configure the controls for Pause-Loop
 * Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop. In this mode, a 16-bit pause filter threshold field is
 *	added in the VMCB. The threshold value is a cycle count that is used
 *	to reset the pause counter. As with simple pause filtering, VMRUN
 *	loads the pause count value from the VMCB into an internal counter.
 *	Then, on each pause instruction the hardware checks the elapsed number
 *	of cycles since the most recent pause instruction against the pause
 *	filter threshold. If the elapsed cycle count is greater than the pause
 *	filter threshold, then the internal pause count is reloaded from the
 *	VMCB and execution continues. If the elapsed cycle count is less than
 *	the pause filter threshold, then the internal pause count is
 *	decremented. If the count value is less than zero and PAUSE intercept
 *	is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *	supported and the pause filter threshold field is set to zero, the
 *	filter will operate in the simpler, count only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);
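/*
 * Note: all of the pause filter parameters above are read-only at runtime
 * (permissions 0444), so they can only be overridden when the module is
 * loaded, e.g. (illustrative values):
 *
 *	modprobe kvm-amd pause_filter_count=5000 pause_filter_count_grow=4
 */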
/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, 0444);

/* enable/disable Next RIP Save */
int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
int lbrv = true;
module_param(lbrv, int, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

module_param(enable_device_posted_irqs, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);

bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);

bool vnmi = true;
module_param(vnmi, bool, 0444);

static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

DEFINE_PER_CPU(struct svm_cpu_data, svm_data);

static DEFINE_MUTEX(vmcb_dump_mutex);

/*
 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
int tsc_aux_uret_slot __ro_after_init = -1;
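/*
 * The NPT root level mirrors the host's paging mode: 5-level NPT when the
 * host kernel runs with 5-level paging, 4-level otherwise (and 3-level PAE
 * on 32-bit hosts).
 */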
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 old_efer = vcpu->arch.efer;
	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available. */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {
			svm_leave_nested(vcpu);
			svm_set_gif(svm, true);
			/* #GP intercept is still needed for vmware backdoor */
			if (!enable_vmware_backdoor)
				clr_exception_intercept(svm, GP_VECTOR);

			/*
			 * Free the nested guest state, unless we are in SMM.
			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
			if (!is_smm(vcpu))
				svm_free_nested(svm);

		} else {
			int ret = svm_allocate_nested(svm);

			if (ret) {
				vcpu->arch.efer = old_efer;
				return ret;
			}

			/*
			 * Never intercept #GP for SEV guests, KVM can't
			 * decrypt guest memory to workaround the erratum.
			 */
			if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
				set_exception_intercept(svm, GP_VECTOR);
		}
	}

	svm->vmcb->save.efer = efer | EFER_SVME;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	return 0;
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
					   int emul_type,
					   bool commit_side_effects)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long old_rflags;

	/*
	 * SEV-ES does not expose the next RIP. The RIP update is controlled by
	 * the type of exit and the #VC handler in the guest.
	 */
	if (sev_es_guest(vcpu->kvm))
		goto done;

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (unlikely(!commit_side_effects))
			old_rflags = svm->vmcb->save.rflags;

		if (!kvm_emulate_instruction(vcpu, emul_type))
			return 0;

		if (unlikely(!commit_side_effects))
			svm->vmcb->save.rflags = old_rflags;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}

done:
	if (likely(commit_side_effects))
		svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	return __svm_skip_emulated_instruction(vcpu, EMULTYPE_SKIP, true);
}
static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu, u8 vector)
{
	const int emul_type = EMULTYPE_SKIP | EMULTYPE_SKIP_SOFT_INT |
			      EMULTYPE_SET_SOFT_INT_VECTOR(vector);
	unsigned long rip, old_rip = kvm_rip_read(vcpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Due to architectural shortcomings, the CPU doesn't always provide
	 * NextRIP, e.g. if KVM intercepted an exception that occurred while
	 * the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip
	 * the instruction even if NextRIP is supported to acquire the next
	 * RIP so that it can be shoved into the NextRIP field, otherwise
	 * hardware will fail to advance guest RIP during event injection.
	 * Drop the exception/interrupt if emulation fails and effectively
	 * retry the instruction, it's the least awful option. If NRIPS is
	 * in use, the skip must not commit any side effects such as clearing
	 * the interrupt shadow or RFLAGS.RF.
	 */
	if (!__svm_skip_emulated_instruction(vcpu, emul_type, !nrips))
		return -EIO;

	rip = kvm_rip_read(vcpu);

	/*
	 * Save the injection information, even when using next_rip, as the
	 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
	 * doesn't complete due to a VM-Exit occurring while the CPU is
	 * vectoring the event. Decoding the instruction isn't guaranteed to
	 * work as there may be no backing instruction, e.g. if the event is
	 * being injected by L1 for L2, or if the guest is patching INT3 into
	 * a different instruction.
	 */
	svm->soft_int_injected = true;
	svm->soft_int_csbase = svm->vmcb->save.cs.base;
	svm->soft_int_old_rip = old_rip;
	svm->soft_int_next_rip = rip;

	if (nrips)
		kvm_rip_write(vcpu, old_rip);

	if (static_cpu_has(X86_FEATURE_NRIPS))
		svm->vmcb->control.next_rip = rip;

	return 0;
}

static void svm_inject_exception(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct vcpu_svm *svm = to_svm(vcpu);

	kvm_deliver_exception_payload(vcpu, ex);

	if (kvm_exception_is_soft(ex->vector) &&
	    svm_update_soft_interrupt_rip(vcpu, ex->vector))
		return;

	svm->vmcb->control.event_inj = ex->vector
		| SVM_EVTINJ_VALID
		| (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = ex->error_code;
}

static void svm_init_erratum_383(void)
{
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
		return;

	val |= (1ULL << 47);

	native_write_msr_safe(MSR_AMD64_DC_CFG, val);

	erratum_383_found = true;
}
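/*
 * OSVW (OS Visible Workaround) exposes erratum status to the guest via a
 * pair of MSRs (a length and a status bitmap), letting guest kernels decide
 * for themselves whether a given erratum workaround is needed.
 */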
static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static bool __kvm_is_svm_supported(void)
{
	int cpu = smp_processor_id();
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor != X86_VENDOR_AMD &&
	    c->x86_vendor != X86_VENDOR_HYGON) {
		pr_err("CPU %d isn't AMD or Hygon\n", cpu);
		return false;
	}

	if (!cpu_has(c, X86_FEATURE_SVM)) {
		pr_err("SVM not supported by CPU %d\n", cpu);
		return false;
	}

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
		return false;
	}

	return true;
}

static bool kvm_is_svm_supported(void)
{
	bool supported;

	migrate_disable();
	supported = __kvm_is_svm_supported();
	migrate_enable();

	return supported;
}

static int svm_check_processor_compat(void)
{
	if (!__kvm_is_svm_supported())
		return -EIO;

	return 0;
}

static void __svm_write_tsc_multiplier(u64 multiplier)
{
	if (multiplier == __this_cpu_read(current_tsc_ratio))
		return;

	wrmsrq(MSR_AMD64_TSC_RATIO, multiplier);
	__this_cpu_write(current_tsc_ratio, multiplier);
}

static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{
	return &sd->save_area->host_sev_es_save;
}

static inline void kvm_cpu_svm_disable(void)
{
	uint64_t efer;

	wrmsrq(MSR_VM_HSAVE_PA, 0);
	rdmsrq(MSR_EFER, efer);
	if (efer & EFER_SVME) {
		/*
		 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
		 * NMI aren't blocked.
		 */
		stgi();
		wrmsrq(MSR_EFER, efer & ~EFER_SVME);
	}
}

static void svm_emergency_disable_virtualization_cpu(void)
{
	kvm_rebooting = true;

	kvm_cpu_svm_disable();
}

static void svm_disable_virtualization_cpu(void)
{
	/* Make sure we clean up behind us */
	if (tsc_scaling)
		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);

	kvm_cpu_svm_disable();

	amd_pmu_disable_virt();
}
static int svm_enable_virtualization_cpu(void)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	int me = raw_smp_processor_id();

	rdmsrq(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	sd = per_cpu_ptr(&svm_data, me);
	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	wrmsrq(MSR_EFER, efer | EFER_SVME);

	wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		/*
		 * Set the default value, even if we don't use TSC scaling
		 * to avoid having stale value in the msr
		 */
		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		u64 len, status = 0;
		int err;

		err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
		if (!err)
			err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);

	if (!sd->save_area)
		return;

	kfree(sd->sev_vmcbs);
	__free_page(__sme_pa_to_page(sd->save_area_pa));
	sd->save_area_pa = 0;
	sd->save_area = NULL;
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
	struct page *save_area_page;
	int ret = -ENOMEM;

	memset(sd, 0, sizeof(struct svm_cpu_data));
	save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
	if (!save_area_page)
		return ret;

	ret = sev_cpu_init(sd);
	if (ret)
		goto free_save_area;

	sd->save_area = page_address(save_area_page);
	sd->save_area_pa = __sme_page_pa(save_area_page);
	return 0;

free_save_area:
	__free_page(save_area_page);
	return ret;
}

static void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	recalc_intercepts(svm);
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
	/*
	 * For non-nested case:
	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 *
	 * For nested case:
	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 */
	void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
					    to_svm(vcpu)->msrpm;

	return svm_test_msr_bitmap_write(msrpm, msr);
}
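/*
 * Each MSR is covered by two bits in the MSR permissions map: the low bit
 * controls read access and the high bit controls write access, with a set
 * bit meaning the access is intercepted.
 */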
void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	void *msrpm = svm->msrpm;

	/* Don't disable interception for MSRs userspace wants to handle. */
	if (type & MSR_TYPE_R) {
		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
			svm_clear_msr_bitmap_read(msrpm, msr);
		else
			svm_set_msr_bitmap_read(msrpm, msr);
	}

	if (type & MSR_TYPE_W) {
		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
			svm_clear_msr_bitmap_write(msrpm, msr);
		else
			svm_set_msr_bitmap_write(msrpm, msr);
	}

	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
	svm->nested.force_msr_bitmap_recalc = true;
}

void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	struct page *pages = alloc_pages(gfp_mask, order);
	void *pm;

	if (!pages)
		return NULL;

	/*
	 * Set all bits in the permissions map so that all MSR and I/O accesses
	 * are intercepted by default.
	 */
	pm = page_address(pages);
	memset(pm, 0xff, PAGE_SIZE * (1 << order));

	return pm;
}

static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);

	if (intercept == svm->lbr_msrs_intercepted)
		return;

	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTFROMIP, MSR_TYPE_RW, intercept);
	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTTOIP, MSR_TYPE_RW, intercept);

	if (sev_es_guest(vcpu->kvm))
		svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);

	svm->lbr_msrs_intercepted = intercept;
}

void svm_vcpu_free_msrpm(void *msrpm)
{
	__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}
static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
	svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);

#ifdef CONFIG_X86_64
	svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
	svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
	svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
	svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
	svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
	svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
#endif

	if (lbrv)
		svm_recalc_lbr_msr_intercepts(vcpu);

	if (cpu_feature_enabled(X86_FEATURE_IBPB))
		svm_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
					  !guest_has_pred_cmd_msr(vcpu));

	if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D))
		svm_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
					  !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));

	/*
	 * Disable interception of SPEC_CTRL if KVM doesn't need to manually
	 * context switch the MSR (SPEC_CTRL is virtualized by the CPU), or if
	 * the guest has a non-zero SPEC_CTRL value, i.e. is likely actively
	 * using SPEC_CTRL.
	 */
	if (cpu_feature_enabled(X86_FEATURE_V_SPEC_CTRL))
		svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
					  !guest_has_spec_ctrl_msr(vcpu));
	else
		svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
					  !svm->spec_ctrl);

	/*
	 * Intercept SYSENTER_EIP and SYSENTER_ESP when emulating an Intel CPU,
	 * as AMD hardware only stores 32 bits, whereas Intel CPUs track 64 bits.
	 */
	svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW,
				  guest_cpuid_is_intel_compatible(vcpu));
	svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW,
				  guest_cpuid_is_intel_compatible(vcpu));

	if (kvm_aperfmperf_in_guest(vcpu->kvm)) {
		svm_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, MSR_TYPE_R);
		svm_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
	}

	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
		bool shstk_enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);

		svm_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, !shstk_enabled);
		svm_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, !shstk_enabled);
		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, !shstk_enabled);
		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, !shstk_enabled);
		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, !shstk_enabled);
		svm_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, !shstk_enabled);
	}

	if (sev_es_guest(vcpu->kvm))
		sev_es_recalc_msr_intercepts(vcpu);

	/*
	 * x2APIC intercepts are modified on-demand and cannot be filtered by
	 * userspace.
	 */
}

void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.dbgctl = from_vmcb->save.dbgctl;
	to_vmcb->save.br_from = from_vmcb->save.br_from;
	to_vmcb->save.br_to = from_vmcb->save.br_to;
	to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from;
	to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to;

	vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}

static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
}

void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
	__svm_enable_lbrv(vcpu);
	svm_recalc_lbr_msr_intercepts(vcpu);
}

static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
	KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
	to_svm(vcpu)->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
}

void svm_update_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
	bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
			   (is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
			    (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));

	if (enable_lbrv && !current_enable_lbrv)
		__svm_enable_lbrv(vcpu);
	else if (!enable_lbrv && current_enable_lbrv)
		__svm_disable_lbrv(vcpu);

	/*
	 * During nested transitions, it is possible that the current VMCB has
	 * LBR_CTL set, but the previous LBR_CTL had it cleared (or vice versa).
	 * In this case, even though LBR_CTL does not need an update, intercepts
	 * do, so always recalculate the intercepts here.
	 */
	svm_recalc_lbr_msr_intercepts(vcpu);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}
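/*
 * Dynamic PLE window tuning: grow_ple_window() raises a vCPU's pause filter
 * count (a vCPU that keeps PAUSE-exiting is likely spinning, so let it spin
 * longer before exiting), while shrink_ple_window() pulls the count back
 * toward pause_filter_count when the vCPU is scheduled back in; see
 * svm_vcpu_load().
 */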
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (kvm_pause_in_guest(vcpu->kvm))
		return;

	control->pause_filter_count = __grow_ple_window(old,
							pause_filter_count,
							pause_filter_count_grow,
							pause_filter_count_max);

	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (kvm_pause_in_guest(vcpu->kvm))
		return;

	control->pause_filter_count =
			__shrink_ple_window(old,
					    pause_filter_count,
					    pause_filter_count_shrink,
					    pause_filter_count);
	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void svm_hardware_unsetup(void)
{
	int cpu;

	avic_hardware_unsetup();

	sev_hardware_unsetup();

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.tsc_offset;
}

static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->tsc_ratio_msr;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (to_svm(vcpu)->guest_state_loaded)
		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
	preempt_enable();
}
/* Evaluate instruction intercepts that depend on guest CPUID features. */
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
	 * roots, or if INVPCID is disabled in the guest to inject #UD.
	 */
	if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
		if (!npt_enabled ||
		    !guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_INVPCID))
			svm_set_intercept(svm, INTERCEPT_INVPCID);
		else
			svm_clr_intercept(svm, INTERCEPT_INVPCID);
	}

	if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
		if (guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP))
			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
		else
			svm_set_intercept(svm, INTERCEPT_RDTSCP);
	}

	if (guest_cpuid_is_intel_compatible(vcpu)) {
		svm_set_intercept(svm, INTERCEPT_VMLOAD);
		svm_set_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	} else {
		/*
		 * If hardware supports Virtual VMLOAD VMSAVE then enable it
		 * in VMCB and clear intercepts to avoid #VMEXIT.
		 */
		if (vls) {
			svm_clr_intercept(svm, INTERCEPT_VMLOAD);
			svm_clr_intercept(svm, INTERCEPT_VMSAVE);
			svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
		}
	}
}

static void svm_recalc_intercepts(struct kvm_vcpu *vcpu)
{
	svm_recalc_instruction_intercepts(vcpu);
	svm_recalc_msr_intercepts(vcpu);
}
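/*
 * Establish the default state of vmcb01 for the vCPU: the baseline
 * intercepts, the architectural reset segment state (CS base 0xffff0000,
 * etc.), and the optional NPT/AVIC/vNMI/vGIF controls.
 */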
static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb01.ptr;
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	svm_set_intercept(svm, INTERCEPT_CR0_READ);
	svm_set_intercept(svm, INTERCEPT_CR3_READ);
	svm_set_intercept(svm, INTERCEPT_CR4_READ);
	svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
	svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
	svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(vcpu))
		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		set_exception_intercept(svm, GP_VECTOR);

	svm_set_intercept(svm, INTERCEPT_INTR);
	svm_set_intercept(svm, INTERCEPT_NMI);

	if (intercept_smi)
		svm_set_intercept(svm, INTERCEPT_SMI);

	svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	svm_set_intercept(svm, INTERCEPT_RDPMC);
	svm_set_intercept(svm, INTERCEPT_CPUID);
	svm_set_intercept(svm, INTERCEPT_INVD);
	svm_set_intercept(svm, INTERCEPT_INVLPG);
	svm_set_intercept(svm, INTERCEPT_INVLPGA);
	svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
	svm_set_intercept(svm, INTERCEPT_MSR_PROT);
	svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
	svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
	svm_set_intercept(svm, INTERCEPT_VMRUN);
	svm_set_intercept(svm, INTERCEPT_VMMCALL);
	svm_set_intercept(svm, INTERCEPT_VMLOAD);
	svm_set_intercept(svm, INTERCEPT_VMSAVE);
	svm_set_intercept(svm, INTERCEPT_STGI);
	svm_set_intercept(svm, INTERCEPT_CLGI);
	svm_set_intercept(svm, INTERCEPT_SKINIT);
	svm_set_intercept(svm, INTERCEPT_WBINVD);
	svm_set_intercept(svm, INTERCEPT_XSETBV);
	svm_set_intercept(svm, INTERCEPT_RDPRU);
	svm_set_intercept(svm, INTERCEPT_RSM);

	if (!kvm_mwait_in_guest(vcpu->kvm)) {
		svm_set_intercept(svm, INTERCEPT_MONITOR);
		svm_set_intercept(svm, INTERCEPT_MWAIT);
	}

	if (!kvm_hlt_in_guest(vcpu->kvm)) {
		if (cpu_feature_enabled(X86_FEATURE_IDLE_HLT))
			svm_set_intercept(svm, INTERCEPT_IDLE_HLT);
		else
			svm_set_intercept(svm, INTERCEPT_HLT);
	}

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.base = 0;
	save->gdtr.limit = 0xffff;
	save->idtr.base = 0;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		svm_clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		svm_clr_intercept(svm, INTERCEPT_CR3_READ);
		svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = vcpu->arch.pat;
		save->cr3 = 0;
	}
	svm->current_vmcb->asid_generation = 0;
	svm->asid = 0;

	svm->nested.vmcb12_gpa = INVALID_GPA;
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	if (!kvm_pause_in_guest(vcpu->kvm)) {
		control->pause_filter_count = pause_filter_count;
		if (pause_filter_thresh)
			control->pause_filter_thresh = pause_filter_thresh;
		svm_set_intercept(svm, INTERCEPT_PAUSE);
	} else {
		svm_clr_intercept(svm, INTERCEPT_PAUSE);
	}

	if (kvm_vcpu_apicv_active(vcpu))
		avic_init_vmcb(svm, vmcb);

	if (vnmi)
		svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;

	if (vgif) {
		svm_clr_intercept(svm, INTERCEPT_STGI);
		svm_clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
	}

	if (vcpu->kvm->arch.bus_lock_detection_enabled)
		svm_set_intercept(svm, INTERCEPT_BUSLOCK);

	if (sev_guest(vcpu->kvm))
		sev_init_vmcb(svm, init_event);

	svm_hv_init_vmcb(vmcb);

	kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);

	vmcb_mark_all_dirty(vmcb);

	enable_gif(svm);
}
static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_init_osvw(vcpu);

	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
		vcpu->arch.microcode_version = 0x01000065;
	svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;

	svm->nmi_masked = false;
	svm->awaiting_iret_completion = false;
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->spec_ctrl = 0;
	svm->virt_spec_ctrl = 0;

	init_vmcb(vcpu, init_event);

	if (!init_event)
		__svm_vcpu_reset(vcpu);
}

void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
{
	svm->current_vmcb = target_vmcb;
	svm->vmcb = target_vmcb->ptr;
}

static int svm_vcpu_precreate(struct kvm *kvm)
{
	return avic_alloc_physical_id_table(kvm);
}

static int svm_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;
	struct page *vmcb01_page;
	int err;

	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
	svm = to_svm(vcpu);

	err = -ENOMEM;
	vmcb01_page = snp_safe_alloc_page();
	if (!vmcb01_page)
		goto out;

	err = sev_vcpu_create(vcpu);
	if (err)
		goto error_free_vmcb_page;

	err = avic_init_vcpu(svm);
	if (err)
		goto error_free_sev;

	svm->msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->msrpm) {
		err = -ENOMEM;
		goto error_free_sev;
	}

	svm->x2avic_msrs_intercepted = true;
	svm->lbr_msrs_intercepted = true;

	svm->vmcb01.ptr = page_address(vmcb01_page);
	svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
	svm_switch_vmcb(svm, &svm->vmcb01);

	svm->guest_state_loaded = false;

	return 0;

error_free_sev:
	sev_free_vcpu(vcpu);
error_free_vmcb_page:
	__free_page(vmcb01_page);
out:
	return err;
}

static void svm_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON_ONCE(!list_empty(&svm->ir_list));

	svm_leave_nested(vcpu);
	svm_free_nested(svm);

	sev_free_vcpu(vcpu);

	__free_page(__sme_pa_to_page(svm->vmcb01.pa));
	svm_vcpu_free_msrpm(svm->msrpm);
}
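/*
 * SRSO mitigation: BP_SPEC_REDUCE only needs to be set on a CPU while that
 * CPU can run SVM vCPUs. KVM sets the bit lazily the first time a vCPU runs
 * on a given CPU (see svm_prepare_switch_to_guest()) and clears it on all
 * CPUs once the last VM is destroyed.
 */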
#ifdef CONFIG_CPU_MITIGATIONS
static DEFINE_SPINLOCK(srso_lock);
static atomic_t srso_nr_vms;

static void svm_srso_clear_bp_spec_reduce(void *ign)
{
	struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);

	if (!sd->bp_spec_reduce_set)
		return;

	msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
	sd->bp_spec_reduce_set = false;
}

static void svm_srso_vm_destroy(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
		return;

	if (atomic_dec_return(&srso_nr_vms))
		return;

	guard(spinlock)(&srso_lock);

	/*
	 * Verify a new VM didn't come along, acquire the lock, and increment
	 * the count before this task acquired the lock.
	 */
	if (atomic_read(&srso_nr_vms))
		return;

	on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
}

static void svm_srso_vm_init(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
		return;

	/*
	 * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
	 * transition, i.e. destroying the last VM, is fully complete, e.g. so
	 * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
	 */
	if (atomic_inc_not_zero(&srso_nr_vms))
		return;

	guard(spinlock)(&srso_lock);

	atomic_inc(&srso_nr_vms);
}
#else
static void svm_srso_vm_init(void) { }
static void svm_srso_vm_destroy(void) { }
#endif
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);

	if (sev_es_guest(vcpu->kvm))
		sev_es_unmap_ghcb(svm);

	if (svm->guest_state_loaded)
		return;

	/*
	 * Save additional host state that will be restored on VMEXIT (sev-es)
	 * or subsequent vmload of host save area.
	 */
	vmsave(sd->save_area_pa);
	if (sev_es_guest(vcpu->kvm))
		sev_es_prepare_switch_to_guest(svm, sev_es_host_save_area(sd));

	if (tsc_scaling)
		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);

	/*
	 * TSC_AUX is always virtualized (context switched by hardware) for
	 * SEV-ES guests when the feature is available. For non-SEV-ES guests,
	 * context switch TSC_AUX via the user_return MSR infrastructure (not
	 * all CPUs support TSC_AUX virtualization).
	 */
	if (likely(tsc_aux_uret_slot >= 0) &&
	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);

	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
	    !sd->bp_spec_reduce_set) {
		sd->bp_spec_reduce_set = true;
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
	}
	svm->guest_state_loaded = true;
}

static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->guest_state_loaded = false;
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);

	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_load(vcpu, cpu);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_put(vcpu);

	svm_prepare_host_switch(vcpu);

	++vcpu->stat.host_state_reload;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long rflags = svm->vmcb->save.rflags;

	if (svm->nmi_singlestep) {
		/* Hide our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			rflags &= ~X86_EFLAGS_RF;
	}
	return rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (to_svm(vcpu)->nmi_singlestep)
		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
{
	struct vmcb *vmcb = to_svm(vcpu)->vmcb;

	return sev_es_guest(vcpu->kvm)
		? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
		: kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	kvm_register_mark_available(vcpu, reg);

	switch (reg) {
	case VCPU_EXREG_PDPTR:
		/*
		 * When !npt_enabled, mmu->pdptrs[] is already available since
		 * it is always updated per SDM when moving to CRs.
		 */
		if (npt_enabled)
			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
		break;
	default:
		KVM_BUG_ON(1, vcpu->kvm);
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control;

	/*
	 * The following fields are ignored when AVIC is enabled
	 */
	WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));

	svm_set_intercept(svm, INTERCEPT_VINTR);

	/*
	 * Recalculating intercepts may have cleared the VINTR intercept. If
	 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
	 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
	 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
	 * interrupts will never be unblocked while L2 is running.
	 */
	if (!svm_is_intercept(svm, INTERCEPT_VINTR))
		return;

	/*
	 * This is just a dummy VINTR to actually cause a vmexit to happen.
	 * Actual injection of virtual interrupts happens through EVENTINJ.
	 */
	control = &svm->vmcb->control;
	control->int_vector = 0x0;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm_clr_intercept(svm, INTERCEPT_VINTR);

	/* Drop int_ctl fields related to VINTR injection. */
	svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
	if (is_guest_mode(&svm->vcpu)) {
		svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;

		WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
			(svm->nested.ctl.int_ctl & V_TPR_MASK));

		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
			V_IRQ_INJECTION_BITS_MASK;

		svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	}

	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
}
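/*
 * Note: FS, GS, TR and LDTR come from vmcb01 rather than the current VMCB.
 * Those segments are part of the state swapped by VMLOAD/VMSAVE, which KVM
 * keeps in vmcb01 even while L2 is running.
 */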
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
	struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save01->fs;
	case VCPU_SREG_GS: return &save01->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save01->tr;
	case VCPU_SREG_LDTR: return &save01->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present;

	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		/* This is symmetric with svm_set_segment() */
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
		break;
	}
}
static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}

static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * For guests that don't set guest_state_protected, the cr3 update is
	 * handled via kvm_mmu_load() while entering the guest. For guests
	 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
	 * the VMCB save area now, since the save area will become the initial
	 * contents of the VMSA, and future VMCB save area updates won't be
	 * seen.
	 */
	if (sev_es_guest(vcpu->kvm)) {
		svm->vmcb->save.cr3 = cr3;
		vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	}
}

static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	return true;
}

void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 hcr0 = cr0;
	bool old_paging = is_paging(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			if (!vcpu->arch.guest_state_protected)
				svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			if (!vcpu->arch.guest_state_protected)
				svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled) {
		hcr0 |= X86_CR0_PG | X86_CR0_WP;
		if (old_paging != is_paging(vcpu))
			svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
	}

	/*
	 * Re-enable caching here because the QEMU BIOS
	 * does not do it - this results in some delay at
	 * reboot.
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);

	svm->vmcb->save.cr0 = hcr0;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);

	/*
	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
	if (sev_es_guest(vcpu->kvm))
		return;

	if (hcr0 == cr0) {
		/* Selective CR0 write remains on. */
		svm_clr_intercept(svm, INTERCEPT_CR0_READ);
		svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		svm_set_intercept(svm, INTERCEPT_CR0_READ);
		svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return true;
}
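/*
 * With shadow paging, the hardware CR4 below diverges from the guest's view:
 * PAE is forced on (KVM's shadow page tables use the PAE/long-mode format),
 * and SMEP/SMAP/PKE are hidden while the guest has paging disabled, since
 * the guest actually runs with (shadow) paging enabled.
 */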
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = vcpu->arch.cr4;

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled) {
		cr4 |= X86_CR4_PAE;

		if (!is_paging(vcpu))
			cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
	}
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		vcpu->arch.cpuid_dynamic_bits_dirty = true;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

	/*
	 * This is always accurate, except if SYSRET returned to a segment
	 * with SS.DPL != 3. Intel does not have this quirk, and always
	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
	 * would entail passing the CPL to userspace and back.
	 */
	if (seg == VCPU_SREG_SS)
		/* This is symmetric with svm_get_segment() */
		svm->vmcb->save.cpl = (var->dpl & 3);

	vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
}

static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, BP_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	}
}
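/*
 * Hand out the next host-side ASID. When the ASID space is exhausted, start
 * a new generation: request a full TLB flush for all ASIDs and restart
 * numbering at min_asid (ASIDs below min_asid are reserved for SEV guests).
 */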
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = sd->min_asid;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
	}

	svm->current_vmcb->asid_generation = sd->asid_generation;
	svm->asid = sd->next_asid++;
}

static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vmcb *vmcb = to_svm(vcpu)->vmcb;

	if (vcpu->arch.guest_state_protected)
		return;

	if (unlikely(value != vmcb->save.dr6)) {
		vmcb->save.dr6 = value;
		vmcb_mark_dirty(vmcb, VMCB_DR);
	}
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm)))
		return;

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	/*
	 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
	 * because db_interception might need it. We can do it before vmentry.
	 */
	vcpu->arch.dr6 = svm->vmcb->save.dr6;
	vcpu->arch.dr7 = svm->vmcb->save.dr7;
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		return;

	svm->vmcb->save.dr7 = value;
	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
}

static int pf_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u64 fault_address = svm->vmcb->control.exit_info_2;
	u64 error_code = svm->vmcb->control.exit_info_1;

	return kvm_handle_page_fault(vcpu, error_code, fault_address,
			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);
}
1878 */ 1879 if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK)) 1880 error_code &= ~PFERR_SYNTHETIC_MASK; 1881 1882 if (sev_snp_guest(vcpu->kvm) && (error_code & PFERR_GUEST_ENC_MASK)) 1883 error_code |= PFERR_PRIVATE_ACCESS; 1884 1885 trace_kvm_page_fault(vcpu, fault_address, error_code); 1886 rc = kvm_mmu_page_fault(vcpu, fault_address, error_code, 1887 static_cpu_has(X86_FEATURE_DECODEASSISTS) ? 1888 svm->vmcb->control.insn_bytes : NULL, 1889 svm->vmcb->control.insn_len); 1890 1891 if (rc > 0 && error_code & PFERR_GUEST_RMP_MASK) 1892 sev_handle_rmp_fault(vcpu, fault_address, error_code); 1893 1894 return rc; 1895 } 1896 1897 static int db_interception(struct kvm_vcpu *vcpu) 1898 { 1899 struct kvm_run *kvm_run = vcpu->run; 1900 struct vcpu_svm *svm = to_svm(vcpu); 1901 1902 if (!(vcpu->guest_debug & 1903 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && 1904 !svm->nmi_singlestep) { 1905 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; 1906 kvm_queue_exception_p(vcpu, DB_VECTOR, payload); 1907 return 1; 1908 } 1909 1910 if (svm->nmi_singlestep) { 1911 disable_nmi_singlestep(svm); 1912 /* Make sure we check for pending NMIs upon entry */ 1913 kvm_make_request(KVM_REQ_EVENT, vcpu); 1914 } 1915 1916 if (vcpu->guest_debug & 1917 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { 1918 kvm_run->exit_reason = KVM_EXIT_DEBUG; 1919 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; 1920 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; 1921 kvm_run->debug.arch.pc = 1922 svm->vmcb->save.cs.base + svm->vmcb->save.rip; 1923 kvm_run->debug.arch.exception = DB_VECTOR; 1924 return 0; 1925 } 1926 1927 return 1; 1928 } 1929 1930 static int bp_interception(struct kvm_vcpu *vcpu) 1931 { 1932 struct vcpu_svm *svm = to_svm(vcpu); 1933 struct kvm_run *kvm_run = vcpu->run; 1934 1935 kvm_run->exit_reason = KVM_EXIT_DEBUG; 1936 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; 1937 kvm_run->debug.arch.exception = BP_VECTOR; 1938 return 0; 1939 } 1940 1941 static int ud_interception(struct kvm_vcpu *vcpu) 1942 { 1943 return handle_ud(vcpu); 1944 } 1945 1946 static int ac_interception(struct kvm_vcpu *vcpu) 1947 { 1948 kvm_queue_exception_e(vcpu, AC_VECTOR, 0); 1949 return 1; 1950 } 1951 1952 static bool is_erratum_383(void) 1953 { 1954 int i; 1955 u64 value; 1956 1957 if (!erratum_383_found) 1958 return false; 1959 1960 if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value)) 1961 return false; 1962 1963 /* Bit 62 may or may not be set for this mce */ 1964 value &= ~(1ULL << 62); 1965 1966 if (value != 0xb600000000010015ULL) 1967 return false; 1968 1969 /* Clear MCi_STATUS registers */ 1970 for (i = 0; i < 6; ++i) 1971 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0); 1972 1973 if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) { 1974 value &= ~(1ULL << 2); 1975 native_write_msr_safe(MSR_IA32_MCG_STATUS, value); 1976 } 1977 1978 /* Flush tlb to evict multi-match entries */ 1979 __flush_tlb_all(); 1980 1981 return true; 1982 } 1983 1984 static void svm_handle_mce(struct kvm_vcpu *vcpu) 1985 { 1986 if (is_erratum_383()) { 1987 /* 1988 * Erratum 383 triggered. Guest state is corrupt so kill the 1989 * guest. 1990 */ 1991 pr_err("Guest triggered AMD Erratum 383\n"); 1992 1993 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 1994 1995 return; 1996 } 1997 1998 /* 1999 * On an #MC intercept the MCE handler is not called automatically in 2000 * the host. So do it by hand here. 
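	 * (kvm_machine_check() calls into the host's regular #MC handler, so
	 * the event is processed as if it had been taken on bare metal.)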
2001 	 */
2002 	kvm_machine_check();
2003 }
2004 
2005 static int mc_interception(struct kvm_vcpu *vcpu)
2006 {
2007 	return 1;
2008 }
2009 
2010 static int shutdown_interception(struct kvm_vcpu *vcpu)
2011 {
2012 	struct kvm_run *kvm_run = vcpu->run;
2013 	struct vcpu_svm *svm = to_svm(vcpu);
2014 
2015 
2016 	/*
2017 	 * VMCB is undefined after a SHUTDOWN intercept.  INIT the vCPU to put
2018 	 * the VMCB in a known good state.  Unfortunately, KVM doesn't have
2019 	 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2020 	 * userspace.  From a platform perspective, INIT is acceptable behavior,
2021 	 * as there exist bare metal platforms that automatically INIT the CPU
2022 	 * in response to shutdown.
2023 	 *
2024 	 * The VM save area for SEV-ES guests has already been encrypted so it
2025 	 * cannot be reinitialized, i.e. synthesizing INIT is futile.
2026 	 */
2027 	if (!sev_es_guest(vcpu->kvm)) {
2028 		clear_page(svm->vmcb);
2029 #ifdef CONFIG_KVM_SMM
2030 		if (is_smm(vcpu))
2031 			kvm_smm_changed(vcpu, false);
2032 #endif
2033 		kvm_vcpu_reset(vcpu, true);
2034 	}
2035 
2036 	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2037 	return 0;
2038 }
2039 
2040 static int io_interception(struct kvm_vcpu *vcpu)
2041 {
2042 	struct vcpu_svm *svm = to_svm(vcpu);
2043 	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2044 	int size, in, string;
2045 	unsigned port;
2046 
2047 	++vcpu->stat.io_exits;
2048 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
2049 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2050 	port = io_info >> 16;
2051 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2052 
2053 	if (string) {
2054 		if (sev_es_guest(vcpu->kvm))
2055 			return sev_es_string_io(svm, size, port, in);
2056 		else
2057 			return kvm_emulate_instruction(vcpu, 0);
2058 	}
2059 
2060 	svm->next_rip = svm->vmcb->control.exit_info_2;
2061 
2062 	return kvm_fast_pio(vcpu, size, port, in);
2063 }
2064 
2065 static int nmi_interception(struct kvm_vcpu *vcpu)
2066 {
2067 	return 1;
2068 }
2069 
2070 static int smi_interception(struct kvm_vcpu *vcpu)
2071 {
2072 	return 1;
2073 }
2074 
2075 static int intr_interception(struct kvm_vcpu *vcpu)
2076 {
2077 	++vcpu->stat.irq_exits;
2078 	return 1;
2079 }
2080 
2081 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2082 {
2083 	struct vcpu_svm *svm = to_svm(vcpu);
2084 	struct vmcb *vmcb12;
2085 	struct kvm_host_map map;
2086 	int ret;
2087 
2088 	if (nested_svm_check_permissions(vcpu))
2089 		return 1;
2090 
2091 	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2092 	if (ret) {
2093 		if (ret == -EINVAL)
2094 			kvm_inject_gp(vcpu, 0);
2095 		return 1;
2096 	}
2097 
2098 	vmcb12 = map.hva;
2099 
2100 	ret = kvm_skip_emulated_instruction(vcpu);
2101 
2102 	if (vmload) {
2103 		svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
2104 		svm->sysenter_eip_hi = 0;
2105 		svm->sysenter_esp_hi = 0;
2106 	} else {
2107 		svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
2108 	}
2109 
2110 	kvm_vcpu_unmap(vcpu, &map);
2111 
2112 	return ret;
2113 }
2114 
2115 static int vmload_interception(struct kvm_vcpu *vcpu)
2116 {
2117 	return vmload_vmsave_interception(vcpu, true);
2118 }
2119 
2120 static int vmsave_interception(struct kvm_vcpu *vcpu)
2121 {
2122 	return vmload_vmsave_interception(vcpu, false);
2123 }
2124 
2125 static int vmrun_interception(struct kvm_vcpu *vcpu)
2126 {
2127 	if (nested_svm_check_permissions(vcpu))
2128 		return 1;
2129 
2130 	return nested_svm_vmrun(vcpu);
2131 }
2132 
2133 enum {
2134 	NONE_SVM_INSTR,
2135 	SVM_INSTR_VMRUN,
2136 	SVM_INSTR_VMLOAD,
2137 	SVM_INSTR_VMSAVE,
2138 };
2139 
2140 /* Return NONE_SVM_INSTR
if not SVM instrs, otherwise return decode result */ 2141 static int svm_instr_opcode(struct kvm_vcpu *vcpu) 2142 { 2143 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 2144 2145 if (ctxt->b != 0x1 || ctxt->opcode_len != 2) 2146 return NONE_SVM_INSTR; 2147 2148 switch (ctxt->modrm) { 2149 case 0xd8: /* VMRUN */ 2150 return SVM_INSTR_VMRUN; 2151 case 0xda: /* VMLOAD */ 2152 return SVM_INSTR_VMLOAD; 2153 case 0xdb: /* VMSAVE */ 2154 return SVM_INSTR_VMSAVE; 2155 default: 2156 break; 2157 } 2158 2159 return NONE_SVM_INSTR; 2160 } 2161 2162 static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) 2163 { 2164 const int guest_mode_exit_codes[] = { 2165 [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, 2166 [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, 2167 [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, 2168 }; 2169 int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = { 2170 [SVM_INSTR_VMRUN] = vmrun_interception, 2171 [SVM_INSTR_VMLOAD] = vmload_interception, 2172 [SVM_INSTR_VMSAVE] = vmsave_interception, 2173 }; 2174 struct vcpu_svm *svm = to_svm(vcpu); 2175 int ret; 2176 2177 if (is_guest_mode(vcpu)) { 2178 /* Returns '1' or -errno on failure, '0' on success. */ 2179 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); 2180 if (ret) 2181 return ret; 2182 return 1; 2183 } 2184 return svm_instr_handlers[opcode](vcpu); 2185 } 2186 2187 /* 2188 * #GP handling code. Note that #GP can be triggered under the following two 2189 * cases: 2190 * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on 2191 * some AMD CPUs when EAX of these instructions are in the reserved memory 2192 * regions (e.g. SMM memory on host). 2193 * 2) VMware backdoor 2194 */ 2195 static int gp_interception(struct kvm_vcpu *vcpu) 2196 { 2197 struct vcpu_svm *svm = to_svm(vcpu); 2198 u32 error_code = svm->vmcb->control.exit_info_1; 2199 int opcode; 2200 2201 /* Both #GP cases have zero error_code */ 2202 if (error_code) 2203 goto reinject; 2204 2205 /* Decode the instruction for usage later */ 2206 if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) 2207 goto reinject; 2208 2209 opcode = svm_instr_opcode(vcpu); 2210 2211 if (opcode == NONE_SVM_INSTR) { 2212 if (!enable_vmware_backdoor) 2213 goto reinject; 2214 2215 /* 2216 * VMware backdoor emulation on #GP interception only handles 2217 * IN{S}, OUT{S}, and RDPMC. 2218 */ 2219 if (!is_guest_mode(vcpu)) 2220 return kvm_emulate_instruction(vcpu, 2221 EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); 2222 } else { 2223 /* All SVM instructions expect page aligned RAX */ 2224 if (svm->vmcb->save.rax & ~PAGE_MASK) 2225 goto reinject; 2226 2227 return emulate_svm_instr(vcpu, opcode); 2228 } 2229 2230 reinject: 2231 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 2232 return 1; 2233 } 2234 2235 void svm_set_gif(struct vcpu_svm *svm, bool value) 2236 { 2237 if (value) { 2238 /* 2239 * If VGIF is enabled, the STGI intercept is only added to 2240 * detect the opening of the SMI/NMI window; remove it now. 2241 * Likewise, clear the VINTR intercept, we will set it 2242 * again while processing KVM_REQ_EVENT if needed. 
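		 * (With vGIF, the GIF value itself lives in int_ctl and is
		 * flipped by enable_gif()/disable_gif(); the STGI intercept is
		 * only re-armed on demand, e.g. by svm_enable_nmi_window().)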
2243 */ 2244 if (vgif) 2245 svm_clr_intercept(svm, INTERCEPT_STGI); 2246 if (svm_is_intercept(svm, INTERCEPT_VINTR)) 2247 svm_clear_vintr(svm); 2248 2249 enable_gif(svm); 2250 if (svm->vcpu.arch.smi_pending || 2251 svm->vcpu.arch.nmi_pending || 2252 kvm_cpu_has_injectable_intr(&svm->vcpu) || 2253 kvm_apic_has_pending_init_or_sipi(&svm->vcpu)) 2254 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); 2255 } else { 2256 disable_gif(svm); 2257 2258 /* 2259 * After a CLGI no interrupts should come. But if vGIF is 2260 * in use, we still rely on the VINTR intercept (rather than 2261 * STGI) to detect an open interrupt window. 2262 */ 2263 if (!vgif) 2264 svm_clear_vintr(svm); 2265 } 2266 } 2267 2268 static int stgi_interception(struct kvm_vcpu *vcpu) 2269 { 2270 int ret; 2271 2272 if (nested_svm_check_permissions(vcpu)) 2273 return 1; 2274 2275 ret = kvm_skip_emulated_instruction(vcpu); 2276 svm_set_gif(to_svm(vcpu), true); 2277 return ret; 2278 } 2279 2280 static int clgi_interception(struct kvm_vcpu *vcpu) 2281 { 2282 int ret; 2283 2284 if (nested_svm_check_permissions(vcpu)) 2285 return 1; 2286 2287 ret = kvm_skip_emulated_instruction(vcpu); 2288 svm_set_gif(to_svm(vcpu), false); 2289 return ret; 2290 } 2291 2292 static int invlpga_interception(struct kvm_vcpu *vcpu) 2293 { 2294 gva_t gva = kvm_rax_read(vcpu); 2295 u32 asid = kvm_rcx_read(vcpu); 2296 2297 /* FIXME: Handle an address size prefix. */ 2298 if (!is_long_mode(vcpu)) 2299 gva = (u32)gva; 2300 2301 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); 2302 2303 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ 2304 kvm_mmu_invlpg(vcpu, gva); 2305 2306 return kvm_skip_emulated_instruction(vcpu); 2307 } 2308 2309 static int skinit_interception(struct kvm_vcpu *vcpu) 2310 { 2311 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); 2312 2313 kvm_queue_exception(vcpu, UD_VECTOR); 2314 return 1; 2315 } 2316 2317 static int task_switch_interception(struct kvm_vcpu *vcpu) 2318 { 2319 struct vcpu_svm *svm = to_svm(vcpu); 2320 u16 tss_selector; 2321 int reason; 2322 int int_type = svm->vmcb->control.exit_int_info & 2323 SVM_EXITINTINFO_TYPE_MASK; 2324 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; 2325 uint32_t type = 2326 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; 2327 uint32_t idt_v = 2328 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; 2329 bool has_error_code = false; 2330 u32 error_code = 0; 2331 2332 tss_selector = (u16)svm->vmcb->control.exit_info_1; 2333 2334 if (svm->vmcb->control.exit_info_2 & 2335 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) 2336 reason = TASK_SWITCH_IRET; 2337 else if (svm->vmcb->control.exit_info_2 & 2338 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) 2339 reason = TASK_SWITCH_JMP; 2340 else if (idt_v) 2341 reason = TASK_SWITCH_GATE; 2342 else 2343 reason = TASK_SWITCH_CALL; 2344 2345 if (reason == TASK_SWITCH_GATE) { 2346 switch (type) { 2347 case SVM_EXITINTINFO_TYPE_NMI: 2348 vcpu->arch.nmi_injected = false; 2349 break; 2350 case SVM_EXITINTINFO_TYPE_EXEPT: 2351 if (svm->vmcb->control.exit_info_2 & 2352 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { 2353 has_error_code = true; 2354 error_code = 2355 (u32)svm->vmcb->control.exit_info_2; 2356 } 2357 kvm_clear_exception_queue(vcpu); 2358 break; 2359 case SVM_EXITINTINFO_TYPE_INTR: 2360 case SVM_EXITINTINFO_TYPE_SOFT: 2361 kvm_clear_interrupt_queue(vcpu); 2362 break; 2363 default: 2364 break; 2365 } 2366 } 2367 2368 if (reason != TASK_SWITCH_GATE || 2369 int_type == 
SVM_EXITINTINFO_TYPE_SOFT || 2370 (int_type == SVM_EXITINTINFO_TYPE_EXEPT && 2371 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { 2372 if (!svm_skip_emulated_instruction(vcpu)) 2373 return 0; 2374 } 2375 2376 if (int_type != SVM_EXITINTINFO_TYPE_SOFT) 2377 int_vec = -1; 2378 2379 return kvm_task_switch(vcpu, tss_selector, int_vec, reason, 2380 has_error_code, error_code); 2381 } 2382 2383 static void svm_clr_iret_intercept(struct vcpu_svm *svm) 2384 { 2385 if (!sev_es_guest(svm->vcpu.kvm)) 2386 svm_clr_intercept(svm, INTERCEPT_IRET); 2387 } 2388 2389 static void svm_set_iret_intercept(struct vcpu_svm *svm) 2390 { 2391 if (!sev_es_guest(svm->vcpu.kvm)) 2392 svm_set_intercept(svm, INTERCEPT_IRET); 2393 } 2394 2395 static int iret_interception(struct kvm_vcpu *vcpu) 2396 { 2397 struct vcpu_svm *svm = to_svm(vcpu); 2398 2399 WARN_ON_ONCE(sev_es_guest(vcpu->kvm)); 2400 2401 ++vcpu->stat.nmi_window_exits; 2402 svm->awaiting_iret_completion = true; 2403 2404 svm_clr_iret_intercept(svm); 2405 svm->nmi_iret_rip = kvm_rip_read(vcpu); 2406 2407 kvm_make_request(KVM_REQ_EVENT, vcpu); 2408 return 1; 2409 } 2410 2411 static int invlpg_interception(struct kvm_vcpu *vcpu) 2412 { 2413 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) 2414 return kvm_emulate_instruction(vcpu, 0); 2415 2416 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); 2417 return kvm_skip_emulated_instruction(vcpu); 2418 } 2419 2420 static int emulate_on_interception(struct kvm_vcpu *vcpu) 2421 { 2422 return kvm_emulate_instruction(vcpu, 0); 2423 } 2424 2425 static int rsm_interception(struct kvm_vcpu *vcpu) 2426 { 2427 return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2); 2428 } 2429 2430 static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, 2431 unsigned long val) 2432 { 2433 struct vcpu_svm *svm = to_svm(vcpu); 2434 unsigned long cr0 = vcpu->arch.cr0; 2435 bool ret = false; 2436 2437 if (!is_guest_mode(vcpu) || 2438 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) 2439 return false; 2440 2441 cr0 &= ~SVM_CR0_SELECTIVE_MASK; 2442 val &= ~SVM_CR0_SELECTIVE_MASK; 2443 2444 if (cr0 ^ val) { 2445 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; 2446 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); 2447 } 2448 2449 return ret; 2450 } 2451 2452 #define CR_VALID (1ULL << 63) 2453 2454 static int cr_interception(struct kvm_vcpu *vcpu) 2455 { 2456 struct vcpu_svm *svm = to_svm(vcpu); 2457 int reg, cr; 2458 unsigned long val; 2459 int err; 2460 2461 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) 2462 return emulate_on_interception(vcpu); 2463 2464 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) 2465 return emulate_on_interception(vcpu); 2466 2467 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; 2468 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) 2469 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; 2470 else 2471 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; 2472 2473 err = 0; 2474 if (cr >= 16) { /* mov to cr */ 2475 cr -= 16; 2476 val = kvm_register_read(vcpu, reg); 2477 trace_kvm_cr_write(cr, val); 2478 switch (cr) { 2479 case 0: 2480 if (!check_selective_cr0_intercepted(vcpu, val)) 2481 err = kvm_set_cr0(vcpu, val); 2482 else 2483 return 1; 2484 2485 break; 2486 case 3: 2487 err = kvm_set_cr3(vcpu, val); 2488 break; 2489 case 4: 2490 err = kvm_set_cr4(vcpu, val); 2491 break; 2492 case 8: 2493 err = kvm_set_cr8(vcpu, val); 2494 break; 2495 default: 2496 WARN(1, "unhandled write to CR%d", cr); 2497 
kvm_queue_exception(vcpu, UD_VECTOR); 2498 return 1; 2499 } 2500 } else { /* mov from cr */ 2501 switch (cr) { 2502 case 0: 2503 val = kvm_read_cr0(vcpu); 2504 break; 2505 case 2: 2506 val = vcpu->arch.cr2; 2507 break; 2508 case 3: 2509 val = kvm_read_cr3(vcpu); 2510 break; 2511 case 4: 2512 val = kvm_read_cr4(vcpu); 2513 break; 2514 case 8: 2515 val = kvm_get_cr8(vcpu); 2516 break; 2517 default: 2518 WARN(1, "unhandled read from CR%d", cr); 2519 kvm_queue_exception(vcpu, UD_VECTOR); 2520 return 1; 2521 } 2522 kvm_register_write(vcpu, reg, val); 2523 trace_kvm_cr_read(cr, val); 2524 } 2525 return kvm_complete_insn_gp(vcpu, err); 2526 } 2527 2528 static int cr_trap(struct kvm_vcpu *vcpu) 2529 { 2530 struct vcpu_svm *svm = to_svm(vcpu); 2531 unsigned long old_value, new_value; 2532 unsigned int cr; 2533 int ret = 0; 2534 2535 new_value = (unsigned long)svm->vmcb->control.exit_info_1; 2536 2537 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; 2538 switch (cr) { 2539 case 0: 2540 old_value = kvm_read_cr0(vcpu); 2541 svm_set_cr0(vcpu, new_value); 2542 2543 kvm_post_set_cr0(vcpu, old_value, new_value); 2544 break; 2545 case 4: 2546 old_value = kvm_read_cr4(vcpu); 2547 svm_set_cr4(vcpu, new_value); 2548 2549 kvm_post_set_cr4(vcpu, old_value, new_value); 2550 break; 2551 case 8: 2552 ret = kvm_set_cr8(vcpu, new_value); 2553 break; 2554 default: 2555 WARN(1, "unhandled CR%d write trap", cr); 2556 kvm_queue_exception(vcpu, UD_VECTOR); 2557 return 1; 2558 } 2559 2560 return kvm_complete_insn_gp(vcpu, ret); 2561 } 2562 2563 static int dr_interception(struct kvm_vcpu *vcpu) 2564 { 2565 struct vcpu_svm *svm = to_svm(vcpu); 2566 int reg, dr; 2567 int err = 0; 2568 2569 /* 2570 * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT 2571 * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early. 2572 */ 2573 if (sev_es_guest(vcpu->kvm)) 2574 return 1; 2575 2576 if (vcpu->guest_debug == 0) { 2577 /* 2578 * No more DR vmexits; force a reload of the debug registers 2579 * and reenter on this instruction. The next vmexit will 2580 * retrieve the full state of the debug registers. 2581 */ 2582 clr_dr_intercepts(svm); 2583 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 2584 return 1; 2585 } 2586 2587 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) 2588 return emulate_on_interception(vcpu); 2589 2590 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; 2591 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; 2592 if (dr >= 16) { /* mov to DRn */ 2593 dr -= 16; 2594 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); 2595 } else { 2596 kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr)); 2597 } 2598 2599 return kvm_complete_insn_gp(vcpu, err); 2600 } 2601 2602 static int cr8_write_interception(struct kvm_vcpu *vcpu) 2603 { 2604 int r; 2605 2606 u8 cr8_prev = kvm_get_cr8(vcpu); 2607 /* instruction emulation calls kvm_set_cr8() */ 2608 r = cr_interception(vcpu); 2609 if (lapic_in_kernel(vcpu)) 2610 return r; 2611 if (cr8_prev <= kvm_get_cr8(vcpu)) 2612 return r; 2613 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 2614 return 0; 2615 } 2616 2617 static int efer_trap(struct kvm_vcpu *vcpu) 2618 { 2619 struct msr_data msr_info; 2620 int ret; 2621 2622 /* 2623 * Clear the EFER_SVME bit from EFER. The SVM code always sets this 2624 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against 2625 * whether the guest has X86_FEATURE_SVM - this avoids a failure if 2626 * the guest doesn't have X86_FEATURE_SVM. 
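	 * (For this trap-style intercept, exit_info_1 carries the value the
	 * guest attempted to write, so it can simply be replayed through the
	 * common MSR write path below.)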
2627 */ 2628 msr_info.host_initiated = false; 2629 msr_info.index = MSR_EFER; 2630 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; 2631 ret = kvm_set_msr_common(vcpu, &msr_info); 2632 2633 return kvm_complete_insn_gp(vcpu, ret); 2634 } 2635 2636 static int svm_get_feature_msr(u32 msr, u64 *data) 2637 { 2638 *data = 0; 2639 2640 switch (msr) { 2641 case MSR_AMD64_DE_CFG: 2642 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) 2643 *data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; 2644 break; 2645 default: 2646 return KVM_MSR_RET_UNSUPPORTED; 2647 } 2648 2649 return 0; 2650 } 2651 2652 static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, 2653 struct msr_data *msr_info) 2654 { 2655 return sev_es_guest(vcpu->kvm) && vcpu->arch.guest_state_protected && 2656 msr_info->index != MSR_IA32_XSS && 2657 !msr_write_intercepted(vcpu, msr_info->index); 2658 } 2659 2660 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 2661 { 2662 struct vcpu_svm *svm = to_svm(vcpu); 2663 2664 if (sev_es_prevent_msr_access(vcpu, msr_info)) { 2665 msr_info->data = 0; 2666 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; 2667 } 2668 2669 switch (msr_info->index) { 2670 case MSR_AMD64_TSC_RATIO: 2671 if (!msr_info->host_initiated && 2672 !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) 2673 return 1; 2674 msr_info->data = svm->tsc_ratio_msr; 2675 break; 2676 case MSR_STAR: 2677 msr_info->data = svm->vmcb01.ptr->save.star; 2678 break; 2679 #ifdef CONFIG_X86_64 2680 case MSR_LSTAR: 2681 msr_info->data = svm->vmcb01.ptr->save.lstar; 2682 break; 2683 case MSR_CSTAR: 2684 msr_info->data = svm->vmcb01.ptr->save.cstar; 2685 break; 2686 case MSR_GS_BASE: 2687 msr_info->data = svm->vmcb01.ptr->save.gs.base; 2688 break; 2689 case MSR_FS_BASE: 2690 msr_info->data = svm->vmcb01.ptr->save.fs.base; 2691 break; 2692 case MSR_KERNEL_GS_BASE: 2693 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; 2694 break; 2695 case MSR_SYSCALL_MASK: 2696 msr_info->data = svm->vmcb01.ptr->save.sfmask; 2697 break; 2698 #endif 2699 case MSR_IA32_SYSENTER_CS: 2700 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; 2701 break; 2702 case MSR_IA32_SYSENTER_EIP: 2703 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; 2704 if (guest_cpuid_is_intel_compatible(vcpu)) 2705 msr_info->data |= (u64)svm->sysenter_eip_hi << 32; 2706 break; 2707 case MSR_IA32_SYSENTER_ESP: 2708 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; 2709 if (guest_cpuid_is_intel_compatible(vcpu)) 2710 msr_info->data |= (u64)svm->sysenter_esp_hi << 32; 2711 break; 2712 case MSR_IA32_S_CET: 2713 msr_info->data = svm->vmcb->save.s_cet; 2714 break; 2715 case MSR_IA32_INT_SSP_TAB: 2716 msr_info->data = svm->vmcb->save.isst_addr; 2717 break; 2718 case MSR_KVM_INTERNAL_GUEST_SSP: 2719 msr_info->data = svm->vmcb->save.ssp; 2720 break; 2721 case MSR_TSC_AUX: 2722 msr_info->data = svm->tsc_aux; 2723 break; 2724 case MSR_IA32_DEBUGCTLMSR: 2725 msr_info->data = svm->vmcb->save.dbgctl; 2726 break; 2727 case MSR_IA32_LASTBRANCHFROMIP: 2728 msr_info->data = svm->vmcb->save.br_from; 2729 break; 2730 case MSR_IA32_LASTBRANCHTOIP: 2731 msr_info->data = svm->vmcb->save.br_to; 2732 break; 2733 case MSR_IA32_LASTINTFROMIP: 2734 msr_info->data = svm->vmcb->save.last_excp_from; 2735 break; 2736 case MSR_IA32_LASTINTTOIP: 2737 msr_info->data = svm->vmcb->save.last_excp_to; 2738 break; 2739 case MSR_VM_HSAVE_PA: 2740 msr_info->data = svm->nested.hsave_msr; 2741 break; 2742 case MSR_VM_CR: 2743 msr_info->data = svm->nested.vm_cr_msr; 2744 break; 
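	/*
	 * Note, SPEC_CTRL reads come from the VMCB when the CPU virtualizes
	 * the MSR (V_SPEC_CTRL) and from KVM's software shadow otherwise,
	 * mirroring the write side in svm_set_msr().
	 */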
2745 	case MSR_IA32_SPEC_CTRL:
2746 		if (!msr_info->host_initiated &&
2747 		    !guest_has_spec_ctrl_msr(vcpu))
2748 			return 1;
2749 
2750 		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2751 			msr_info->data = svm->vmcb->save.spec_ctrl;
2752 		else
2753 			msr_info->data = svm->spec_ctrl;
2754 		break;
2755 	case MSR_AMD64_VIRT_SPEC_CTRL:
2756 		if (!msr_info->host_initiated &&
2757 		    !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
2758 			return 1;
2759 
2760 		msr_info->data = svm->virt_spec_ctrl;
2761 		break;
2762 	case MSR_F15H_IC_CFG: {
2763 
2764 		int family, model;
2765 
2766 		family = guest_cpuid_family(vcpu);
2767 		model = guest_cpuid_model(vcpu);
2768 
2769 		if (family < 0 || model < 0)
2770 			return kvm_get_msr_common(vcpu, msr_info);
2771 
2772 		msr_info->data = 0;
2773 
2774 		if (family == 0x15 &&
2775 		    (model >= 0x2 && model < 0x20))
2776 			msr_info->data = 0x1E;
2777 		}
2778 		break;
2779 	case MSR_AMD64_DE_CFG:
2780 		msr_info->data = svm->msr_decfg;
2781 		break;
2782 	default:
2783 		return kvm_get_msr_common(vcpu, msr_info);
2784 	}
2785 	return 0;
2786 }
2787 
2788 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2789 {
2790 	struct vcpu_svm *svm = to_svm(vcpu);
2791 	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2792 		return kvm_complete_insn_gp(vcpu, err);
2793 
2794 	svm_vmgexit_inject_exception(svm, X86_TRAP_GP);
2795 	return 1;
2796 }
2797 
2798 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2799 {
2800 	struct vcpu_svm *svm = to_svm(vcpu);
2801 	int svm_dis, chg_mask;
2802 
2803 	if (data & ~SVM_VM_CR_VALID_MASK)
2804 		return 1;
2805 
2806 	chg_mask = SVM_VM_CR_VALID_MASK;
2807 
2808 	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2809 		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2810 
2811 	svm->nested.vm_cr_msr &= ~chg_mask;
2812 	svm->nested.vm_cr_msr |= (data & chg_mask);
2813 
2814 	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2815 
2816 	/* check for svm_disable while efer.svme is set */
2817 	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2818 		return 1;
2819 
2820 	return 0;
2821 }
2822 
2823 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2824 {
2825 	struct vcpu_svm *svm = to_svm(vcpu);
2826 	int ret = 0;
2827 
2828 	u32 ecx = msr->index;
2829 	u64 data = msr->data;
2830 
2831 	if (sev_es_prevent_msr_access(vcpu, msr))
2832 		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
2833 
2834 	switch (ecx) {
2835 	case MSR_AMD64_TSC_RATIO:
2836 
2837 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) {
2838 
2839 			if (!msr->host_initiated)
2840 				return 1;
2841 			/*
2842 			 * In case TSC scaling is not enabled, always
2843 			 * leave this MSR at the default value.
2844 			 *
2845 			 * Due to a bug in QEMU 6.2.0, it tries to set
2846 			 * this MSR to 0 if TSC scaling is not enabled.
2847 			 * Ignore this value as well.
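			 * (I.e. a host-initiated write is accepted only if
			 * the value is '0' or matches the current ratio; see
			 * the check below.)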
2848 */ 2849 if (data != 0 && data != svm->tsc_ratio_msr) 2850 return 1; 2851 break; 2852 } 2853 2854 if (data & SVM_TSC_RATIO_RSVD) 2855 return 1; 2856 2857 svm->tsc_ratio_msr = data; 2858 2859 if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) && 2860 is_guest_mode(vcpu)) 2861 nested_svm_update_tsc_ratio_msr(vcpu); 2862 2863 break; 2864 case MSR_IA32_CR_PAT: 2865 ret = kvm_set_msr_common(vcpu, msr); 2866 if (ret) 2867 break; 2868 2869 svm->vmcb01.ptr->save.g_pat = data; 2870 if (is_guest_mode(vcpu)) 2871 nested_vmcb02_compute_g_pat(svm); 2872 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); 2873 break; 2874 case MSR_IA32_SPEC_CTRL: 2875 if (!msr->host_initiated && 2876 !guest_has_spec_ctrl_msr(vcpu)) 2877 return 1; 2878 2879 if (kvm_spec_ctrl_test_value(data)) 2880 return 1; 2881 2882 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 2883 svm->vmcb->save.spec_ctrl = data; 2884 else 2885 svm->spec_ctrl = data; 2886 if (!data) 2887 break; 2888 2889 /* 2890 * For non-nested: 2891 * When it's written (to non-zero) for the first time, pass 2892 * it through. 2893 * 2894 * For nested: 2895 * The handling of the MSR bitmap for L2 guests is done in 2896 * nested_svm_merge_msrpm(). 2897 * We update the L1 MSR bit as well since it will end up 2898 * touching the MSR anyway now. 2899 */ 2900 svm_disable_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW); 2901 break; 2902 case MSR_AMD64_VIRT_SPEC_CTRL: 2903 if (!msr->host_initiated && 2904 !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD)) 2905 return 1; 2906 2907 if (data & ~SPEC_CTRL_SSBD) 2908 return 1; 2909 2910 svm->virt_spec_ctrl = data; 2911 break; 2912 case MSR_STAR: 2913 svm->vmcb01.ptr->save.star = data; 2914 break; 2915 #ifdef CONFIG_X86_64 2916 case MSR_LSTAR: 2917 svm->vmcb01.ptr->save.lstar = data; 2918 break; 2919 case MSR_CSTAR: 2920 svm->vmcb01.ptr->save.cstar = data; 2921 break; 2922 case MSR_GS_BASE: 2923 svm->vmcb01.ptr->save.gs.base = data; 2924 break; 2925 case MSR_FS_BASE: 2926 svm->vmcb01.ptr->save.fs.base = data; 2927 break; 2928 case MSR_KERNEL_GS_BASE: 2929 svm->vmcb01.ptr->save.kernel_gs_base = data; 2930 break; 2931 case MSR_SYSCALL_MASK: 2932 svm->vmcb01.ptr->save.sfmask = data; 2933 break; 2934 #endif 2935 case MSR_IA32_SYSENTER_CS: 2936 svm->vmcb01.ptr->save.sysenter_cs = data; 2937 break; 2938 case MSR_IA32_SYSENTER_EIP: 2939 svm->vmcb01.ptr->save.sysenter_eip = (u32)data; 2940 /* 2941 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs 2942 * when we spoof an Intel vendor ID (for cross vendor migration). 2943 * In this case we use this intercept to track the high 2944 * 32 bit part of these msrs to support Intel's 2945 * implementation of SYSENTER/SYSEXIT. 2946 */ 2947 svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; 2948 break; 2949 case MSR_IA32_SYSENTER_ESP: 2950 svm->vmcb01.ptr->save.sysenter_esp = (u32)data; 2951 svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; 2952 break; 2953 case MSR_IA32_S_CET: 2954 svm->vmcb->save.s_cet = data; 2955 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); 2956 break; 2957 case MSR_IA32_INT_SSP_TAB: 2958 svm->vmcb->save.isst_addr = data; 2959 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); 2960 break; 2961 case MSR_KVM_INTERNAL_GUEST_SSP: 2962 svm->vmcb->save.ssp = data; 2963 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); 2964 break; 2965 case MSR_TSC_AUX: 2966 /* 2967 * TSC_AUX is always virtualized for SEV-ES guests when the 2968 * feature is available. 
The user return MSR support is not
2969 		 * required in this case because TSC_AUX is restored on #VMEXIT
2970 		 * from the host save area.
2971 		 */
2972 		if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
2973 			break;
2974 
2975 		/*
2976 		 * TSC_AUX is usually changed only during boot and never read
2977 		 * directly.  Intercept TSC_AUX and switch it via user return.
2978 		 */
2979 		preempt_disable();
2980 		ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
2981 		preempt_enable();
2982 		if (ret)
2983 			break;
2984 
2985 		svm->tsc_aux = data;
2986 		break;
2987 	case MSR_IA32_DEBUGCTLMSR:
2988 		if (!lbrv) {
2989 			kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
2990 			break;
2991 		}
2992 
2993 		/*
2994 		 * Suppress BTF as KVM doesn't virtualize BTF, but there's no
2995 		 * way to communicate lack of support to the guest.
2996 		 */
2997 		if (data & DEBUGCTLMSR_BTF) {
2998 			kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
2999 			data &= ~DEBUGCTLMSR_BTF;
3000 		}
3001 
3002 		if (data & DEBUGCTL_RESERVED_BITS)
3003 			return 1;
3004 
3005 		if (svm->vmcb->save.dbgctl == data)
3006 			break;
3007 
3008 		svm->vmcb->save.dbgctl = data;
3009 		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
3010 		svm_update_lbrv(vcpu);
3011 		break;
3012 	case MSR_VM_HSAVE_PA:
3013 		/*
3014 		 * Old kernels did not validate the value written to
3015 		 * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
3016 		 * value to allow live migrating buggy or malicious guests
3017 		 * originating from those kernels.
3018 		 */
3019 		if (!msr->host_initiated && !page_address_valid(vcpu, data))
3020 			return 1;
3021 
3022 		svm->nested.hsave_msr = data & PAGE_MASK;
3023 		break;
3024 	case MSR_VM_CR:
3025 		return svm_set_vm_cr(vcpu, data);
3026 	case MSR_VM_IGNNE:
3027 		kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3028 		break;
3029 	case MSR_AMD64_DE_CFG: {
3030 		u64 supported_de_cfg;
3031 
3032 		if (svm_get_feature_msr(ecx, &supported_de_cfg))
3033 			return 1;
3034 
3035 		if (data & ~supported_de_cfg)
3036 			return 1;
3037 
3038 		svm->msr_decfg = data;
3039 		break;
3040 	}
3041 	default:
3042 		return kvm_set_msr_common(vcpu, msr);
3043 	}
3044 	return ret;
3045 }
3046 
3047 static int msr_interception(struct kvm_vcpu *vcpu)
3048 {
3049 	if (to_svm(vcpu)->vmcb->control.exit_info_1)
3050 		return kvm_emulate_wrmsr(vcpu);
3051 	else
3052 		return kvm_emulate_rdmsr(vcpu);
3053 }
3054 
3055 static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3056 {
3057 	kvm_make_request(KVM_REQ_EVENT, vcpu);
3058 	svm_clear_vintr(to_svm(vcpu));
3059 
3060 	/*
3061 	 * If not running nested, for AVIC, the only reason to end up here is
3062 	 * ExtINTs.  In this case AVIC was temporarily disabled for
3063 	 * requesting the IRQ window and we have to re-enable it.
3064 	 *
3065 	 * If running nested, still remove the VM wide AVIC inhibit to
3066 	 * support the case in which the interrupt window was requested when
3067 	 * the vCPU was not running nested.
3068 	 *
3069 	 * All vCPUs which are still running nested will keep their AVIC
3070 	 * inhibited due to the per-vCPU AVIC inhibition.
3071 	 */
3072 	kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3073 
3074 	++vcpu->stat.irq_window_exits;
3075 	return 1;
3076 }
3077 
3078 static int pause_interception(struct kvm_vcpu *vcpu)
3079 {
3080 	bool in_kernel;
3081 	/*
3082 	 * CPL is not made available for an SEV-ES guest, therefore
3083 	 * vcpu->arch.preempted_in_kernel can never be true.  Just
3084 	 * set in_kernel to false as well.
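	 * (in_kernel only steers the directed-yield heuristic in
	 * kvm_vcpu_on_spin(), so reporting 'false' is safe, just potentially
	 * suboptimal for spinning vCPUs.)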
3085 */ 3086 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; 3087 3088 grow_ple_window(vcpu); 3089 3090 kvm_vcpu_on_spin(vcpu, in_kernel); 3091 return kvm_skip_emulated_instruction(vcpu); 3092 } 3093 3094 static int invpcid_interception(struct kvm_vcpu *vcpu) 3095 { 3096 struct vcpu_svm *svm = to_svm(vcpu); 3097 unsigned long type; 3098 gva_t gva; 3099 3100 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) { 3101 kvm_queue_exception(vcpu, UD_VECTOR); 3102 return 1; 3103 } 3104 3105 /* 3106 * For an INVPCID intercept: 3107 * EXITINFO1 provides the linear address of the memory operand. 3108 * EXITINFO2 provides the contents of the register operand. 3109 */ 3110 type = svm->vmcb->control.exit_info_2; 3111 gva = svm->vmcb->control.exit_info_1; 3112 3113 /* 3114 * FIXME: Perform segment checks for 32-bit mode, and inject #SS if the 3115 * stack segment is used. The intercept takes priority over all 3116 * #GP checks except CPL>0, but somehow still generates a linear 3117 * address? The APM is sorely lacking. 3118 */ 3119 if (is_noncanonical_address(gva, vcpu, 0)) { 3120 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 3121 return 1; 3122 } 3123 3124 return kvm_handle_invpcid(vcpu, type, gva); 3125 } 3126 3127 static inline int complete_userspace_buslock(struct kvm_vcpu *vcpu) 3128 { 3129 struct vcpu_svm *svm = to_svm(vcpu); 3130 3131 /* 3132 * If userspace has NOT changed RIP, then KVM's ABI is to let the guest 3133 * execute the bus-locking instruction. Set the bus lock counter to '1' 3134 * to effectively step past the bus lock. 3135 */ 3136 if (kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip)) 3137 svm->vmcb->control.bus_lock_counter = 1; 3138 3139 return 1; 3140 } 3141 3142 static int bus_lock_exit(struct kvm_vcpu *vcpu) 3143 { 3144 struct vcpu_svm *svm = to_svm(vcpu); 3145 3146 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; 3147 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; 3148 3149 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu); 3150 vcpu->arch.complete_userspace_io = complete_userspace_buslock; 3151 3152 if (is_guest_mode(vcpu)) 3153 svm->nested.ctl.bus_lock_rip = vcpu->arch.cui_linear_rip; 3154 3155 return 0; 3156 } 3157 3158 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { 3159 [SVM_EXIT_READ_CR0] = cr_interception, 3160 [SVM_EXIT_READ_CR3] = cr_interception, 3161 [SVM_EXIT_READ_CR4] = cr_interception, 3162 [SVM_EXIT_READ_CR8] = cr_interception, 3163 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, 3164 [SVM_EXIT_WRITE_CR0] = cr_interception, 3165 [SVM_EXIT_WRITE_CR3] = cr_interception, 3166 [SVM_EXIT_WRITE_CR4] = cr_interception, 3167 [SVM_EXIT_WRITE_CR8] = cr8_write_interception, 3168 [SVM_EXIT_READ_DR0] = dr_interception, 3169 [SVM_EXIT_READ_DR1] = dr_interception, 3170 [SVM_EXIT_READ_DR2] = dr_interception, 3171 [SVM_EXIT_READ_DR3] = dr_interception, 3172 [SVM_EXIT_READ_DR4] = dr_interception, 3173 [SVM_EXIT_READ_DR5] = dr_interception, 3174 [SVM_EXIT_READ_DR6] = dr_interception, 3175 [SVM_EXIT_READ_DR7] = dr_interception, 3176 [SVM_EXIT_WRITE_DR0] = dr_interception, 3177 [SVM_EXIT_WRITE_DR1] = dr_interception, 3178 [SVM_EXIT_WRITE_DR2] = dr_interception, 3179 [SVM_EXIT_WRITE_DR3] = dr_interception, 3180 [SVM_EXIT_WRITE_DR4] = dr_interception, 3181 [SVM_EXIT_WRITE_DR5] = dr_interception, 3182 [SVM_EXIT_WRITE_DR6] = dr_interception, 3183 [SVM_EXIT_WRITE_DR7] = dr_interception, 3184 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, 3185 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, 3186 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, 3187 
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, 3188 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception, 3189 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception, 3190 [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, 3191 [SVM_EXIT_INTR] = intr_interception, 3192 [SVM_EXIT_NMI] = nmi_interception, 3193 [SVM_EXIT_SMI] = smi_interception, 3194 [SVM_EXIT_VINTR] = interrupt_window_interception, 3195 [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, 3196 [SVM_EXIT_CPUID] = kvm_emulate_cpuid, 3197 [SVM_EXIT_IRET] = iret_interception, 3198 [SVM_EXIT_INVD] = kvm_emulate_invd, 3199 [SVM_EXIT_PAUSE] = pause_interception, 3200 [SVM_EXIT_HLT] = kvm_emulate_halt, 3201 [SVM_EXIT_INVLPG] = invlpg_interception, 3202 [SVM_EXIT_INVLPGA] = invlpga_interception, 3203 [SVM_EXIT_IOIO] = io_interception, 3204 [SVM_EXIT_MSR] = msr_interception, 3205 [SVM_EXIT_TASK_SWITCH] = task_switch_interception, 3206 [SVM_EXIT_SHUTDOWN] = shutdown_interception, 3207 [SVM_EXIT_VMRUN] = vmrun_interception, 3208 [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall, 3209 [SVM_EXIT_VMLOAD] = vmload_interception, 3210 [SVM_EXIT_VMSAVE] = vmsave_interception, 3211 [SVM_EXIT_STGI] = stgi_interception, 3212 [SVM_EXIT_CLGI] = clgi_interception, 3213 [SVM_EXIT_SKINIT] = skinit_interception, 3214 [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op, 3215 [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd, 3216 [SVM_EXIT_MONITOR] = kvm_emulate_monitor, 3217 [SVM_EXIT_MWAIT] = kvm_emulate_mwait, 3218 [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv, 3219 [SVM_EXIT_RDPRU] = kvm_handle_invalid_op, 3220 [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap, 3221 [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap, 3222 [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap, 3223 [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap, 3224 [SVM_EXIT_INVPCID] = invpcid_interception, 3225 [SVM_EXIT_IDLE_HLT] = kvm_emulate_halt, 3226 [SVM_EXIT_NPF] = npf_interception, 3227 [SVM_EXIT_BUS_LOCK] = bus_lock_exit, 3228 [SVM_EXIT_RSM] = rsm_interception, 3229 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, 3230 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, 3231 #ifdef CONFIG_KVM_AMD_SEV 3232 [SVM_EXIT_VMGEXIT] = sev_handle_vmgexit, 3233 #endif 3234 }; 3235 3236 static void dump_vmcb(struct kvm_vcpu *vcpu) 3237 { 3238 struct vcpu_svm *svm = to_svm(vcpu); 3239 struct vmcb_control_area *control = &svm->vmcb->control; 3240 struct vmcb_save_area *save = &svm->vmcb->save; 3241 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; 3242 char *vm_type; 3243 3244 if (!dump_invalid_vmcb) { 3245 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); 3246 return; 3247 } 3248 3249 guard(mutex)(&vmcb_dump_mutex); 3250 3251 vm_type = sev_snp_guest(vcpu->kvm) ? "SEV-SNP" : 3252 sev_es_guest(vcpu->kvm) ? "SEV-ES" : 3253 sev_guest(vcpu->kvm) ? 
"SEV" : "SVM"; 3254 3255 pr_err("%s vCPU%u VMCB %p, last attempted VMRUN on CPU %d\n", 3256 vm_type, vcpu->vcpu_id, svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); 3257 pr_err("VMCB Control Area:\n"); 3258 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); 3259 pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16); 3260 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); 3261 pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16); 3262 pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]); 3263 pr_err("%-20s%08x %08x\n", "intercepts:", 3264 control->intercepts[INTERCEPT_WORD3], 3265 control->intercepts[INTERCEPT_WORD4]); 3266 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); 3267 pr_err("%-20s%d\n", "pause filter threshold:", 3268 control->pause_filter_thresh); 3269 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); 3270 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); 3271 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); 3272 pr_err("%-20s%d\n", "asid:", control->asid); 3273 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); 3274 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); 3275 pr_err("%-20s%08x\n", "int_vector:", control->int_vector); 3276 pr_err("%-20s%08x\n", "int_state:", control->int_state); 3277 pr_err("%-20s%08x\n", "exit_code:", control->exit_code); 3278 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); 3279 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); 3280 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); 3281 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); 3282 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); 3283 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); 3284 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar); 3285 pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa); 3286 pr_err("%-20s%08x\n", "event_inj:", control->event_inj); 3287 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); 3288 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext); 3289 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); 3290 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page); 3291 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id); 3292 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id); 3293 pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa); 3294 pr_err("%-20s%016llx\n", "allowed_sev_features:", control->allowed_sev_features); 3295 pr_err("%-20s%016llx\n", "guest_sev_features:", control->guest_sev_features); 3296 3297 if (sev_es_guest(vcpu->kvm)) { 3298 save = sev_decrypt_vmsa(vcpu); 3299 if (!save) 3300 goto no_vmsa; 3301 3302 save01 = save; 3303 } 3304 3305 pr_err("VMCB State Save Area:\n"); 3306 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3307 "es:", 3308 save->es.selector, save->es.attrib, 3309 save->es.limit, save->es.base); 3310 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3311 "cs:", 3312 save->cs.selector, save->cs.attrib, 3313 save->cs.limit, save->cs.base); 3314 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3315 "ss:", 3316 save->ss.selector, save->ss.attrib, 3317 save->ss.limit, save->ss.base); 3318 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3319 "ds:", 3320 save->ds.selector, save->ds.attrib, 3321 save->ds.limit, save->ds.base); 3322 pr_err("%-5s 
s: %04x a: %04x l: %08x b: %016llx\n", 3323 "fs:", 3324 save01->fs.selector, save01->fs.attrib, 3325 save01->fs.limit, save01->fs.base); 3326 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3327 "gs:", 3328 save01->gs.selector, save01->gs.attrib, 3329 save01->gs.limit, save01->gs.base); 3330 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3331 "gdtr:", 3332 save->gdtr.selector, save->gdtr.attrib, 3333 save->gdtr.limit, save->gdtr.base); 3334 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3335 "ldtr:", 3336 save01->ldtr.selector, save01->ldtr.attrib, 3337 save01->ldtr.limit, save01->ldtr.base); 3338 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3339 "idtr:", 3340 save->idtr.selector, save->idtr.attrib, 3341 save->idtr.limit, save->idtr.base); 3342 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3343 "tr:", 3344 save01->tr.selector, save01->tr.attrib, 3345 save01->tr.limit, save01->tr.base); 3346 pr_err("vmpl: %d cpl: %d efer: %016llx\n", 3347 save->vmpl, save->cpl, save->efer); 3348 pr_err("%-15s %016llx %-13s %016llx\n", 3349 "cr0:", save->cr0, "cr2:", save->cr2); 3350 pr_err("%-15s %016llx %-13s %016llx\n", 3351 "cr3:", save->cr3, "cr4:", save->cr4); 3352 pr_err("%-15s %016llx %-13s %016llx\n", 3353 "dr6:", save->dr6, "dr7:", save->dr7); 3354 pr_err("%-15s %016llx %-13s %016llx\n", 3355 "rip:", save->rip, "rflags:", save->rflags); 3356 pr_err("%-15s %016llx %-13s %016llx\n", 3357 "rsp:", save->rsp, "rax:", save->rax); 3358 pr_err("%-15s %016llx %-13s %016llx\n", 3359 "s_cet:", save->s_cet, "ssp:", save->ssp); 3360 pr_err("%-15s %016llx\n", 3361 "isst_addr:", save->isst_addr); 3362 pr_err("%-15s %016llx %-13s %016llx\n", 3363 "star:", save01->star, "lstar:", save01->lstar); 3364 pr_err("%-15s %016llx %-13s %016llx\n", 3365 "cstar:", save01->cstar, "sfmask:", save01->sfmask); 3366 pr_err("%-15s %016llx %-13s %016llx\n", 3367 "kernel_gs_base:", save01->kernel_gs_base, 3368 "sysenter_cs:", save01->sysenter_cs); 3369 pr_err("%-15s %016llx %-13s %016llx\n", 3370 "sysenter_esp:", save01->sysenter_esp, 3371 "sysenter_eip:", save01->sysenter_eip); 3372 pr_err("%-15s %016llx %-13s %016llx\n", 3373 "gpat:", save->g_pat, "dbgctl:", save->dbgctl); 3374 pr_err("%-15s %016llx %-13s %016llx\n", 3375 "br_from:", save->br_from, "br_to:", save->br_to); 3376 pr_err("%-15s %016llx %-13s %016llx\n", 3377 "excp_from:", save->last_excp_from, 3378 "excp_to:", save->last_excp_to); 3379 3380 if (sev_es_guest(vcpu->kvm)) { 3381 struct sev_es_save_area *vmsa = (struct sev_es_save_area *)save; 3382 3383 pr_err("%-15s %016llx\n", 3384 "sev_features", vmsa->sev_features); 3385 3386 pr_err("%-15s %016llx %-13s %016llx\n", 3387 "pl0_ssp:", vmsa->pl0_ssp, "pl1_ssp:", vmsa->pl1_ssp); 3388 pr_err("%-15s %016llx %-13s %016llx\n", 3389 "pl2_ssp:", vmsa->pl2_ssp, "pl3_ssp:", vmsa->pl3_ssp); 3390 pr_err("%-15s %016llx\n", 3391 "u_cet:", vmsa->u_cet); 3392 3393 pr_err("%-15s %016llx %-13s %016llx\n", 3394 "rax:", vmsa->rax, "rbx:", vmsa->rbx); 3395 pr_err("%-15s %016llx %-13s %016llx\n", 3396 "rcx:", vmsa->rcx, "rdx:", vmsa->rdx); 3397 pr_err("%-15s %016llx %-13s %016llx\n", 3398 "rsi:", vmsa->rsi, "rdi:", vmsa->rdi); 3399 pr_err("%-15s %016llx %-13s %016llx\n", 3400 "rbp:", vmsa->rbp, "rsp:", vmsa->rsp); 3401 pr_err("%-15s %016llx %-13s %016llx\n", 3402 "r8:", vmsa->r8, "r9:", vmsa->r9); 3403 pr_err("%-15s %016llx %-13s %016llx\n", 3404 "r10:", vmsa->r10, "r11:", vmsa->r11); 3405 pr_err("%-15s %016llx %-13s %016llx\n", 3406 "r12:", vmsa->r12, "r13:", vmsa->r13); 3407 pr_err("%-15s %016llx %-13s %016llx\n", 
3408 "r14:", vmsa->r14, "r15:", vmsa->r15); 3409 pr_err("%-15s %016llx %-13s %016llx\n", 3410 "xcr0:", vmsa->xcr0, "xss:", vmsa->xss); 3411 } else { 3412 pr_err("%-15s %016llx %-13s %016lx\n", 3413 "rax:", save->rax, "rbx:", 3414 vcpu->arch.regs[VCPU_REGS_RBX]); 3415 pr_err("%-15s %016lx %-13s %016lx\n", 3416 "rcx:", vcpu->arch.regs[VCPU_REGS_RCX], 3417 "rdx:", vcpu->arch.regs[VCPU_REGS_RDX]); 3418 pr_err("%-15s %016lx %-13s %016lx\n", 3419 "rsi:", vcpu->arch.regs[VCPU_REGS_RSI], 3420 "rdi:", vcpu->arch.regs[VCPU_REGS_RDI]); 3421 pr_err("%-15s %016lx %-13s %016llx\n", 3422 "rbp:", vcpu->arch.regs[VCPU_REGS_RBP], 3423 "rsp:", save->rsp); 3424 #ifdef CONFIG_X86_64 3425 pr_err("%-15s %016lx %-13s %016lx\n", 3426 "r8:", vcpu->arch.regs[VCPU_REGS_R8], 3427 "r9:", vcpu->arch.regs[VCPU_REGS_R9]); 3428 pr_err("%-15s %016lx %-13s %016lx\n", 3429 "r10:", vcpu->arch.regs[VCPU_REGS_R10], 3430 "r11:", vcpu->arch.regs[VCPU_REGS_R11]); 3431 pr_err("%-15s %016lx %-13s %016lx\n", 3432 "r12:", vcpu->arch.regs[VCPU_REGS_R12], 3433 "r13:", vcpu->arch.regs[VCPU_REGS_R13]); 3434 pr_err("%-15s %016lx %-13s %016lx\n", 3435 "r14:", vcpu->arch.regs[VCPU_REGS_R14], 3436 "r15:", vcpu->arch.regs[VCPU_REGS_R15]); 3437 #endif 3438 } 3439 3440 no_vmsa: 3441 if (sev_es_guest(vcpu->kvm)) 3442 sev_free_decrypted_vmsa(vcpu, save); 3443 } 3444 3445 static bool svm_check_exit_valid(u64 exit_code) 3446 { 3447 return (exit_code < ARRAY_SIZE(svm_exit_handlers) && 3448 svm_exit_handlers[exit_code]); 3449 } 3450 3451 static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code) 3452 { 3453 dump_vmcb(vcpu); 3454 kvm_prepare_unexpected_reason_exit(vcpu, exit_code); 3455 return 0; 3456 } 3457 3458 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code) 3459 { 3460 if (!svm_check_exit_valid(exit_code)) 3461 return svm_handle_invalid_exit(vcpu, exit_code); 3462 3463 #ifdef CONFIG_MITIGATION_RETPOLINE 3464 if (exit_code == SVM_EXIT_MSR) 3465 return msr_interception(vcpu); 3466 else if (exit_code == SVM_EXIT_VINTR) 3467 return interrupt_window_interception(vcpu); 3468 else if (exit_code == SVM_EXIT_INTR) 3469 return intr_interception(vcpu); 3470 else if (exit_code == SVM_EXIT_HLT || exit_code == SVM_EXIT_IDLE_HLT) 3471 return kvm_emulate_halt(vcpu); 3472 else if (exit_code == SVM_EXIT_NPF) 3473 return npf_interception(vcpu); 3474 #ifdef CONFIG_KVM_AMD_SEV 3475 else if (exit_code == SVM_EXIT_VMGEXIT) 3476 return sev_handle_vmgexit(vcpu); 3477 #endif 3478 #endif 3479 return svm_exit_handlers[exit_code](vcpu); 3480 } 3481 3482 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, 3483 u64 *info1, u64 *info2, 3484 u32 *intr_info, u32 *error_code) 3485 { 3486 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; 3487 3488 *reason = control->exit_code; 3489 *info1 = control->exit_info_1; 3490 *info2 = control->exit_info_2; 3491 *intr_info = control->exit_int_info; 3492 if ((*intr_info & SVM_EXITINTINFO_VALID) && 3493 (*intr_info & SVM_EXITINTINFO_VALID_ERR)) 3494 *error_code = control->exit_int_info_err; 3495 else 3496 *error_code = 0; 3497 } 3498 3499 static void svm_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, 3500 u32 *error_code) 3501 { 3502 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; 3503 3504 *intr_info = control->event_inj; 3505 3506 if ((*intr_info & SVM_EXITINTINFO_VALID) && 3507 (*intr_info & SVM_EXITINTINFO_VALID_ERR)) 3508 *error_code = control->event_inj_err; 3509 else 3510 *error_code = 0; 3511 3512 } 3513 3514 static int svm_handle_exit(struct kvm_vcpu 
*vcpu, fastpath_t exit_fastpath) 3515 { 3516 struct vcpu_svm *svm = to_svm(vcpu); 3517 struct kvm_run *kvm_run = vcpu->run; 3518 u32 exit_code = svm->vmcb->control.exit_code; 3519 3520 /* SEV-ES guests must use the CR write traps to track CR registers. */ 3521 if (!sev_es_guest(vcpu->kvm)) { 3522 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) 3523 vcpu->arch.cr0 = svm->vmcb->save.cr0; 3524 if (npt_enabled) 3525 vcpu->arch.cr3 = svm->vmcb->save.cr3; 3526 } 3527 3528 if (is_guest_mode(vcpu)) { 3529 int vmexit; 3530 3531 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM); 3532 3533 vmexit = nested_svm_exit_special(svm); 3534 3535 if (vmexit == NESTED_EXIT_CONTINUE) 3536 vmexit = nested_svm_exit_handled(svm); 3537 3538 if (vmexit == NESTED_EXIT_DONE) 3539 return 1; 3540 } 3541 3542 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { 3543 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; 3544 kvm_run->fail_entry.hardware_entry_failure_reason 3545 = svm->vmcb->control.exit_code; 3546 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 3547 dump_vmcb(vcpu); 3548 return 0; 3549 } 3550 3551 if (exit_fastpath != EXIT_FASTPATH_NONE) 3552 return 1; 3553 3554 return svm_invoke_exit_handler(vcpu, exit_code); 3555 } 3556 3557 static int pre_svm_run(struct kvm_vcpu *vcpu) 3558 { 3559 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); 3560 struct vcpu_svm *svm = to_svm(vcpu); 3561 3562 /* 3563 * If the previous vmrun of the vmcb occurred on a different physical 3564 * cpu, then mark the vmcb dirty and assign a new asid. Hardware's 3565 * vmcb clean bits are per logical CPU, as are KVM's asid assignments. 3566 */ 3567 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { 3568 svm->current_vmcb->asid_generation = 0; 3569 vmcb_mark_all_dirty(svm->vmcb); 3570 svm->current_vmcb->cpu = vcpu->cpu; 3571 } 3572 3573 if (sev_guest(vcpu->kvm)) 3574 return pre_sev_run(svm, vcpu->cpu); 3575 3576 /* FIXME: handle wraparound of asid_generation */ 3577 if (svm->current_vmcb->asid_generation != sd->asid_generation) 3578 new_asid(svm, sd); 3579 3580 return 0; 3581 } 3582 3583 static void svm_inject_nmi(struct kvm_vcpu *vcpu) 3584 { 3585 struct vcpu_svm *svm = to_svm(vcpu); 3586 3587 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; 3588 3589 if (svm->nmi_l1_to_l2) 3590 return; 3591 3592 /* 3593 * No need to manually track NMI masking when vNMI is enabled, hardware 3594 * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the 3595 * case where software directly injects an NMI. 3596 */ 3597 if (!is_vnmi_enabled(svm)) { 3598 svm->nmi_masked = true; 3599 svm_set_iret_intercept(svm); 3600 } 3601 ++vcpu->stat.nmi_injections; 3602 } 3603 3604 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu) 3605 { 3606 struct vcpu_svm *svm = to_svm(vcpu); 3607 3608 if (!is_vnmi_enabled(svm)) 3609 return false; 3610 3611 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); 3612 } 3613 3614 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu) 3615 { 3616 struct vcpu_svm *svm = to_svm(vcpu); 3617 3618 if (!is_vnmi_enabled(svm)) 3619 return false; 3620 3621 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK) 3622 return false; 3623 3624 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK; 3625 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); 3626 3627 /* 3628 * Because the pending NMI is serviced by hardware, KVM can't know when 3629 * the NMI is "injected", but for all intents and purposes, passing the 3630 * NMI off to hardware counts as injection. 
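	 * (Hardware is expected to clear V_NMI_PENDING and set
	 * V_NMI_BLOCKING_MASK when it actually takes the virtual NMI, so no
	 * further bookkeeping is needed here.)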
3631 */ 3632 ++vcpu->stat.nmi_injections; 3633 3634 return true; 3635 } 3636 3637 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected) 3638 { 3639 struct kvm_queued_interrupt *intr = &vcpu->arch.interrupt; 3640 struct vcpu_svm *svm = to_svm(vcpu); 3641 u32 type; 3642 3643 if (intr->soft) { 3644 if (svm_update_soft_interrupt_rip(vcpu, intr->nr)) 3645 return; 3646 3647 type = SVM_EVTINJ_TYPE_SOFT; 3648 } else { 3649 type = SVM_EVTINJ_TYPE_INTR; 3650 } 3651 3652 trace_kvm_inj_virq(intr->nr, intr->soft, reinjected); 3653 ++vcpu->stat.irq_injections; 3654 3655 svm->vmcb->control.event_inj = intr->nr | SVM_EVTINJ_VALID | type; 3656 } 3657 3658 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode, 3659 int trig_mode, int vector) 3660 { 3661 /* 3662 * apic->apicv_active must be read after vcpu->mode. 3663 * Pairs with smp_store_release in vcpu_enter_guest. 3664 */ 3665 bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE); 3666 3667 /* Note, this is called iff the local APIC is in-kernel. */ 3668 if (!READ_ONCE(vcpu->arch.apic->apicv_active)) { 3669 /* Process the interrupt via kvm_check_and_inject_events(). */ 3670 kvm_make_request(KVM_REQ_EVENT, vcpu); 3671 kvm_vcpu_kick(vcpu); 3672 return; 3673 } 3674 3675 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector); 3676 if (in_guest_mode) { 3677 /* 3678 * Signal the doorbell to tell hardware to inject the IRQ. If 3679 * the vCPU exits the guest before the doorbell chimes, hardware 3680 * will automatically process AVIC interrupts at the next VMRUN. 3681 */ 3682 avic_ring_doorbell(vcpu); 3683 } else { 3684 /* 3685 * Wake the vCPU if it was blocking. KVM will then detect the 3686 * pending IRQ when checking if the vCPU has a wake event. 3687 */ 3688 kvm_vcpu_wake_up(vcpu); 3689 } 3690 } 3691 3692 static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, 3693 int trig_mode, int vector) 3694 { 3695 kvm_lapic_set_irr(vector, apic); 3696 3697 /* 3698 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in 3699 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before 3700 * the read of guest_mode. This guarantees that either VMRUN will see 3701 * and process the new vIRR entry, or that svm_complete_interrupt_delivery 3702 * will signal the doorbell if the CPU has already entered the guest. 3703 */ 3704 smp_mb__after_atomic(); 3705 svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector); 3706 } 3707 3708 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 3709 { 3710 struct vcpu_svm *svm = to_svm(vcpu); 3711 3712 /* 3713 * SEV-ES guests must always keep the CR intercepts cleared. CR 3714 * tracking is done using the CR write traps. 
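	 * (For SEV-ES, CR8 writes instead arrive as SVM_EXIT_CR8_WRITE_TRAP
	 * and are handled by cr_trap(), so there is nothing to adjust here.)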
3715 */ 3716 if (sev_es_guest(vcpu->kvm)) 3717 return; 3718 3719 if (nested_svm_virtualize_tpr(vcpu)) 3720 return; 3721 3722 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); 3723 3724 if (irr == -1) 3725 return; 3726 3727 if (tpr >= irr) 3728 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 3729 } 3730 3731 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) 3732 { 3733 struct vcpu_svm *svm = to_svm(vcpu); 3734 3735 if (is_vnmi_enabled(svm)) 3736 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK; 3737 else 3738 return svm->nmi_masked; 3739 } 3740 3741 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 3742 { 3743 struct vcpu_svm *svm = to_svm(vcpu); 3744 3745 if (is_vnmi_enabled(svm)) { 3746 if (masked) 3747 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK; 3748 else 3749 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK; 3750 3751 } else { 3752 svm->nmi_masked = masked; 3753 if (masked) 3754 svm_set_iret_intercept(svm); 3755 else 3756 svm_clr_iret_intercept(svm); 3757 } 3758 } 3759 3760 bool svm_nmi_blocked(struct kvm_vcpu *vcpu) 3761 { 3762 struct vcpu_svm *svm = to_svm(vcpu); 3763 struct vmcb *vmcb = svm->vmcb; 3764 3765 if (!gif_set(svm)) 3766 return true; 3767 3768 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) 3769 return false; 3770 3771 if (svm_get_nmi_mask(vcpu)) 3772 return true; 3773 3774 return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK; 3775 } 3776 3777 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 3778 { 3779 struct vcpu_svm *svm = to_svm(vcpu); 3780 if (svm->nested.nested_run_pending) 3781 return -EBUSY; 3782 3783 if (svm_nmi_blocked(vcpu)) 3784 return 0; 3785 3786 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ 3787 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) 3788 return -EBUSY; 3789 return 1; 3790 } 3791 3792 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) 3793 { 3794 struct vcpu_svm *svm = to_svm(vcpu); 3795 struct vmcb *vmcb = svm->vmcb; 3796 3797 if (!gif_set(svm)) 3798 return true; 3799 3800 if (is_guest_mode(vcpu)) { 3801 /* As long as interrupts are being delivered... */ 3802 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) 3803 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) 3804 : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) 3805 return true; 3806 3807 /* ... vmexits aren't blocked by the interrupt shadow */ 3808 if (nested_exit_on_intr(svm)) 3809 return false; 3810 } else { 3811 if (!svm_get_if_flag(vcpu)) 3812 return true; 3813 } 3814 3815 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); 3816 } 3817 3818 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) 3819 { 3820 struct vcpu_svm *svm = to_svm(vcpu); 3821 3822 if (svm->nested.nested_run_pending) 3823 return -EBUSY; 3824 3825 if (svm_interrupt_blocked(vcpu)) 3826 return 0; 3827 3828 /* 3829 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, 3830 * e.g. if the IRQ arrived asynchronously after checking nested events. 3831 */ 3832 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) 3833 return -EBUSY; 3834 3835 return 1; 3836 } 3837 3838 static void svm_enable_irq_window(struct kvm_vcpu *vcpu) 3839 { 3840 struct vcpu_svm *svm = to_svm(vcpu); 3841 3842 /* 3843 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes 3844 * 1, because that's a separate STGI/VMRUN intercept. The next time we 3845 * get that intercept, this function will be called again though and 3846 * we'll get the vintr intercept. 
However, if the vGIF feature is 3847 * enabled, the STGI interception will not occur. Enable the irq 3848 * window under the assumption that the hardware will set the GIF. 3849 */ 3850 if (vgif || gif_set(svm)) { 3851 /* 3852 * IRQ window is not needed when AVIC is enabled, 3853 * unless we have a pending ExtINT since it cannot be injected 3854 * via AVIC. In that case, KVM needs to temporarily disable AVIC 3855 * and fall back to injecting the IRQ via V_IRQ. 3856 * 3857 * If running nested, AVIC is already locally inhibited 3858 * on this vCPU, therefore there is no need to request 3859 * the VM-wide AVIC inhibition. 3860 */ 3861 if (!is_guest_mode(vcpu)) 3862 kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); 3863 3864 svm_set_vintr(svm); 3865 } 3866 } 3867 3868 static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) 3869 { 3870 struct vcpu_svm *svm = to_svm(vcpu); 3871 3872 /* 3873 * If NMIs are outright masked, i.e. the vCPU is already handling an 3874 * NMI, and KVM has not yet intercepted an IRET, then there is nothing 3875 * more to do at this time as KVM has already enabled IRET intercepts. 3876 * If KVM has already intercepted IRET, then single-step over the IRET, 3877 * as NMIs aren't architecturally unmasked until the IRET completes. 3878 * 3879 * If vNMI is enabled, KVM should never request an NMI window if NMIs 3880 * are masked, as KVM allows at most one to-be-injected NMI and one 3881 * pending NMI. If two NMIs arrive simultaneously, KVM will inject one 3882 * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are 3883 * unmasked. KVM _will_ request an NMI window in some situations, e.g. 3884 * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately 3885 * inject the NMI. In those situations, KVM needs to single-step over 3886 * the STI shadow or intercept STGI. 3887 */ 3888 if (svm_get_nmi_mask(vcpu)) { 3889 WARN_ON_ONCE(is_vnmi_enabled(svm)); 3890 3891 if (!svm->awaiting_iret_completion) 3892 return; /* IRET will cause a vm exit */ 3893 } 3894 3895 /* 3896 * SEV-ES guests are responsible for signaling when a vCPU is ready to 3897 * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e. 3898 * KVM can't intercept and single-step IRET to detect when NMIs are 3899 * unblocked (architecturally speaking). See SVM_VMGEXIT_NMI_COMPLETE. 3900 * 3901 * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware 3902 * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not 3903 * supported NAEs in the GHCB protocol. 3904 */ 3905 if (sev_es_guest(vcpu->kvm)) 3906 return; 3907 3908 if (!gif_set(svm)) { 3909 if (vgif) 3910 svm_set_intercept(svm, INTERCEPT_STGI); 3911 return; /* STGI will cause a vm exit */ 3912 } 3913 3914 /* 3915 * Something prevents the NMI from being injected. Single-step over the 3916 * blocking condition (IRET, exception injection, or interrupt shadow). 3917 */ 3918 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); 3919 svm->nmi_singlestep = true; 3920 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); 3921 } 3922 3923 static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu) 3924 { 3925 struct vcpu_svm *svm = to_svm(vcpu); 3926 3927 /* 3928 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries. 3929 * A TLB flush for the current ASID flushes both "host" and "guest" TLB 3930 * entries, and thus is a superset of Hyper-V's fine grained flushing.
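 * Purging KVM's queue of Hyper-V fine-grained flush requests below is therefore safe: the full ASID flush subsumes every queued request.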
3931 */ 3932 kvm_hv_vcpu_purge_flush_tlb(vcpu); 3933 3934 /* 3935 * Flush only the current ASID even if the TLB flush was invoked via 3936 * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all 3937 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and 3938 * unconditionally does a TLB flush on both nested VM-Enter and nested 3939 * VM-Exit (via kvm_mmu_reset_context()). 3940 */ 3941 if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) 3942 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; 3943 else 3944 svm->current_vmcb->asid_generation--; 3945 } 3946 3947 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) 3948 { 3949 hpa_t root_tdp = vcpu->arch.mmu->root.hpa; 3950 3951 /* 3952 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly 3953 * flush the NPT mappings via hypercall as flushing the ASID only 3954 * affects virtual to physical mappings, it does not invalidate guest 3955 * physical to host physical mappings. 3956 */ 3957 if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp)) 3958 hyperv_flush_guest_mapping(root_tdp); 3959 3960 svm_flush_tlb_asid(vcpu); 3961 } 3962 3963 static void svm_flush_tlb_all(struct kvm_vcpu *vcpu) 3964 { 3965 /* 3966 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB 3967 * flushes should be routed to hv_flush_remote_tlbs() without requesting 3968 * a "regular" remote flush. Reaching this point means either there's 3969 * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of 3970 * which might be fatal to the guest. Yell, but try to recover. 3971 */ 3972 if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu))) 3973 hv_flush_remote_tlbs(vcpu->kvm); 3974 3975 svm_flush_tlb_asid(vcpu); 3976 } 3977 3978 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) 3979 { 3980 struct vcpu_svm *svm = to_svm(vcpu); 3981 3982 invlpga(gva, svm->vmcb->control.asid); 3983 } 3984 3985 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) 3986 { 3987 struct vcpu_svm *svm = to_svm(vcpu); 3988 3989 if (nested_svm_virtualize_tpr(vcpu)) 3990 return; 3991 3992 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { 3993 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; 3994 kvm_set_cr8(vcpu, cr8); 3995 } 3996 } 3997 3998 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) 3999 { 4000 struct vcpu_svm *svm = to_svm(vcpu); 4001 u64 cr8; 4002 4003 if (nested_svm_virtualize_tpr(vcpu)) 4004 return; 4005 4006 cr8 = kvm_get_cr8(vcpu); 4007 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; 4008 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; 4009 } 4010 4011 static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector, 4012 int type) 4013 { 4014 bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT); 4015 bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT); 4016 struct vcpu_svm *svm = to_svm(vcpu); 4017 4018 /* 4019 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's 4020 * associated with the original soft exception/interrupt. next_rip is 4021 * cleared on all exits that can occur while vectoring an event, so KVM 4022 * needs to manually set next_rip for re-injection. Unlike the !nrips 4023 * case below, this needs to be done if and only if KVM is re-injecting 4024 * the same event, i.e. if the event is a soft exception/interrupt, 4025 * otherwise next_rip is unused on VMRUN. 
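 * e.g. if an injected INT3 is interrupted by an intercepted #PF while being delivered, re-injecting the INT3 requires restoring the next_rip that was snapshotted when the INT3 was first injected, as hardware won't recompute it.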
4026 */ 4027 if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) && 4028 kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase)) 4029 svm->vmcb->control.next_rip = svm->soft_int_next_rip; 4030 /* 4031 * If NRIPS isn't enabled, KVM must manually advance RIP prior to 4032 * injecting the soft exception/interrupt. That advancement needs to 4033 * be unwound if vectoring didn't complete. Note, the new event may 4034 * not be the injected event, e.g. if KVM injected an INTn, the INTn 4035 * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will 4036 * be the reported vectored event, but RIP still needs to be unwound. 4037 */ 4038 else if (!nrips && (is_soft || is_exception) && 4039 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase)) 4040 kvm_rip_write(vcpu, svm->soft_int_old_rip); 4041 } 4042 4043 static void svm_complete_interrupts(struct kvm_vcpu *vcpu) 4044 { 4045 struct vcpu_svm *svm = to_svm(vcpu); 4046 u8 vector; 4047 int type; 4048 u32 exitintinfo = svm->vmcb->control.exit_int_info; 4049 bool nmi_l1_to_l2 = svm->nmi_l1_to_l2; 4050 bool soft_int_injected = svm->soft_int_injected; 4051 4052 svm->nmi_l1_to_l2 = false; 4053 svm->soft_int_injected = false; 4054 4055 /* 4056 * If we've made progress since setting awaiting_iret_completion, we've 4057 * executed an IRET and can allow NMI injection. 4058 */ 4059 if (svm->awaiting_iret_completion && 4060 kvm_rip_read(vcpu) != svm->nmi_iret_rip) { 4061 svm->awaiting_iret_completion = false; 4062 svm->nmi_masked = false; 4063 kvm_make_request(KVM_REQ_EVENT, vcpu); 4064 } 4065 4066 vcpu->arch.nmi_injected = false; 4067 kvm_clear_exception_queue(vcpu); 4068 kvm_clear_interrupt_queue(vcpu); 4069 4070 if (!(exitintinfo & SVM_EXITINTINFO_VALID)) 4071 return; 4072 4073 kvm_make_request(KVM_REQ_EVENT, vcpu); 4074 4075 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; 4076 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; 4077 4078 if (soft_int_injected) 4079 svm_complete_soft_interrupt(vcpu, vector, type); 4080 4081 switch (type) { 4082 case SVM_EXITINTINFO_TYPE_NMI: 4083 vcpu->arch.nmi_injected = true; 4084 svm->nmi_l1_to_l2 = nmi_l1_to_l2; 4085 break; 4086 case SVM_EXITINTINFO_TYPE_EXEPT: { 4087 u32 error_code = 0; 4088 4089 /* 4090 * Never re-inject a #VC exception. 
4091 */ 4092 if (vector == X86_TRAP_VC) 4093 break; 4094 4095 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) 4096 error_code = svm->vmcb->control.exit_int_info_err; 4097 4098 kvm_requeue_exception(vcpu, vector, 4099 exitintinfo & SVM_EXITINTINFO_VALID_ERR, 4100 error_code); 4101 break; 4102 } 4103 case SVM_EXITINTINFO_TYPE_INTR: 4104 kvm_queue_interrupt(vcpu, vector, false); 4105 break; 4106 case SVM_EXITINTINFO_TYPE_SOFT: 4107 kvm_queue_interrupt(vcpu, vector, true); 4108 break; 4109 default: 4110 break; 4111 } 4112 4113 } 4114 4115 static void svm_cancel_injection(struct kvm_vcpu *vcpu) 4116 { 4117 struct vcpu_svm *svm = to_svm(vcpu); 4118 struct vmcb_control_area *control = &svm->vmcb->control; 4119 4120 control->exit_int_info = control->event_inj; 4121 control->exit_int_info_err = control->event_inj_err; 4122 control->event_inj = 0; 4123 svm_complete_interrupts(vcpu); 4124 } 4125 4126 static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) 4127 { 4128 if (to_kvm_sev_info(vcpu->kvm)->need_init) 4129 return -EINVAL; 4130 4131 return 1; 4132 } 4133 4134 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) 4135 { 4136 struct vcpu_svm *svm = to_svm(vcpu); 4137 struct vmcb_control_area *control = &svm->vmcb->control; 4138 4139 /* 4140 * Next RIP must be provided as IRQs are disabled, and accessing guest 4141 * memory to decode the instruction might fault, i.e. might sleep. 4142 */ 4143 if (!nrips || !control->next_rip) 4144 return EXIT_FASTPATH_NONE; 4145 4146 if (is_guest_mode(vcpu)) 4147 return EXIT_FASTPATH_NONE; 4148 4149 switch (control->exit_code) { 4150 case SVM_EXIT_MSR: 4151 if (!control->exit_info_1) 4152 break; 4153 return handle_fastpath_wrmsr(vcpu); 4154 case SVM_EXIT_HLT: 4155 return handle_fastpath_hlt(vcpu); 4156 case SVM_EXIT_INVD: 4157 return handle_fastpath_invd(vcpu); 4158 default: 4159 break; 4160 } 4161 4162 return EXIT_FASTPATH_NONE; 4163 } 4164 4165 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted) 4166 { 4167 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); 4168 struct vcpu_svm *svm = to_svm(vcpu); 4169 4170 guest_state_enter_irqoff(); 4171 4172 /* 4173 * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of 4174 * VMRUN controls whether or not physical IRQs are masked (KVM always 4175 * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the 4176 * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow 4177 * into guest state if delivery of an event during VMRUN triggers a 4178 * #VMEXIT, and the guest_state transitions already tell lockdep that 4179 * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of 4180 * this path, so IRQs aren't actually unmasked while running host code. 
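 * The net sequence, including the CLGI/STGI pair in svm_vcpu_run(), is roughly CLGI -> STI -> VMRUN -> CLI -> STGI, with GIF=0 keeping physical IRQs masked across the STI.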
4181 */ 4182 raw_local_irq_enable(); 4183 4184 amd_clear_divider(); 4185 4186 if (sev_es_guest(vcpu->kvm)) 4187 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted, 4188 sev_es_host_save_area(sd)); 4189 else 4190 __svm_vcpu_run(svm, spec_ctrl_intercepted); 4191 4192 raw_local_irq_disable(); 4193 4194 guest_state_exit_irqoff(); 4195 } 4196 4197 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) 4198 { 4199 bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT; 4200 struct vcpu_svm *svm = to_svm(vcpu); 4201 bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL); 4202 4203 trace_kvm_entry(vcpu, force_immediate_exit); 4204 4205 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 4206 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 4207 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 4208 4209 /* 4210 * Disable singlestep if we're injecting an interrupt/exception. 4211 * We don't want our modified rflags to be pushed on the stack where 4212 * we might not be able to easily reset them if we disabled NMI 4213 * singlestep later. 4214 */ 4215 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { 4216 /* 4217 * Event injection happens before external interrupts cause a 4218 * vmexit and interrupts are disabled here, so smp_send_reschedule 4219 * is enough to force an immediate vmexit. 4220 */ 4221 disable_nmi_singlestep(svm); 4222 force_immediate_exit = true; 4223 } 4224 4225 if (force_immediate_exit) 4226 smp_send_reschedule(vcpu->cpu); 4227 4228 if (pre_svm_run(vcpu)) { 4229 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 4230 vcpu->run->fail_entry.hardware_entry_failure_reason = SVM_EXIT_ERR; 4231 vcpu->run->fail_entry.cpu = vcpu->cpu; 4232 return EXIT_FASTPATH_EXIT_USERSPACE; 4233 } 4234 4235 sync_lapic_to_cr8(vcpu); 4236 4237 if (unlikely(svm->asid != svm->vmcb->control.asid)) { 4238 svm->vmcb->control.asid = svm->asid; 4239 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); 4240 } 4241 svm->vmcb->save.cr2 = vcpu->arch.cr2; 4242 4243 svm_hv_update_vp_id(svm->vmcb, vcpu); 4244 4245 /* 4246 * Run with all-zero DR6 unless the guest can write DR6 freely, so that 4247 * KVM can get the exact cause of a #DB. Note, loading guest DR6 from 4248 * KVM's snapshot is only necessary when DR accesses won't exit. 4249 */ 4250 if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6)) 4251 svm_set_dr6(vcpu, vcpu->arch.dr6); 4252 else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) 4253 svm_set_dr6(vcpu, DR6_ACTIVE_LOW); 4254 4255 clgi(); 4256 4257 /* 4258 * Hardware only context switches DEBUGCTL if LBR virtualization is 4259 * enabled. Manually load DEBUGCTL if necessary (and restore it after 4260 * VM-Exit), as running with the host's DEBUGCTL can negatively affect 4261 * guest state and can even be fatal, e.g. due to Bus Lock Detect. 4262 */ 4263 if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && 4264 vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) 4265 update_debugctlmsr(svm->vmcb->save.dbgctl); 4266 4267 kvm_wait_lapic_expire(vcpu); 4268 4269 /* 4270 * If this vCPU has touched SPEC_CTRL, restore the guest's value if 4271 * it's non-zero. Since vmentry is serialising on affected CPUs, there 4272 * is no need to worry about the conditional branch over the wrmsr 4273 * being speculatively taken. 
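 * (On CPUs with X86_FEATURE_V_SPEC_CTRL, the VMCB context switches SPEC_CTRL automatically and both calls below are skipped.)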
4274 */ 4275 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 4276 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl); 4277 4278 svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted); 4279 4280 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 4281 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl); 4282 4283 if (!sev_es_guest(vcpu->kvm)) { 4284 vcpu->arch.cr2 = svm->vmcb->save.cr2; 4285 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 4286 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 4287 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 4288 } 4289 vcpu->arch.regs_dirty = 0; 4290 4291 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 4292 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI); 4293 4294 if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && 4295 vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) 4296 update_debugctlmsr(vcpu->arch.host_debugctl); 4297 4298 stgi(); 4299 4300 /* Any pending NMI will happen here */ 4301 4302 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 4303 kvm_after_interrupt(vcpu); 4304 4305 sync_cr8_to_lapic(vcpu); 4306 4307 svm->next_rip = 0; 4308 if (is_guest_mode(vcpu)) { 4309 nested_sync_control_from_vmcb02(svm); 4310 4311 /* Track VMRUNs that have made it past consistency checking */ 4312 if (svm->nested.nested_run_pending && 4313 svm->vmcb->control.exit_code != SVM_EXIT_ERR) 4314 ++vcpu->stat.nested_run; 4315 4316 svm->nested.nested_run_pending = 0; 4317 } 4318 4319 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; 4320 vmcb_mark_all_clean(svm->vmcb); 4321 4322 /* If the exit was due to a #PF, check for an async page fault. */ 4323 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) 4324 vcpu->arch.apf.host_apf_flags = 4325 kvm_read_and_reset_apf_flags(); 4326 4327 vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; 4328 4329 trace_kvm_exit(vcpu, KVM_ISA_SVM); 4330 4331 svm_complete_interrupts(vcpu); 4332 4333 return svm_exit_handlers_fastpath(vcpu); 4334 } 4335 4336 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, 4337 int root_level) 4338 { 4339 struct vcpu_svm *svm = to_svm(vcpu); 4340 unsigned long cr3; 4341 4342 if (npt_enabled) { 4343 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); 4344 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); 4345 4346 hv_track_root_tdp(vcpu, root_hpa); 4347 4348 cr3 = vcpu->arch.cr3; 4349 } else if (root_level >= PT64_ROOT_4LEVEL) { 4350 cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); 4351 } else { 4352 /* PCID in the guest should be impossible with a 32-bit MMU. */ 4353 WARN_ON_ONCE(kvm_get_active_pcid(vcpu)); 4354 cr3 = root_hpa; 4355 } 4356 4357 svm->vmcb->save.cr3 = cr3; 4358 vmcb_mark_dirty(svm->vmcb, VMCB_CR); 4359 } 4360 4361 static void 4362 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 4363 { 4364 /* 4365 * Patch in the VMMCALL instruction: 4366 */ 4367 hypercall[0] = 0x0f; 4368 hypercall[1] = 0x01; 4369 hypercall[2] = 0xd9; 4370 } 4371 4372 /* 4373 * The kvm parameter can be NULL (module initialization, or invocation before 4374 * VM creation). Be sure to check the kvm parameter before using it. 4375 */ 4376 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) 4377 { 4378 switch (index) { 4379 case MSR_IA32_MCG_EXT_CTL: 4380 case KVM_FIRST_EMULATED_VMX_MSR ...
KVM_LAST_EMULATED_VMX_MSR: 4381 return false; 4382 case MSR_IA32_SMBASE: 4383 if (!IS_ENABLED(CONFIG_KVM_SMM)) 4384 return false; 4385 /* SEV-ES guests do not support SMM, so report false */ 4386 if (kvm && sev_es_guest(kvm)) 4387 return false; 4388 break; 4389 default: 4390 break; 4391 } 4392 4393 return true; 4394 } 4395 4396 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) 4397 { 4398 struct vcpu_svm *svm = to_svm(vcpu); 4399 4400 /* 4401 * SVM doesn't provide a way to disable just XSAVES in the guest; KVM 4402 * can only disable all variants by disallowing CR4.OSXSAVE from 4403 * being set. As a result, if the host has XSAVE and XSAVES, and the 4404 * guest has XSAVE enabled, the guest can execute XSAVES without 4405 * faulting. Treat XSAVES as enabled in this case regardless of 4406 * whether it's advertised to the guest so that KVM context switches 4407 * XSS on VM-Enter/VM-Exit. Failure to do so would effectively give 4408 * the guest read/write access to the host's XSS. 4409 */ 4410 guest_cpu_cap_change(vcpu, X86_FEATURE_XSAVES, 4411 boot_cpu_has(X86_FEATURE_XSAVES) && 4412 guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE)); 4413 4414 /* 4415 * Intercept VMLOAD if the vCPU model is Intel in order to emulate that 4416 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing 4417 * SVM on Intel is bonkers and extremely unlikely to work). 4418 */ 4419 if (guest_cpuid_is_intel_compatible(vcpu)) 4420 guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); 4421 4422 if (sev_guest(vcpu->kvm)) 4423 sev_vcpu_after_set_cpuid(svm); 4424 } 4425 4426 static bool svm_has_wbinvd_exit(void) 4427 { 4428 return true; 4429 } 4430 4431 #define PRE_EX(exit) { .exit_code = (exit), \ 4432 .stage = X86_ICPT_PRE_EXCEPT, } 4433 #define POST_EX(exit) { .exit_code = (exit), \ 4434 .stage = X86_ICPT_POST_EXCEPT, } 4435 #define POST_MEM(exit) { .exit_code = (exit), \ 4436 .stage = X86_ICPT_POST_MEMACCESS, } 4437 4438 static const struct __x86_intercept { 4439 u32 exit_code; 4440 enum x86_intercept_stage stage; 4441 } x86_intercept_map[] = { 4442 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), 4443 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), 4444 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), 4445 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), 4446 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), 4447 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), 4448 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), 4449 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), 4450 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), 4451 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), 4452 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), 4453 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), 4454 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), 4455 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), 4456 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), 4457 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), 4458 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), 4459 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), 4460 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), 4461 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), 4462 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), 4463 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), 4464 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), 4465 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), 4466 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), 4467 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
4468 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), 4469 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), 4470 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), 4471 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), 4472 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), 4473 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), 4474 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), 4475 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), 4476 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), 4477 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), 4478 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), 4479 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), 4480 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), 4481 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), 4482 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), 4483 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), 4484 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), 4485 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), 4486 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), 4487 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), 4488 [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), 4489 }; 4490 4491 #undef PRE_EX 4492 #undef POST_EX 4493 #undef POST_MEM 4494 4495 static int svm_check_intercept(struct kvm_vcpu *vcpu, 4496 struct x86_instruction_info *info, 4497 enum x86_intercept_stage stage, 4498 struct x86_exception *exception) 4499 { 4500 struct vcpu_svm *svm = to_svm(vcpu); 4501 int vmexit, ret = X86EMUL_CONTINUE; 4502 struct __x86_intercept icpt_info; 4503 struct vmcb *vmcb = svm->vmcb; 4504 4505 if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) 4506 goto out; 4507 4508 icpt_info = x86_intercept_map[info->intercept]; 4509 4510 if (stage != icpt_info.stage) 4511 goto out; 4512 4513 switch (icpt_info.exit_code) { 4514 case SVM_EXIT_READ_CR0: 4515 if (info->intercept == x86_intercept_cr_read) 4516 icpt_info.exit_code += info->modrm_reg; 4517 break; 4518 case SVM_EXIT_WRITE_CR0: { 4519 unsigned long cr0, val; 4520 4521 /* 4522 * Adjust the exit code accordingly if a CR other than CR0 is 4523 * being written, and skip straight to the common handling as 4524 * only CR0 has an additional selective intercept. 4525 */ 4526 if (info->intercept == x86_intercept_cr_write && info->modrm_reg) { 4527 icpt_info.exit_code += info->modrm_reg; 4528 break; 4529 } 4530 4531 /* 4532 * Convert the exit_code to SVM_EXIT_CR0_SEL_WRITE if a 4533 * selective CR0 intercept is triggered (the common logic will 4534 * treat the selective intercept as being enabled). Note, the 4535 * unconditional intercept has higher priority, i.e. this is 4536 * only relevant if *only* the selective intercept is enabled. 4537 */ 4538 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_CR0_WRITE) || 4539 !(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))) 4540 break; 4541 4542 /* CLTS never triggers INTERCEPT_SELECTIVE_CR0 */ 4543 if (info->intercept == x86_intercept_clts) 4544 break; 4545 4546 /* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */ 4547 if (info->intercept == x86_intercept_lmsw) { 4548 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; 4549 break; 4550 } 4551 4552 /* 4553 * MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit 4554 * other than SVM_CR0_SELECTIVE_MASK is changed. 
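 * e.g. toggling CR0.CD triggers the selective intercept, whereas a write that only changes the bits in SVM_CR0_SELECTIVE_MASK (TS and MP) does not.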
4555 */ 4556 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; 4557 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; 4558 if (cr0 ^ val) 4559 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; 4560 break; 4561 } 4562 case SVM_EXIT_READ_DR0: 4563 case SVM_EXIT_WRITE_DR0: 4564 icpt_info.exit_code += info->modrm_reg; 4565 break; 4566 case SVM_EXIT_MSR: 4567 if (info->intercept == x86_intercept_wrmsr) 4568 vmcb->control.exit_info_1 = 1; 4569 else 4570 vmcb->control.exit_info_1 = 0; 4571 break; 4572 case SVM_EXIT_PAUSE: 4573 /* 4574 * PAUSE is a NOP with a REP prefix; only the REP-prefixed form 4575 * is an actual PAUSE, so bail if the REP prefix is missing. 4576 */ 4577 if (info->rep_prefix != REPE_PREFIX) 4578 goto out; 4579 break; 4580 case SVM_EXIT_IOIO: { 4581 u64 exit_info; 4582 u32 bytes; 4583 4584 if (info->intercept == x86_intercept_in || 4585 info->intercept == x86_intercept_ins) { 4586 exit_info = ((info->src_val & 0xffff) << 16) | 4587 SVM_IOIO_TYPE_MASK; 4588 bytes = info->dst_bytes; 4589 } else { 4590 exit_info = (info->dst_val & 0xffff) << 16; 4591 bytes = info->src_bytes; 4592 } 4593 4594 if (info->intercept == x86_intercept_outs || 4595 info->intercept == x86_intercept_ins) 4596 exit_info |= SVM_IOIO_STR_MASK; 4597 4598 if (info->rep_prefix) 4599 exit_info |= SVM_IOIO_REP_MASK; 4600 4601 bytes = min(bytes, 4u); 4602 4603 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; 4604 4605 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); 4606 4607 vmcb->control.exit_info_1 = exit_info; 4608 vmcb->control.exit_info_2 = info->next_rip; 4609 4610 break; 4611 } 4612 default: 4613 break; 4614 } 4615 4616 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ 4617 if (static_cpu_has(X86_FEATURE_NRIPS)) 4618 vmcb->control.next_rip = info->next_rip; 4619 vmcb->control.exit_code = icpt_info.exit_code; 4620 vmexit = nested_svm_exit_handled(svm); 4621 4622 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED 4623 : X86EMUL_CONTINUE; 4624 4625 out: 4626 return ret; 4627 } 4628 4629 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) 4630 { 4631 switch (to_svm(vcpu)->vmcb->control.exit_code) { 4632 case SVM_EXIT_EXCP_BASE + MC_VECTOR: 4633 svm_handle_mce(vcpu); 4634 break; 4635 case SVM_EXIT_INTR: 4636 vcpu->arch.at_instruction_boundary = true; 4637 break; 4638 default: 4639 break; 4640 } 4641 } 4642 4643 static void svm_setup_mce(struct kvm_vcpu *vcpu) 4644 { 4645 /* [63:9] are reserved. */ 4646 vcpu->arch.mcg_cap &= 0x1ff; 4647 } 4648 4649 #ifdef CONFIG_KVM_SMM 4650 bool svm_smi_blocked(struct kvm_vcpu *vcpu) 4651 { 4652 struct vcpu_svm *svm = to_svm(vcpu); 4653 4654 /* Per APM Vol.2 15.22.2 "Response to SMI" */ 4655 if (!gif_set(svm)) 4656 return true; 4657 4658 return is_smm(vcpu); 4659 } 4660 4661 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4662 { 4663 struct vcpu_svm *svm = to_svm(vcpu); 4664 if (svm->nested.nested_run_pending) 4665 return -EBUSY; 4666 4667 if (svm_smi_blocked(vcpu)) 4668 return 0; 4669 4670 /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */ 4671 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) 4672 return -EBUSY; 4673 4674 return 1; 4675 } 4676 4677 static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) 4678 { 4679 struct vcpu_svm *svm = to_svm(vcpu); 4680 struct kvm_host_map map_save; 4681 int ret; 4682 4683 if (!is_guest_mode(vcpu)) 4684 return 0; 4685 4686 /* 4687 * 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is 4688 * responsible for ensuring nested SVM and SMIs are mutually exclusive.
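 * (Only the 64-bit SMRAM image below has fields for the SVM guest flag and the VMCB12 GPA, hence the long mode check that follows.)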
4689 */ 4690 4691 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) 4692 return 1; 4693 4694 smram->smram64.svm_guest_flag = 1; 4695 smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; 4696 4697 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 4698 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 4699 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 4700 4701 ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW); 4702 if (ret) 4703 return ret; 4704 4705 /* 4706 * KVM uses VMCB01 to store L1 host state while L2 runs but 4707 * VMCB01 is going to be used during SMM and thus the state will 4708 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save 4709 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the 4710 * format of the area is identical to the guest save area offset 4711 * by 0x400 (matches the offset of 'struct vmcb_save_area' 4712 * within 'struct vmcb'). Note: HSAVE area may also be used by 4713 * L1 hypervisor to save additional host context (e.g. KVM does 4714 * that, see svm_prepare_switch_to_guest()) which must be 4715 * preserved. 4716 */ 4717 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) 4718 return 1; 4719 4720 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); 4721 4722 svm_copy_vmrun_state(map_save.hva + 0x400, 4723 &svm->vmcb01.ptr->save); 4724 4725 kvm_vcpu_unmap(vcpu, &map_save); 4726 return 0; 4727 } 4728 4729 static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) 4730 { 4731 struct vcpu_svm *svm = to_svm(vcpu); 4732 struct kvm_host_map map, map_save; 4733 struct vmcb *vmcb12; 4734 int ret; 4735 4736 const struct kvm_smram_state_64 *smram64 = &smram->smram64; 4737 4738 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) 4739 return 0; 4740 4741 /* Non-zero if SMI arrived while vCPU was in guest mode. */ 4742 if (!smram64->svm_guest_flag) 4743 return 0; 4744 4745 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SVM)) 4746 return 1; 4747 4748 if (!(smram64->efer & EFER_SVME)) 4749 return 1; 4750 4751 if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map)) 4752 return 1; 4753 4754 ret = 1; 4755 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) 4756 goto unmap_map; 4757 4758 if (svm_allocate_nested(svm)) 4759 goto unmap_save; 4760 4761 /* 4762 * Restore L1 host state from the L1 HSAVE area, as VMCB01 was 4763 * used during SMM (see svm_enter_smm()). 4764 */ 4765 4766 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); 4767 4768 /* 4769 * Enter the nested guest now 4770 */ 4771 4772 vmcb_mark_all_dirty(svm->vmcb01.ptr); 4773 4774 vmcb12 = map.hva; 4775 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); 4776 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); 4777 ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false); 4778 4779 if (ret) 4780 goto unmap_save; 4781 4782 svm->nested.nested_run_pending = 1; 4783 4784 unmap_save: 4785 kvm_vcpu_unmap(vcpu, &map_save); 4786 unmap_map: 4787 kvm_vcpu_unmap(vcpu, &map); 4788 return ret; 4789 } 4790 4791 static void svm_enable_smi_window(struct kvm_vcpu *vcpu) 4792 { 4793 struct vcpu_svm *svm = to_svm(vcpu); 4794 4795 if (!gif_set(svm)) { 4796 if (vgif) 4797 svm_set_intercept(svm, INTERCEPT_STGI); 4798 /* STGI will cause a vm exit */ 4799 } else { 4800 /* We must be in SMM; RSM will cause a vmexit anyway.
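 * (RSM itself is intercepted and emulated by KVM, so leaving SMM is guaranteed to produce a vmexit and re-run the event injection logic.)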
*/ 4801 } 4802 } 4803 #endif 4804 4805 static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, 4806 void *insn, int insn_len) 4807 { 4808 struct vcpu_svm *svm = to_svm(vcpu); 4809 bool smep, smap, is_user; 4810 u64 error_code; 4811 4812 /* Check that emulation is possible during event vectoring */ 4813 if ((svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK) && 4814 !kvm_can_emulate_event_vectoring(emul_type)) 4815 return X86EMUL_UNHANDLEABLE_VECTORING; 4816 4817 /* Emulation is always possible when KVM has access to all guest state. */ 4818 if (!sev_guest(vcpu->kvm)) 4819 return X86EMUL_CONTINUE; 4820 4821 /* #UD and #GP should never be intercepted for SEV guests. */ 4822 WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD | 4823 EMULTYPE_TRAP_UD_FORCED | 4824 EMULTYPE_VMWARE_GP)); 4825 4826 /* 4827 * Emulation is impossible for SEV-ES guests as KVM doesn't have access 4828 * to guest register state. 4829 */ 4830 if (sev_es_guest(vcpu->kvm)) 4831 return X86EMUL_RETRY_INSTR; 4832 4833 /* 4834 * Emulation is possible if the instruction is already decoded, e.g. 4835 * when completing I/O after returning from userspace. 4836 */ 4837 if (emul_type & EMULTYPE_NO_DECODE) 4838 return X86EMUL_CONTINUE; 4839 4840 /* 4841 * Emulation is possible for SEV guests if and only if a prefilled 4842 * buffer containing the bytes of the intercepted instruction is 4843 * available. SEV guest memory is encrypted with a guest specific key 4844 * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and 4845 * decode garbage. 4846 * 4847 * If KVM is NOT trying to simply skip an instruction, inject #UD if 4848 * KVM reached this point without an instruction buffer. In practice, 4849 * this path should never be hit by a well-behaved guest, e.g. KVM 4850 * doesn't intercept #UD or #GP for SEV guests, but this path is still 4851 * theoretically reachable, e.g. via unaccelerated fault-like AVIC 4852 * access, and needs to be handled by KVM to avoid putting the guest 4853 * into an infinite loop. Injecting #UD is somewhat arbitrary, but 4854 * it's the least awful option given the lack of insight into the guest. 4855 * 4856 * If KVM is trying to skip an instruction, simply resume the guest. 4857 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM 4858 * will attempt to re-inject the INT3/INTO and skip the instruction. 4859 * In that scenario, retrying the INT3/INTO and hoping the guest will 4860 * make forward progress is the only option that has a chance of 4861 * success (and in practice it will work the vast majority of the time). 4862 */ 4863 if (unlikely(!insn)) { 4864 if (emul_type & EMULTYPE_SKIP) 4865 return X86EMUL_UNHANDLEABLE; 4866 4867 kvm_queue_exception(vcpu, UD_VECTOR); 4868 return X86EMUL_PROPAGATE_FAULT; 4869 } 4870 4871 /* 4872 * Emulate for SEV guests if the insn buffer is not empty. The buffer 4873 * will be empty if the DecodeAssist microcode cannot fetch bytes for 4874 * the faulting instruction because the code fetch itself faulted, e.g. 4875 * the guest attempted to fetch from emulated MMIO or a guest page 4876 * table used to translate CS:RIP resides in emulated MMIO. 4877 */ 4878 if (likely(insn_len)) 4879 return X86EMUL_CONTINUE; 4880 4881 /* 4882 * Detect and work around Errata 1096 Fam_17h_00_0Fh.
4883 * 4884 * Errata: 4885 * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is 4886 * possible that CPU microcode implementing DecodeAssist will fail to 4887 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly 4888 * be '0'. This happens because microcode reads CS:RIP using a _data_ 4889 * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode 4890 * gives up and does not fill the instruction bytes buffer. 4891 * 4892 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU 4893 * supports DecodeAssist, a #NPF was raised, KVM's page fault handler 4894 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the 4895 * GuestIntrBytes field of the VMCB. 4896 * 4897 * This does _not_ mean that the erratum has been encountered, as the 4898 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate 4899 * #PF, e.g. if the guest attempted to execute from emulated MMIO and 4900 * encountered a reserved/not-present #PF. 4901 * 4902 * To hit the erratum, the following conditions must be true: 4903 * 1. CR4.SMAP=1 (obviously). 4904 * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot 4905 * have been hit as the guest would have encountered a SMEP 4906 * violation #PF, not a #NPF. 4907 * 3. The #NPF is not due to a code fetch, in which case failure to 4908 * retrieve the instruction bytes is legitimate (see above). 4909 * 4910 * In addition, don't apply the erratum workaround if the #NPF occurred 4911 * while translating guest page tables (see below). 4912 */ 4913 error_code = svm->vmcb->control.exit_info_1; 4914 if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK)) 4915 goto resume_guest; 4916 4917 smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP); 4918 smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP); 4919 is_user = svm_get_cpl(vcpu) == 3; 4920 if (smap && (!smep || is_user)) { 4921 pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n"); 4922 4923 /* 4924 * If the fault occurred in userspace, arbitrarily inject #GP 4925 * to avoid killing the guest and to hopefully avoid confusing 4926 * the guest kernel too much, e.g. injecting #PF would not be 4927 * coherent with respect to the guest's page tables. Request 4928 * triple fault if the fault occurred in the kernel as there's 4929 * no fault that KVM can inject without confusing the guest. 4930 * In practice, the triple fault is moot as no sane SEV kernel 4931 * will execute from user memory while also running with SMAP=1. 4932 */ 4933 if (is_user) 4934 kvm_inject_gp(vcpu, 0); 4935 else 4936 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4937 return X86EMUL_PROPAGATE_FAULT; 4938 } 4939 4940 resume_guest: 4941 /* 4942 * If the erratum was not hit, simply resume the guest and let it fault 4943 * again. While awful, e.g. the vCPU may get stuck in an infinite loop 4944 * if the fault is at CPL=0, it's the lesser of all evils. Exiting to 4945 * userspace will kill the guest, and letting the emulator read garbage 4946 * will yield random behavior and potentially corrupt the guest. 4947 * 4948 * Simply resuming the guest is technically not a violation of the SEV 4949 * architecture. AMD's APM states that all code fetches and page table 4950 * accesses for SEV guests are encrypted, regardless of the C-Bit. The 4951 * APM also states that encrypted accesses to MMIO are "ignored", but 4952 * doesn't explicitly define "ignored", i.e. doing nothing and letting 4953 * the guest spin is technically "ignoring" the access.
4954 */ 4955 return X86EMUL_RETRY_INSTR; 4956 } 4957 4958 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu) 4959 { 4960 struct vcpu_svm *svm = to_svm(vcpu); 4961 4962 return !gif_set(svm); 4963 } 4964 4965 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 4966 { 4967 if (!sev_es_guest(vcpu->kvm)) 4968 return kvm_vcpu_deliver_sipi_vector(vcpu, vector); 4969 4970 sev_vcpu_deliver_sipi_vector(vcpu, vector); 4971 } 4972 4973 static void svm_vm_destroy(struct kvm *kvm) 4974 { 4975 avic_vm_destroy(kvm); 4976 sev_vm_destroy(kvm); 4977 4978 svm_srso_vm_destroy(); 4979 } 4980 4981 static int svm_vm_init(struct kvm *kvm) 4982 { 4983 int type = kvm->arch.vm_type; 4984 4985 if (type != KVM_X86_DEFAULT_VM && 4986 type != KVM_X86_SW_PROTECTED_VM) { 4987 kvm->arch.has_protected_state = 4988 (type == KVM_X86_SEV_ES_VM || type == KVM_X86_SNP_VM); 4989 to_kvm_sev_info(kvm)->need_init = true; 4990 4991 kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM); 4992 kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem; 4993 } 4994 4995 if (!pause_filter_count || !pause_filter_thresh) 4996 kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE); 4997 4998 if (enable_apicv) { 4999 int ret = avic_vm_init(kvm); 5000 if (ret) 5001 return ret; 5002 } 5003 5004 svm_srso_vm_init(); 5005 return 0; 5006 } 5007 5008 static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu) 5009 { 5010 struct page *page = snp_safe_alloc_page(); 5011 5012 if (!page) 5013 return NULL; 5014 5015 return page_address(page); 5016 } 5017 5018 struct kvm_x86_ops svm_x86_ops __initdata = { 5019 .name = KBUILD_MODNAME, 5020 5021 .check_processor_compatibility = svm_check_processor_compat, 5022 5023 .hardware_unsetup = svm_hardware_unsetup, 5024 .enable_virtualization_cpu = svm_enable_virtualization_cpu, 5025 .disable_virtualization_cpu = svm_disable_virtualization_cpu, 5026 .emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu, 5027 .has_emulated_msr = svm_has_emulated_msr, 5028 5029 .vcpu_precreate = svm_vcpu_precreate, 5030 .vcpu_create = svm_vcpu_create, 5031 .vcpu_free = svm_vcpu_free, 5032 .vcpu_reset = svm_vcpu_reset, 5033 5034 .vm_size = sizeof(struct kvm_svm), 5035 .vm_init = svm_vm_init, 5036 .vm_destroy = svm_vm_destroy, 5037 5038 .prepare_switch_to_guest = svm_prepare_switch_to_guest, 5039 .vcpu_load = svm_vcpu_load, 5040 .vcpu_put = svm_vcpu_put, 5041 .vcpu_blocking = avic_vcpu_blocking, 5042 .vcpu_unblocking = avic_vcpu_unblocking, 5043 5044 .update_exception_bitmap = svm_update_exception_bitmap, 5045 .get_feature_msr = svm_get_feature_msr, 5046 .get_msr = svm_get_msr, 5047 .set_msr = svm_set_msr, 5048 .get_segment_base = svm_get_segment_base, 5049 .get_segment = svm_get_segment, 5050 .set_segment = svm_set_segment, 5051 .get_cpl = svm_get_cpl, 5052 .get_cpl_no_cache = svm_get_cpl, 5053 .get_cs_db_l_bits = svm_get_cs_db_l_bits, 5054 .is_valid_cr0 = svm_is_valid_cr0, 5055 .set_cr0 = svm_set_cr0, 5056 .post_set_cr3 = sev_post_set_cr3, 5057 .is_valid_cr4 = svm_is_valid_cr4, 5058 .set_cr4 = svm_set_cr4, 5059 .set_efer = svm_set_efer, 5060 .get_idt = svm_get_idt, 5061 .set_idt = svm_set_idt, 5062 .get_gdt = svm_get_gdt, 5063 .set_gdt = svm_set_gdt, 5064 .set_dr7 = svm_set_dr7, 5065 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, 5066 .cache_reg = svm_cache_reg, 5067 .get_rflags = svm_get_rflags, 5068 .set_rflags = svm_set_rflags, 5069 .get_if_flag = svm_get_if_flag, 5070 5071 .flush_tlb_all = svm_flush_tlb_all, 5072 .flush_tlb_current = svm_flush_tlb_current, 5073 
.flush_tlb_gva = svm_flush_tlb_gva, 5074 .flush_tlb_guest = svm_flush_tlb_asid, 5075 5076 .vcpu_pre_run = svm_vcpu_pre_run, 5077 .vcpu_run = svm_vcpu_run, 5078 .handle_exit = svm_handle_exit, 5079 .skip_emulated_instruction = svm_skip_emulated_instruction, 5080 .update_emulated_instruction = NULL, 5081 .set_interrupt_shadow = svm_set_interrupt_shadow, 5082 .get_interrupt_shadow = svm_get_interrupt_shadow, 5083 .patch_hypercall = svm_patch_hypercall, 5084 .inject_irq = svm_inject_irq, 5085 .inject_nmi = svm_inject_nmi, 5086 .is_vnmi_pending = svm_is_vnmi_pending, 5087 .set_vnmi_pending = svm_set_vnmi_pending, 5088 .inject_exception = svm_inject_exception, 5089 .cancel_injection = svm_cancel_injection, 5090 .interrupt_allowed = svm_interrupt_allowed, 5091 .nmi_allowed = svm_nmi_allowed, 5092 .get_nmi_mask = svm_get_nmi_mask, 5093 .set_nmi_mask = svm_set_nmi_mask, 5094 .enable_nmi_window = svm_enable_nmi_window, 5095 .enable_irq_window = svm_enable_irq_window, 5096 .update_cr8_intercept = svm_update_cr8_intercept, 5097 5098 .x2apic_icr_is_split = true, 5099 .set_virtual_apic_mode = avic_refresh_virtual_apic_mode, 5100 .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl, 5101 .apicv_post_state_restore = avic_apicv_post_state_restore, 5102 .required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS, 5103 5104 .get_exit_info = svm_get_exit_info, 5105 .get_entry_info = svm_get_entry_info, 5106 5107 .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid, 5108 5109 .has_wbinvd_exit = svm_has_wbinvd_exit, 5110 5111 .get_l2_tsc_offset = svm_get_l2_tsc_offset, 5112 .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier, 5113 .write_tsc_offset = svm_write_tsc_offset, 5114 .write_tsc_multiplier = svm_write_tsc_multiplier, 5115 5116 .load_mmu_pgd = svm_load_mmu_pgd, 5117 5118 .check_intercept = svm_check_intercept, 5119 .handle_exit_irqoff = svm_handle_exit_irqoff, 5120 5121 .nested_ops = &svm_nested_ops, 5122 5123 .deliver_interrupt = svm_deliver_interrupt, 5124 .pi_update_irte = avic_pi_update_irte, 5125 .setup_mce = svm_setup_mce, 5126 5127 #ifdef CONFIG_KVM_SMM 5128 .smi_allowed = svm_smi_allowed, 5129 .enter_smm = svm_enter_smm, 5130 .leave_smm = svm_leave_smm, 5131 .enable_smi_window = svm_enable_smi_window, 5132 #endif 5133 5134 #ifdef CONFIG_KVM_AMD_SEV 5135 .dev_get_attr = sev_dev_get_attr, 5136 .mem_enc_ioctl = sev_mem_enc_ioctl, 5137 .mem_enc_register_region = sev_mem_enc_register_region, 5138 .mem_enc_unregister_region = sev_mem_enc_unregister_region, 5139 .guest_memory_reclaimed = sev_guest_memory_reclaimed, 5140 5141 .vm_copy_enc_context_from = sev_vm_copy_enc_context_from, 5142 .vm_move_enc_context_from = sev_vm_move_enc_context_from, 5143 #endif 5144 .check_emulate_instruction = svm_check_emulate_instruction, 5145 5146 .apic_init_signal_blocked = svm_apic_init_signal_blocked, 5147 5148 .recalc_intercepts = svm_recalc_intercepts, 5149 .complete_emulated_msr = svm_complete_emulated_msr, 5150 5151 .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, 5152 .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, 5153 .alloc_apic_backing_page = svm_alloc_apic_backing_page, 5154 5155 .gmem_prepare = sev_gmem_prepare, 5156 .gmem_invalidate = sev_gmem_invalidate, 5157 .gmem_max_mapping_level = sev_gmem_max_mapping_level, 5158 }; 5159 5160 /* 5161 * The default MMIO mask is a single bit (excluding the present bit), 5162 * which could conflict with the memory encryption bit. Check for 5163 * memory encryption support and override the default MMIO mask if 5164 * memory encryption is enabled. 
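 * For example, with enc_bit == 47 and x86_phys_bits == 48, mask_bit stays 48 and the final mask is rsvd_bits(48, 51) | PT_PRESENT_MASK; had the two been equal, mask_bit would first be bumped past the encryption bit.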
5165 */ 5166 static __init void svm_adjust_mmio_mask(void) 5167 { 5168 unsigned int enc_bit, mask_bit; 5169 u64 msr, mask; 5170 5171 /* If there is no memory encryption support, use existing mask */ 5172 if (cpuid_eax(0x80000000) < 0x8000001f) 5173 return; 5174 5175 /* If memory encryption is not enabled, use existing mask */ 5176 rdmsrq(MSR_AMD64_SYSCFG, msr); 5177 if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) 5178 return; 5179 5180 enc_bit = cpuid_ebx(0x8000001f) & 0x3f; 5181 mask_bit = boot_cpu_data.x86_phys_bits; 5182 5183 /* Increment the mask bit if it is the same as the encryption bit */ 5184 if (enc_bit == mask_bit) 5185 mask_bit++; 5186 5187 /* 5188 * If the mask bit location is below 52, then some bits above the 5189 * physical addressing limit will always be reserved, so use the 5190 * rsvd_bits() function to generate the mask. This mask, along with 5191 * the present bit, will be used to generate a page fault with 5192 * PFER.RSV = 1. 5193 * 5194 * If the mask bit location is 52 (or above), then clear the mask. 5195 */ 5196 mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; 5197 5198 kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); 5199 } 5200 5201 static __init void svm_set_cpu_caps(void) 5202 { 5203 kvm_set_cpu_caps(); 5204 5205 kvm_caps.supported_perf_cap = 0; 5206 5207 kvm_cpu_cap_clear(X86_FEATURE_IBT); 5208 5209 /* CPUID 0x80000001 and 0x8000000A (SVM features) */ 5210 if (nested) { 5211 kvm_cpu_cap_set(X86_FEATURE_SVM); 5212 kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN); 5213 5214 /* 5215 * KVM currently flushes TLBs on *every* nested SVM transition, 5216 * and so for all intents and purposes KVM supports flushing by 5217 * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush. 5218 */ 5219 kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID); 5220 5221 if (nrips) 5222 kvm_cpu_cap_set(X86_FEATURE_NRIPS); 5223 5224 if (npt_enabled) 5225 kvm_cpu_cap_set(X86_FEATURE_NPT); 5226 5227 if (tsc_scaling) 5228 kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR); 5229 5230 if (vls) 5231 kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD); 5232 if (lbrv) 5233 kvm_cpu_cap_set(X86_FEATURE_LBRV); 5234 5235 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) 5236 kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER); 5237 5238 if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) 5239 kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD); 5240 5241 if (vgif) 5242 kvm_cpu_cap_set(X86_FEATURE_VGIF); 5243 5244 if (vnmi) 5245 kvm_cpu_cap_set(X86_FEATURE_VNMI); 5246 5247 /* Nested VM can receive #VMEXIT instead of triggering #GP */ 5248 kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); 5249 } 5250 5251 if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD)) 5252 kvm_caps.has_bus_lock_exit = true; 5253 5254 /* CPUID 0x80000008 */ 5255 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || 5256 boot_cpu_has(X86_FEATURE_AMD_SSBD)) 5257 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); 5258 5259 if (enable_pmu) { 5260 /* 5261 * Enumerate support for PERFCTR_CORE if and only if KVM has 5262 * access to enough counters to virtualize "core" support, 5263 * otherwise limit vPMU support to the legacy number of counters. 
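 * (AMD64_NUM_COUNTERS is the four legacy counters, AMD64_NUM_COUNTERS_CORE the six counters enumerated by PERFCTR_CORE.)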
5264 */ 5265 if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE) 5266 kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS, 5267 kvm_pmu_cap.num_counters_gp); 5268 else 5269 kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE); 5270 5271 if (kvm_pmu_cap.version != 2 || 5272 !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE)) 5273 kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2); 5274 } 5275 5276 /* CPUID 0x8000001F (SME/SEV features) */ 5277 sev_set_cpu_caps(); 5278 5279 /* 5280 * Clear capabilities that are automatically configured by common code, 5281 * but that require explicit SVM support (that isn't yet implemented). 5282 */ 5283 kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT); 5284 kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM); 5285 } 5286 5287 static __init int svm_hardware_setup(void) 5288 { 5289 void *iopm_va; 5290 int cpu, r; 5291 5292 /* 5293 * NX is required for shadow paging and for NPT if the NX huge pages 5294 * mitigation is enabled. 5295 */ 5296 if (!boot_cpu_has(X86_FEATURE_NX)) { 5297 pr_err_ratelimited("NX (Execute Disable) not supported\n"); 5298 return -EOPNOTSUPP; 5299 } 5300 kvm_enable_efer_bits(EFER_NX); 5301 5302 kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | 5303 XFEATURE_MASK_BNDCSR); 5304 5305 if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) 5306 kvm_enable_efer_bits(EFER_FFXSR); 5307 5308 if (tsc_scaling) { 5309 if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { 5310 tsc_scaling = false; 5311 } else { 5312 pr_info("TSC scaling supported\n"); 5313 kvm_caps.has_tsc_control = true; 5314 } 5315 } 5316 kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX; 5317 kvm_caps.tsc_scaling_ratio_frac_bits = 32; 5318 5319 tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); 5320 5321 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) 5322 kvm_enable_efer_bits(EFER_AUTOIBRS); 5323 5324 /* Check for pause filtering support */ 5325 if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { 5326 pause_filter_count = 0; 5327 pause_filter_thresh = 0; 5328 } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) { 5329 pause_filter_thresh = 0; 5330 } 5331 5332 if (nested) { 5333 pr_info("Nested Virtualization enabled\n"); 5334 kvm_enable_efer_bits(EFER_SVME); 5335 if (!boot_cpu_has(X86_FEATURE_EFER_LMSLE_MBZ)) 5336 kvm_enable_efer_bits(EFER_LMSLE); 5337 5338 r = nested_svm_init_msrpm_merge_offsets(); 5339 if (r) 5340 return r; 5341 } 5342 5343 /* 5344 * KVM's MMU doesn't support using 2-level paging for itself, and thus 5345 * NPT isn't supported if the host is using 2-level paging since host 5346 * CR4 is unchanged on VMRUN. 5347 */ 5348 if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE)) 5349 npt_enabled = false; 5350 5351 if (!boot_cpu_has(X86_FEATURE_NPT)) 5352 npt_enabled = false; 5353 5354 /* Force VM NPT level equal to the host's paging level */ 5355 kvm_configure_mmu(npt_enabled, get_npt_level(), 5356 get_npt_level(), PG_LEVEL_1G); 5357 pr_info("Nested Paging %s\n", str_enabled_disabled(npt_enabled)); 5358 5359 /* 5360 * It seems that on AMD processors PTE's accessed bit is 5361 * being set by the CPU hardware before the NPF vmexit. 5362 * This is not expected behaviour and our tests fail because 5363 * of it. 5364 * A workaround here is to disable support for 5365 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled. 
In this case userspace can know if there is support using 5367 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle 5368 * it. 5369 * If future AMD CPU models change the behaviour described above, 5370 * this variable can be changed accordingly. 5371 */ 5372 allow_smaller_maxphyaddr = !npt_enabled; 5373 5374 /* Setup shadow_me_value and shadow_me_mask */ 5375 kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask); 5376 5377 svm_adjust_mmio_mask(); 5378 5379 nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS); 5380 5381 if (lbrv) { 5382 if (!boot_cpu_has(X86_FEATURE_LBRV)) 5383 lbrv = false; 5384 else 5385 pr_info("LBR virtualization supported\n"); 5386 } 5387 5388 iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL); 5389 if (!iopm_va) 5390 return -ENOMEM; 5391 5392 iopm_base = __sme_set(__pa(iopm_va)); 5393 5394 /* 5395 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which 5396 * may be modified by svm_adjust_mmio_mask()), as well as nrips. 5397 */ 5398 sev_hardware_setup(); 5399 5400 svm_hv_hardware_setup(); 5401 5402 enable_apicv = avic_hardware_setup(); 5403 if (!enable_apicv) { 5404 enable_ipiv = false; 5405 svm_x86_ops.vcpu_blocking = NULL; 5406 svm_x86_ops.vcpu_unblocking = NULL; 5407 svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL; 5408 } 5409 5410 if (vls) { 5411 if (!npt_enabled || 5412 !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) || 5413 !IS_ENABLED(CONFIG_X86_64)) { 5414 vls = false; 5415 } else { 5416 pr_info("Virtual VMLOAD VMSAVE supported\n"); 5417 } 5418 } 5419 5420 if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) 5421 svm_gp_erratum_intercept = false; 5422 5423 if (vgif) { 5424 if (!boot_cpu_has(X86_FEATURE_VGIF)) 5425 vgif = false; 5426 else 5427 pr_info("Virtual GIF supported\n"); 5428 } 5429 5430 vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI); 5431 if (vnmi) 5432 pr_info("Virtual NMI enabled\n"); 5433 5434 if (!vnmi) { 5435 svm_x86_ops.is_vnmi_pending = NULL; 5436 svm_x86_ops.set_vnmi_pending = NULL; 5437 } 5438 5439 if (!enable_pmu) 5440 pr_info("PMU virtualization is disabled\n"); 5441 5442 svm_set_cpu_caps(); 5443 5444 kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED; 5445 5446 for_each_possible_cpu(cpu) { 5447 r = svm_cpu_init(cpu); 5448 if (r) 5449 goto err; 5450 } 5451 5452 return 0; 5453 5454 err: 5455 svm_hardware_unsetup(); 5456 return r; 5457 } 5458 5459 5460 static struct kvm_x86_init_ops svm_init_ops __initdata = { 5461 .hardware_setup = svm_hardware_setup, 5462 5463 .runtime_ops = &svm_x86_ops, 5464 .pmu_ops = &amd_pmu_ops, 5465 }; 5466 5467 static void __svm_exit(void) 5468 { 5469 kvm_x86_vendor_exit(); 5470 } 5471 5472 static int __init svm_init(void) 5473 { 5474 int r; 5475 5476 KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_svm); 5477 5478 __unused_size_checks(); 5479 5480 if (!kvm_is_svm_supported()) 5481 return -EOPNOTSUPP; 5482 5483 r = kvm_x86_vendor_init(&svm_init_ops); 5484 if (r) 5485 return r; 5486 5487 /* 5488 * Common KVM initialization _must_ come last; after this, /dev/kvm is 5489 * exposed to userspace! 5490 */ 5491 r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm), 5492 THIS_MODULE); 5493 if (r) 5494 goto err_kvm_init; 5495 5496 return 0; 5497 5498 err_kvm_init: 5499 __svm_exit(); 5500 return r; 5501 } 5502 5503 static void __exit svm_exit(void) 5504 { 5505 kvm_exit(); 5506 __svm_exit(); 5507 } 5508 5509 module_init(svm_init) 5510 module_exit(svm_exit) 5511