#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/fpu/api.h>

#include <asm/virtext.h>

#include <trace/events/ipi.h>

#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);

#define X2APIC_MSR(x)	(APIC_BASE_MSR + (x >> 4))
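/*
 * Added note (not in the original comment): an x2APIC MSR is APIC_BASE_MSR
 * (0x800) plus the register's xAPIC MMIO offset shifted right by 4.  For
 * example, APIC_TASKPRI lives at MMIO offset 0x80, so its x2APIC MSR is
 * 0x808, the TPR register.
 */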
static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is initially cleared */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
	{ .index = MSR_IA32_SYSENTER_EIP,		.always = false },
	{ .index = MSR_IA32_SYSENTER_ESP,		.always = false },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_FLUSH_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_EFER,				.always = false },
	{ .index = MSR_IA32_CR_PAT,			.always = false },
	{ .index = MSR_AMD64_SEV_ES_GHCB,		.always = true  },
	{ .index = MSR_TSC_AUX,				.always = false },
	{ .index = X2APIC_MSR(APIC_ID),			.always = false },
	{ .index = X2APIC_MSR(APIC_LVR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TASKPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_ARBPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_PROCPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_EOI),		.always = false },
	{ .index = X2APIC_MSR(APIC_RRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LDR),		.always = false },
	{ .index = X2APIC_MSR(APIC_DFR),		.always = false },
	{ .index = X2APIC_MSR(APIC_SPIV),		.always = false },
	{ .index = X2APIC_MSR(APIC_ISR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_IRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ESR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR2),		.always = false },

	/*
	 * Note:
	 * AMD does not virtualize APIC TSC-deadline timer mode, but it is
	 * emulated by KVM. When setting APIC LVTT (0x832) register bit 18,
	 * the AVIC hardware would generate a #GP fault. Therefore, always
	 * intercept MSR 0x832 and do not set up a direct_access_msrs entry
	 * for it.
	 */
	{ .index = X2APIC_MSR(APIC_LVTTHMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTPC),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT0),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT1),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTERR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};
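/*
 * Added note: only entries flagged ".always = true" have both read and write
 * intercepts cleared unconditionally when the per-vCPU MSR permission map is
 * initialized (see svm_vcpu_init_msrpm() below); the remaining entries stay
 * intercepted until KVM decides to pass them through at runtime.
 */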
/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop.
 *	In this mode, a 16-bit pause filter threshold field is added in the
 *	VMCB. The threshold value is a cycle count that is used to reset the
 *	pause counter. As with simple pause filtering, VMRUN loads the pause
 *	count value from the VMCB into an internal counter. Then, on each pause
 *	instruction the hardware checks the elapsed number of cycles since
 *	the most recent pause instruction against the pause filter threshold.
 *	If the elapsed cycle count is greater than the pause filter threshold,
 *	then the internal pause count is reloaded from the VMCB and execution
 *	continues. If the elapsed cycle count is less than the pause filter
 *	threshold, then the internal pause count is decremented. If the count
 *	value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
 *	triggered. If advanced pause filtering is supported and the pause filter
 *	threshold field is set to zero, the filter will operate in the simpler,
 *	count only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);
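/*
 * Added example (follows from the comments above): with the default grow and
 * shrink settings, a vCPU that keeps triggering PAUSE exits has its per-vCPU
 * filter count doubled on each exit (count, 2*count, 4*count, ...) until it
 * reaches pause_filter_count_max, while a shrink event resets it straight
 * back to pause_filter_count.
 */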
/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
static int lbrv = true;
module_param(lbrv, int, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

/*
 * enable / disable AVIC. Because the defaults differ for APICv
 * support between VMX and SVM we cannot use module_param_named.
 */
static bool avic;
module_param(avic, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);


bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);

bool vnmi = true;
module_param(vnmi, bool, 0444);

static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

DEFINE_PER_CPU(struct svm_cpu_data, svm_data);

/*
 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
static int tsc_aux_uret_slot __read_mostly = -1;

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}
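/*
 * Added worked example: each MSR consumes two bits (read + write) in the
 * permission map, so a u8 covers 4 MSRs and a u32 covers 16.  For
 * MSR_IA32_SYSENTER_CS (0x174), which falls in the first range, the byte
 * offset is 0x174 / 4 = 0x5d and the u32 offset returned above is
 * 0x5d / 4 = 23; set_msr_interception_bitmap() then toggles bits
 * 2 * (0x174 & 0xf) = 8 (read) and 9 (write) within that u32.
 */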
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 old_efer = vcpu->arch.efer;
	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available.  */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {
			svm_leave_nested(vcpu);
			svm_set_gif(svm, true);
			/* #GP intercept is still needed for vmware backdoor */
			if (!enable_vmware_backdoor)
				clr_exception_intercept(svm, GP_VECTOR);

			/*
			 * Free the nested guest state, unless we are in SMM.
			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
			if (!is_smm(vcpu))
				svm_free_nested(svm);

		} else {
			int ret = svm_allocate_nested(svm);

			if (ret) {
				vcpu->arch.efer = old_efer;
				return ret;
			}

			/*
			 * Never intercept #GP for SEV guests, KVM can't
			 * decrypt guest memory to workaround the erratum.
			 */
			if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
				set_exception_intercept(svm, GP_VECTOR);
		}
	}

	svm->vmcb->save.efer = efer | EFER_SVME;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	return 0;
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
					   bool commit_side_effects)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long old_rflags;

	/*
	 * SEV-ES does not expose the next RIP. The RIP update is controlled by
	 * the type of exit and the #VC handler in the guest.
	 */
	if (sev_es_guest(vcpu->kvm))
		goto done;

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (unlikely(!commit_side_effects))
			old_rflags = svm->vmcb->save.rflags;

		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;

		if (unlikely(!commit_side_effects))
			svm->vmcb->save.rflags = old_rflags;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}

done:
	if (likely(commit_side_effects))
		svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	return __svm_skip_emulated_instruction(vcpu, true);
}

static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
{
	unsigned long rip, old_rip = kvm_rip_read(vcpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Due to architectural shortcomings, the CPU doesn't always provide
	 * NextRIP, e.g. if KVM intercepted an exception that occurred while
	 * the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip
	 * the instruction even if NextRIP is supported to acquire the next
	 * RIP so that it can be shoved into the NextRIP field, otherwise
	 * hardware will fail to advance guest RIP during event injection.
	 * Drop the exception/interrupt if emulation fails and effectively
	 * retry the instruction, it's the least awful option. If NRIPS is
	 * in use, the skip must not commit any side effects such as clearing
	 * the interrupt shadow or RFLAGS.RF.
	 */
	if (!__svm_skip_emulated_instruction(vcpu, !nrips))
		return -EIO;

	rip = kvm_rip_read(vcpu);

	/*
	 * Save the injection information, even when using next_rip, as the
	 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
	 * doesn't complete due to a VM-Exit occurring while the CPU is
	 * vectoring the event. Decoding the instruction isn't guaranteed to
	 * work as there may be no backing instruction, e.g. if the event is
	 * being injected by L1 for L2, or if the guest is patching INT3 into
	 * a different instruction.
	 */
	svm->soft_int_injected = true;
	svm->soft_int_csbase = svm->vmcb->save.cs.base;
	svm->soft_int_old_rip = old_rip;
	svm->soft_int_next_rip = rip;

	if (nrips)
		kvm_rip_write(vcpu, old_rip);

	if (static_cpu_has(X86_FEATURE_NRIPS))
		svm->vmcb->control.next_rip = rip;

	return 0;
}
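/*
 * Added note: the "soft" exceptions that take the path above are those with
 * an associated instruction, i.e. #BP from INT3 and #OF from INTO, which is
 * what kvm_exception_is_soft() checks for below.
 */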
static void svm_inject_exception(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct vcpu_svm *svm = to_svm(vcpu);

	kvm_deliver_exception_payload(vcpu, ex);

	if (kvm_exception_is_soft(ex->vector) &&
	    svm_update_soft_interrupt_rip(vcpu))
		return;

	svm->vmcb->control.event_inj = ex->vector
		| SVM_EVTINJ_VALID
		| (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = ex->error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if the host
	 * processor's osvw_len is 0 then osvw_status[0] carries no
	 * information. We need to be conservative here and therefore we tell
	 * the guest that erratum 298 is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static bool kvm_is_svm_supported(void)
{
	int cpu = raw_smp_processor_id();
	const char *msg;
	u64 vm_cr;

	if (!cpu_has_svm(&msg)) {
		pr_err("SVM not supported by CPU %d, %s\n", cpu, msg);
		return false;
	}

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
		return false;
	}

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
		pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
		return false;
	}

	return true;
}

static int svm_check_processor_compat(void)
{
	if (!kvm_is_svm_supported())
		return -EIO;

	return 0;
}

void __svm_write_tsc_multiplier(u64 multiplier)
{
	preempt_disable();

	if (multiplier == __this_cpu_read(current_tsc_ratio))
		goto out;

	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
	__this_cpu_write(current_tsc_ratio, multiplier);
out:
	preempt_enable();
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (tsc_scaling)
		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	sd = per_cpu_ptr(&svm_data, me);
	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		/*
		 * Set the default value, even if we don't use TSC scaling
		 * to avoid having stale value in the msr
		 */
		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
609 * 610 * Note that it is possible to have a system with mixed processor 611 * revisions and therefore different OSVW bits. If bits are not the same 612 * on different processors then choose the worst case (i.e. if erratum 613 * is present on one processor and not on another then assume that the 614 * erratum is present everywhere). 615 */ 616 if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { 617 uint64_t len, status = 0; 618 int err; 619 620 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); 621 if (!err) 622 status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, 623 &err); 624 625 if (err) 626 osvw_status = osvw_len = 0; 627 else { 628 if (len < osvw_len) 629 osvw_len = len; 630 osvw_status |= status; 631 osvw_status &= (1ULL << osvw_len) - 1; 632 } 633 } else 634 osvw_status = osvw_len = 0; 635 636 svm_init_erratum_383(); 637 638 amd_pmu_enable_virt(); 639 640 return 0; 641 } 642 643 static void svm_cpu_uninit(int cpu) 644 { 645 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); 646 647 if (!sd->save_area) 648 return; 649 650 kfree(sd->sev_vmcbs); 651 __free_page(sd->save_area); 652 sd->save_area_pa = 0; 653 sd->save_area = NULL; 654 } 655 656 static int svm_cpu_init(int cpu) 657 { 658 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); 659 int ret = -ENOMEM; 660 661 memset(sd, 0, sizeof(struct svm_cpu_data)); 662 sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO); 663 if (!sd->save_area) 664 return ret; 665 666 ret = sev_cpu_init(sd); 667 if (ret) 668 goto free_save_area; 669 670 sd->save_area_pa = __sme_page_pa(sd->save_area); 671 return 0; 672 673 free_save_area: 674 __free_page(sd->save_area); 675 sd->save_area = NULL; 676 return ret; 677 678 } 679 680 static int direct_access_msr_slot(u32 msr) 681 { 682 u32 i; 683 684 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) 685 if (direct_access_msrs[i].index == msr) 686 return i; 687 688 return -ENOENT; 689 } 690 691 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read, 692 int write) 693 { 694 struct vcpu_svm *svm = to_svm(vcpu); 695 int slot = direct_access_msr_slot(msr); 696 697 if (slot == -ENOENT) 698 return; 699 700 /* Set the shadow bitmaps to the desired intercept states */ 701 if (read) 702 set_bit(slot, svm->shadow_msr_intercept.read); 703 else 704 clear_bit(slot, svm->shadow_msr_intercept.read); 705 706 if (write) 707 set_bit(slot, svm->shadow_msr_intercept.write); 708 else 709 clear_bit(slot, svm->shadow_msr_intercept.write); 710 } 711 712 static bool valid_msr_intercept(u32 index) 713 { 714 return direct_access_msr_slot(index) != -ENOENT; 715 } 716 717 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) 718 { 719 u8 bit_write; 720 unsigned long tmp; 721 u32 offset; 722 u32 *msrpm; 723 724 /* 725 * For non-nested case: 726 * If the L01 MSR bitmap does not intercept the MSR, then we need to 727 * save it. 728 * 729 * For nested case: 730 * If the L02 MSR bitmap does not intercept the MSR, then we need to 731 * save it. 732 */ 733 msrpm = is_guest_mode(vcpu) ? 
to_svm(vcpu)->nested.msrpm: 734 to_svm(vcpu)->msrpm; 735 736 offset = svm_msrpm_offset(msr); 737 bit_write = 2 * (msr & 0x0f) + 1; 738 tmp = msrpm[offset]; 739 740 BUG_ON(offset == MSR_INVALID); 741 742 return test_bit(bit_write, &tmp); 743 } 744 745 static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm, 746 u32 msr, int read, int write) 747 { 748 struct vcpu_svm *svm = to_svm(vcpu); 749 u8 bit_read, bit_write; 750 unsigned long tmp; 751 u32 offset; 752 753 /* 754 * If this warning triggers extend the direct_access_msrs list at the 755 * beginning of the file 756 */ 757 WARN_ON(!valid_msr_intercept(msr)); 758 759 /* Enforce non allowed MSRs to trap */ 760 if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) 761 read = 0; 762 763 if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) 764 write = 0; 765 766 offset = svm_msrpm_offset(msr); 767 bit_read = 2 * (msr & 0x0f); 768 bit_write = 2 * (msr & 0x0f) + 1; 769 tmp = msrpm[offset]; 770 771 BUG_ON(offset == MSR_INVALID); 772 773 read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp); 774 write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp); 775 776 msrpm[offset] = tmp; 777 778 svm_hv_vmcb_dirty_nested_enlightenments(vcpu); 779 svm->nested.force_msr_bitmap_recalc = true; 780 } 781 782 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, 783 int read, int write) 784 { 785 set_shadow_msr_intercept(vcpu, msr, read, write); 786 set_msr_interception_bitmap(vcpu, msrpm, msr, read, write); 787 } 788 789 u32 *svm_vcpu_alloc_msrpm(void) 790 { 791 unsigned int order = get_order(MSRPM_SIZE); 792 struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order); 793 u32 *msrpm; 794 795 if (!pages) 796 return NULL; 797 798 msrpm = page_address(pages); 799 memset(msrpm, 0xff, PAGE_SIZE * (1 << order)); 800 801 return msrpm; 802 } 803 804 void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm) 805 { 806 int i; 807 808 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { 809 if (!direct_access_msrs[i].always) 810 continue; 811 set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1); 812 } 813 } 814 815 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept) 816 { 817 int i; 818 819 if (intercept == svm->x2avic_msrs_intercepted) 820 return; 821 822 if (!x2avic_enabled || 823 !apic_x2apic_mode(svm->vcpu.arch.apic)) 824 return; 825 826 for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) { 827 int index = direct_access_msrs[i].index; 828 829 if ((index < APIC_BASE_MSR) || 830 (index > APIC_BASE_MSR + 0xff)) 831 continue; 832 set_msr_interception(&svm->vcpu, svm->msrpm, index, 833 !intercept, !intercept); 834 } 835 836 svm->x2avic_msrs_intercepted = intercept; 837 } 838 839 void svm_vcpu_free_msrpm(u32 *msrpm) 840 { 841 __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE)); 842 } 843 844 static void svm_msr_filter_changed(struct kvm_vcpu *vcpu) 845 { 846 struct vcpu_svm *svm = to_svm(vcpu); 847 u32 i; 848 849 /* 850 * Set intercept permissions for all direct access MSRs again. They 851 * will automatically get filtered through the MSR filter, so we are 852 * back in sync after this. 
853 */ 854 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { 855 u32 msr = direct_access_msrs[i].index; 856 u32 read = test_bit(i, svm->shadow_msr_intercept.read); 857 u32 write = test_bit(i, svm->shadow_msr_intercept.write); 858 859 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write); 860 } 861 } 862 863 static void add_msr_offset(u32 offset) 864 { 865 int i; 866 867 for (i = 0; i < MSRPM_OFFSETS; ++i) { 868 869 /* Offset already in list? */ 870 if (msrpm_offsets[i] == offset) 871 return; 872 873 /* Slot used by another offset? */ 874 if (msrpm_offsets[i] != MSR_INVALID) 875 continue; 876 877 /* Add offset to list */ 878 msrpm_offsets[i] = offset; 879 880 return; 881 } 882 883 /* 884 * If this BUG triggers the msrpm_offsets table has an overflow. Just 885 * increase MSRPM_OFFSETS in this case. 886 */ 887 BUG(); 888 } 889 890 static void init_msrpm_offsets(void) 891 { 892 int i; 893 894 memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); 895 896 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { 897 u32 offset; 898 899 offset = svm_msrpm_offset(direct_access_msrs[i].index); 900 BUG_ON(offset == MSR_INVALID); 901 902 add_msr_offset(offset); 903 } 904 } 905 906 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb) 907 { 908 to_vmcb->save.dbgctl = from_vmcb->save.dbgctl; 909 to_vmcb->save.br_from = from_vmcb->save.br_from; 910 to_vmcb->save.br_to = from_vmcb->save.br_to; 911 to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from; 912 to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to; 913 914 vmcb_mark_dirty(to_vmcb, VMCB_LBR); 915 } 916 917 static void svm_enable_lbrv(struct kvm_vcpu *vcpu) 918 { 919 struct vcpu_svm *svm = to_svm(vcpu); 920 921 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; 922 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); 923 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); 924 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); 925 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); 926 927 /* Move the LBR msrs to the vmcb02 so that the guest can see them. */ 928 if (is_guest_mode(vcpu)) 929 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr); 930 } 931 932 static void svm_disable_lbrv(struct kvm_vcpu *vcpu) 933 { 934 struct vcpu_svm *svm = to_svm(vcpu); 935 936 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; 937 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); 938 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); 939 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); 940 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); 941 942 /* 943 * Move the LBR msrs back to the vmcb01 to avoid copying them 944 * on nested guest entries. 945 */ 946 if (is_guest_mode(vcpu)) 947 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb); 948 } 949 950 static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index) 951 { 952 /* 953 * If the LBR virtualization is disabled, the LBR msrs are always 954 * kept in the vmcb01 to avoid copying them on nested guest entries. 955 * 956 * If nested, and the LBR virtualization is enabled/disabled, the msrs 957 * are moved between the vmcb01 and vmcb02 as needed. 958 */ 959 struct vmcb *vmcb = 960 (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ? 
961 svm->vmcb : svm->vmcb01.ptr; 962 963 switch (index) { 964 case MSR_IA32_DEBUGCTLMSR: 965 return vmcb->save.dbgctl; 966 case MSR_IA32_LASTBRANCHFROMIP: 967 return vmcb->save.br_from; 968 case MSR_IA32_LASTBRANCHTOIP: 969 return vmcb->save.br_to; 970 case MSR_IA32_LASTINTFROMIP: 971 return vmcb->save.last_excp_from; 972 case MSR_IA32_LASTINTTOIP: 973 return vmcb->save.last_excp_to; 974 default: 975 KVM_BUG(false, svm->vcpu.kvm, 976 "%s: Unknown MSR 0x%x", __func__, index); 977 return 0; 978 } 979 } 980 981 void svm_update_lbrv(struct kvm_vcpu *vcpu) 982 { 983 struct vcpu_svm *svm = to_svm(vcpu); 984 985 bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) & 986 DEBUGCTLMSR_LBR; 987 988 bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext & 989 LBR_CTL_ENABLE_MASK); 990 991 if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled)) 992 if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)) 993 enable_lbrv = true; 994 995 if (enable_lbrv == current_enable_lbrv) 996 return; 997 998 if (enable_lbrv) 999 svm_enable_lbrv(vcpu); 1000 else 1001 svm_disable_lbrv(vcpu); 1002 } 1003 1004 void disable_nmi_singlestep(struct vcpu_svm *svm) 1005 { 1006 svm->nmi_singlestep = false; 1007 1008 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { 1009 /* Clear our flags if they were not set by the guest */ 1010 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) 1011 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; 1012 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) 1013 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; 1014 } 1015 } 1016 1017 static void grow_ple_window(struct kvm_vcpu *vcpu) 1018 { 1019 struct vcpu_svm *svm = to_svm(vcpu); 1020 struct vmcb_control_area *control = &svm->vmcb->control; 1021 int old = control->pause_filter_count; 1022 1023 if (kvm_pause_in_guest(vcpu->kvm)) 1024 return; 1025 1026 control->pause_filter_count = __grow_ple_window(old, 1027 pause_filter_count, 1028 pause_filter_count_grow, 1029 pause_filter_count_max); 1030 1031 if (control->pause_filter_count != old) { 1032 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1033 trace_kvm_ple_window_update(vcpu->vcpu_id, 1034 control->pause_filter_count, old); 1035 } 1036 } 1037 1038 static void shrink_ple_window(struct kvm_vcpu *vcpu) 1039 { 1040 struct vcpu_svm *svm = to_svm(vcpu); 1041 struct vmcb_control_area *control = &svm->vmcb->control; 1042 int old = control->pause_filter_count; 1043 1044 if (kvm_pause_in_guest(vcpu->kvm)) 1045 return; 1046 1047 control->pause_filter_count = 1048 __shrink_ple_window(old, 1049 pause_filter_count, 1050 pause_filter_count_shrink, 1051 pause_filter_count); 1052 if (control->pause_filter_count != old) { 1053 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1054 trace_kvm_ple_window_update(vcpu->vcpu_id, 1055 control->pause_filter_count, old); 1056 } 1057 } 1058 1059 static void svm_hardware_unsetup(void) 1060 { 1061 int cpu; 1062 1063 sev_hardware_unsetup(); 1064 1065 for_each_possible_cpu(cpu) 1066 svm_cpu_uninit(cpu); 1067 1068 __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), 1069 get_order(IOPM_SIZE)); 1070 iopm_base = 0; 1071 } 1072 1073 static void init_seg(struct vmcb_seg *seg) 1074 { 1075 seg->selector = 0; 1076 seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | 1077 SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ 1078 seg->limit = 0xffff; 1079 seg->base = 0; 1080 } 1081 1082 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) 1083 { 1084 seg->selector = 0; 1085 seg->attrib = SVM_SELECTOR_P_MASK | type; 1086 seg->limit = 0xffff; 1087 seg->base 
= 0; 1088 } 1089 1090 static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu) 1091 { 1092 struct vcpu_svm *svm = to_svm(vcpu); 1093 1094 return svm->nested.ctl.tsc_offset; 1095 } 1096 1097 static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) 1098 { 1099 struct vcpu_svm *svm = to_svm(vcpu); 1100 1101 return svm->tsc_ratio_msr; 1102 } 1103 1104 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1105 { 1106 struct vcpu_svm *svm = to_svm(vcpu); 1107 1108 svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; 1109 svm->vmcb->control.tsc_offset = offset; 1110 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1111 } 1112 1113 static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) 1114 { 1115 __svm_write_tsc_multiplier(multiplier); 1116 } 1117 1118 1119 /* Evaluate instruction intercepts that depend on guest CPUID features. */ 1120 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, 1121 struct vcpu_svm *svm) 1122 { 1123 /* 1124 * Intercept INVPCID if shadow paging is enabled to sync/free shadow 1125 * roots, or if INVPCID is disabled in the guest to inject #UD. 1126 */ 1127 if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) { 1128 if (!npt_enabled || 1129 !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) 1130 svm_set_intercept(svm, INTERCEPT_INVPCID); 1131 else 1132 svm_clr_intercept(svm, INTERCEPT_INVPCID); 1133 } 1134 1135 if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) { 1136 if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) 1137 svm_clr_intercept(svm, INTERCEPT_RDTSCP); 1138 else 1139 svm_set_intercept(svm, INTERCEPT_RDTSCP); 1140 } 1141 } 1142 1143 static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu) 1144 { 1145 struct vcpu_svm *svm = to_svm(vcpu); 1146 1147 if (guest_cpuid_is_intel(vcpu)) { 1148 /* 1149 * We must intercept SYSENTER_EIP and SYSENTER_ESP 1150 * accesses because the processor only stores 32 bits. 1151 * For the same reason we cannot use virtual VMLOAD/VMSAVE. 1152 */ 1153 svm_set_intercept(svm, INTERCEPT_VMLOAD); 1154 svm_set_intercept(svm, INTERCEPT_VMSAVE); 1155 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; 1156 1157 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); 1158 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); 1159 1160 svm->v_vmload_vmsave_enabled = false; 1161 } else { 1162 /* 1163 * If hardware supports Virtual VMLOAD VMSAVE then enable it 1164 * in VMCB and clear intercepts to avoid #VMEXIT. 
1165 */ 1166 if (vls) { 1167 svm_clr_intercept(svm, INTERCEPT_VMLOAD); 1168 svm_clr_intercept(svm, INTERCEPT_VMSAVE); 1169 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; 1170 } 1171 /* No need to intercept these MSRs */ 1172 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); 1173 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); 1174 } 1175 } 1176 1177 static void init_vmcb(struct kvm_vcpu *vcpu) 1178 { 1179 struct vcpu_svm *svm = to_svm(vcpu); 1180 struct vmcb *vmcb = svm->vmcb01.ptr; 1181 struct vmcb_control_area *control = &vmcb->control; 1182 struct vmcb_save_area *save = &vmcb->save; 1183 1184 svm_set_intercept(svm, INTERCEPT_CR0_READ); 1185 svm_set_intercept(svm, INTERCEPT_CR3_READ); 1186 svm_set_intercept(svm, INTERCEPT_CR4_READ); 1187 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); 1188 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); 1189 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); 1190 if (!kvm_vcpu_apicv_active(vcpu)) 1191 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 1192 1193 set_dr_intercepts(svm); 1194 1195 set_exception_intercept(svm, PF_VECTOR); 1196 set_exception_intercept(svm, UD_VECTOR); 1197 set_exception_intercept(svm, MC_VECTOR); 1198 set_exception_intercept(svm, AC_VECTOR); 1199 set_exception_intercept(svm, DB_VECTOR); 1200 /* 1201 * Guest access to VMware backdoor ports could legitimately 1202 * trigger #GP because of TSS I/O permission bitmap. 1203 * We intercept those #GP and allow access to them anyway 1204 * as VMware does. Don't intercept #GP for SEV guests as KVM can't 1205 * decrypt guest memory to decode the faulting instruction. 1206 */ 1207 if (enable_vmware_backdoor && !sev_guest(vcpu->kvm)) 1208 set_exception_intercept(svm, GP_VECTOR); 1209 1210 svm_set_intercept(svm, INTERCEPT_INTR); 1211 svm_set_intercept(svm, INTERCEPT_NMI); 1212 1213 if (intercept_smi) 1214 svm_set_intercept(svm, INTERCEPT_SMI); 1215 1216 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); 1217 svm_set_intercept(svm, INTERCEPT_RDPMC); 1218 svm_set_intercept(svm, INTERCEPT_CPUID); 1219 svm_set_intercept(svm, INTERCEPT_INVD); 1220 svm_set_intercept(svm, INTERCEPT_INVLPG); 1221 svm_set_intercept(svm, INTERCEPT_INVLPGA); 1222 svm_set_intercept(svm, INTERCEPT_IOIO_PROT); 1223 svm_set_intercept(svm, INTERCEPT_MSR_PROT); 1224 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); 1225 svm_set_intercept(svm, INTERCEPT_SHUTDOWN); 1226 svm_set_intercept(svm, INTERCEPT_VMRUN); 1227 svm_set_intercept(svm, INTERCEPT_VMMCALL); 1228 svm_set_intercept(svm, INTERCEPT_VMLOAD); 1229 svm_set_intercept(svm, INTERCEPT_VMSAVE); 1230 svm_set_intercept(svm, INTERCEPT_STGI); 1231 svm_set_intercept(svm, INTERCEPT_CLGI); 1232 svm_set_intercept(svm, INTERCEPT_SKINIT); 1233 svm_set_intercept(svm, INTERCEPT_WBINVD); 1234 svm_set_intercept(svm, INTERCEPT_XSETBV); 1235 svm_set_intercept(svm, INTERCEPT_RDPRU); 1236 svm_set_intercept(svm, INTERCEPT_RSM); 1237 1238 if (!kvm_mwait_in_guest(vcpu->kvm)) { 1239 svm_set_intercept(svm, INTERCEPT_MONITOR); 1240 svm_set_intercept(svm, INTERCEPT_MWAIT); 1241 } 1242 1243 if (!kvm_hlt_in_guest(vcpu->kvm)) 1244 svm_set_intercept(svm, INTERCEPT_HLT); 1245 1246 control->iopm_base_pa = __sme_set(iopm_base); 1247 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); 1248 control->int_ctl = V_INTR_MASKING_MASK; 1249 1250 init_seg(&save->es); 1251 init_seg(&save->ss); 1252 init_seg(&save->ds); 1253 init_seg(&save->fs); 1254 init_seg(&save->gs); 1255 1256 save->cs.selector = 0xf000; 1257 save->cs.base = 0xffff0000; 1258 /* Executable/Readable Code 
Segment */ 1259 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | 1260 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; 1261 save->cs.limit = 0xffff; 1262 1263 save->gdtr.base = 0; 1264 save->gdtr.limit = 0xffff; 1265 save->idtr.base = 0; 1266 save->idtr.limit = 0xffff; 1267 1268 init_sys_seg(&save->ldtr, SEG_TYPE_LDT); 1269 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); 1270 1271 if (npt_enabled) { 1272 /* Setup VMCB for Nested Paging */ 1273 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; 1274 svm_clr_intercept(svm, INTERCEPT_INVLPG); 1275 clr_exception_intercept(svm, PF_VECTOR); 1276 svm_clr_intercept(svm, INTERCEPT_CR3_READ); 1277 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); 1278 save->g_pat = vcpu->arch.pat; 1279 save->cr3 = 0; 1280 } 1281 svm->current_vmcb->asid_generation = 0; 1282 svm->asid = 0; 1283 1284 svm->nested.vmcb12_gpa = INVALID_GPA; 1285 svm->nested.last_vmcb12_gpa = INVALID_GPA; 1286 1287 if (!kvm_pause_in_guest(vcpu->kvm)) { 1288 control->pause_filter_count = pause_filter_count; 1289 if (pause_filter_thresh) 1290 control->pause_filter_thresh = pause_filter_thresh; 1291 svm_set_intercept(svm, INTERCEPT_PAUSE); 1292 } else { 1293 svm_clr_intercept(svm, INTERCEPT_PAUSE); 1294 } 1295 1296 svm_recalc_instruction_intercepts(vcpu, svm); 1297 1298 /* 1299 * If the host supports V_SPEC_CTRL then disable the interception 1300 * of MSR_IA32_SPEC_CTRL. 1301 */ 1302 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 1303 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); 1304 1305 if (kvm_vcpu_apicv_active(vcpu)) 1306 avic_init_vmcb(svm, vmcb); 1307 1308 if (vnmi) 1309 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK; 1310 1311 if (vgif) { 1312 svm_clr_intercept(svm, INTERCEPT_STGI); 1313 svm_clr_intercept(svm, INTERCEPT_CLGI); 1314 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; 1315 } 1316 1317 if (sev_guest(vcpu->kvm)) 1318 sev_init_vmcb(svm); 1319 1320 svm_hv_init_vmcb(vmcb); 1321 init_vmcb_after_set_cpuid(vcpu); 1322 1323 vmcb_mark_all_dirty(vmcb); 1324 1325 enable_gif(svm); 1326 } 1327 1328 static void __svm_vcpu_reset(struct kvm_vcpu *vcpu) 1329 { 1330 struct vcpu_svm *svm = to_svm(vcpu); 1331 1332 svm_vcpu_init_msrpm(vcpu, svm->msrpm); 1333 1334 svm_init_osvw(vcpu); 1335 vcpu->arch.microcode_version = 0x01000065; 1336 svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio; 1337 1338 svm->nmi_masked = false; 1339 svm->awaiting_iret_completion = false; 1340 1341 if (sev_es_guest(vcpu->kvm)) 1342 sev_es_vcpu_reset(svm); 1343 } 1344 1345 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 1346 { 1347 struct vcpu_svm *svm = to_svm(vcpu); 1348 1349 svm->spec_ctrl = 0; 1350 svm->virt_spec_ctrl = 0; 1351 1352 init_vmcb(vcpu); 1353 1354 if (!init_event) 1355 __svm_vcpu_reset(vcpu); 1356 } 1357 1358 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) 1359 { 1360 svm->current_vmcb = target_vmcb; 1361 svm->vmcb = target_vmcb->ptr; 1362 } 1363 1364 static int svm_vcpu_create(struct kvm_vcpu *vcpu) 1365 { 1366 struct vcpu_svm *svm; 1367 struct page *vmcb01_page; 1368 struct page *vmsa_page = NULL; 1369 int err; 1370 1371 BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); 1372 svm = to_svm(vcpu); 1373 1374 err = -ENOMEM; 1375 vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 1376 if (!vmcb01_page) 1377 goto out; 1378 1379 if (sev_es_guest(vcpu->kvm)) { 1380 /* 1381 * SEV-ES guests require a separate VMSA page used to contain 1382 * the encrypted register state of the guest. 
1383 */ 1384 vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 1385 if (!vmsa_page) 1386 goto error_free_vmcb_page; 1387 1388 /* 1389 * SEV-ES guests maintain an encrypted version of their FPU 1390 * state which is restored and saved on VMRUN and VMEXIT. 1391 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't 1392 * do xsave/xrstor on it. 1393 */ 1394 fpstate_set_confidential(&vcpu->arch.guest_fpu); 1395 } 1396 1397 err = avic_init_vcpu(svm); 1398 if (err) 1399 goto error_free_vmsa_page; 1400 1401 svm->msrpm = svm_vcpu_alloc_msrpm(); 1402 if (!svm->msrpm) { 1403 err = -ENOMEM; 1404 goto error_free_vmsa_page; 1405 } 1406 1407 svm->x2avic_msrs_intercepted = true; 1408 1409 svm->vmcb01.ptr = page_address(vmcb01_page); 1410 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); 1411 svm_switch_vmcb(svm, &svm->vmcb01); 1412 1413 if (vmsa_page) 1414 svm->sev_es.vmsa = page_address(vmsa_page); 1415 1416 svm->guest_state_loaded = false; 1417 1418 return 0; 1419 1420 error_free_vmsa_page: 1421 if (vmsa_page) 1422 __free_page(vmsa_page); 1423 error_free_vmcb_page: 1424 __free_page(vmcb01_page); 1425 out: 1426 return err; 1427 } 1428 1429 static void svm_clear_current_vmcb(struct vmcb *vmcb) 1430 { 1431 int i; 1432 1433 for_each_online_cpu(i) 1434 cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL); 1435 } 1436 1437 static void svm_vcpu_free(struct kvm_vcpu *vcpu) 1438 { 1439 struct vcpu_svm *svm = to_svm(vcpu); 1440 1441 /* 1442 * The vmcb page can be recycled, causing a false negative in 1443 * svm_vcpu_load(). So, ensure that no logical CPU has this 1444 * vmcb page recorded as its current vmcb. 1445 */ 1446 svm_clear_current_vmcb(svm->vmcb); 1447 1448 svm_leave_nested(vcpu); 1449 svm_free_nested(svm); 1450 1451 sev_free_vcpu(vcpu); 1452 1453 __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); 1454 __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); 1455 } 1456 1457 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) 1458 { 1459 struct vcpu_svm *svm = to_svm(vcpu); 1460 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); 1461 1462 if (sev_es_guest(vcpu->kvm)) 1463 sev_es_unmap_ghcb(svm); 1464 1465 if (svm->guest_state_loaded) 1466 return; 1467 1468 /* 1469 * Save additional host state that will be restored on VMEXIT (sev-es) 1470 * or subsequent vmload of host save area. 
1471 */ 1472 vmsave(sd->save_area_pa); 1473 if (sev_es_guest(vcpu->kvm)) { 1474 struct sev_es_save_area *hostsa; 1475 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400); 1476 1477 sev_es_prepare_switch_to_guest(hostsa); 1478 } 1479 1480 if (tsc_scaling) 1481 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); 1482 1483 if (likely(tsc_aux_uret_slot >= 0)) 1484 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); 1485 1486 svm->guest_state_loaded = true; 1487 } 1488 1489 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) 1490 { 1491 to_svm(vcpu)->guest_state_loaded = false; 1492 } 1493 1494 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1495 { 1496 struct vcpu_svm *svm = to_svm(vcpu); 1497 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); 1498 1499 if (sd->current_vmcb != svm->vmcb) { 1500 sd->current_vmcb = svm->vmcb; 1501 indirect_branch_prediction_barrier(); 1502 } 1503 if (kvm_vcpu_apicv_active(vcpu)) 1504 avic_vcpu_load(vcpu, cpu); 1505 } 1506 1507 static void svm_vcpu_put(struct kvm_vcpu *vcpu) 1508 { 1509 if (kvm_vcpu_apicv_active(vcpu)) 1510 avic_vcpu_put(vcpu); 1511 1512 svm_prepare_host_switch(vcpu); 1513 1514 ++vcpu->stat.host_state_reload; 1515 } 1516 1517 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) 1518 { 1519 struct vcpu_svm *svm = to_svm(vcpu); 1520 unsigned long rflags = svm->vmcb->save.rflags; 1521 1522 if (svm->nmi_singlestep) { 1523 /* Hide our flags if they were not set by the guest */ 1524 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) 1525 rflags &= ~X86_EFLAGS_TF; 1526 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) 1527 rflags &= ~X86_EFLAGS_RF; 1528 } 1529 return rflags; 1530 } 1531 1532 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 1533 { 1534 if (to_svm(vcpu)->nmi_singlestep) 1535 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); 1536 1537 /* 1538 * Any change of EFLAGS.VM is accompanied by a reload of SS 1539 * (caused by either a task switch or an inter-privilege IRET), 1540 * so we do not need to update the CPL here. 1541 */ 1542 to_svm(vcpu)->vmcb->save.rflags = rflags; 1543 } 1544 1545 static bool svm_get_if_flag(struct kvm_vcpu *vcpu) 1546 { 1547 struct vmcb *vmcb = to_svm(vcpu)->vmcb; 1548 1549 return sev_es_guest(vcpu->kvm) 1550 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK 1551 : kvm_get_rflags(vcpu) & X86_EFLAGS_IF; 1552 } 1553 1554 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 1555 { 1556 kvm_register_mark_available(vcpu, reg); 1557 1558 switch (reg) { 1559 case VCPU_EXREG_PDPTR: 1560 /* 1561 * When !npt_enabled, mmu->pdptrs[] is already available since 1562 * it is always updated per SDM when moving to CRs. 1563 */ 1564 if (npt_enabled) 1565 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); 1566 break; 1567 default: 1568 KVM_BUG_ON(1, vcpu->kvm); 1569 } 1570 } 1571 1572 static void svm_set_vintr(struct vcpu_svm *svm) 1573 { 1574 struct vmcb_control_area *control; 1575 1576 /* 1577 * The following fields are ignored when AVIC is enabled 1578 */ 1579 WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu)); 1580 1581 svm_set_intercept(svm, INTERCEPT_VINTR); 1582 1583 /* 1584 * Recalculating intercepts may have cleared the VINTR intercept. If 1585 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF 1586 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN. 1587 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as 1588 * interrupts will never be unblocked while L2 is running. 
1589 */ 1590 if (!svm_is_intercept(svm, INTERCEPT_VINTR)) 1591 return; 1592 1593 /* 1594 * This is just a dummy VINTR to actually cause a vmexit to happen. 1595 * Actual injection of virtual interrupts happens through EVENTINJ. 1596 */ 1597 control = &svm->vmcb->control; 1598 control->int_vector = 0x0; 1599 control->int_ctl &= ~V_INTR_PRIO_MASK; 1600 control->int_ctl |= V_IRQ_MASK | 1601 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); 1602 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); 1603 } 1604 1605 static void svm_clear_vintr(struct vcpu_svm *svm) 1606 { 1607 svm_clr_intercept(svm, INTERCEPT_VINTR); 1608 1609 /* Drop int_ctl fields related to VINTR injection. */ 1610 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; 1611 if (is_guest_mode(&svm->vcpu)) { 1612 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; 1613 1614 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != 1615 (svm->nested.ctl.int_ctl & V_TPR_MASK)); 1616 1617 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & 1618 V_IRQ_INJECTION_BITS_MASK; 1619 1620 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; 1621 } 1622 1623 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); 1624 } 1625 1626 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) 1627 { 1628 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; 1629 struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; 1630 1631 switch (seg) { 1632 case VCPU_SREG_CS: return &save->cs; 1633 case VCPU_SREG_DS: return &save->ds; 1634 case VCPU_SREG_ES: return &save->es; 1635 case VCPU_SREG_FS: return &save01->fs; 1636 case VCPU_SREG_GS: return &save01->gs; 1637 case VCPU_SREG_SS: return &save->ss; 1638 case VCPU_SREG_TR: return &save01->tr; 1639 case VCPU_SREG_LDTR: return &save01->ldtr; 1640 } 1641 BUG(); 1642 return NULL; 1643 } 1644 1645 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) 1646 { 1647 struct vmcb_seg *s = svm_seg(vcpu, seg); 1648 1649 return s->base; 1650 } 1651 1652 static void svm_get_segment(struct kvm_vcpu *vcpu, 1653 struct kvm_segment *var, int seg) 1654 { 1655 struct vmcb_seg *s = svm_seg(vcpu, seg); 1656 1657 var->base = s->base; 1658 var->limit = s->limit; 1659 var->selector = s->selector; 1660 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; 1661 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; 1662 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; 1663 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; 1664 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; 1665 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; 1666 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; 1667 1668 /* 1669 * AMD CPUs circa 2014 track the G bit for all segments except CS. 1670 * However, the SVM spec states that the G bit is not observed by the 1671 * CPU, and some VMware virtual CPUs drop the G bit for all segments. 1672 * So let's synthesize a legal G bit for all segments, this helps 1673 * running KVM nested. It also helps cross-vendor migration, because 1674 * Intel's vmentry has a check on the 'G' bit. 
1675 */ 1676 var->g = s->limit > 0xfffff; 1677 1678 /* 1679 * AMD's VMCB does not have an explicit unusable field, so emulate it 1680 * for cross vendor migration purposes by "not present" 1681 */ 1682 var->unusable = !var->present; 1683 1684 switch (seg) { 1685 case VCPU_SREG_TR: 1686 /* 1687 * Work around a bug where the busy flag in the tr selector 1688 * isn't exposed 1689 */ 1690 var->type |= 0x2; 1691 break; 1692 case VCPU_SREG_DS: 1693 case VCPU_SREG_ES: 1694 case VCPU_SREG_FS: 1695 case VCPU_SREG_GS: 1696 /* 1697 * The accessed bit must always be set in the segment 1698 * descriptor cache, although it can be cleared in the 1699 * descriptor, the cached bit always remains at 1. Since 1700 * Intel has a check on this, set it here to support 1701 * cross-vendor migration. 1702 */ 1703 if (!var->unusable) 1704 var->type |= 0x1; 1705 break; 1706 case VCPU_SREG_SS: 1707 /* 1708 * On AMD CPUs sometimes the DB bit in the segment 1709 * descriptor is left as 1, although the whole segment has 1710 * been made unusable. Clear it here to pass an Intel VMX 1711 * entry check when cross vendor migrating. 1712 */ 1713 if (var->unusable) 1714 var->db = 0; 1715 /* This is symmetric with svm_set_segment() */ 1716 var->dpl = to_svm(vcpu)->vmcb->save.cpl; 1717 break; 1718 } 1719 } 1720 1721 static int svm_get_cpl(struct kvm_vcpu *vcpu) 1722 { 1723 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; 1724 1725 return save->cpl; 1726 } 1727 1728 static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 1729 { 1730 struct kvm_segment cs; 1731 1732 svm_get_segment(vcpu, &cs, VCPU_SREG_CS); 1733 *db = cs.db; 1734 *l = cs.l; 1735 } 1736 1737 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1738 { 1739 struct vcpu_svm *svm = to_svm(vcpu); 1740 1741 dt->size = svm->vmcb->save.idtr.limit; 1742 dt->address = svm->vmcb->save.idtr.base; 1743 } 1744 1745 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1746 { 1747 struct vcpu_svm *svm = to_svm(vcpu); 1748 1749 svm->vmcb->save.idtr.limit = dt->size; 1750 svm->vmcb->save.idtr.base = dt->address ; 1751 vmcb_mark_dirty(svm->vmcb, VMCB_DT); 1752 } 1753 1754 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1755 { 1756 struct vcpu_svm *svm = to_svm(vcpu); 1757 1758 dt->size = svm->vmcb->save.gdtr.limit; 1759 dt->address = svm->vmcb->save.gdtr.base; 1760 } 1761 1762 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1763 { 1764 struct vcpu_svm *svm = to_svm(vcpu); 1765 1766 svm->vmcb->save.gdtr.limit = dt->size; 1767 svm->vmcb->save.gdtr.base = dt->address ; 1768 vmcb_mark_dirty(svm->vmcb, VMCB_DT); 1769 } 1770 1771 static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1772 { 1773 struct vcpu_svm *svm = to_svm(vcpu); 1774 1775 /* 1776 * For guests that don't set guest_state_protected, the cr3 update is 1777 * handled via kvm_mmu_load() while entering the guest. For guests 1778 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to 1779 * VMCB save area now, since the save area will become the initial 1780 * contents of the VMSA, and future VMCB save area updates won't be 1781 * seen. 
1782 */ 1783 if (sev_es_guest(vcpu->kvm)) { 1784 svm->vmcb->save.cr3 = cr3; 1785 vmcb_mark_dirty(svm->vmcb, VMCB_CR); 1786 } 1787 } 1788 1789 static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1790 { 1791 return true; 1792 } 1793 1794 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1795 { 1796 struct vcpu_svm *svm = to_svm(vcpu); 1797 u64 hcr0 = cr0; 1798 bool old_paging = is_paging(vcpu); 1799 1800 #ifdef CONFIG_X86_64 1801 if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { 1802 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { 1803 vcpu->arch.efer |= EFER_LMA; 1804 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; 1805 } 1806 1807 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { 1808 vcpu->arch.efer &= ~EFER_LMA; 1809 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); 1810 } 1811 } 1812 #endif 1813 vcpu->arch.cr0 = cr0; 1814 1815 if (!npt_enabled) { 1816 hcr0 |= X86_CR0_PG | X86_CR0_WP; 1817 if (old_paging != is_paging(vcpu)) 1818 svm_set_cr4(vcpu, kvm_read_cr4(vcpu)); 1819 } 1820 1821 /* 1822 * re-enable caching here because the QEMU bios 1823 * does not do it - this results in some delay at 1824 * reboot 1825 */ 1826 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 1827 hcr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1828 1829 svm->vmcb->save.cr0 = hcr0; 1830 vmcb_mark_dirty(svm->vmcb, VMCB_CR); 1831 1832 /* 1833 * SEV-ES guests must always keep the CR intercepts cleared. CR 1834 * tracking is done using the CR write traps. 1835 */ 1836 if (sev_es_guest(vcpu->kvm)) 1837 return; 1838 1839 if (hcr0 == cr0) { 1840 /* Selective CR0 write remains on. */ 1841 svm_clr_intercept(svm, INTERCEPT_CR0_READ); 1842 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); 1843 } else { 1844 svm_set_intercept(svm, INTERCEPT_CR0_READ); 1845 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); 1846 } 1847 } 1848 1849 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1850 { 1851 return true; 1852 } 1853 1854 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1855 { 1856 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; 1857 unsigned long old_cr4 = vcpu->arch.cr4; 1858 1859 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) 1860 svm_flush_tlb_current(vcpu); 1861 1862 vcpu->arch.cr4 = cr4; 1863 if (!npt_enabled) { 1864 cr4 |= X86_CR4_PAE; 1865 1866 if (!is_paging(vcpu)) 1867 cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); 1868 } 1869 cr4 |= host_cr4_mce; 1870 to_svm(vcpu)->vmcb->save.cr4 = cr4; 1871 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); 1872 1873 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 1874 kvm_update_cpuid_runtime(vcpu); 1875 } 1876 1877 static void svm_set_segment(struct kvm_vcpu *vcpu, 1878 struct kvm_segment *var, int seg) 1879 { 1880 struct vcpu_svm *svm = to_svm(vcpu); 1881 struct vmcb_seg *s = svm_seg(vcpu, seg); 1882 1883 s->base = var->base; 1884 s->limit = var->limit; 1885 s->selector = var->selector; 1886 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); 1887 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; 1888 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; 1889 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; 1890 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; 1891 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; 1892 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; 1893 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; 1894 1895 /* 1896 * This is always accurate, except if SYSRET returned to a segment 1897 * with SS.DPL != 3. 
Intel does not have this quirk, and always 1898 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it 1899 * would entail passing the CPL to userspace and back. 1900 */ 1901 if (seg == VCPU_SREG_SS) 1902 /* This is symmetric with svm_get_segment() */ 1903 svm->vmcb->save.cpl = (var->dpl & 3); 1904 1905 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); 1906 } 1907 1908 static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) 1909 { 1910 struct vcpu_svm *svm = to_svm(vcpu); 1911 1912 clr_exception_intercept(svm, BP_VECTOR); 1913 1914 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { 1915 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 1916 set_exception_intercept(svm, BP_VECTOR); 1917 } 1918 } 1919 1920 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) 1921 { 1922 if (sd->next_asid > sd->max_asid) { 1923 ++sd->asid_generation; 1924 sd->next_asid = sd->min_asid; 1925 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; 1926 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); 1927 } 1928 1929 svm->current_vmcb->asid_generation = sd->asid_generation; 1930 svm->asid = sd->next_asid++; 1931 } 1932 1933 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) 1934 { 1935 struct vmcb *vmcb = svm->vmcb; 1936 1937 if (svm->vcpu.arch.guest_state_protected) 1938 return; 1939 1940 if (unlikely(value != vmcb->save.dr6)) { 1941 vmcb->save.dr6 = value; 1942 vmcb_mark_dirty(vmcb, VMCB_DR); 1943 } 1944 } 1945 1946 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 1947 { 1948 struct vcpu_svm *svm = to_svm(vcpu); 1949 1950 if (vcpu->arch.guest_state_protected) 1951 return; 1952 1953 get_debugreg(vcpu->arch.db[0], 0); 1954 get_debugreg(vcpu->arch.db[1], 1); 1955 get_debugreg(vcpu->arch.db[2], 2); 1956 get_debugreg(vcpu->arch.db[3], 3); 1957 /* 1958 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, 1959 * because db_interception might need it. We can do it before vmentry. 1960 */ 1961 vcpu->arch.dr6 = svm->vmcb->save.dr6; 1962 vcpu->arch.dr7 = svm->vmcb->save.dr7; 1963 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 1964 set_dr_intercepts(svm); 1965 } 1966 1967 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) 1968 { 1969 struct vcpu_svm *svm = to_svm(vcpu); 1970 1971 if (vcpu->arch.guest_state_protected) 1972 return; 1973 1974 svm->vmcb->save.dr7 = value; 1975 vmcb_mark_dirty(svm->vmcb, VMCB_DR); 1976 } 1977 1978 static int pf_interception(struct kvm_vcpu *vcpu) 1979 { 1980 struct vcpu_svm *svm = to_svm(vcpu); 1981 1982 u64 fault_address = svm->vmcb->control.exit_info_2; 1983 u64 error_code = svm->vmcb->control.exit_info_1; 1984 1985 return kvm_handle_page_fault(vcpu, error_code, fault_address, 1986 static_cpu_has(X86_FEATURE_DECODEASSISTS) ? 1987 svm->vmcb->control.insn_bytes : NULL, 1988 svm->vmcb->control.insn_len); 1989 } 1990 1991 static int npf_interception(struct kvm_vcpu *vcpu) 1992 { 1993 struct vcpu_svm *svm = to_svm(vcpu); 1994 1995 u64 fault_address = svm->vmcb->control.exit_info_2; 1996 u64 error_code = svm->vmcb->control.exit_info_1; 1997 1998 trace_kvm_page_fault(vcpu, fault_address, error_code); 1999 return kvm_mmu_page_fault(vcpu, fault_address, error_code, 2000 static_cpu_has(X86_FEATURE_DECODEASSISTS) ? 
2001 svm->vmcb->control.insn_bytes : NULL, 2002 svm->vmcb->control.insn_len); 2003 } 2004 2005 static int db_interception(struct kvm_vcpu *vcpu) 2006 { 2007 struct kvm_run *kvm_run = vcpu->run; 2008 struct vcpu_svm *svm = to_svm(vcpu); 2009 2010 if (!(vcpu->guest_debug & 2011 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && 2012 !svm->nmi_singlestep) { 2013 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; 2014 kvm_queue_exception_p(vcpu, DB_VECTOR, payload); 2015 return 1; 2016 } 2017 2018 if (svm->nmi_singlestep) { 2019 disable_nmi_singlestep(svm); 2020 /* Make sure we check for pending NMIs upon entry */ 2021 kvm_make_request(KVM_REQ_EVENT, vcpu); 2022 } 2023 2024 if (vcpu->guest_debug & 2025 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { 2026 kvm_run->exit_reason = KVM_EXIT_DEBUG; 2027 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; 2028 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; 2029 kvm_run->debug.arch.pc = 2030 svm->vmcb->save.cs.base + svm->vmcb->save.rip; 2031 kvm_run->debug.arch.exception = DB_VECTOR; 2032 return 0; 2033 } 2034 2035 return 1; 2036 } 2037 2038 static int bp_interception(struct kvm_vcpu *vcpu) 2039 { 2040 struct vcpu_svm *svm = to_svm(vcpu); 2041 struct kvm_run *kvm_run = vcpu->run; 2042 2043 kvm_run->exit_reason = KVM_EXIT_DEBUG; 2044 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; 2045 kvm_run->debug.arch.exception = BP_VECTOR; 2046 return 0; 2047 } 2048 2049 static int ud_interception(struct kvm_vcpu *vcpu) 2050 { 2051 return handle_ud(vcpu); 2052 } 2053 2054 static int ac_interception(struct kvm_vcpu *vcpu) 2055 { 2056 kvm_queue_exception_e(vcpu, AC_VECTOR, 0); 2057 return 1; 2058 } 2059 2060 static bool is_erratum_383(void) 2061 { 2062 int err, i; 2063 u64 value; 2064 2065 if (!erratum_383_found) 2066 return false; 2067 2068 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err); 2069 if (err) 2070 return false; 2071 2072 /* Bit 62 may or may not be set for this mce */ 2073 value &= ~(1ULL << 62); 2074 2075 if (value != 0xb600000000010015ULL) 2076 return false; 2077 2078 /* Clear MCi_STATUS registers */ 2079 for (i = 0; i < 6; ++i) 2080 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); 2081 2082 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); 2083 if (!err) { 2084 u32 low, high; 2085 2086 value &= ~(1ULL << 2); 2087 low = lower_32_bits(value); 2088 high = upper_32_bits(value); 2089 2090 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); 2091 } 2092 2093 /* Flush tlb to evict multi-match entries */ 2094 __flush_tlb_all(); 2095 2096 return true; 2097 } 2098 2099 static void svm_handle_mce(struct kvm_vcpu *vcpu) 2100 { 2101 if (is_erratum_383()) { 2102 /* 2103 * Erratum 383 triggered. Guest state is corrupt so kill the 2104 * guest. 2105 */ 2106 pr_err("Guest triggered AMD Erratum 383\n"); 2107 2108 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 2109 2110 return; 2111 } 2112 2113 /* 2114 * On an #MC intercept the MCE handler is not called automatically in 2115 * the host. So do it by hand here. 2116 */ 2117 kvm_machine_check(); 2118 } 2119 2120 static int mc_interception(struct kvm_vcpu *vcpu) 2121 { 2122 return 1; 2123 } 2124 2125 static int shutdown_interception(struct kvm_vcpu *vcpu) 2126 { 2127 struct kvm_run *kvm_run = vcpu->run; 2128 struct vcpu_svm *svm = to_svm(vcpu); 2129 2130 /* 2131 * The VM save area has already been encrypted so it 2132 * cannot be reinitialized - just terminate. 
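* (Returning a negative errno below propagates the failure out of the exit handler and fails KVM_RUN, rather than re-entering a vCPU whose state cannot be recovered.)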
2133 */ 2134 if (sev_es_guest(vcpu->kvm)) 2135 return -EINVAL; 2136 2137 /* 2138 * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put 2139 * the VMCB in a known good state. Unfortunately, KVM doesn't have 2140 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking 2141 * userspace. From a platform perspective, INIT is acceptable behavior, as 2142 * there exist bare metal platforms that automatically INIT the CPU 2143 * in response to shutdown. 2144 */ 2145 clear_page(svm->vmcb); 2146 kvm_vcpu_reset(vcpu, true); 2147 2148 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; 2149 return 0; 2150 } 2151 2152 static int io_interception(struct kvm_vcpu *vcpu) 2153 { 2154 struct vcpu_svm *svm = to_svm(vcpu); 2155 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ 2156 int size, in, string; 2157 unsigned port; 2158 2159 ++vcpu->stat.io_exits; 2160 string = (io_info & SVM_IOIO_STR_MASK) != 0; 2161 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; 2162 port = io_info >> 16; 2163 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; 2164 2165 if (string) { 2166 if (sev_es_guest(vcpu->kvm)) 2167 return sev_es_string_io(svm, size, port, in); 2168 else 2169 return kvm_emulate_instruction(vcpu, 0); 2170 } 2171 2172 svm->next_rip = svm->vmcb->control.exit_info_2; 2173 2174 return kvm_fast_pio(vcpu, size, port, in); 2175 } 2176 2177 static int nmi_interception(struct kvm_vcpu *vcpu) 2178 { 2179 return 1; 2180 } 2181 2182 static int smi_interception(struct kvm_vcpu *vcpu) 2183 { 2184 return 1; 2185 } 2186 2187 static int intr_interception(struct kvm_vcpu *vcpu) 2188 { 2189 ++vcpu->stat.irq_exits; 2190 return 1; 2191 } 2192 2193 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) 2194 { 2195 struct vcpu_svm *svm = to_svm(vcpu); 2196 struct vmcb *vmcb12; 2197 struct kvm_host_map map; 2198 int ret; 2199 2200 if (nested_svm_check_permissions(vcpu)) 2201 return 1; 2202 2203 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); 2204 if (ret) { 2205 if (ret == -EINVAL) 2206 kvm_inject_gp(vcpu, 0); 2207 return 1; 2208 } 2209 2210 vmcb12 = map.hva; 2211 2212 ret = kvm_skip_emulated_instruction(vcpu); 2213 2214 if (vmload) { 2215 svm_copy_vmloadsave_state(svm->vmcb, vmcb12); 2216 svm->sysenter_eip_hi = 0; 2217 svm->sysenter_esp_hi = 0; 2218 } else { 2219 svm_copy_vmloadsave_state(vmcb12, svm->vmcb); 2220 } 2221 2222 kvm_vcpu_unmap(vcpu, &map, true); 2223 2224 return ret; 2225 } 2226 2227 static int vmload_interception(struct kvm_vcpu *vcpu) 2228 { 2229 return vmload_vmsave_interception(vcpu, true); 2230 } 2231 2232 static int vmsave_interception(struct kvm_vcpu *vcpu) 2233 { 2234 return vmload_vmsave_interception(vcpu, false); 2235 } 2236 2237 static int vmrun_interception(struct kvm_vcpu *vcpu) 2238 { 2239 if (nested_svm_check_permissions(vcpu)) 2240 return 1; 2241 2242 return nested_svm_vmrun(vcpu); 2243 } 2244 2245 enum { 2246 NONE_SVM_INSTR, 2247 SVM_INSTR_VMRUN, 2248 SVM_INSTR_VMLOAD, 2249 SVM_INSTR_VMSAVE, 2250 }; 2251 2252 /* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */ 2253 static int svm_instr_opcode(struct kvm_vcpu *vcpu) 2254 { 2255 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 2256 2257 if (ctxt->b != 0x1 || ctxt->opcode_len != 2) 2258 return NONE_SVM_INSTR; 2259 2260 switch (ctxt->modrm) { 2261 case 0xd8: /* VMRUN */ 2262 return SVM_INSTR_VMRUN; 2263 case 0xda: /* VMLOAD */ 2264 return SVM_INSTR_VMLOAD; 2265 case 0xdb: /* VMSAVE */ 2266 return SVM_INSTR_VMSAVE; 2267 default: 2268 break; 2269 } 2270 2271
return NONE_SVM_INSTR; 2272 } 2273 2274 static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) 2275 { 2276 const int guest_mode_exit_codes[] = { 2277 [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, 2278 [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, 2279 [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, 2280 }; 2281 int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = { 2282 [SVM_INSTR_VMRUN] = vmrun_interception, 2283 [SVM_INSTR_VMLOAD] = vmload_interception, 2284 [SVM_INSTR_VMSAVE] = vmsave_interception, 2285 }; 2286 struct vcpu_svm *svm = to_svm(vcpu); 2287 int ret; 2288 2289 if (is_guest_mode(vcpu)) { 2290 /* Returns '1' or -errno on failure, '0' on success. */ 2291 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); 2292 if (ret) 2293 return ret; 2294 return 1; 2295 } 2296 return svm_instr_handlers[opcode](vcpu); 2297 } 2298 2299 /* 2300 * #GP handling code. Note that #GP can be triggered under the following two 2301 * cases: 2302 * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on 2303 * some AMD CPUs when EAX of these instructions are in the reserved memory 2304 * regions (e.g. SMM memory on host). 2305 * 2) VMware backdoor 2306 */ 2307 static int gp_interception(struct kvm_vcpu *vcpu) 2308 { 2309 struct vcpu_svm *svm = to_svm(vcpu); 2310 u32 error_code = svm->vmcb->control.exit_info_1; 2311 int opcode; 2312 2313 /* Both #GP cases have zero error_code */ 2314 if (error_code) 2315 goto reinject; 2316 2317 /* Decode the instruction for usage later */ 2318 if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) 2319 goto reinject; 2320 2321 opcode = svm_instr_opcode(vcpu); 2322 2323 if (opcode == NONE_SVM_INSTR) { 2324 if (!enable_vmware_backdoor) 2325 goto reinject; 2326 2327 /* 2328 * VMware backdoor emulation on #GP interception only handles 2329 * IN{S}, OUT{S}, and RDPMC. 2330 */ 2331 if (!is_guest_mode(vcpu)) 2332 return kvm_emulate_instruction(vcpu, 2333 EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); 2334 } else { 2335 /* All SVM instructions expect page aligned RAX */ 2336 if (svm->vmcb->save.rax & ~PAGE_MASK) 2337 goto reinject; 2338 2339 return emulate_svm_instr(vcpu, opcode); 2340 } 2341 2342 reinject: 2343 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 2344 return 1; 2345 } 2346 2347 void svm_set_gif(struct vcpu_svm *svm, bool value) 2348 { 2349 if (value) { 2350 /* 2351 * If VGIF is enabled, the STGI intercept is only added to 2352 * detect the opening of the SMI/NMI window; remove it now. 2353 * Likewise, clear the VINTR intercept, we will set it 2354 * again while processing KVM_REQ_EVENT if needed. 2355 */ 2356 if (vgif) 2357 svm_clr_intercept(svm, INTERCEPT_STGI); 2358 if (svm_is_intercept(svm, INTERCEPT_VINTR)) 2359 svm_clear_vintr(svm); 2360 2361 enable_gif(svm); 2362 if (svm->vcpu.arch.smi_pending || 2363 svm->vcpu.arch.nmi_pending || 2364 kvm_cpu_has_injectable_intr(&svm->vcpu) || 2365 kvm_apic_has_pending_init_or_sipi(&svm->vcpu)) 2366 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); 2367 } else { 2368 disable_gif(svm); 2369 2370 /* 2371 * After a CLGI no interrupts should come. But if vGIF is 2372 * in use, we still rely on the VINTR intercept (rather than 2373 * STGI) to detect an open interrupt window. 
2374 */ 2375 if (!vgif) 2376 svm_clear_vintr(svm); 2377 } 2378 } 2379 2380 static int stgi_interception(struct kvm_vcpu *vcpu) 2381 { 2382 int ret; 2383 2384 if (nested_svm_check_permissions(vcpu)) 2385 return 1; 2386 2387 ret = kvm_skip_emulated_instruction(vcpu); 2388 svm_set_gif(to_svm(vcpu), true); 2389 return ret; 2390 } 2391 2392 static int clgi_interception(struct kvm_vcpu *vcpu) 2393 { 2394 int ret; 2395 2396 if (nested_svm_check_permissions(vcpu)) 2397 return 1; 2398 2399 ret = kvm_skip_emulated_instruction(vcpu); 2400 svm_set_gif(to_svm(vcpu), false); 2401 return ret; 2402 } 2403 2404 static int invlpga_interception(struct kvm_vcpu *vcpu) 2405 { 2406 gva_t gva = kvm_rax_read(vcpu); 2407 u32 asid = kvm_rcx_read(vcpu); 2408 2409 /* FIXME: Handle an address size prefix. */ 2410 if (!is_long_mode(vcpu)) 2411 gva = (u32)gva; 2412 2413 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); 2414 2415 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ 2416 kvm_mmu_invlpg(vcpu, gva); 2417 2418 return kvm_skip_emulated_instruction(vcpu); 2419 } 2420 2421 static int skinit_interception(struct kvm_vcpu *vcpu) 2422 { 2423 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); 2424 2425 kvm_queue_exception(vcpu, UD_VECTOR); 2426 return 1; 2427 } 2428 2429 static int task_switch_interception(struct kvm_vcpu *vcpu) 2430 { 2431 struct vcpu_svm *svm = to_svm(vcpu); 2432 u16 tss_selector; 2433 int reason; 2434 int int_type = svm->vmcb->control.exit_int_info & 2435 SVM_EXITINTINFO_TYPE_MASK; 2436 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; 2437 uint32_t type = 2438 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; 2439 uint32_t idt_v = 2440 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; 2441 bool has_error_code = false; 2442 u32 error_code = 0; 2443 2444 tss_selector = (u16)svm->vmcb->control.exit_info_1; 2445 2446 if (svm->vmcb->control.exit_info_2 & 2447 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) 2448 reason = TASK_SWITCH_IRET; 2449 else if (svm->vmcb->control.exit_info_2 & 2450 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) 2451 reason = TASK_SWITCH_JMP; 2452 else if (idt_v) 2453 reason = TASK_SWITCH_GATE; 2454 else 2455 reason = TASK_SWITCH_CALL; 2456 2457 if (reason == TASK_SWITCH_GATE) { 2458 switch (type) { 2459 case SVM_EXITINTINFO_TYPE_NMI: 2460 vcpu->arch.nmi_injected = false; 2461 break; 2462 case SVM_EXITINTINFO_TYPE_EXEPT: 2463 if (svm->vmcb->control.exit_info_2 & 2464 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { 2465 has_error_code = true; 2466 error_code = 2467 (u32)svm->vmcb->control.exit_info_2; 2468 } 2469 kvm_clear_exception_queue(vcpu); 2470 break; 2471 case SVM_EXITINTINFO_TYPE_INTR: 2472 case SVM_EXITINTINFO_TYPE_SOFT: 2473 kvm_clear_interrupt_queue(vcpu); 2474 break; 2475 default: 2476 break; 2477 } 2478 } 2479 2480 if (reason != TASK_SWITCH_GATE || 2481 int_type == SVM_EXITINTINFO_TYPE_SOFT || 2482 (int_type == SVM_EXITINTINFO_TYPE_EXEPT && 2483 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { 2484 if (!svm_skip_emulated_instruction(vcpu)) 2485 return 0; 2486 } 2487 2488 if (int_type != SVM_EXITINTINFO_TYPE_SOFT) 2489 int_vec = -1; 2490 2491 return kvm_task_switch(vcpu, tss_selector, int_vec, reason, 2492 has_error_code, error_code); 2493 } 2494 2495 static void svm_clr_iret_intercept(struct vcpu_svm *svm) 2496 { 2497 if (!sev_es_guest(svm->vcpu.kvm)) 2498 svm_clr_intercept(svm, INTERCEPT_IRET); 2499 } 2500 2501 static void svm_set_iret_intercept(struct vcpu_svm *svm) 2502 { 2503 if 
(!sev_es_guest(svm->vcpu.kvm)) 2504 svm_set_intercept(svm, INTERCEPT_IRET); 2505 } 2506 2507 static int iret_interception(struct kvm_vcpu *vcpu) 2508 { 2509 struct vcpu_svm *svm = to_svm(vcpu); 2510 2511 ++vcpu->stat.nmi_window_exits; 2512 svm->awaiting_iret_completion = true; 2513 2514 svm_clr_iret_intercept(svm); 2515 if (!sev_es_guest(vcpu->kvm)) 2516 svm->nmi_iret_rip = kvm_rip_read(vcpu); 2517 2518 kvm_make_request(KVM_REQ_EVENT, vcpu); 2519 return 1; 2520 } 2521 2522 static int invlpg_interception(struct kvm_vcpu *vcpu) 2523 { 2524 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) 2525 return kvm_emulate_instruction(vcpu, 0); 2526 2527 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); 2528 return kvm_skip_emulated_instruction(vcpu); 2529 } 2530 2531 static int emulate_on_interception(struct kvm_vcpu *vcpu) 2532 { 2533 return kvm_emulate_instruction(vcpu, 0); 2534 } 2535 2536 static int rsm_interception(struct kvm_vcpu *vcpu) 2537 { 2538 return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2); 2539 } 2540 2541 static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, 2542 unsigned long val) 2543 { 2544 struct vcpu_svm *svm = to_svm(vcpu); 2545 unsigned long cr0 = vcpu->arch.cr0; 2546 bool ret = false; 2547 2548 if (!is_guest_mode(vcpu) || 2549 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) 2550 return false; 2551 2552 cr0 &= ~SVM_CR0_SELECTIVE_MASK; 2553 val &= ~SVM_CR0_SELECTIVE_MASK; 2554 2555 if (cr0 ^ val) { 2556 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; 2557 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); 2558 } 2559 2560 return ret; 2561 } 2562 2563 #define CR_VALID (1ULL << 63) 2564 2565 static int cr_interception(struct kvm_vcpu *vcpu) 2566 { 2567 struct vcpu_svm *svm = to_svm(vcpu); 2568 int reg, cr; 2569 unsigned long val; 2570 int err; 2571 2572 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) 2573 return emulate_on_interception(vcpu); 2574 2575 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) 2576 return emulate_on_interception(vcpu); 2577 2578 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; 2579 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) 2580 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; 2581 else 2582 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; 2583 2584 err = 0; 2585 if (cr >= 16) { /* mov to cr */ 2586 cr -= 16; 2587 val = kvm_register_read(vcpu, reg); 2588 trace_kvm_cr_write(cr, val); 2589 switch (cr) { 2590 case 0: 2591 if (!check_selective_cr0_intercepted(vcpu, val)) 2592 err = kvm_set_cr0(vcpu, val); 2593 else 2594 return 1; 2595 2596 break; 2597 case 3: 2598 err = kvm_set_cr3(vcpu, val); 2599 break; 2600 case 4: 2601 err = kvm_set_cr4(vcpu, val); 2602 break; 2603 case 8: 2604 err = kvm_set_cr8(vcpu, val); 2605 break; 2606 default: 2607 WARN(1, "unhandled write to CR%d", cr); 2608 kvm_queue_exception(vcpu, UD_VECTOR); 2609 return 1; 2610 } 2611 } else { /* mov from cr */ 2612 switch (cr) { 2613 case 0: 2614 val = kvm_read_cr0(vcpu); 2615 break; 2616 case 2: 2617 val = vcpu->arch.cr2; 2618 break; 2619 case 3: 2620 val = kvm_read_cr3(vcpu); 2621 break; 2622 case 4: 2623 val = kvm_read_cr4(vcpu); 2624 break; 2625 case 8: 2626 val = kvm_get_cr8(vcpu); 2627 break; 2628 default: 2629 WARN(1, "unhandled read from CR%d", cr); 2630 kvm_queue_exception(vcpu, UD_VECTOR); 2631 return 1; 2632 } 2633 kvm_register_write(vcpu, reg, val); 2634 trace_kvm_cr_read(cr, val); 2635 } 2636 return kvm_complete_insn_gp(vcpu, err); 2637 } 2638 2639 static int 
cr_trap(struct kvm_vcpu *vcpu) 2640 { 2641 struct vcpu_svm *svm = to_svm(vcpu); 2642 unsigned long old_value, new_value; 2643 unsigned int cr; 2644 int ret = 0; 2645 2646 new_value = (unsigned long)svm->vmcb->control.exit_info_1; 2647 2648 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; 2649 switch (cr) { 2650 case 0: 2651 old_value = kvm_read_cr0(vcpu); 2652 svm_set_cr0(vcpu, new_value); 2653 2654 kvm_post_set_cr0(vcpu, old_value, new_value); 2655 break; 2656 case 4: 2657 old_value = kvm_read_cr4(vcpu); 2658 svm_set_cr4(vcpu, new_value); 2659 2660 kvm_post_set_cr4(vcpu, old_value, new_value); 2661 break; 2662 case 8: 2663 ret = kvm_set_cr8(vcpu, new_value); 2664 break; 2665 default: 2666 WARN(1, "unhandled CR%d write trap", cr); 2667 kvm_queue_exception(vcpu, UD_VECTOR); 2668 return 1; 2669 } 2670 2671 return kvm_complete_insn_gp(vcpu, ret); 2672 } 2673 2674 static int dr_interception(struct kvm_vcpu *vcpu) 2675 { 2676 struct vcpu_svm *svm = to_svm(vcpu); 2677 int reg, dr; 2678 unsigned long val; 2679 int err = 0; 2680 2681 if (vcpu->guest_debug == 0) { 2682 /* 2683 * No more DR vmexits; force a reload of the debug registers 2684 * and reenter on this instruction. The next vmexit will 2685 * retrieve the full state of the debug registers. 2686 */ 2687 clr_dr_intercepts(svm); 2688 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 2689 return 1; 2690 } 2691 2692 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) 2693 return emulate_on_interception(vcpu); 2694 2695 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; 2696 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; 2697 if (dr >= 16) { /* mov to DRn */ 2698 dr -= 16; 2699 val = kvm_register_read(vcpu, reg); 2700 err = kvm_set_dr(vcpu, dr, val); 2701 } else { 2702 kvm_get_dr(vcpu, dr, &val); 2703 kvm_register_write(vcpu, reg, val); 2704 } 2705 2706 return kvm_complete_insn_gp(vcpu, err); 2707 } 2708 2709 static int cr8_write_interception(struct kvm_vcpu *vcpu) 2710 { 2711 int r; 2712 2713 u8 cr8_prev = kvm_get_cr8(vcpu); 2714 /* instruction emulation calls kvm_set_cr8() */ 2715 r = cr_interception(vcpu); 2716 if (lapic_in_kernel(vcpu)) 2717 return r; 2718 if (cr8_prev <= kvm_get_cr8(vcpu)) 2719 return r; 2720 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 2721 return 0; 2722 } 2723 2724 static int efer_trap(struct kvm_vcpu *vcpu) 2725 { 2726 struct msr_data msr_info; 2727 int ret; 2728 2729 /* 2730 * Clear the EFER_SVME bit from EFER. The SVM code always sets this 2731 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against 2732 * whether the guest has X86_FEATURE_SVM - this avoids a failure if 2733 * the guest doesn't have X86_FEATURE_SVM. 
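* Like the CR write traps, the EFER write trap is used for SEV-ES guests, whose register state cannot be read out of the encrypted save area; the new EFER value is delivered in exit_info_1 instead.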
2734 */ 2735 msr_info.host_initiated = false; 2736 msr_info.index = MSR_EFER; 2737 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; 2738 ret = kvm_set_msr_common(vcpu, &msr_info); 2739 2740 return kvm_complete_insn_gp(vcpu, ret); 2741 } 2742 2743 static int svm_get_msr_feature(struct kvm_msr_entry *msr) 2744 { 2745 msr->data = 0; 2746 2747 switch (msr->index) { 2748 case MSR_AMD64_DE_CFG: 2749 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) 2750 msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; 2751 break; 2752 default: 2753 return KVM_MSR_RET_INVALID; 2754 } 2755 2756 return 0; 2757 } 2758 2759 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 2760 { 2761 struct vcpu_svm *svm = to_svm(vcpu); 2762 2763 switch (msr_info->index) { 2764 case MSR_AMD64_TSC_RATIO: 2765 if (!msr_info->host_initiated && !svm->tsc_scaling_enabled) 2766 return 1; 2767 msr_info->data = svm->tsc_ratio_msr; 2768 break; 2769 case MSR_STAR: 2770 msr_info->data = svm->vmcb01.ptr->save.star; 2771 break; 2772 #ifdef CONFIG_X86_64 2773 case MSR_LSTAR: 2774 msr_info->data = svm->vmcb01.ptr->save.lstar; 2775 break; 2776 case MSR_CSTAR: 2777 msr_info->data = svm->vmcb01.ptr->save.cstar; 2778 break; 2779 case MSR_KERNEL_GS_BASE: 2780 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; 2781 break; 2782 case MSR_SYSCALL_MASK: 2783 msr_info->data = svm->vmcb01.ptr->save.sfmask; 2784 break; 2785 #endif 2786 case MSR_IA32_SYSENTER_CS: 2787 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; 2788 break; 2789 case MSR_IA32_SYSENTER_EIP: 2790 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; 2791 if (guest_cpuid_is_intel(vcpu)) 2792 msr_info->data |= (u64)svm->sysenter_eip_hi << 32; 2793 break; 2794 case MSR_IA32_SYSENTER_ESP: 2795 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; 2796 if (guest_cpuid_is_intel(vcpu)) 2797 msr_info->data |= (u64)svm->sysenter_esp_hi << 32; 2798 break; 2799 case MSR_TSC_AUX: 2800 msr_info->data = svm->tsc_aux; 2801 break; 2802 case MSR_IA32_DEBUGCTLMSR: 2803 case MSR_IA32_LASTBRANCHFROMIP: 2804 case MSR_IA32_LASTBRANCHTOIP: 2805 case MSR_IA32_LASTINTFROMIP: 2806 case MSR_IA32_LASTINTTOIP: 2807 msr_info->data = svm_get_lbr_msr(svm, msr_info->index); 2808 break; 2809 case MSR_VM_HSAVE_PA: 2810 msr_info->data = svm->nested.hsave_msr; 2811 break; 2812 case MSR_VM_CR: 2813 msr_info->data = svm->nested.vm_cr_msr; 2814 break; 2815 case MSR_IA32_SPEC_CTRL: 2816 if (!msr_info->host_initiated && 2817 !guest_has_spec_ctrl_msr(vcpu)) 2818 return 1; 2819 2820 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 2821 msr_info->data = svm->vmcb->save.spec_ctrl; 2822 else 2823 msr_info->data = svm->spec_ctrl; 2824 break; 2825 case MSR_AMD64_VIRT_SPEC_CTRL: 2826 if (!msr_info->host_initiated && 2827 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) 2828 return 1; 2829 2830 msr_info->data = svm->virt_spec_ctrl; 2831 break; 2832 case MSR_F15H_IC_CFG: { 2833 2834 int family, model; 2835 2836 family = guest_cpuid_family(vcpu); 2837 model = guest_cpuid_model(vcpu); 2838 2839 if (family < 0 || model < 0) 2840 return kvm_get_msr_common(vcpu, msr_info); 2841 2842 msr_info->data = 0; 2843 2844 if (family == 0x15 && 2845 (model >= 0x2 && model < 0x20)) 2846 msr_info->data = 0x1E; 2847 } 2848 break; 2849 case MSR_AMD64_DE_CFG: 2850 msr_info->data = svm->msr_decfg; 2851 break; 2852 default: 2853 return kvm_get_msr_common(vcpu, msr_info); 2854 } 2855 return 0; 2856 } 2857 2858 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) 2859 { 2860 struct vcpu_svm *svm = 
to_svm(vcpu); 2861 if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) 2862 return kvm_complete_insn_gp(vcpu, err); 2863 2864 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1); 2865 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 2866 X86_TRAP_GP | 2867 SVM_EVTINJ_TYPE_EXEPT | 2868 SVM_EVTINJ_VALID); 2869 return 1; 2870 } 2871 2872 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) 2873 { 2874 struct vcpu_svm *svm = to_svm(vcpu); 2875 int svm_dis, chg_mask; 2876 2877 if (data & ~SVM_VM_CR_VALID_MASK) 2878 return 1; 2879 2880 chg_mask = SVM_VM_CR_VALID_MASK; 2881 2882 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) 2883 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); 2884 2885 svm->nested.vm_cr_msr &= ~chg_mask; 2886 svm->nested.vm_cr_msr |= (data & chg_mask); 2887 2888 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; 2889 2890 /* check for svm_disable while efer.svme is set */ 2891 if (svm_dis && (vcpu->arch.efer & EFER_SVME)) 2892 return 1; 2893 2894 return 0; 2895 } 2896 2897 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) 2898 { 2899 struct vcpu_svm *svm = to_svm(vcpu); 2900 int ret = 0; 2901 2902 u32 ecx = msr->index; 2903 u64 data = msr->data; 2904 switch (ecx) { 2905 case MSR_AMD64_TSC_RATIO: 2906 2907 if (!svm->tsc_scaling_enabled) { 2908 2909 if (!msr->host_initiated) 2910 return 1; 2911 /* 2912 * In case TSC scaling is not enabled, always 2913 * leave this MSR at the default value. 2914 * 2915 * Due to bug in qemu 6.2.0, it would try to set 2916 * this msr to 0 if tsc scaling is not enabled. 2917 * Ignore this value as well. 2918 */ 2919 if (data != 0 && data != svm->tsc_ratio_msr) 2920 return 1; 2921 break; 2922 } 2923 2924 if (data & SVM_TSC_RATIO_RSVD) 2925 return 1; 2926 2927 svm->tsc_ratio_msr = data; 2928 2929 if (svm->tsc_scaling_enabled && is_guest_mode(vcpu)) 2930 nested_svm_update_tsc_ratio_msr(vcpu); 2931 2932 break; 2933 case MSR_IA32_CR_PAT: 2934 ret = kvm_set_msr_common(vcpu, msr); 2935 if (ret) 2936 break; 2937 2938 svm->vmcb01.ptr->save.g_pat = data; 2939 if (is_guest_mode(vcpu)) 2940 nested_vmcb02_compute_g_pat(svm); 2941 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); 2942 break; 2943 case MSR_IA32_SPEC_CTRL: 2944 if (!msr->host_initiated && 2945 !guest_has_spec_ctrl_msr(vcpu)) 2946 return 1; 2947 2948 if (kvm_spec_ctrl_test_value(data)) 2949 return 1; 2950 2951 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 2952 svm->vmcb->save.spec_ctrl = data; 2953 else 2954 svm->spec_ctrl = data; 2955 if (!data) 2956 break; 2957 2958 /* 2959 * For non-nested: 2960 * When it's written (to non-zero) for the first time, pass 2961 * it through. 2962 * 2963 * For nested: 2964 * The handling of the MSR bitmap for L2 guests is done in 2965 * nested_svm_vmrun_msrpm. 2966 * We update the L1 MSR bit as well since it will end up 2967 * touching the MSR anyway now. 
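* Disabling the read/write intercepts below gives the guest direct access to SPEC_CTRL from here on, so later accesses no longer take an exit.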
2968 */ 2969 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); 2970 break; 2971 case MSR_AMD64_VIRT_SPEC_CTRL: 2972 if (!msr->host_initiated && 2973 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) 2974 return 1; 2975 2976 if (data & ~SPEC_CTRL_SSBD) 2977 return 1; 2978 2979 svm->virt_spec_ctrl = data; 2980 break; 2981 case MSR_STAR: 2982 svm->vmcb01.ptr->save.star = data; 2983 break; 2984 #ifdef CONFIG_X86_64 2985 case MSR_LSTAR: 2986 svm->vmcb01.ptr->save.lstar = data; 2987 break; 2988 case MSR_CSTAR: 2989 svm->vmcb01.ptr->save.cstar = data; 2990 break; 2991 case MSR_KERNEL_GS_BASE: 2992 svm->vmcb01.ptr->save.kernel_gs_base = data; 2993 break; 2994 case MSR_SYSCALL_MASK: 2995 svm->vmcb01.ptr->save.sfmask = data; 2996 break; 2997 #endif 2998 case MSR_IA32_SYSENTER_CS: 2999 svm->vmcb01.ptr->save.sysenter_cs = data; 3000 break; 3001 case MSR_IA32_SYSENTER_EIP: 3002 svm->vmcb01.ptr->save.sysenter_eip = (u32)data; 3003 /* 3004 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs 3005 * when we spoof an Intel vendor ID (for cross vendor migration). 3006 * In this case we use this intercept to track the high 3007 * 32 bit part of these msrs to support Intel's 3008 * implementation of SYSENTER/SYSEXIT. 3009 */ 3010 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; 3011 break; 3012 case MSR_IA32_SYSENTER_ESP: 3013 svm->vmcb01.ptr->save.sysenter_esp = (u32)data; 3014 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; 3015 break; 3016 case MSR_TSC_AUX: 3017 /* 3018 * TSC_AUX is usually changed only during boot and never read 3019 * directly. Intercept TSC_AUX instead of exposing it to the 3020 * guest via direct_access_msrs, and switch it via user return. 3021 */ 3022 preempt_disable(); 3023 ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); 3024 preempt_enable(); 3025 if (ret) 3026 break; 3027 3028 svm->tsc_aux = data; 3029 break; 3030 case MSR_IA32_DEBUGCTLMSR: 3031 if (!lbrv) { 3032 kvm_pr_unimpl_wrmsr(vcpu, ecx, data); 3033 break; 3034 } 3035 if (data & DEBUGCTL_RESERVED_BITS) 3036 return 1; 3037 3038 if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) 3039 svm->vmcb->save.dbgctl = data; 3040 else 3041 svm->vmcb01.ptr->save.dbgctl = data; 3042 3043 svm_update_lbrv(vcpu); 3044 3045 break; 3046 case MSR_VM_HSAVE_PA: 3047 /* 3048 * Old kernels did not validate the value written to 3049 * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid 3050 * value to allow live migrating buggy or malicious guests 3051 * originating from those kernels. 
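* Guest-initiated writes are still checked with page_address_valid() below; only host-initiated writes (e.g. from migration) skip the check, and the stored value is page-aligned either way.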
3052 */ 3053 if (!msr->host_initiated && !page_address_valid(vcpu, data)) 3054 return 1; 3055 3056 svm->nested.hsave_msr = data & PAGE_MASK; 3057 break; 3058 case MSR_VM_CR: 3059 return svm_set_vm_cr(vcpu, data); 3060 case MSR_VM_IGNNE: 3061 kvm_pr_unimpl_wrmsr(vcpu, ecx, data); 3062 break; 3063 case MSR_AMD64_DE_CFG: { 3064 struct kvm_msr_entry msr_entry; 3065 3066 msr_entry.index = msr->index; 3067 if (svm_get_msr_feature(&msr_entry)) 3068 return 1; 3069 3070 /* Check the supported bits */ 3071 if (data & ~msr_entry.data) 3072 return 1; 3073 3074 /* Don't allow the guest to change a bit, #GP */ 3075 if (!msr->host_initiated && (data ^ msr_entry.data)) 3076 return 1; 3077 3078 svm->msr_decfg = data; 3079 break; 3080 } 3081 default: 3082 return kvm_set_msr_common(vcpu, msr); 3083 } 3084 return ret; 3085 } 3086 3087 static int msr_interception(struct kvm_vcpu *vcpu) 3088 { 3089 if (to_svm(vcpu)->vmcb->control.exit_info_1) 3090 return kvm_emulate_wrmsr(vcpu); 3091 else 3092 return kvm_emulate_rdmsr(vcpu); 3093 } 3094 3095 static int interrupt_window_interception(struct kvm_vcpu *vcpu) 3096 { 3097 kvm_make_request(KVM_REQ_EVENT, vcpu); 3098 svm_clear_vintr(to_svm(vcpu)); 3099 3100 /* 3101 * If not running nested, for AVIC, the only reason to end up here is ExtINTs. 3102 * In this case AVIC was temporarily disabled for 3103 * requesting the IRQ window and we have to re-enable it. 3104 * 3105 * If running nested, still remove the VM wide AVIC inhibit to 3106 * support the case in which the interrupt window was requested when the 3107 * vCPU was not running nested. 3108 * 3109 * All vCPUs that are still running nested will keep their AVIC 3110 * inhibited due to the per-vCPU AVIC inhibition. 3111 */ 3112 kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); 3113 3114 ++vcpu->stat.irq_window_exits; 3115 return 1; 3116 } 3117 3118 static int pause_interception(struct kvm_vcpu *vcpu) 3119 { 3120 bool in_kernel; 3121 /* 3122 * CPL is not made available for an SEV-ES guest, therefore 3123 * vcpu->arch.preempted_in_kernel can never be true. Just 3124 * set in_kernel to false as well. 3125 */ 3126 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; 3127 3128 grow_ple_window(vcpu); 3129 3130 kvm_vcpu_on_spin(vcpu, in_kernel); 3131 return kvm_skip_emulated_instruction(vcpu); 3132 } 3133 3134 static int invpcid_interception(struct kvm_vcpu *vcpu) 3135 { 3136 struct vcpu_svm *svm = to_svm(vcpu); 3137 unsigned long type; 3138 gva_t gva; 3139 3140 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { 3141 kvm_queue_exception(vcpu, UD_VECTOR); 3142 return 1; 3143 } 3144 3145 /* 3146 * For an INVPCID intercept: 3147 * EXITINFO1 provides the linear address of the memory operand. 3148 * EXITINFO2 provides the contents of the register operand.
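* The register operand encodes the INVPCID type (the invalidation scope); kvm_handle_invpcid() validates the type and the PCID descriptor read from the memory operand.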
3149 */ 3150 type = svm->vmcb->control.exit_info_2; 3151 gva = svm->vmcb->control.exit_info_1; 3152 3153 return kvm_handle_invpcid(vcpu, type, gva); 3154 } 3155 3156 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { 3157 [SVM_EXIT_READ_CR0] = cr_interception, 3158 [SVM_EXIT_READ_CR3] = cr_interception, 3159 [SVM_EXIT_READ_CR4] = cr_interception, 3160 [SVM_EXIT_READ_CR8] = cr_interception, 3161 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, 3162 [SVM_EXIT_WRITE_CR0] = cr_interception, 3163 [SVM_EXIT_WRITE_CR3] = cr_interception, 3164 [SVM_EXIT_WRITE_CR4] = cr_interception, 3165 [SVM_EXIT_WRITE_CR8] = cr8_write_interception, 3166 [SVM_EXIT_READ_DR0] = dr_interception, 3167 [SVM_EXIT_READ_DR1] = dr_interception, 3168 [SVM_EXIT_READ_DR2] = dr_interception, 3169 [SVM_EXIT_READ_DR3] = dr_interception, 3170 [SVM_EXIT_READ_DR4] = dr_interception, 3171 [SVM_EXIT_READ_DR5] = dr_interception, 3172 [SVM_EXIT_READ_DR6] = dr_interception, 3173 [SVM_EXIT_READ_DR7] = dr_interception, 3174 [SVM_EXIT_WRITE_DR0] = dr_interception, 3175 [SVM_EXIT_WRITE_DR1] = dr_interception, 3176 [SVM_EXIT_WRITE_DR2] = dr_interception, 3177 [SVM_EXIT_WRITE_DR3] = dr_interception, 3178 [SVM_EXIT_WRITE_DR4] = dr_interception, 3179 [SVM_EXIT_WRITE_DR5] = dr_interception, 3180 [SVM_EXIT_WRITE_DR6] = dr_interception, 3181 [SVM_EXIT_WRITE_DR7] = dr_interception, 3182 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, 3183 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, 3184 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, 3185 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, 3186 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception, 3187 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception, 3188 [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, 3189 [SVM_EXIT_INTR] = intr_interception, 3190 [SVM_EXIT_NMI] = nmi_interception, 3191 [SVM_EXIT_SMI] = smi_interception, 3192 [SVM_EXIT_VINTR] = interrupt_window_interception, 3193 [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, 3194 [SVM_EXIT_CPUID] = kvm_emulate_cpuid, 3195 [SVM_EXIT_IRET] = iret_interception, 3196 [SVM_EXIT_INVD] = kvm_emulate_invd, 3197 [SVM_EXIT_PAUSE] = pause_interception, 3198 [SVM_EXIT_HLT] = kvm_emulate_halt, 3199 [SVM_EXIT_INVLPG] = invlpg_interception, 3200 [SVM_EXIT_INVLPGA] = invlpga_interception, 3201 [SVM_EXIT_IOIO] = io_interception, 3202 [SVM_EXIT_MSR] = msr_interception, 3203 [SVM_EXIT_TASK_SWITCH] = task_switch_interception, 3204 [SVM_EXIT_SHUTDOWN] = shutdown_interception, 3205 [SVM_EXIT_VMRUN] = vmrun_interception, 3206 [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall, 3207 [SVM_EXIT_VMLOAD] = vmload_interception, 3208 [SVM_EXIT_VMSAVE] = vmsave_interception, 3209 [SVM_EXIT_STGI] = stgi_interception, 3210 [SVM_EXIT_CLGI] = clgi_interception, 3211 [SVM_EXIT_SKINIT] = skinit_interception, 3212 [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op, 3213 [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd, 3214 [SVM_EXIT_MONITOR] = kvm_emulate_monitor, 3215 [SVM_EXIT_MWAIT] = kvm_emulate_mwait, 3216 [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv, 3217 [SVM_EXIT_RDPRU] = kvm_handle_invalid_op, 3218 [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap, 3219 [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap, 3220 [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap, 3221 [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap, 3222 [SVM_EXIT_INVPCID] = invpcid_interception, 3223 [SVM_EXIT_NPF] = npf_interception, 3224 [SVM_EXIT_RSM] = rsm_interception, 3225 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, 3226 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, 3227 [SVM_EXIT_VMGEXIT] = 
sev_handle_vmgexit, 3228 }; 3229 3230 static void dump_vmcb(struct kvm_vcpu *vcpu) 3231 { 3232 struct vcpu_svm *svm = to_svm(vcpu); 3233 struct vmcb_control_area *control = &svm->vmcb->control; 3234 struct vmcb_save_area *save = &svm->vmcb->save; 3235 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; 3236 3237 if (!dump_invalid_vmcb) { 3238 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); 3239 return; 3240 } 3241 3242 pr_err("VMCB %p, last attempted VMRUN on CPU %d\n", 3243 svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); 3244 pr_err("VMCB Control Area:\n"); 3245 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); 3246 pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16); 3247 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); 3248 pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16); 3249 pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]); 3250 pr_err("%-20s%08x %08x\n", "intercepts:", 3251 control->intercepts[INTERCEPT_WORD3], 3252 control->intercepts[INTERCEPT_WORD4]); 3253 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); 3254 pr_err("%-20s%d\n", "pause filter threshold:", 3255 control->pause_filter_thresh); 3256 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); 3257 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); 3258 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); 3259 pr_err("%-20s%d\n", "asid:", control->asid); 3260 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); 3261 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); 3262 pr_err("%-20s%08x\n", "int_vector:", control->int_vector); 3263 pr_err("%-20s%08x\n", "int_state:", control->int_state); 3264 pr_err("%-20s%08x\n", "exit_code:", control->exit_code); 3265 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); 3266 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); 3267 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); 3268 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); 3269 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); 3270 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); 3271 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar); 3272 pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa); 3273 pr_err("%-20s%08x\n", "event_inj:", control->event_inj); 3274 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); 3275 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext); 3276 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); 3277 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page); 3278 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id); 3279 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id); 3280 pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa); 3281 pr_err("VMCB State Save Area:\n"); 3282 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3283 "es:", 3284 save->es.selector, save->es.attrib, 3285 save->es.limit, save->es.base); 3286 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3287 "cs:", 3288 save->cs.selector, save->cs.attrib, 3289 save->cs.limit, save->cs.base); 3290 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3291 "ss:", 3292 save->ss.selector, save->ss.attrib, 3293 save->ss.limit, save->ss.base); 3294 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3295 "ds:", 3296 
save->ds.selector, save->ds.attrib, 3297 save->ds.limit, save->ds.base); 3298 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3299 "fs:", 3300 save01->fs.selector, save01->fs.attrib, 3301 save01->fs.limit, save01->fs.base); 3302 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3303 "gs:", 3304 save01->gs.selector, save01->gs.attrib, 3305 save01->gs.limit, save01->gs.base); 3306 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3307 "gdtr:", 3308 save->gdtr.selector, save->gdtr.attrib, 3309 save->gdtr.limit, save->gdtr.base); 3310 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3311 "ldtr:", 3312 save01->ldtr.selector, save01->ldtr.attrib, 3313 save01->ldtr.limit, save01->ldtr.base); 3314 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3315 "idtr:", 3316 save->idtr.selector, save->idtr.attrib, 3317 save->idtr.limit, save->idtr.base); 3318 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", 3319 "tr:", 3320 save01->tr.selector, save01->tr.attrib, 3321 save01->tr.limit, save01->tr.base); 3322 pr_err("vmpl: %d cpl: %d efer: %016llx\n", 3323 save->vmpl, save->cpl, save->efer); 3324 pr_err("%-15s %016llx %-13s %016llx\n", 3325 "cr0:", save->cr0, "cr2:", save->cr2); 3326 pr_err("%-15s %016llx %-13s %016llx\n", 3327 "cr3:", save->cr3, "cr4:", save->cr4); 3328 pr_err("%-15s %016llx %-13s %016llx\n", 3329 "dr6:", save->dr6, "dr7:", save->dr7); 3330 pr_err("%-15s %016llx %-13s %016llx\n", 3331 "rip:", save->rip, "rflags:", save->rflags); 3332 pr_err("%-15s %016llx %-13s %016llx\n", 3333 "rsp:", save->rsp, "rax:", save->rax); 3334 pr_err("%-15s %016llx %-13s %016llx\n", 3335 "star:", save01->star, "lstar:", save01->lstar); 3336 pr_err("%-15s %016llx %-13s %016llx\n", 3337 "cstar:", save01->cstar, "sfmask:", save01->sfmask); 3338 pr_err("%-15s %016llx %-13s %016llx\n", 3339 "kernel_gs_base:", save01->kernel_gs_base, 3340 "sysenter_cs:", save01->sysenter_cs); 3341 pr_err("%-15s %016llx %-13s %016llx\n", 3342 "sysenter_esp:", save01->sysenter_esp, 3343 "sysenter_eip:", save01->sysenter_eip); 3344 pr_err("%-15s %016llx %-13s %016llx\n", 3345 "gpat:", save->g_pat, "dbgctl:", save->dbgctl); 3346 pr_err("%-15s %016llx %-13s %016llx\n", 3347 "br_from:", save->br_from, "br_to:", save->br_to); 3348 pr_err("%-15s %016llx %-13s %016llx\n", 3349 "excp_from:", save->last_excp_from, 3350 "excp_to:", save->last_excp_to); 3351 } 3352 3353 static bool svm_check_exit_valid(u64 exit_code) 3354 { 3355 return (exit_code < ARRAY_SIZE(svm_exit_handlers) && 3356 svm_exit_handlers[exit_code]); 3357 } 3358 3359 static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code) 3360 { 3361 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code); 3362 dump_vmcb(vcpu); 3363 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3364 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; 3365 vcpu->run->internal.ndata = 2; 3366 vcpu->run->internal.data[0] = exit_code; 3367 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; 3368 return 0; 3369 } 3370 3371 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code) 3372 { 3373 if (!svm_check_exit_valid(exit_code)) 3374 return svm_handle_invalid_exit(vcpu, exit_code); 3375 3376 #ifdef CONFIG_RETPOLINE 3377 if (exit_code == SVM_EXIT_MSR) 3378 return msr_interception(vcpu); 3379 else if (exit_code == SVM_EXIT_VINTR) 3380 return interrupt_window_interception(vcpu); 3381 else if (exit_code == SVM_EXIT_INTR) 3382 return intr_interception(vcpu); 3383 else if (exit_code == SVM_EXIT_HLT) 3384 return kvm_emulate_halt(vcpu); 3385 
else if (exit_code == SVM_EXIT_NPF) 3386 return npf_interception(vcpu); 3387 #endif 3388 return svm_exit_handlers[exit_code](vcpu); 3389 } 3390 3391 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, 3392 u64 *info1, u64 *info2, 3393 u32 *intr_info, u32 *error_code) 3394 { 3395 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; 3396 3397 *reason = control->exit_code; 3398 *info1 = control->exit_info_1; 3399 *info2 = control->exit_info_2; 3400 *intr_info = control->exit_int_info; 3401 if ((*intr_info & SVM_EXITINTINFO_VALID) && 3402 (*intr_info & SVM_EXITINTINFO_VALID_ERR)) 3403 *error_code = control->exit_int_info_err; 3404 else 3405 *error_code = 0; 3406 } 3407 3408 static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) 3409 { 3410 struct vcpu_svm *svm = to_svm(vcpu); 3411 struct kvm_run *kvm_run = vcpu->run; 3412 u32 exit_code = svm->vmcb->control.exit_code; 3413 3414 /* SEV-ES guests must use the CR write traps to track CR registers. */ 3415 if (!sev_es_guest(vcpu->kvm)) { 3416 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) 3417 vcpu->arch.cr0 = svm->vmcb->save.cr0; 3418 if (npt_enabled) 3419 vcpu->arch.cr3 = svm->vmcb->save.cr3; 3420 } 3421 3422 if (is_guest_mode(vcpu)) { 3423 int vmexit; 3424 3425 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM); 3426 3427 vmexit = nested_svm_exit_special(svm); 3428 3429 if (vmexit == NESTED_EXIT_CONTINUE) 3430 vmexit = nested_svm_exit_handled(svm); 3431 3432 if (vmexit == NESTED_EXIT_DONE) 3433 return 1; 3434 } 3435 3436 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { 3437 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; 3438 kvm_run->fail_entry.hardware_entry_failure_reason 3439 = svm->vmcb->control.exit_code; 3440 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 3441 dump_vmcb(vcpu); 3442 return 0; 3443 } 3444 3445 if (exit_fastpath != EXIT_FASTPATH_NONE) 3446 return 1; 3447 3448 return svm_invoke_exit_handler(vcpu, exit_code); 3449 } 3450 3451 static void pre_svm_run(struct kvm_vcpu *vcpu) 3452 { 3453 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); 3454 struct vcpu_svm *svm = to_svm(vcpu); 3455 3456 /* 3457 * If the previous vmrun of the vmcb occurred on a different physical 3458 * cpu, then mark the vmcb dirty and assign a new asid. Hardware's 3459 * vmcb clean bits are per logical CPU, as are KVM's asid assignments. 
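* Zeroing asid_generation guarantees a mismatch in the check below, so a fresh ASID is assigned via new_asid() for non-SEV guests; SEV guests take the pre_sev_run() path and keep their fixed ASID.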
3460 */ 3461 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { 3462 svm->current_vmcb->asid_generation = 0; 3463 vmcb_mark_all_dirty(svm->vmcb); 3464 svm->current_vmcb->cpu = vcpu->cpu; 3465 } 3466 3467 if (sev_guest(vcpu->kvm)) 3468 return pre_sev_run(svm, vcpu->cpu); 3469 3470 /* FIXME: handle wraparound of asid_generation */ 3471 if (svm->current_vmcb->asid_generation != sd->asid_generation) 3472 new_asid(svm, sd); 3473 } 3474 3475 static void svm_inject_nmi(struct kvm_vcpu *vcpu) 3476 { 3477 struct vcpu_svm *svm = to_svm(vcpu); 3478 3479 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; 3480 3481 if (svm->nmi_l1_to_l2) 3482 return; 3483 3484 svm->nmi_masked = true; 3485 svm_set_iret_intercept(svm); 3486 ++vcpu->stat.nmi_injections; 3487 } 3488 3489 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu) 3490 { 3491 struct vcpu_svm *svm = to_svm(vcpu); 3492 3493 if (!is_vnmi_enabled(svm)) 3494 return false; 3495 3496 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); 3497 } 3498 3499 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu) 3500 { 3501 struct vcpu_svm *svm = to_svm(vcpu); 3502 3503 if (!is_vnmi_enabled(svm)) 3504 return false; 3505 3506 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK) 3507 return false; 3508 3509 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK; 3510 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); 3511 3512 /* 3513 * Because the pending NMI is serviced by hardware, KVM can't know when 3514 * the NMI is "injected", but for all intents and purposes, passing the 3515 * NMI off to hardware counts as injection. 3516 */ 3517 ++vcpu->stat.nmi_injections; 3518 3519 return true; 3520 } 3521 3522 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected) 3523 { 3524 struct vcpu_svm *svm = to_svm(vcpu); 3525 u32 type; 3526 3527 if (vcpu->arch.interrupt.soft) { 3528 if (svm_update_soft_interrupt_rip(vcpu)) 3529 return; 3530 3531 type = SVM_EVTINJ_TYPE_SOFT; 3532 } else { 3533 type = SVM_EVTINJ_TYPE_INTR; 3534 } 3535 3536 trace_kvm_inj_virq(vcpu->arch.interrupt.nr, 3537 vcpu->arch.interrupt.soft, reinjected); 3538 ++vcpu->stat.irq_injections; 3539 3540 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | 3541 SVM_EVTINJ_VALID | type; 3542 } 3543 3544 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode, 3545 int trig_mode, int vector) 3546 { 3547 /* 3548 * apic->apicv_active must be read after vcpu->mode. 3549 * Pairs with smp_store_release in vcpu_enter_guest. 3550 */ 3551 bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE); 3552 3553 /* Note, this is called iff the local APIC is in-kernel. */ 3554 if (!READ_ONCE(vcpu->arch.apic->apicv_active)) { 3555 /* Process the interrupt via kvm_check_and_inject_events(). */ 3556 kvm_make_request(KVM_REQ_EVENT, vcpu); 3557 kvm_vcpu_kick(vcpu); 3558 return; 3559 } 3560 3561 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector); 3562 if (in_guest_mode) { 3563 /* 3564 * Signal the doorbell to tell hardware to inject the IRQ. If 3565 * the vCPU exits the guest before the doorbell chimes, hardware 3566 * will automatically process AVIC interrupts at the next VMRUN. 3567 */ 3568 avic_ring_doorbell(vcpu); 3569 } else { 3570 /* 3571 * Wake the vCPU if it was blocking. KVM will then detect the 3572 * pending IRQ when checking if the vCPU has a wake event. 
3573 */ 3574 kvm_vcpu_wake_up(vcpu); 3575 } 3576 } 3577 3578 static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, 3579 int trig_mode, int vector) 3580 { 3581 kvm_lapic_set_irr(vector, apic); 3582 3583 /* 3584 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in 3585 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before 3586 * the read of guest_mode. This guarantees that either VMRUN will see 3587 * and process the new vIRR entry, or that svm_complete_interrupt_delivery 3588 * will signal the doorbell if the CPU has already entered the guest. 3589 */ 3590 smp_mb__after_atomic(); 3591 svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector); 3592 } 3593 3594 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 3595 { 3596 struct vcpu_svm *svm = to_svm(vcpu); 3597 3598 /* 3599 * SEV-ES guests must always keep the CR intercepts cleared. CR 3600 * tracking is done using the CR write traps. 3601 */ 3602 if (sev_es_guest(vcpu->kvm)) 3603 return; 3604 3605 if (nested_svm_virtualize_tpr(vcpu)) 3606 return; 3607 3608 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); 3609 3610 if (irr == -1) 3611 return; 3612 3613 if (tpr >= irr) 3614 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 3615 } 3616 3617 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) 3618 { 3619 struct vcpu_svm *svm = to_svm(vcpu); 3620 3621 if (is_vnmi_enabled(svm)) 3622 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK; 3623 else 3624 return svm->nmi_masked; 3625 } 3626 3627 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 3628 { 3629 struct vcpu_svm *svm = to_svm(vcpu); 3630 3631 if (is_vnmi_enabled(svm)) { 3632 if (masked) 3633 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK; 3634 else 3635 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK; 3636 3637 } else { 3638 svm->nmi_masked = masked; 3639 if (masked) 3640 svm_set_iret_intercept(svm); 3641 else 3642 svm_clr_iret_intercept(svm); 3643 } 3644 } 3645 3646 bool svm_nmi_blocked(struct kvm_vcpu *vcpu) 3647 { 3648 struct vcpu_svm *svm = to_svm(vcpu); 3649 struct vmcb *vmcb = svm->vmcb; 3650 3651 if (!gif_set(svm)) 3652 return true; 3653 3654 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) 3655 return false; 3656 3657 if (svm_get_nmi_mask(vcpu)) 3658 return true; 3659 3660 return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK; 3661 } 3662 3663 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 3664 { 3665 struct vcpu_svm *svm = to_svm(vcpu); 3666 if (svm->nested.nested_run_pending) 3667 return -EBUSY; 3668 3669 if (svm_nmi_blocked(vcpu)) 3670 return 0; 3671 3672 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ 3673 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) 3674 return -EBUSY; 3675 return 1; 3676 } 3677 3678 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) 3679 { 3680 struct vcpu_svm *svm = to_svm(vcpu); 3681 struct vmcb *vmcb = svm->vmcb; 3682 3683 if (!gif_set(svm)) 3684 return true; 3685 3686 if (is_guest_mode(vcpu)) { 3687 /* As long as interrupts are being delivered... */ 3688 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) 3689 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) 3690 : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) 3691 return true; 3692 3693 /* ... 
vmexits aren't blocked by the interrupt shadow */ 3694 if (nested_exit_on_intr(svm)) 3695 return false; 3696 } else { 3697 if (!svm_get_if_flag(vcpu)) 3698 return true; 3699 } 3700 3701 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); 3702 } 3703 3704 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) 3705 { 3706 struct vcpu_svm *svm = to_svm(vcpu); 3707 3708 if (svm->nested.nested_run_pending) 3709 return -EBUSY; 3710 3711 if (svm_interrupt_blocked(vcpu)) 3712 return 0; 3713 3714 /* 3715 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, 3716 * e.g. if the IRQ arrived asynchronously after checking nested events. 3717 */ 3718 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) 3719 return -EBUSY; 3720 3721 return 1; 3722 } 3723 3724 static void svm_enable_irq_window(struct kvm_vcpu *vcpu) 3725 { 3726 struct vcpu_svm *svm = to_svm(vcpu); 3727 3728 /* 3729 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes 3730 * 1, because that's a separate STGI/VMRUN intercept. The next time we 3731 * get that intercept, this function will be called again though and 3732 * we'll get the vintr intercept. However, if the vGIF feature is 3733 * enabled, the STGI interception will not occur. Enable the irq 3734 * window under the assumption that the hardware will set the GIF. 3735 */ 3736 if (vgif || gif_set(svm)) { 3737 /* 3738 * IRQ window is not needed when AVIC is enabled, 3739 * unless we have a pending ExtINT, since it cannot be injected 3740 * via AVIC. In that case, KVM needs to temporarily disable AVIC 3741 * and fall back to injecting the IRQ via V_IRQ. 3742 * 3743 * If running nested, AVIC is already locally inhibited 3744 * on this vCPU, therefore there is no need to request 3745 * the VM wide AVIC inhibition. 3746 */ 3747 if (!is_guest_mode(vcpu)) 3748 kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); 3749 3750 svm_set_vintr(svm); 3751 } 3752 } 3753 3754 static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) 3755 { 3756 struct vcpu_svm *svm = to_svm(vcpu); 3757 3758 /* 3759 * KVM should never request an NMI window when vNMI is enabled, as KVM 3760 * allows at most one to-be-injected NMI and one pending NMI, i.e. if 3761 * two NMIs arrive simultaneously, KVM will inject one and set 3762 * V_NMI_PENDING for the other. WARN, but continue with the standard 3763 * single-step approach to try and salvage the pending NMI. 3764 */ 3765 WARN_ON_ONCE(is_vnmi_enabled(svm)); 3766 3767 if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion) 3768 return; /* IRET will cause a vm exit */ 3769 3770 if (!gif_set(svm)) { 3771 if (vgif) 3772 svm_set_intercept(svm, INTERCEPT_STGI); 3773 return; /* STGI will cause a vm exit */ 3774 } 3775 3776 /* 3777 * Something prevents NMI from being injected. Single step over possible 3778 * problem (IRET or exception injection or interrupt shadow) 3779 */ 3780 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); 3781 svm->nmi_singlestep = true; 3782 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); 3783 } 3784 3785 static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu) 3786 { 3787 struct vcpu_svm *svm = to_svm(vcpu); 3788 3789 /* 3790 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries. 3791 * A TLB flush for the current ASID flushes both "host" and "guest" TLB 3792 * entries, and thus is a superset of Hyper-V's fine grained flushing.
3793 */ 3794 kvm_hv_vcpu_purge_flush_tlb(vcpu); 3795 3796 /* 3797 * Flush only the current ASID even if the TLB flush was invoked via 3798 * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all 3799 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and 3800 * unconditionally does a TLB flush on both nested VM-Enter and nested 3801 * VM-Exit (via kvm_mmu_reset_context()). 3802 */ 3803 if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) 3804 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; 3805 else 3806 svm->current_vmcb->asid_generation--; 3807 } 3808 3809 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) 3810 { 3811 hpa_t root_tdp = vcpu->arch.mmu->root.hpa; 3812 3813 /* 3814 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly 3815 * flush the NPT mappings via hypercall as flushing the ASID only 3816 * affects virtual to physical mappings, it does not invalidate guest 3817 * physical to host physical mappings. 3818 */ 3819 if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp)) 3820 hyperv_flush_guest_mapping(root_tdp); 3821 3822 svm_flush_tlb_asid(vcpu); 3823 } 3824 3825 static void svm_flush_tlb_all(struct kvm_vcpu *vcpu) 3826 { 3827 /* 3828 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB 3829 * flushes should be routed to hv_flush_remote_tlbs() without requesting 3830 * a "regular" remote flush. Reaching this point means either there's 3831 * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of 3832 * which might be fatal to the guest. Yell, but try to recover. 3833 */ 3834 if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu))) 3835 hv_flush_remote_tlbs(vcpu->kvm); 3836 3837 svm_flush_tlb_asid(vcpu); 3838 } 3839 3840 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) 3841 { 3842 struct vcpu_svm *svm = to_svm(vcpu); 3843 3844 invlpga(gva, svm->vmcb->control.asid); 3845 } 3846 3847 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) 3848 { 3849 struct vcpu_svm *svm = to_svm(vcpu); 3850 3851 if (nested_svm_virtualize_tpr(vcpu)) 3852 return; 3853 3854 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { 3855 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; 3856 kvm_set_cr8(vcpu, cr8); 3857 } 3858 } 3859 3860 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) 3861 { 3862 struct vcpu_svm *svm = to_svm(vcpu); 3863 u64 cr8; 3864 3865 if (nested_svm_virtualize_tpr(vcpu) || 3866 kvm_vcpu_apicv_active(vcpu)) 3867 return; 3868 3869 cr8 = kvm_get_cr8(vcpu); 3870 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; 3871 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; 3872 } 3873 3874 static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector, 3875 int type) 3876 { 3877 bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT); 3878 bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT); 3879 struct vcpu_svm *svm = to_svm(vcpu); 3880 3881 /* 3882 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's 3883 * associated with the original soft exception/interrupt. next_rip is 3884 * cleared on all exits that can occur while vectoring an event, so KVM 3885 * needs to manually set next_rip for re-injection. Unlike the !nrips 3886 * case below, this needs to be done if and only if KVM is re-injecting 3887 * the same event, i.e. if the event is a soft exception/interrupt, 3888 * otherwise next_rip is unused on VMRUN. 
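 *
 * Illustrative example (hypothetical addresses): KVM emulates an INT3 at
 * RIP 0x1000 and queues a soft #BP, snapshotting soft_int_old_rip = 0x1000
 * and soft_int_next_rip = 0x1001. If vectoring that #BP is interrupted by a
 * VM-Exit (e.g. a #NPF during IDT delivery), the exit clears next_rip, so
 * the check below restores it from soft_int_next_rip before the #BP is
 * re-injected.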
3889 */ 3890 if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) && 3891 kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase)) 3892 svm->vmcb->control.next_rip = svm->soft_int_next_rip; 3893 /* 3894 * If NRIPS isn't enabled, KVM must manually advance RIP prior to 3895 * injecting the soft exception/interrupt. That advancement needs to 3896 * be unwound if vectoring didn't complete. Note, the new event may 3897 * not be the injected event, e.g. if KVM injected an INTn, the INTn 3898 * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will 3899 * be the reported vectored event, but RIP still needs to be unwound. 3900 */ 3901 else if (!nrips && (is_soft || is_exception) && 3902 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase)) 3903 kvm_rip_write(vcpu, svm->soft_int_old_rip); 3904 } 3905 3906 static void svm_complete_interrupts(struct kvm_vcpu *vcpu) 3907 { 3908 struct vcpu_svm *svm = to_svm(vcpu); 3909 u8 vector; 3910 int type; 3911 u32 exitintinfo = svm->vmcb->control.exit_int_info; 3912 bool nmi_l1_to_l2 = svm->nmi_l1_to_l2; 3913 bool soft_int_injected = svm->soft_int_injected; 3914 3915 svm->nmi_l1_to_l2 = false; 3916 svm->soft_int_injected = false; 3917 3918 /* 3919 * If we've made progress since setting HF_IRET_MASK, we've 3920 * executed an IRET and can allow NMI injection. 3921 */ 3922 if (svm->awaiting_iret_completion && 3923 (sev_es_guest(vcpu->kvm) || 3924 kvm_rip_read(vcpu) != svm->nmi_iret_rip)) { 3925 svm->awaiting_iret_completion = false; 3926 svm->nmi_masked = false; 3927 kvm_make_request(KVM_REQ_EVENT, vcpu); 3928 } 3929 3930 vcpu->arch.nmi_injected = false; 3931 kvm_clear_exception_queue(vcpu); 3932 kvm_clear_interrupt_queue(vcpu); 3933 3934 if (!(exitintinfo & SVM_EXITINTINFO_VALID)) 3935 return; 3936 3937 kvm_make_request(KVM_REQ_EVENT, vcpu); 3938 3939 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; 3940 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; 3941 3942 if (soft_int_injected) 3943 svm_complete_soft_interrupt(vcpu, vector, type); 3944 3945 switch (type) { 3946 case SVM_EXITINTINFO_TYPE_NMI: 3947 vcpu->arch.nmi_injected = true; 3948 svm->nmi_l1_to_l2 = nmi_l1_to_l2; 3949 break; 3950 case SVM_EXITINTINFO_TYPE_EXEPT: 3951 /* 3952 * Never re-inject a #VC exception. 
3953 */ 3954 if (vector == X86_TRAP_VC) 3955 break; 3956 3957 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { 3958 u32 err = svm->vmcb->control.exit_int_info_err; 3959 kvm_requeue_exception_e(vcpu, vector, err); 3960 3961 } else 3962 kvm_requeue_exception(vcpu, vector); 3963 break; 3964 case SVM_EXITINTINFO_TYPE_INTR: 3965 kvm_queue_interrupt(vcpu, vector, false); 3966 break; 3967 case SVM_EXITINTINFO_TYPE_SOFT: 3968 kvm_queue_interrupt(vcpu, vector, true); 3969 break; 3970 default: 3971 break; 3972 } 3973 3974 } 3975 3976 static void svm_cancel_injection(struct kvm_vcpu *vcpu) 3977 { 3978 struct vcpu_svm *svm = to_svm(vcpu); 3979 struct vmcb_control_area *control = &svm->vmcb->control; 3980 3981 control->exit_int_info = control->event_inj; 3982 control->exit_int_info_err = control->event_inj_err; 3983 control->event_inj = 0; 3984 svm_complete_interrupts(vcpu); 3985 } 3986 3987 static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) 3988 { 3989 return 1; 3990 } 3991 3992 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) 3993 { 3994 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && 3995 to_svm(vcpu)->vmcb->control.exit_info_1) 3996 return handle_fastpath_set_msr_irqoff(vcpu); 3997 3998 return EXIT_FASTPATH_NONE; 3999 } 4000 4001 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted) 4002 { 4003 struct vcpu_svm *svm = to_svm(vcpu); 4004 4005 guest_state_enter_irqoff(); 4006 4007 if (sev_es_guest(vcpu->kvm)) 4008 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); 4009 else 4010 __svm_vcpu_run(svm, spec_ctrl_intercepted); 4011 4012 guest_state_exit_irqoff(); 4013 } 4014 4015 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) 4016 { 4017 struct vcpu_svm *svm = to_svm(vcpu); 4018 bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL); 4019 4020 trace_kvm_entry(vcpu); 4021 4022 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 4023 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 4024 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 4025 4026 /* 4027 * Disable singlestep if we're injecting an interrupt/exception. 4028 * We don't want our modified rflags to be pushed on the stack where 4029 * we might not be able to easily reset them if we disabled NMI 4030 * singlestep later. 4031 */ 4032 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { 4033 /* 4034 * Event injection happens before external interrupts cause a 4035 * vmexit and interrupts are disabled here, so smp_send_reschedule 4036 * is enough to force an immediate vmexit. 4037 */ 4038 disable_nmi_singlestep(svm); 4039 smp_send_reschedule(vcpu->cpu); 4040 } 4041 4042 pre_svm_run(vcpu); 4043 4044 sync_lapic_to_cr8(vcpu); 4045 4046 if (unlikely(svm->asid != svm->vmcb->control.asid)) { 4047 svm->vmcb->control.asid = svm->asid; 4048 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); 4049 } 4050 svm->vmcb->save.cr2 = vcpu->arch.cr2; 4051 4052 svm_hv_update_vp_id(svm->vmcb, vcpu); 4053 4054 /* 4055 * Run with all-zero DR6 unless needed, so that we can get the exact cause 4056 * of a #DB. 4057 */ 4058 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) 4059 svm_set_dr6(svm, vcpu->arch.dr6); 4060 else 4061 svm_set_dr6(svm, DR6_ACTIVE_LOW); 4062 4063 clgi(); 4064 kvm_load_guest_xsave_state(vcpu); 4065 4066 kvm_wait_lapic_expire(vcpu); 4067 4068 /* 4069 * If this vCPU has touched SPEC_CTRL, restore the guest's value if 4070 * it's non-zero. 
Since vmentry is serialising on affected CPUs, there 4071 * is no need to worry about the conditional branch over the wrmsr 4072 * being speculatively taken. 4073 */ 4074 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 4075 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl); 4076 4077 svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted); 4078 4079 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) 4080 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl); 4081 4082 if (!sev_es_guest(vcpu->kvm)) { 4083 vcpu->arch.cr2 = svm->vmcb->save.cr2; 4084 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 4085 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 4086 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 4087 } 4088 vcpu->arch.regs_dirty = 0; 4089 4090 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 4091 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI); 4092 4093 kvm_load_host_xsave_state(vcpu); 4094 stgi(); 4095 4096 /* Any pending NMI will happen here */ 4097 4098 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 4099 kvm_after_interrupt(vcpu); 4100 4101 sync_cr8_to_lapic(vcpu); 4102 4103 svm->next_rip = 0; 4104 if (is_guest_mode(vcpu)) { 4105 nested_sync_control_from_vmcb02(svm); 4106 4107 /* Track VMRUNs that have made past consistency checking */ 4108 if (svm->nested.nested_run_pending && 4109 svm->vmcb->control.exit_code != SVM_EXIT_ERR) 4110 ++vcpu->stat.nested_run; 4111 4112 svm->nested.nested_run_pending = 0; 4113 } 4114 4115 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; 4116 vmcb_mark_all_clean(svm->vmcb); 4117 4118 /* if exit due to PF check for async PF */ 4119 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) 4120 vcpu->arch.apf.host_apf_flags = 4121 kvm_read_and_reset_apf_flags(); 4122 4123 vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; 4124 4125 /* 4126 * We need to handle MC intercepts here before the vcpu has a chance to 4127 * change the physical cpu 4128 */ 4129 if (unlikely(svm->vmcb->control.exit_code == 4130 SVM_EXIT_EXCP_BASE + MC_VECTOR)) 4131 svm_handle_mce(vcpu); 4132 4133 trace_kvm_exit(vcpu, KVM_ISA_SVM); 4134 4135 svm_complete_interrupts(vcpu); 4136 4137 if (is_guest_mode(vcpu)) 4138 return EXIT_FASTPATH_NONE; 4139 4140 return svm_exit_handlers_fastpath(vcpu); 4141 } 4142 4143 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, 4144 int root_level) 4145 { 4146 struct vcpu_svm *svm = to_svm(vcpu); 4147 unsigned long cr3; 4148 4149 if (npt_enabled) { 4150 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); 4151 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); 4152 4153 hv_track_root_tdp(vcpu, root_hpa); 4154 4155 cr3 = vcpu->arch.cr3; 4156 } else if (root_level >= PT64_ROOT_4LEVEL) { 4157 cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); 4158 } else { 4159 /* PCID in the guest should be impossible with a 32-bit MMU. */ 4160 WARN_ON_ONCE(kvm_get_active_pcid(vcpu)); 4161 cr3 = root_hpa; 4162 } 4163 4164 svm->vmcb->save.cr3 = cr3; 4165 vmcb_mark_dirty(svm->vmcb, VMCB_CR); 4166 } 4167 4168 static void 4169 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 4170 { 4171 /* 4172 * Patch in the VMMCALL instruction: 4173 */ 4174 hypercall[0] = 0x0f; 4175 hypercall[1] = 0x01; 4176 hypercall[2] = 0xd9; 4177 } 4178 4179 /* 4180 * The kvm parameter can be NULL (module initialization, or invocation before 4181 * VM creation). Be sure to check the kvm parameter before using it. 
4182 */ 4183 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) 4184 { 4185 switch (index) { 4186 case MSR_IA32_MCG_EXT_CTL: 4187 case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: 4188 return false; 4189 case MSR_IA32_SMBASE: 4190 if (!IS_ENABLED(CONFIG_KVM_SMM)) 4191 return false; 4192 /* SEV-ES guests do not support SMM, so report false */ 4193 if (kvm && sev_es_guest(kvm)) 4194 return false; 4195 break; 4196 default: 4197 break; 4198 } 4199 4200 return true; 4201 } 4202 4203 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) 4204 { 4205 struct vcpu_svm *svm = to_svm(vcpu); 4206 struct kvm_cpuid_entry2 *best; 4207 4208 vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && 4209 boot_cpu_has(X86_FEATURE_XSAVE) && 4210 boot_cpu_has(X86_FEATURE_XSAVES); 4211 4212 /* Update nrips enabled cache */ 4213 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && 4214 guest_cpuid_has(vcpu, X86_FEATURE_NRIPS); 4215 4216 svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR); 4217 svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV); 4218 4219 svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); 4220 4221 svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) && 4222 guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER); 4223 4224 svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) && 4225 guest_cpuid_has(vcpu, X86_FEATURE_PFTHRESHOLD); 4226 4227 svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF); 4228 4229 svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_VNMI); 4230 4231 svm_recalc_instruction_intercepts(vcpu, svm); 4232 4233 if (boot_cpu_has(X86_FEATURE_IBPB)) 4234 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 4235 !!guest_has_pred_cmd_msr(vcpu)); 4236 4237 if (boot_cpu_has(X86_FEATURE_FLUSH_L1D)) 4238 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0, 4239 !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D)); 4240 4241 /* For sev guests, the memory encryption bit is not reserved in CR3. 
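As a worked example with an illustrative value: if CPUID 0x8000001F.EBX[5:0] reports the C-bit at position 47, the adjustment below clears bit 47 from reserved_gpa_bits, so guest CR3 and page-table values with the encryption bit set are not rejected as having reserved bits set (the actual bit position is CPU-specific).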
*/ 4242 if (sev_guest(vcpu->kvm)) { 4243 best = kvm_find_cpuid_entry(vcpu, 0x8000001F); 4244 if (best) 4245 vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); 4246 } 4247 4248 init_vmcb_after_set_cpuid(vcpu); 4249 } 4250 4251 static bool svm_has_wbinvd_exit(void) 4252 { 4253 return true; 4254 } 4255 4256 #define PRE_EX(exit) { .exit_code = (exit), \ 4257 .stage = X86_ICPT_PRE_EXCEPT, } 4258 #define POST_EX(exit) { .exit_code = (exit), \ 4259 .stage = X86_ICPT_POST_EXCEPT, } 4260 #define POST_MEM(exit) { .exit_code = (exit), \ 4261 .stage = X86_ICPT_POST_MEMACCESS, } 4262 4263 static const struct __x86_intercept { 4264 u32 exit_code; 4265 enum x86_intercept_stage stage; 4266 } x86_intercept_map[] = { 4267 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), 4268 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), 4269 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), 4270 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), 4271 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), 4272 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), 4273 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), 4274 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), 4275 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), 4276 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), 4277 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), 4278 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), 4279 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), 4280 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), 4281 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), 4282 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), 4283 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), 4284 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), 4285 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), 4286 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), 4287 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), 4288 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), 4289 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), 4290 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), 4291 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), 4292 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), 4293 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), 4294 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), 4295 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), 4296 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), 4297 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), 4298 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), 4299 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), 4300 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), 4301 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), 4302 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), 4303 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), 4304 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), 4305 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), 4306 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), 4307 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), 4308 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), 4309 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), 4310 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), 4311 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), 4312 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), 4313 [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), 4314 }; 4315 4316 #undef PRE_EX 4317 #undef POST_EX 4318 #undef POST_MEM 4319 4320 static int svm_check_intercept(struct kvm_vcpu *vcpu, 4321 struct x86_instruction_info *info, 4322 enum x86_intercept_stage stage, 4323 struct x86_exception *exception) 4324 { 
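	/*
	 * Flow sketch, using an illustrative instruction: when KVM emulates an
	 * instruction on behalf of a nested (L2) guest, the emulator passes in
	 * an x86_instruction_info. x86_intercept_map above translates it to the
	 * equivalent SVM exit code, the switch below fills in exit_info as
	 * hardware would (e.g. a 1-byte OUT to port 0x70 yields SVM_EXIT_IOIO
	 * with the port in bits 31:16 of exit_info_1, the size bits set for one
	 * byte and the type bit clear for output), and nested_svm_exit_handled()
	 * then decides whether the exit is reflected to L1.
	 */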
4325 struct vcpu_svm *svm = to_svm(vcpu); 4326 int vmexit, ret = X86EMUL_CONTINUE; 4327 struct __x86_intercept icpt_info; 4328 struct vmcb *vmcb = svm->vmcb; 4329 4330 if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) 4331 goto out; 4332 4333 icpt_info = x86_intercept_map[info->intercept]; 4334 4335 if (stage != icpt_info.stage) 4336 goto out; 4337 4338 switch (icpt_info.exit_code) { 4339 case SVM_EXIT_READ_CR0: 4340 if (info->intercept == x86_intercept_cr_read) 4341 icpt_info.exit_code += info->modrm_reg; 4342 break; 4343 case SVM_EXIT_WRITE_CR0: { 4344 unsigned long cr0, val; 4345 4346 if (info->intercept == x86_intercept_cr_write) 4347 icpt_info.exit_code += info->modrm_reg; 4348 4349 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || 4350 info->intercept == x86_intercept_clts) 4351 break; 4352 4353 if (!(vmcb12_is_intercept(&svm->nested.ctl, 4354 INTERCEPT_SELECTIVE_CR0))) 4355 break; 4356 4357 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; 4358 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; 4359 4360 if (info->intercept == x86_intercept_lmsw) { 4361 cr0 &= 0xfUL; 4362 val &= 0xfUL; 4363 /* lmsw can't clear PE - catch this here */ 4364 if (cr0 & X86_CR0_PE) 4365 val |= X86_CR0_PE; 4366 } 4367 4368 if (cr0 ^ val) 4369 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; 4370 4371 break; 4372 } 4373 case SVM_EXIT_READ_DR0: 4374 case SVM_EXIT_WRITE_DR0: 4375 icpt_info.exit_code += info->modrm_reg; 4376 break; 4377 case SVM_EXIT_MSR: 4378 if (info->intercept == x86_intercept_wrmsr) 4379 vmcb->control.exit_info_1 = 1; 4380 else 4381 vmcb->control.exit_info_1 = 0; 4382 break; 4383 case SVM_EXIT_PAUSE: 4384 /* 4385 * We get this for NOP only, but pause 4386 * is rep not, check this here 4387 */ 4388 if (info->rep_prefix != REPE_PREFIX) 4389 goto out; 4390 break; 4391 case SVM_EXIT_IOIO: { 4392 u64 exit_info; 4393 u32 bytes; 4394 4395 if (info->intercept == x86_intercept_in || 4396 info->intercept == x86_intercept_ins) { 4397 exit_info = ((info->src_val & 0xffff) << 16) | 4398 SVM_IOIO_TYPE_MASK; 4399 bytes = info->dst_bytes; 4400 } else { 4401 exit_info = (info->dst_val & 0xffff) << 16; 4402 bytes = info->src_bytes; 4403 } 4404 4405 if (info->intercept == x86_intercept_outs || 4406 info->intercept == x86_intercept_ins) 4407 exit_info |= SVM_IOIO_STR_MASK; 4408 4409 if (info->rep_prefix) 4410 exit_info |= SVM_IOIO_REP_MASK; 4411 4412 bytes = min(bytes, 4u); 4413 4414 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; 4415 4416 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); 4417 4418 vmcb->control.exit_info_1 = exit_info; 4419 vmcb->control.exit_info_2 = info->next_rip; 4420 4421 break; 4422 } 4423 default: 4424 break; 4425 } 4426 4427 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ 4428 if (static_cpu_has(X86_FEATURE_NRIPS)) 4429 vmcb->control.next_rip = info->next_rip; 4430 vmcb->control.exit_code = icpt_info.exit_code; 4431 vmexit = nested_svm_exit_handled(svm); 4432 4433 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED 4434 : X86EMUL_CONTINUE; 4435 4436 out: 4437 return ret; 4438 } 4439 4440 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) 4441 { 4442 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR) 4443 vcpu->arch.at_instruction_boundary = true; 4444 } 4445 4446 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) 4447 { 4448 if (!kvm_pause_in_guest(vcpu->kvm)) 4449 shrink_ple_window(vcpu); 4450 } 4451 4452 static void svm_setup_mce(struct kvm_vcpu *vcpu) 4453 { 4454 /* [63:9] are reserved. 
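Masking with 0x1ff therefore keeps only MCG_CAP.Count (bits 7:0) and MCG_CTL_P (bit 8).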
*/ 4455 vcpu->arch.mcg_cap &= 0x1ff; 4456 } 4457 4458 #ifdef CONFIG_KVM_SMM 4459 bool svm_smi_blocked(struct kvm_vcpu *vcpu) 4460 { 4461 struct vcpu_svm *svm = to_svm(vcpu); 4462 4463 /* Per APM Vol.2 15.22.2 "Response to SMI" */ 4464 if (!gif_set(svm)) 4465 return true; 4466 4467 return is_smm(vcpu); 4468 } 4469 4470 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4471 { 4472 struct vcpu_svm *svm = to_svm(vcpu); 4473 if (svm->nested.nested_run_pending) 4474 return -EBUSY; 4475 4476 if (svm_smi_blocked(vcpu)) 4477 return 0; 4478 4479 /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */ 4480 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) 4481 return -EBUSY; 4482 4483 return 1; 4484 } 4485 4486 static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) 4487 { 4488 struct vcpu_svm *svm = to_svm(vcpu); 4489 struct kvm_host_map map_save; 4490 int ret; 4491 4492 if (!is_guest_mode(vcpu)) 4493 return 0; 4494 4495 /* 4496 * 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is 4497 * responsible for ensuring nested SVM and SMIs are mutually exclusive. 4498 */ 4499 4500 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) 4501 return 1; 4502 4503 smram->smram64.svm_guest_flag = 1; 4504 smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; 4505 4506 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 4507 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 4508 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 4509 4510 ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW); 4511 if (ret) 4512 return ret; 4513 4514 /* 4515 * KVM uses VMCB01 to store L1 host state while L2 runs but 4516 * VMCB01 is going to be used during SMM and thus the state will 4517 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save 4518 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the 4519 * format of the area is identical to the guest save area offset 4520 * by 0x400 (matches the offset of 'struct vmcb_save_area' 4521 * within 'struct vmcb'). Note: HSAVE area may also be used by 4522 * L1 hypervisor to save additional host context (e.g. KVM does 4523 * that, see svm_prepare_switch_to_guest()) which must be 4524 * preserved. 4525 */ 4526 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) 4527 return 1; 4528 4529 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); 4530 4531 svm_copy_vmrun_state(map_save.hva + 0x400, 4532 &svm->vmcb01.ptr->save); 4533 4534 kvm_vcpu_unmap(vcpu, &map_save, true); 4535 return 0; 4536 } 4537 4538 static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) 4539 { 4540 struct vcpu_svm *svm = to_svm(vcpu); 4541 struct kvm_host_map map, map_save; 4542 struct vmcb *vmcb12; 4543 int ret; 4544 4545 const struct kvm_smram_state_64 *smram64 = &smram->smram64; 4546 4547 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) 4548 return 0; 4549 4550 /* Non-zero if SMI arrived while vCPU was in guest mode.
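The svm_guest_flag and svm_guest_vmcb_gpa fields checked here are the ones written by svm_enter_smm() above; a clear flag means the SMI interrupted ordinary L1 execution and there is no nested state to restore.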
*/ 4551 if (!smram64->svm_guest_flag) 4552 return 0; 4553 4554 if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 4555 return 1; 4556 4557 if (!(smram64->efer & EFER_SVME)) 4558 return 1; 4559 4560 if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map)) 4561 return 1; 4562 4563 ret = 1; 4564 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) 4565 goto unmap_map; 4566 4567 if (svm_allocate_nested(svm)) 4568 goto unmap_save; 4569 4570 /* 4571 * Restore L1 host state from L1 HSAVE area as VMCB01 was 4572 * used during SMM (see svm_enter_smm()) 4573 */ 4574 4575 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); 4576 4577 /* 4578 * Enter the nested guest now 4579 */ 4580 4581 vmcb_mark_all_dirty(svm->vmcb01.ptr); 4582 4583 vmcb12 = map.hva; 4584 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); 4585 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); 4586 ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false); 4587 4588 if (ret) 4589 goto unmap_save; 4590 4591 svm->nested.nested_run_pending = 1; 4592 4593 unmap_save: 4594 kvm_vcpu_unmap(vcpu, &map_save, true); 4595 unmap_map: 4596 kvm_vcpu_unmap(vcpu, &map, true); 4597 return ret; 4598 } 4599 4600 static void svm_enable_smi_window(struct kvm_vcpu *vcpu) 4601 { 4602 struct vcpu_svm *svm = to_svm(vcpu); 4603 4604 if (!gif_set(svm)) { 4605 if (vgif) 4606 svm_set_intercept(svm, INTERCEPT_STGI); 4607 /* STGI will cause a vm exit */ 4608 } else { 4609 /* We must be in SMM; RSM will cause a vmexit anyway. */ 4610 } 4611 } 4612 #endif 4613 4614 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, 4615 void *insn, int insn_len) 4616 { 4617 bool smep, smap, is_user; 4618 u64 error_code; 4619 4620 /* Emulation is always possible when KVM has access to all guest state. */ 4621 if (!sev_guest(vcpu->kvm)) 4622 return true; 4623 4624 /* #UD and #GP should never be intercepted for SEV guests. */ 4625 WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD | 4626 EMULTYPE_TRAP_UD_FORCED | 4627 EMULTYPE_VMWARE_GP)); 4628 4629 /* 4630 * Emulation is impossible for SEV-ES guests as KVM doesn't have access 4631 * to guest register state. 4632 */ 4633 if (sev_es_guest(vcpu->kvm)) 4634 return false; 4635 4636 /* 4637 * Emulation is possible if the instruction is already decoded, e.g. 4638 * when completing I/O after returning from userspace. 4639 */ 4640 if (emul_type & EMULTYPE_NO_DECODE) 4641 return true; 4642 4643 /* 4644 * Emulation is possible for SEV guests if and only if a prefilled 4645 * buffer containing the bytes of the intercepted instruction is 4646 * available. SEV guest memory is encrypted with a guest specific key 4647 * and cannot be decrypted by KVM, i.e. KVM would read cyphertext and 4648 * decode garbage. 4649 * 4650 * Inject #UD if KVM reached this point without an instruction buffer. 4651 * In practice, this path should never be hit by a well-behaved guest, 4652 * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path 4653 * is still theoretically reachable, e.g. via unaccelerated fault-like 4654 * AVIC access, and needs to be handled by KVM to avoid putting the 4655 * guest into an infinite loop. Injecting #UD is somewhat arbitrary, 4656 * but its the least awful option given lack of insight into the guest. 4657 */ 4658 if (unlikely(!insn)) { 4659 kvm_queue_exception(vcpu, UD_VECTOR); 4660 return false; 4661 } 4662 4663 /* 4664 * Emulate for SEV guests if the insn buffer is not empty. 
The buffer 4665 * will be empty if the DecodeAssist microcode cannot fetch bytes for 4666 * the faulting instruction because the code fetch itself faulted, e.g. 4667 * the guest attempted to fetch from emulated MMIO or a guest page 4668 * table used to translate CS:RIP resides in emulated MMIO. 4669 */ 4670 if (likely(insn_len)) 4671 return true; 4672 4673 /* 4674 * Detect and work around Erratum 1096 Fam_17h_00_0Fh. 4675 * 4676 * Errata: 4677 * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is 4678 * possible that CPU microcode implementing DecodeAssist will fail to 4679 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly 4680 * be '0'. This happens because microcode reads CS:RIP using a _data_ 4681 * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode 4682 * gives up and does not fill the instruction bytes buffer. 4683 * 4684 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU 4685 * supports DecodeAssist, a #NPF was raised, KVM's page fault handler 4686 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the 4687 * GuestIntrBytes field of the VMCB. 4688 * 4689 * This does _not_ mean that the erratum has been encountered, as the 4690 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate 4691 * #PF, e.g. if the guest attempted to execute from emulated MMIO and 4692 * encountered a reserved/not-present #PF. 4693 * 4694 * To hit the erratum, the following conditions must be true: 4695 * 1. CR4.SMAP=1 (obviously). 4696 * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot 4697 * have been hit as the guest would have encountered a SMEP 4698 * violation #PF, not a #NPF. 4699 * 3. The #NPF is not due to a code fetch, in which case failure to 4700 * retrieve the instruction bytes is legitimate (see above). 4701 * 4702 * In addition, don't apply the erratum workaround if the #NPF occurred 4703 * while translating guest page tables (see below). 4704 */ 4705 error_code = to_svm(vcpu)->vmcb->control.exit_info_1; 4706 if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK)) 4707 goto resume_guest; 4708 4709 smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP); 4710 smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP); 4711 is_user = svm_get_cpl(vcpu) == 3; 4712 if (smap && (!smep || is_user)) { 4713 pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n"); 4714 4715 /* 4716 * If the fault occurred in userspace, arbitrarily inject #GP 4717 * to avoid killing the guest and to hopefully avoid confusing 4718 * the guest kernel too much, e.g. injecting #PF would not be 4719 * coherent with respect to the guest's page tables. Request 4720 * triple fault if the fault occurred in the kernel as there's 4721 * no fault that KVM can inject without confusing the guest. 4722 * In practice, the triple fault is moot as no sane SEV kernel 4723 * will execute from user memory while also running with SMAP=1. 4724 */ 4725 if (is_user) 4726 kvm_inject_gp(vcpu, 0); 4727 else 4728 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4729 } 4730 4731 resume_guest: 4732 /* 4733 * If the erratum was not hit, simply resume the guest and let it fault 4734 * again. While awful, e.g. the vCPU may get stuck in an infinite loop 4735 * if the fault is at CPL=0, it's the lesser of all evils. Exiting to 4736 * userspace will kill the guest, and letting the emulator read garbage 4737 * will yield random behavior and potentially corrupt the guest.
4738 * 4739 * Simply resuming the guest is technically not a violation of the SEV 4740 * architecture. AMD's APM states that all code fetches and page table 4741 * accesses for SEV guest are encrypted, regardless of the C-Bit. The 4742 * APM also states that encrypted accesses to MMIO are "ignored", but 4743 * doesn't explicitly define "ignored", i.e. doing nothing and letting 4744 * the guest spin is technically "ignoring" the access. 4745 */ 4746 return false; 4747 } 4748 4749 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu) 4750 { 4751 struct vcpu_svm *svm = to_svm(vcpu); 4752 4753 return !gif_set(svm); 4754 } 4755 4756 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 4757 { 4758 if (!sev_es_guest(vcpu->kvm)) 4759 return kvm_vcpu_deliver_sipi_vector(vcpu, vector); 4760 4761 sev_vcpu_deliver_sipi_vector(vcpu, vector); 4762 } 4763 4764 static void svm_vm_destroy(struct kvm *kvm) 4765 { 4766 avic_vm_destroy(kvm); 4767 sev_vm_destroy(kvm); 4768 } 4769 4770 static int svm_vm_init(struct kvm *kvm) 4771 { 4772 if (!pause_filter_count || !pause_filter_thresh) 4773 kvm->arch.pause_in_guest = true; 4774 4775 if (enable_apicv) { 4776 int ret = avic_vm_init(kvm); 4777 if (ret) 4778 return ret; 4779 } 4780 4781 return 0; 4782 } 4783 4784 static struct kvm_x86_ops svm_x86_ops __initdata = { 4785 .name = KBUILD_MODNAME, 4786 4787 .check_processor_compatibility = svm_check_processor_compat, 4788 4789 .hardware_unsetup = svm_hardware_unsetup, 4790 .hardware_enable = svm_hardware_enable, 4791 .hardware_disable = svm_hardware_disable, 4792 .has_emulated_msr = svm_has_emulated_msr, 4793 4794 .vcpu_create = svm_vcpu_create, 4795 .vcpu_free = svm_vcpu_free, 4796 .vcpu_reset = svm_vcpu_reset, 4797 4798 .vm_size = sizeof(struct kvm_svm), 4799 .vm_init = svm_vm_init, 4800 .vm_destroy = svm_vm_destroy, 4801 4802 .prepare_switch_to_guest = svm_prepare_switch_to_guest, 4803 .vcpu_load = svm_vcpu_load, 4804 .vcpu_put = svm_vcpu_put, 4805 .vcpu_blocking = avic_vcpu_blocking, 4806 .vcpu_unblocking = avic_vcpu_unblocking, 4807 4808 .update_exception_bitmap = svm_update_exception_bitmap, 4809 .get_msr_feature = svm_get_msr_feature, 4810 .get_msr = svm_get_msr, 4811 .set_msr = svm_set_msr, 4812 .get_segment_base = svm_get_segment_base, 4813 .get_segment = svm_get_segment, 4814 .set_segment = svm_set_segment, 4815 .get_cpl = svm_get_cpl, 4816 .get_cs_db_l_bits = svm_get_cs_db_l_bits, 4817 .is_valid_cr0 = svm_is_valid_cr0, 4818 .set_cr0 = svm_set_cr0, 4819 .post_set_cr3 = sev_post_set_cr3, 4820 .is_valid_cr4 = svm_is_valid_cr4, 4821 .set_cr4 = svm_set_cr4, 4822 .set_efer = svm_set_efer, 4823 .get_idt = svm_get_idt, 4824 .set_idt = svm_set_idt, 4825 .get_gdt = svm_get_gdt, 4826 .set_gdt = svm_set_gdt, 4827 .set_dr7 = svm_set_dr7, 4828 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, 4829 .cache_reg = svm_cache_reg, 4830 .get_rflags = svm_get_rflags, 4831 .set_rflags = svm_set_rflags, 4832 .get_if_flag = svm_get_if_flag, 4833 4834 .flush_tlb_all = svm_flush_tlb_all, 4835 .flush_tlb_current = svm_flush_tlb_current, 4836 .flush_tlb_gva = svm_flush_tlb_gva, 4837 .flush_tlb_guest = svm_flush_tlb_asid, 4838 4839 .vcpu_pre_run = svm_vcpu_pre_run, 4840 .vcpu_run = svm_vcpu_run, 4841 .handle_exit = svm_handle_exit, 4842 .skip_emulated_instruction = svm_skip_emulated_instruction, 4843 .update_emulated_instruction = NULL, 4844 .set_interrupt_shadow = svm_set_interrupt_shadow, 4845 .get_interrupt_shadow = svm_get_interrupt_shadow, 4846 .patch_hypercall = svm_patch_hypercall, 4847 .inject_irq 
= svm_inject_irq, 4848 .inject_nmi = svm_inject_nmi, 4849 .is_vnmi_pending = svm_is_vnmi_pending, 4850 .set_vnmi_pending = svm_set_vnmi_pending, 4851 .inject_exception = svm_inject_exception, 4852 .cancel_injection = svm_cancel_injection, 4853 .interrupt_allowed = svm_interrupt_allowed, 4854 .nmi_allowed = svm_nmi_allowed, 4855 .get_nmi_mask = svm_get_nmi_mask, 4856 .set_nmi_mask = svm_set_nmi_mask, 4857 .enable_nmi_window = svm_enable_nmi_window, 4858 .enable_irq_window = svm_enable_irq_window, 4859 .update_cr8_intercept = svm_update_cr8_intercept, 4860 .set_virtual_apic_mode = avic_refresh_virtual_apic_mode, 4861 .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl, 4862 .apicv_post_state_restore = avic_apicv_post_state_restore, 4863 .required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS, 4864 4865 .get_exit_info = svm_get_exit_info, 4866 4867 .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid, 4868 4869 .has_wbinvd_exit = svm_has_wbinvd_exit, 4870 4871 .get_l2_tsc_offset = svm_get_l2_tsc_offset, 4872 .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier, 4873 .write_tsc_offset = svm_write_tsc_offset, 4874 .write_tsc_multiplier = svm_write_tsc_multiplier, 4875 4876 .load_mmu_pgd = svm_load_mmu_pgd, 4877 4878 .check_intercept = svm_check_intercept, 4879 .handle_exit_irqoff = svm_handle_exit_irqoff, 4880 4881 .request_immediate_exit = __kvm_request_immediate_exit, 4882 4883 .sched_in = svm_sched_in, 4884 4885 .nested_ops = &svm_nested_ops, 4886 4887 .deliver_interrupt = svm_deliver_interrupt, 4888 .pi_update_irte = avic_pi_update_irte, 4889 .setup_mce = svm_setup_mce, 4890 4891 #ifdef CONFIG_KVM_SMM 4892 .smi_allowed = svm_smi_allowed, 4893 .enter_smm = svm_enter_smm, 4894 .leave_smm = svm_leave_smm, 4895 .enable_smi_window = svm_enable_smi_window, 4896 #endif 4897 4898 .mem_enc_ioctl = sev_mem_enc_ioctl, 4899 .mem_enc_register_region = sev_mem_enc_register_region, 4900 .mem_enc_unregister_region = sev_mem_enc_unregister_region, 4901 .guest_memory_reclaimed = sev_guest_memory_reclaimed, 4902 4903 .vm_copy_enc_context_from = sev_vm_copy_enc_context_from, 4904 .vm_move_enc_context_from = sev_vm_move_enc_context_from, 4905 4906 .can_emulate_instruction = svm_can_emulate_instruction, 4907 4908 .apic_init_signal_blocked = svm_apic_init_signal_blocked, 4909 4910 .msr_filter_changed = svm_msr_filter_changed, 4911 .complete_emulated_msr = svm_complete_emulated_msr, 4912 4913 .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, 4914 .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, 4915 }; 4916 4917 /* 4918 * The default MMIO mask is a single bit (excluding the present bit), 4919 * which could conflict with the memory encryption bit. Check for 4920 * memory encryption support and override the default MMIO mask if 4921 * memory encryption is enabled. 
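 *
 * Worked example with illustrative values: with x86_phys_bits = 48 and the
 * encryption bit reported at position 47, the two do not collide, so the
 * MMIO mask becomes rsvd_bits(48, 51) | PT_PRESENT_MASK; were the encryption
 * bit instead reported at 48 (equal to x86_phys_bits), mask_bit would be
 * bumped to 49.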
4922 */ 4923 static __init void svm_adjust_mmio_mask(void) 4924 { 4925 unsigned int enc_bit, mask_bit; 4926 u64 msr, mask; 4927 4928 /* If there is no memory encryption support, use existing mask */ 4929 if (cpuid_eax(0x80000000) < 0x8000001f) 4930 return; 4931 4932 /* If memory encryption is not enabled, use existing mask */ 4933 rdmsrl(MSR_AMD64_SYSCFG, msr); 4934 if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) 4935 return; 4936 4937 enc_bit = cpuid_ebx(0x8000001f) & 0x3f; 4938 mask_bit = boot_cpu_data.x86_phys_bits; 4939 4940 /* Increment the mask bit if it is the same as the encryption bit */ 4941 if (enc_bit == mask_bit) 4942 mask_bit++; 4943 4944 /* 4945 * If the mask bit location is below 52, then some bits above the 4946 * physical addressing limit will always be reserved, so use the 4947 * rsvd_bits() function to generate the mask. This mask, along with 4948 * the present bit, will be used to generate a page fault with 4949 * PFER.RSV = 1. 4950 * 4951 * If the mask bit location is 52 (or above), then clear the mask. 4952 */ 4953 mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; 4954 4955 kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); 4956 } 4957 4958 static __init void svm_set_cpu_caps(void) 4959 { 4960 kvm_set_cpu_caps(); 4961 4962 kvm_caps.supported_perf_cap = 0; 4963 kvm_caps.supported_xss = 0; 4964 4965 /* CPUID 0x80000001 and 0x8000000A (SVM features) */ 4966 if (nested) { 4967 kvm_cpu_cap_set(X86_FEATURE_SVM); 4968 kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN); 4969 4970 if (nrips) 4971 kvm_cpu_cap_set(X86_FEATURE_NRIPS); 4972 4973 if (npt_enabled) 4974 kvm_cpu_cap_set(X86_FEATURE_NPT); 4975 4976 if (tsc_scaling) 4977 kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR); 4978 4979 if (vls) 4980 kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD); 4981 if (lbrv) 4982 kvm_cpu_cap_set(X86_FEATURE_LBRV); 4983 4984 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) 4985 kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER); 4986 4987 if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) 4988 kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD); 4989 4990 if (vgif) 4991 kvm_cpu_cap_set(X86_FEATURE_VGIF); 4992 4993 if (vnmi) 4994 kvm_cpu_cap_set(X86_FEATURE_VNMI); 4995 4996 /* Nested VM can receive #VMEXIT instead of triggering #GP */ 4997 kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); 4998 } 4999 5000 /* CPUID 0x80000008 */ 5001 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || 5002 boot_cpu_has(X86_FEATURE_AMD_SSBD)) 5003 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); 5004 5005 if (enable_pmu) { 5006 /* 5007 * Enumerate support for PERFCTR_CORE if and only if KVM has 5008 * access to enough counters to virtualize "core" support, 5009 * otherwise limit vPMU support to the legacy number of counters. 5010 */ 5011 if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE) 5012 kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS, 5013 kvm_pmu_cap.num_counters_gp); 5014 else 5015 kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE); 5016 5017 if (kvm_pmu_cap.version != 2 || 5018 !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE)) 5019 kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2); 5020 } 5021 5022 /* CPUID 0x8000001F (SME/SEV features) */ 5023 sev_set_cpu_caps(); 5024 } 5025 5026 static __init int svm_hardware_setup(void) 5027 { 5028 int cpu; 5029 struct page *iopm_pages; 5030 void *iopm_va; 5031 int r; 5032 unsigned int order = get_order(IOPM_SIZE); 5033 5034 /* 5035 * NX is required for shadow paging and for NPT if the NX huge pages 5036 * mitigation is enabled. 
5037 */ 5038 if (!boot_cpu_has(X86_FEATURE_NX)) { 5039 pr_err_ratelimited("NX (Execute Disable) not supported\n"); 5040 return -EOPNOTSUPP; 5041 } 5042 kvm_enable_efer_bits(EFER_NX); 5043 5044 iopm_pages = alloc_pages(GFP_KERNEL, order); 5045 5046 if (!iopm_pages) 5047 return -ENOMEM; 5048 5049 iopm_va = page_address(iopm_pages); 5050 memset(iopm_va, 0xff, PAGE_SIZE * (1 << order)); 5051 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; 5052 5053 init_msrpm_offsets(); 5054 5055 kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | 5056 XFEATURE_MASK_BNDCSR); 5057 5058 if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) 5059 kvm_enable_efer_bits(EFER_FFXSR); 5060 5061 if (tsc_scaling) { 5062 if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { 5063 tsc_scaling = false; 5064 } else { 5065 pr_info("TSC scaling supported\n"); 5066 kvm_caps.has_tsc_control = true; 5067 } 5068 } 5069 kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX; 5070 kvm_caps.tsc_scaling_ratio_frac_bits = 32; 5071 5072 tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); 5073 5074 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) 5075 kvm_enable_efer_bits(EFER_AUTOIBRS); 5076 5077 /* Check for pause filtering support */ 5078 if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { 5079 pause_filter_count = 0; 5080 pause_filter_thresh = 0; 5081 } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) { 5082 pause_filter_thresh = 0; 5083 } 5084 5085 if (nested) { 5086 pr_info("Nested Virtualization enabled\n"); 5087 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); 5088 } 5089 5090 /* 5091 * KVM's MMU doesn't support using 2-level paging for itself, and thus 5092 * NPT isn't supported if the host is using 2-level paging since host 5093 * CR4 is unchanged on VMRUN. 5094 */ 5095 if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE)) 5096 npt_enabled = false; 5097 5098 if (!boot_cpu_has(X86_FEATURE_NPT)) 5099 npt_enabled = false; 5100 5101 /* Force VM NPT level equal to the host's paging level */ 5102 kvm_configure_mmu(npt_enabled, get_npt_level(), 5103 get_npt_level(), PG_LEVEL_1G); 5104 pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); 5105 5106 /* Setup shadow_me_value and shadow_me_mask */ 5107 kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask); 5108 5109 svm_adjust_mmio_mask(); 5110 5111 /* 5112 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which 5113 * may be modified by svm_adjust_mmio_mask()). 
5114 */ 5115 sev_hardware_setup(); 5116 5117 svm_hv_hardware_setup(); 5118 5119 for_each_possible_cpu(cpu) { 5120 r = svm_cpu_init(cpu); 5121 if (r) 5122 goto err; 5123 } 5124 5125 if (nrips) { 5126 if (!boot_cpu_has(X86_FEATURE_NRIPS)) 5127 nrips = false; 5128 } 5129 5130 enable_apicv = avic = avic && avic_hardware_setup(); 5131 5132 if (!enable_apicv) { 5133 svm_x86_ops.vcpu_blocking = NULL; 5134 svm_x86_ops.vcpu_unblocking = NULL; 5135 svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL; 5136 } else if (!x2avic_enabled) { 5137 svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true; 5138 } 5139 5140 if (vls) { 5141 if (!npt_enabled || 5142 !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) || 5143 !IS_ENABLED(CONFIG_X86_64)) { 5144 vls = false; 5145 } else { 5146 pr_info("Virtual VMLOAD VMSAVE supported\n"); 5147 } 5148 } 5149 5150 if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) 5151 svm_gp_erratum_intercept = false; 5152 5153 if (vgif) { 5154 if (!boot_cpu_has(X86_FEATURE_VGIF)) 5155 vgif = false; 5156 else 5157 pr_info("Virtual GIF supported\n"); 5158 } 5159 5160 vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI); 5161 if (vnmi) 5162 pr_info("Virtual NMI enabled\n"); 5163 5164 if (!vnmi) { 5165 svm_x86_ops.is_vnmi_pending = NULL; 5166 svm_x86_ops.set_vnmi_pending = NULL; 5167 } 5168 5169 5170 if (lbrv) { 5171 if (!boot_cpu_has(X86_FEATURE_LBRV)) 5172 lbrv = false; 5173 else 5174 pr_info("LBR virtualization supported\n"); 5175 } 5176 5177 if (!enable_pmu) 5178 pr_info("PMU virtualization is disabled\n"); 5179 5180 svm_set_cpu_caps(); 5181 5182 /* 5183 * It seems that on AMD processors PTE's accessed bit is 5184 * being set by the CPU hardware before the NPF vmexit. 5185 * This is not expected behaviour and our tests fail because 5186 * of it. 5187 * A workaround here is to disable support for 5188 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled. 5189 * In this case userspace can know if there is support using 5190 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle 5191 * it 5192 * If future AMD CPU models change the behaviour described above, 5193 * this variable can be changed accordingly 5194 */ 5195 allow_smaller_maxphyaddr = !npt_enabled; 5196 5197 return 0; 5198 5199 err: 5200 svm_hardware_unsetup(); 5201 return r; 5202 } 5203 5204 5205 static struct kvm_x86_init_ops svm_init_ops __initdata = { 5206 .hardware_setup = svm_hardware_setup, 5207 5208 .runtime_ops = &svm_x86_ops, 5209 .pmu_ops = &amd_pmu_ops, 5210 }; 5211 5212 static int __init svm_init(void) 5213 { 5214 int r; 5215 5216 __unused_size_checks(); 5217 5218 if (!kvm_is_svm_supported()) 5219 return -EOPNOTSUPP; 5220 5221 r = kvm_x86_vendor_init(&svm_init_ops); 5222 if (r) 5223 return r; 5224 5225 /* 5226 * Common KVM initialization _must_ come last, after this, /dev/kvm is 5227 * exposed to userspace! 5228 */ 5229 r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm), 5230 THIS_MODULE); 5231 if (r) 5232 goto err_kvm_init; 5233 5234 return 0; 5235 5236 err_kvm_init: 5237 kvm_x86_vendor_exit(); 5238 return r; 5239 } 5240 5241 static void __exit svm_exit(void) 5242 { 5243 kvm_exit(); 5244 kvm_x86_vendor_exit(); 5245 } 5246 5247 module_init(svm_init) 5248 module_exit(svm_exit) 5249
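/*
 * Usage sketch (illustrative only): the module builds as kvm-amd and is
 * tuned through the module parameters defined earlier in this file, e.g.
 *
 *   modprobe kvm-amd npt=1 nested=1 avic=1
 *
 * The parameter names shown (npt, nested, avic) are the standard kvm-amd
 * ones; consult the module's parameter list for the authoritative set.
 */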