Lines Matching +full:break +full:- +full:control

18 #include <linux/amd-iommu.h>
24 #include <linux/psp-sev.h>
42 #include <asm/spec-ctrl.h>
59 MODULE_DESCRIPTION("KVM support for SVM (AMD-V) extensions");
84 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
97 * In this mode, a 16-bit pause filter threshold field is added in the
119 /* Default doubles per-vcpu window every exit. */
123 /* Default resets per-vcpu window every exit to pause_filter_count. */
190 int tsc_aux_uret_slot __ro_after_init = -1;
204 u64 old_efer = vcpu->arch.efer; in svm_set_efer()
205 vcpu->arch.efer = efer; in svm_set_efer()
235 vcpu->arch.efer = old_efer; in svm_set_efer()
243 if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm)) in svm_set_efer()
248 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
249 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_efer()
258 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
268 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
270 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
281 * SEV-ES does not expose the next RIP. The RIP update is controlled by in __svm_skip_emulated_instruction()
284 if (sev_es_guest(vcpu->kvm)) in __svm_skip_emulated_instruction()
287 if (nrips && svm->vmcb->control.next_rip != 0) { in __svm_skip_emulated_instruction()
289 svm->next_rip = svm->vmcb->control.next_rip; in __svm_skip_emulated_instruction()
292 if (!svm->next_rip) { in __svm_skip_emulated_instruction()
294 old_rflags = svm->vmcb->save.rflags; in __svm_skip_emulated_instruction()
300 svm->vmcb->save.rflags = old_rflags; in __svm_skip_emulated_instruction()
302 kvm_rip_write(vcpu, svm->next_rip); in __svm_skip_emulated_instruction()
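
The __svm_skip_emulated_instruction() lines above show the two paths for advancing RIP past an intercepted instruction: use the hardware-provided next_rip when NRIPS supplied one, otherwise fall back to emulation to learn the instruction length (SEV-ES guests are left alone because KVM does not control their RIP). Below is a self-contained, illustrative sketch of that decision only; the struct and function names are invented for the example and are not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct example_vcpu {
	bool     sev_es;     /* guest state protected: RIP owned by hardware/guest */
	bool     nrips;      /* CPU/KVM support for the next_rip field */
	uint64_t next_rip;   /* 0 means "not provided for this exit" */
	uint64_t rip;
};

/* Returns true if RIP was advanced without falling back to emulation. */
static bool example_skip_emulated_instruction(struct example_vcpu *v,
					      uint64_t emulated_insn_len)
{
	if (v->sev_es)
		return true;                 /* RIP update handled elsewhere */

	if (v->nrips && v->next_rip != 0) {
		v->rip = v->next_rip;        /* fast path: trust next_rip */
		return true;
	}

	v->rip += emulated_insn_len;         /* slow path: emulate to skip */
	return false;
}

int main(void)
{
	struct example_vcpu v = { .nrips = true, .next_rip = 0x1003, .rip = 0x1000 };

	example_skip_emulated_instruction(&v, 3);
	printf("rip = 0x%llx\n", (unsigned long long)v.rip);
	return 0;
}
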
335 return -EIO; in svm_update_soft_interrupt_rip()
341 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection in svm_update_soft_interrupt_rip()
342 * doesn't complete due to a VM-Exit occurring while the CPU is in svm_update_soft_interrupt_rip()
348 svm->soft_int_injected = true; in svm_update_soft_interrupt_rip()
349 svm->soft_int_csbase = svm->vmcb->save.cs.base; in svm_update_soft_interrupt_rip()
350 svm->soft_int_old_rip = old_rip; in svm_update_soft_interrupt_rip()
351 svm->soft_int_next_rip = rip; in svm_update_soft_interrupt_rip()
357 svm->vmcb->control.next_rip = rip; in svm_update_soft_interrupt_rip()
364 struct kvm_queued_exception *ex = &vcpu->arch.exception; in svm_inject_exception()
369 if (kvm_exception_is_soft(ex->vector) && in svm_inject_exception()
373 svm->vmcb->control.event_inj = ex->vector in svm_inject_exception()
375 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0) in svm_inject_exception()
377 svm->vmcb->control.event_inj_err = ex->error_code; in svm_inject_exception()
387 /* Use _safe variants to not break nested virtualization */ in svm_init_erratum_383()
404 vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; in svm_init_osvw()
405 vcpu->arch.osvw.status = osvw_status & ~(6ULL); in svm_init_osvw()
416 vcpu->arch.osvw.status |= 1; in svm_init_osvw()
424 if (c->x86_vendor != X86_VENDOR_AMD && in __kvm_is_svm_supported()
425 c->x86_vendor != X86_VENDOR_HYGON) { in __kvm_is_svm_supported()
457 return -EIO; in svm_check_processor_compat()
473 return &sd->save_area->host_sev_es_save; in sev_es_host_save_area()
519 return -EBUSY; in svm_enable_virtualization_cpu()
522 sd->asid_generation = 1; in svm_enable_virtualization_cpu()
523 sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; in svm_enable_virtualization_cpu()
524 sd->next_asid = sd->max_asid + 1; in svm_enable_virtualization_cpu()
525 sd->min_asid = max_sev_asid + 1; in svm_enable_virtualization_cpu()
529 wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa); in svm_enable_virtualization_cpu()
563 osvw_status &= (1ULL << osvw_len) - 1; in svm_enable_virtualization_cpu()
579 if (!sd->save_area) in svm_cpu_uninit()
582 kfree(sd->sev_vmcbs); in svm_cpu_uninit()
583 __free_page(__sme_pa_to_page(sd->save_area_pa)); in svm_cpu_uninit()
584 sd->save_area_pa = 0; in svm_cpu_uninit()
585 sd->save_area = NULL; in svm_cpu_uninit()
592 int ret = -ENOMEM; in svm_cpu_init()
603 sd->save_area = page_address(save_area_page); in svm_cpu_init()
604 sd->save_area_pa = __sme_page_pa(save_area_page); in svm_cpu_init()
615 struct vmcb *vmcb = svm->vmcb01.ptr; in set_dr_intercepts()
617 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ); in set_dr_intercepts()
618 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ); in set_dr_intercepts()
619 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ); in set_dr_intercepts()
620 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ); in set_dr_intercepts()
621 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ); in set_dr_intercepts()
622 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ); in set_dr_intercepts()
623 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ); in set_dr_intercepts()
624 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE); in set_dr_intercepts()
625 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE); in set_dr_intercepts()
626 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE); in set_dr_intercepts()
627 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE); in set_dr_intercepts()
628 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE); in set_dr_intercepts()
629 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE); in set_dr_intercepts()
630 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE); in set_dr_intercepts()
631 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); in set_dr_intercepts()
632 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); in set_dr_intercepts()
639 struct vmcb *vmcb = svm->vmcb01.ptr; in clr_dr_intercepts()
641 vmcb->control.intercepts[INTERCEPT_DR] = 0; in clr_dr_intercepts()
649 * For non-nested case: in msr_write_intercepted()
657 void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm : in msr_write_intercepted()
658 to_svm(vcpu)->msrpm; in msr_write_intercepted()
666 void *msrpm = svm->msrpm; in svm_set_intercept_for_msr()
684 svm->nested.force_msr_bitmap_recalc = true; in svm_set_intercept_for_msr()
708 bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); in svm_recalc_lbr_msr_intercepts()
715 if (sev_es_guest(vcpu->kvm)) in svm_recalc_lbr_msr_intercepts()
754 * the guest has a non-zero SPEC_CTRL value, i.e. is likely actively in svm_recalc_msr_intercepts()
762 !svm->spec_ctrl); in svm_recalc_msr_intercepts()
773 if (kvm_aperfmperf_in_guest(vcpu->kvm)) { in svm_recalc_msr_intercepts()
789 if (sev_es_guest(vcpu->kvm)) in svm_recalc_msr_intercepts()
793 * x2APIC intercepts are modified on-demand and cannot be filtered by in svm_recalc_msr_intercepts()
800 to_vmcb->save.dbgctl = from_vmcb->save.dbgctl; in svm_copy_lbrs()
801 to_vmcb->save.br_from = from_vmcb->save.br_from; in svm_copy_lbrs()
802 to_vmcb->save.br_to = from_vmcb->save.br_to; in svm_copy_lbrs()
803 to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from; in svm_copy_lbrs()
804 to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to; in svm_copy_lbrs()
813 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in svm_enable_lbrv()
818 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr); in svm_enable_lbrv()
825 KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm); in svm_disable_lbrv()
826 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; in svm_disable_lbrv()
834 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb); in svm_disable_lbrv()
844 return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb : in svm_get_lbr_vmcb()
845 svm->vmcb01.ptr; in svm_get_lbr_vmcb()
851 bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK; in svm_update_lbrv()
852 bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) || in svm_update_lbrv()
854 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)); in svm_update_lbrv()
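
svm_get_lbr_vmcb() and svm_update_lbrv() above decide which VMCB currently owns the LBR state and whether LBR virtualization should be enabled: either the effective DBGCTL value has the LBR bit set, or L1 enabled LBR virtualization for its nested guest. A minimal sketch of that predicate plus the field-by-field handoff done by svm_copy_lbrs(); the example_ names are invented and the nested condition is reduced to a single boolean.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DEBUGCTL_LBR (1ull << 0)   /* DEBUGCTLMSR_LBR is bit 0 */

struct example_lbr_state {
	uint64_t dbgctl, br_from, br_to, last_excp_from, last_excp_to;
};

/* Field-by-field handoff, as in svm_copy_lbrs() above. */
static void example_copy_lbrs(struct example_lbr_state *to,
			      const struct example_lbr_state *from)
{
	*to = *from;
}

/* Should LBR virtualization be on right now? */
static bool example_lbrv_wanted(uint64_t effective_dbgctl,
				bool in_guest_mode, bool l1_enabled_lbrv)
{
	return (effective_dbgctl & EXAMPLE_DEBUGCTL_LBR) ||
	       (in_guest_mode && l1_enabled_lbrv);
}

int main(void)
{
	struct example_lbr_state vmcb01 = { .dbgctl = EXAMPLE_DEBUGCTL_LBR }, vmcb = { 0 };

	if (example_lbrv_wanted(vmcb01.dbgctl, false, false))
		example_copy_lbrs(&vmcb, &vmcb01);   /* hand LBR state to the active VMCB */
	printf("dbgctl = %llu\n", (unsigned long long)vmcb.dbgctl);
	return 0;
}
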
867 svm->nmi_singlestep = false; in disable_nmi_singlestep()
869 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { in disable_nmi_singlestep()
871 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in disable_nmi_singlestep()
872 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
873 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in disable_nmi_singlestep()
874 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
881 struct vmcb_control_area *control = &svm->vmcb->control; in grow_ple_window() local
882 int old = control->pause_filter_count; in grow_ple_window()
884 if (kvm_pause_in_guest(vcpu->kvm)) in grow_ple_window()
887 control->pause_filter_count = __grow_ple_window(old, in grow_ple_window()
892 if (control->pause_filter_count != old) { in grow_ple_window()
893 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in grow_ple_window()
894 trace_kvm_ple_window_update(vcpu->vcpu_id, in grow_ple_window()
895 control->pause_filter_count, old); in grow_ple_window()
902 struct vmcb_control_area *control = &svm->vmcb->control; in shrink_ple_window() local
903 int old = control->pause_filter_count; in shrink_ple_window()
905 if (kvm_pause_in_guest(vcpu->kvm)) in shrink_ple_window()
908 control->pause_filter_count = in shrink_ple_window()
913 if (control->pause_filter_count != old) { in shrink_ple_window()
914 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in shrink_ple_window()
915 trace_kvm_ple_window_update(vcpu->vcpu_id, in shrink_ple_window()
916 control->pause_filter_count, old); in shrink_ple_window()
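
The comments at lines 84-123 and the grow_ple_window()/shrink_ple_window() lines above describe the Pause-Loop Exiting policy: the per-vCPU pause filter count is widened on each PAUSE exit (doubled by default) and shrunk back toward pause_filter_count at other times, and the VMCB is only marked dirty when the value actually changes. The following is a simplified, self-contained model of the default policy; the constants are illustrative and the real arithmetic lives in the shared __grow_ple_window() helper (and its shrink counterpart), not here.

#include <stdio.h>

/* Illustrative values; the real ones are module parameters. */
#define EXAMPLE_PLE_WINDOW_DEFAULT 3000
#define EXAMPLE_PLE_WINDOW_GROW    2        /* "doubles per-vcpu window every exit" */
#define EXAMPLE_PLE_WINDOW_MAX     65535

static unsigned int example_shrink_divisor;  /* 0 == "resets ... to pause_filter_count" */

static unsigned int example_grow_ple_window(unsigned int val)
{
	unsigned long long grown = (unsigned long long)val * EXAMPLE_PLE_WINDOW_GROW;

	return grown > EXAMPLE_PLE_WINDOW_MAX ? EXAMPLE_PLE_WINDOW_MAX
					      : (unsigned int)grown;
}

static unsigned int example_shrink_ple_window(unsigned int val)
{
	if (!example_shrink_divisor)
		return EXAMPLE_PLE_WINDOW_DEFAULT;   /* default: reset */
	return val / example_shrink_divisor;         /* alternative: divide */
}

int main(void)
{
	unsigned int win = EXAMPLE_PLE_WINDOW_DEFAULT;

	win = example_grow_ple_window(win);     /* PAUSE exit: widen the window */
	win = example_grow_ple_window(win);
	win = example_shrink_ple_window(win);   /* later: shrink it back */
	printf("pause_filter_count = %u\n", win);
	return 0;
}
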
935 seg->selector = 0; in init_seg()
936 seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | in init_seg()
938 seg->limit = 0xffff; in init_seg()
939 seg->base = 0; in init_seg()
944 seg->selector = 0; in init_sys_seg()
945 seg->attrib = SVM_SELECTOR_P_MASK | type; in init_sys_seg()
946 seg->limit = 0xffff; in init_sys_seg()
947 seg->base = 0; in init_sys_seg()
954 return svm->nested.ctl.tsc_offset; in svm_get_l2_tsc_offset()
961 return svm->tsc_ratio_msr; in svm_get_l2_tsc_multiplier()
968 svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; in svm_write_tsc_offset()
969 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset; in svm_write_tsc_offset()
970 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_tsc_offset()
976 if (to_svm(vcpu)->guest_state_loaded) in svm_write_tsc_multiplier()
977 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); in svm_write_tsc_multiplier()
992 !guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_INVPCID)) in svm_recalc_instruction_intercepts()
1008 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in svm_recalc_instruction_intercepts()
1017 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in svm_recalc_instruction_intercepts()
1031 struct vmcb *vmcb = svm->vmcb01.ptr; in init_vmcb()
1032 struct vmcb_control_area *control = &vmcb->control; in init_vmcb() local
1033 struct vmcb_save_area *save = &vmcb->save; in init_vmcb()
1088 if (!kvm_mwait_in_guest(vcpu->kvm)) { in init_vmcb()
1093 if (!kvm_hlt_in_guest(vcpu->kvm)) { in init_vmcb()
1100 control->iopm_base_pa = iopm_base; in init_vmcb()
1101 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); in init_vmcb()
1102 control->int_ctl = V_INTR_MASKING_MASK; in init_vmcb()
1104 init_seg(&save->es); in init_vmcb()
1105 init_seg(&save->ss); in init_vmcb()
1106 init_seg(&save->ds); in init_vmcb()
1107 init_seg(&save->fs); in init_vmcb()
1108 init_seg(&save->gs); in init_vmcb()
1110 save->cs.selector = 0xf000; in init_vmcb()
1111 save->cs.base = 0xffff0000; in init_vmcb()
1113 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | in init_vmcb()
1115 save->cs.limit = 0xffff; in init_vmcb()
1117 save->gdtr.base = 0; in init_vmcb()
1118 save->gdtr.limit = 0xffff; in init_vmcb()
1119 save->idtr.base = 0; in init_vmcb()
1120 save->idtr.limit = 0xffff; in init_vmcb()
1122 init_sys_seg(&save->ldtr, SEG_TYPE_LDT); in init_vmcb()
1123 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); in init_vmcb()
1127 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; in init_vmcb()
1132 save->g_pat = vcpu->arch.pat; in init_vmcb()
1133 save->cr3 = 0; in init_vmcb()
1135 svm->current_vmcb->asid_generation = 0; in init_vmcb()
1136 svm->asid = 0; in init_vmcb()
1138 svm->nested.vmcb12_gpa = INVALID_GPA; in init_vmcb()
1139 svm->nested.last_vmcb12_gpa = INVALID_GPA; in init_vmcb()
1141 if (!kvm_pause_in_guest(vcpu->kvm)) { in init_vmcb()
1142 control->pause_filter_count = pause_filter_count; in init_vmcb()
1144 control->pause_filter_thresh = pause_filter_thresh; in init_vmcb()
1154 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK; in init_vmcb()
1159 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; in init_vmcb()
1162 if (vcpu->kvm->arch.bus_lock_detection_enabled) in init_vmcb()
1165 if (sev_guest(vcpu->kvm)) in init_vmcb()
1183 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) in __svm_vcpu_reset()
1184 vcpu->arch.microcode_version = 0x01000065; in __svm_vcpu_reset()
1185 svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio; in __svm_vcpu_reset()
1187 svm->nmi_masked = false; in __svm_vcpu_reset()
1188 svm->awaiting_iret_completion = false; in __svm_vcpu_reset()
1195 svm->spec_ctrl = 0; in svm_vcpu_reset()
1196 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1206 svm->current_vmcb = target_vmcb; in svm_switch_vmcb()
1207 svm->vmcb = target_vmcb->ptr; in svm_switch_vmcb()
1219 err = -ENOMEM; in svm_vcpu_create()
1232 svm->msrpm = svm_vcpu_alloc_msrpm(); in svm_vcpu_create()
1233 if (!svm->msrpm) { in svm_vcpu_create()
1234 err = -ENOMEM; in svm_vcpu_create()
1238 svm->x2avic_msrs_intercepted = true; in svm_vcpu_create()
1240 svm->vmcb01.ptr = page_address(vmcb01_page); in svm_vcpu_create()
1241 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); in svm_vcpu_create()
1242 svm_switch_vmcb(svm, &svm->vmcb01); in svm_vcpu_create()
1244 svm->guest_state_loaded = false; in svm_vcpu_create()
1260 WARN_ON_ONCE(!list_empty(&svm->ir_list)); in svm_vcpu_free()
1267 __free_page(__sme_pa_to_page(svm->vmcb01.pa)); in svm_vcpu_free()
1268 svm_vcpu_free_msrpm(svm->msrpm); in svm_vcpu_free()
1279 if (!sd->bp_spec_reduce_set) in svm_srso_clear_bp_spec_reduce()
1283 sd->bp_spec_reduce_set = false; in svm_srso_clear_bp_spec_reduce()
1331 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); in svm_prepare_switch_to_guest()
1333 if (sev_es_guest(vcpu->kvm)) in svm_prepare_switch_to_guest()
1336 if (svm->guest_state_loaded) in svm_prepare_switch_to_guest()
1340 * Save additional host state that will be restored on VMEXIT (sev-es) in svm_prepare_switch_to_guest()
1343 vmsave(sd->save_area_pa); in svm_prepare_switch_to_guest()
1344 if (sev_es_guest(vcpu->kvm)) in svm_prepare_switch_to_guest()
1348 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); in svm_prepare_switch_to_guest()
1352 * SEV-ES guests when the feature is available. For non-SEV-ES guests, in svm_prepare_switch_to_guest()
1357 (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm))) in svm_prepare_switch_to_guest()
1358 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); in svm_prepare_switch_to_guest()
1361 !sd->bp_spec_reduce_set) { in svm_prepare_switch_to_guest()
1362 sd->bp_spec_reduce_set = true; in svm_prepare_switch_to_guest()
1365 svm->guest_state_loaded = true; in svm_prepare_switch_to_guest()
1370 to_svm(vcpu)->guest_state_loaded = false; in svm_prepare_host_switch()
1375 if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm)) in svm_vcpu_load()
1389 ++vcpu->stat.host_state_reload; in svm_vcpu_put()
1395 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
1397 if (svm->nmi_singlestep) { in svm_get_rflags()
1399 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in svm_get_rflags()
1401 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in svm_get_rflags()
1409 if (to_svm(vcpu)->nmi_singlestep) in svm_set_rflags()
1414 * (caused by either a task switch or an inter-privilege IRET), in svm_set_rflags()
1417 to_svm(vcpu)->vmcb->save.rflags = rflags; in svm_set_rflags()
1422 struct vmcb *vmcb = to_svm(vcpu)->vmcb; in svm_get_if_flag()
1424 return sev_es_guest(vcpu->kvm) in svm_get_if_flag()
1425 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK in svm_get_if_flag()
1436 * When !npt_enabled, mmu->pdptrs[] is already available since in svm_cache_reg()
1441 break; in svm_cache_reg()
1443 KVM_BUG_ON(1, vcpu->kvm); in svm_cache_reg()
1449 struct vmcb_control_area *control; in svm_set_vintr() local
1454 WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu)); in svm_set_vintr()
1472 control = &svm->vmcb->control; in svm_set_vintr()
1473 control->int_vector = 0x0; in svm_set_vintr()
1474 control->int_ctl &= ~V_INTR_PRIO_MASK; in svm_set_vintr()
1475 control->int_ctl |= V_IRQ_MASK | in svm_set_vintr()
1476 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); in svm_set_vintr()
1477 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vintr()
1485 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1486 if (is_guest_mode(&svm->vcpu)) { in svm_clear_vintr()
1487 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1489 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != in svm_clear_vintr()
1490 (svm->nested.ctl.int_ctl & V_TPR_MASK)); in svm_clear_vintr()
1492 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & in svm_clear_vintr()
1495 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in svm_clear_vintr()
1498 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_clear_vintr()
1503 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_seg()
1504 struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; in svm_seg()
1507 case VCPU_SREG_CS: return &save->cs; in svm_seg()
1508 case VCPU_SREG_DS: return &save->ds; in svm_seg()
1509 case VCPU_SREG_ES: return &save->es; in svm_seg()
1510 case VCPU_SREG_FS: return &save01->fs; in svm_seg()
1511 case VCPU_SREG_GS: return &save01->gs; in svm_seg()
1512 case VCPU_SREG_SS: return &save->ss; in svm_seg()
1513 case VCPU_SREG_TR: return &save01->tr; in svm_seg()
1514 case VCPU_SREG_LDTR: return &save01->ldtr; in svm_seg()
1524 return s->base; in svm_get_segment_base()
1532 var->base = s->base; in svm_get_segment()
1533 var->limit = s->limit; in svm_get_segment()
1534 var->selector = s->selector; in svm_get_segment()
1535 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; in svm_get_segment()
1536 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; in svm_get_segment()
1537 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; in svm_get_segment()
1538 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; in svm_get_segment()
1539 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; in svm_get_segment()
1540 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; in svm_get_segment()
1541 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; in svm_get_segment()
1548 * running KVM nested. It also helps cross-vendor migration, because in svm_get_segment()
1551 var->g = s->limit > 0xfffff; in svm_get_segment()
1557 var->unusable = !var->present; in svm_get_segment()
1565 var->type |= 0x2; in svm_get_segment()
1566 break; in svm_get_segment()
1576 * cross-vendor migration. in svm_get_segment()
1578 if (!var->unusable) in svm_get_segment()
1579 var->type |= 0x1; in svm_get_segment()
1580 break; in svm_get_segment()
1588 if (var->unusable) in svm_get_segment()
1589 var->db = 0; in svm_get_segment()
1591 var->dpl = to_svm(vcpu)->vmcb->save.cpl; in svm_get_segment()
1592 break; in svm_get_segment()
1598 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_get_cpl()
1600 return save->cpl; in svm_get_cpl()
1616 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1617 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1624 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1625 svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
1626 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1633 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1634 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1641 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1642 svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
1643 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1653 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to in sev_post_set_cr3()
1658 if (sev_es_guest(vcpu->kvm)) { in sev_post_set_cr3()
1659 svm->vmcb->save.cr3 = cr3; in sev_post_set_cr3()
1660 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in sev_post_set_cr3()
1676 if (vcpu->arch.efer & EFER_LME) { in svm_set_cr0()
1678 vcpu->arch.efer |= EFER_LMA; in svm_set_cr0()
1679 if (!vcpu->arch.guest_state_protected) in svm_set_cr0()
1680 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1684 vcpu->arch.efer &= ~EFER_LMA; in svm_set_cr0()
1685 if (!vcpu->arch.guest_state_protected) in svm_set_cr0()
1686 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1690 vcpu->arch.cr0 = cr0; in svm_set_cr0()
1699 * re-enable caching here because the QEMU bios in svm_set_cr0()
1700 * does not do it - this results in some delay at in svm_set_cr0()
1703 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in svm_set_cr0()
1706 svm->vmcb->save.cr0 = hcr0; in svm_set_cr0()
1707 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1710 * SEV-ES guests must always keep the CR intercepts cleared. CR in svm_set_cr0()
1713 if (sev_es_guest(vcpu->kvm)) in svm_set_cr0()
1734 unsigned long old_cr4 = vcpu->arch.cr4; in svm_set_cr4()
1736 vcpu->arch.cr4 = cr4; in svm_set_cr4()
1744 to_svm(vcpu)->vmcb->save.cr4 = cr4; in svm_set_cr4()
1745 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); in svm_set_cr4()
1748 vcpu->arch.cpuid_dynamic_bits_dirty = true; in svm_set_cr4()
1757 s->base = var->base; in svm_set_segment()
1758 s->limit = var->limit; in svm_set_segment()
1759 s->selector = var->selector; in svm_set_segment()
1760 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); in svm_set_segment()
1761 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; in svm_set_segment()
1762 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; in svm_set_segment()
1763 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; in svm_set_segment()
1764 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; in svm_set_segment()
1765 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; in svm_set_segment()
1766 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; in svm_set_segment()
1767 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; in svm_set_segment()
1777 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
1779 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
1788 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { in svm_update_exception_bitmap()
1789 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in svm_update_exception_bitmap()
1796 if (sd->next_asid > sd->max_asid) { in new_asid()
1797 ++sd->asid_generation; in new_asid()
1798 sd->next_asid = sd->min_asid; in new_asid()
1799 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
1800 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
1803 svm->current_vmcb->asid_generation = sd->asid_generation; in new_asid()
1804 svm->asid = sd->next_asid++; in new_asid()
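
new_asid() above hands out per-CPU ASIDs from a generation-tagged pool: when next_asid passes max_asid, the generation is bumped, allocation restarts at min_asid (the range below min_asid is reserved for SEV guests, per the min_asid assignment in svm_enable_virtualization_cpu() above), and a full ASID flush is requested via tlb_ctl. A self-contained model of that recycling scheme, with invented names and without the VMCB dirty tracking:

#include <stdint.h>
#include <stdio.h>

struct example_asid_pool {
	uint32_t min_asid, max_asid;
	uint32_t next_asid;
	uint64_t generation;     /* vCPUs tagged with an older value must re-run this */
};

static uint32_t example_new_asid(struct example_asid_pool *p, int *flush_all_asids)
{
	*flush_all_asids = 0;
	if (p->next_asid > p->max_asid) {
		p->generation++;                /* invalidates every ASID handed out so far */
		p->next_asid = p->min_asid;
		*flush_all_asids = 1;           /* corresponds to TLB_CONTROL_FLUSH_ALL_ASID */
	}
	return p->next_asid++;
}

int main(void)
{
	struct example_asid_pool pool = { .min_asid = 1, .max_asid = 3, .next_asid = 1 };
	int flush;

	for (int i = 0; i < 5; i++)
		printf("asid=%u flush=%d gen=%llu\n",
		       example_new_asid(&pool, &flush), flush,
		       (unsigned long long)pool.generation);
	return 0;
}
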
1809 struct vmcb *vmcb = to_svm(vcpu)->vmcb; in svm_set_dr6()
1811 if (vcpu->arch.guest_state_protected) in svm_set_dr6()
1814 if (unlikely(value != vmcb->save.dr6)) { in svm_set_dr6()
1815 vmcb->save.dr6 = value; in svm_set_dr6()
1824 if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm))) in svm_sync_dirty_debug_regs()
1827 get_debugreg(vcpu->arch.db[0], 0); in svm_sync_dirty_debug_regs()
1828 get_debugreg(vcpu->arch.db[1], 1); in svm_sync_dirty_debug_regs()
1829 get_debugreg(vcpu->arch.db[2], 2); in svm_sync_dirty_debug_regs()
1830 get_debugreg(vcpu->arch.db[3], 3); in svm_sync_dirty_debug_regs()
1832 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, in svm_sync_dirty_debug_regs()
1835 vcpu->arch.dr6 = svm->vmcb->save.dr6; in svm_sync_dirty_debug_regs()
1836 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1837 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in svm_sync_dirty_debug_regs()
1845 if (vcpu->arch.guest_state_protected) in svm_set_dr7()
1848 svm->vmcb->save.dr7 = value; in svm_set_dr7()
1849 vmcb_mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
1856 u64 fault_address = svm->vmcb->control.exit_info_2; in pf_interception()
1857 u64 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
1861 svm->vmcb->control.insn_bytes : NULL, in pf_interception()
1862 svm->vmcb->control.insn_len); in pf_interception()
1870 u64 fault_address = svm->vmcb->control.exit_info_2; in npf_interception()
1871 u64 error_code = svm->vmcb->control.exit_info_1; in npf_interception()
1875 * with KVM-defined synthetic flags. Clear the flags and continue on, in npf_interception()
1882 if (sev_snp_guest(vcpu->kvm) && (error_code & PFERR_GUEST_ENC_MASK)) in npf_interception()
1888 svm->vmcb->control.insn_bytes : NULL, in npf_interception()
1889 svm->vmcb->control.insn_len); in npf_interception()
1899 struct kvm_run *kvm_run = vcpu->run; in db_interception()
1902 if (!(vcpu->guest_debug & in db_interception()
1904 !svm->nmi_singlestep) { in db_interception()
1905 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; in db_interception()
1910 if (svm->nmi_singlestep) { in db_interception()
1916 if (vcpu->guest_debug & in db_interception()
1918 kvm_run->exit_reason = KVM_EXIT_DEBUG; in db_interception()
1919 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; in db_interception()
1920 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; in db_interception()
1921 kvm_run->debug.arch.pc = in db_interception()
1922 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
1923 kvm_run->debug.arch.exception = DB_VECTOR; in db_interception()
1933 struct kvm_run *kvm_run = vcpu->run; in bp_interception()
1935 kvm_run->exit_reason = KVM_EXIT_DEBUG; in bp_interception()
1936 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
1937 kvm_run->debug.arch.exception = BP_VECTOR; in bp_interception()
1978 /* Flush tlb to evict multi-match entries */ in is_erratum_383()
2012 struct kvm_run *kvm_run = vcpu->run; in shutdown_interception()
2024 * The VM save area for SEV-ES guests has already been encrypted so it in shutdown_interception()
2027 if (!sev_es_guest(vcpu->kvm)) { in shutdown_interception()
2028 clear_page(svm->vmcb); in shutdown_interception()
2036 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; in shutdown_interception()
2043 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
2047 ++vcpu->stat.io_exits; in io_interception()
2054 if (sev_es_guest(vcpu->kvm)) in io_interception()
2060 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
2077 ++vcpu->stat.irq_exits; in intr_interception()
2091 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_vmsave_interception()
2093 if (ret == -EINVAL) in vmload_vmsave_interception()
2103 svm_copy_vmloadsave_state(svm->vmcb, vmcb12); in vmload_vmsave_interception()
2104 svm->sysenter_eip_hi = 0; in vmload_vmsave_interception()
2105 svm->sysenter_esp_hi = 0; in vmload_vmsave_interception()
2107 svm_copy_vmloadsave_state(vmcb12, svm->vmcb); in vmload_vmsave_interception()
2143 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in svm_instr_opcode()
2145 if (ctxt->b != 0x1 || ctxt->opcode_len != 2) in svm_instr_opcode()
2148 switch (ctxt->modrm) { in svm_instr_opcode()
2156 break; in svm_instr_opcode()
2178 /* Returns '1' or -errno on failure, '0' on success. */ in emulate_svm_instr()
2190 * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2198 u32 error_code = svm->vmcb->control.exit_info_1; in gp_interception()
2224 if (svm->vmcb->save.rax & ~PAGE_MASK) in gp_interception()
2250 if (svm->vcpu.arch.smi_pending || in svm_set_gif()
2251 svm->vcpu.arch.nmi_pending || in svm_set_gif()
2252 kvm_cpu_has_injectable_intr(&svm->vcpu) || in svm_set_gif()
2253 kvm_apic_has_pending_init_or_sipi(&svm->vcpu)) in svm_set_gif()
2254 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_set_gif()
2301 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); in invlpga_interception()
2311 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); in skinit_interception()
2322 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2324 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2326 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2328 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2332 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2334 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2337 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2348 vcpu->arch.nmi_injected = false; in task_switch_interception()
2349 break; in task_switch_interception()
2351 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2355 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2358 break; in task_switch_interception()
2362 break; in task_switch_interception()
2364 break; in task_switch_interception()
2377 int_vec = -1; in task_switch_interception()
2385 if (!sev_es_guest(svm->vcpu.kvm)) in svm_clr_iret_intercept()
2391 if (!sev_es_guest(svm->vcpu.kvm)) in svm_set_iret_intercept()
2399 WARN_ON_ONCE(sev_es_guest(vcpu->kvm)); in iret_interception()
2401 ++vcpu->stat.nmi_window_exits; in iret_interception()
2402 svm->awaiting_iret_completion = true; in iret_interception()
2405 svm->nmi_iret_rip = kvm_rip_read(vcpu); in iret_interception()
2416 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); in invlpg_interception()
2434 unsigned long cr0 = vcpu->arch.cr0; in check_selective_cr0_intercepted()
2438 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) in check_selective_cr0_intercepted()
2445 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2464 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2467 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2468 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2469 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; in cr_interception()
2471 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2475 cr -= 16; in cr_interception()
2485 break; in cr_interception()
2488 break; in cr_interception()
2491 break; in cr_interception()
2494 break; in cr_interception()
2504 break; in cr_interception()
2506 val = vcpu->arch.cr2; in cr_interception()
2507 break; in cr_interception()
2510 break; in cr_interception()
2513 break; in cr_interception()
2516 break; in cr_interception()
2535 new_value = (unsigned long)svm->vmcb->control.exit_info_1; in cr_trap()
2537 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; in cr_trap()
2544 break; in cr_trap()
2550 break; in cr_trap()
2553 break; in cr_trap()
2570 * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT in dr_interception()
2573 if (sev_es_guest(vcpu->kvm)) in dr_interception()
2576 if (vcpu->guest_debug == 0) { in dr_interception()
2583 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
2590 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
2591 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
2593 dr -= 16; in dr_interception()
2613 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; in cr8_write_interception()
2625 * whether the guest has X86_FEATURE_SVM - this avoids a failure if in efer_trap()
2630 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; in efer_trap()
2644 break; in svm_get_feature_msr()
2655 return sev_es_guest(vcpu->kvm) && vcpu->arch.guest_state_protected && in sev_es_prevent_msr_access()
2656 msr_info->index != MSR_IA32_XSS && in sev_es_prevent_msr_access()
2657 !msr_write_intercepted(vcpu, msr_info->index); in sev_es_prevent_msr_access()
2665 msr_info->data = 0; in svm_get_msr()
2666 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in svm_get_msr()
2669 switch (msr_info->index) { in svm_get_msr()
2671 if (!msr_info->host_initiated && in svm_get_msr()
2674 msr_info->data = svm->tsc_ratio_msr; in svm_get_msr()
2675 break; in svm_get_msr()
2677 msr_info->data = svm->vmcb01.ptr->save.star; in svm_get_msr()
2678 break; in svm_get_msr()
2681 msr_info->data = svm->vmcb01.ptr->save.lstar; in svm_get_msr()
2682 break; in svm_get_msr()
2684 msr_info->data = svm->vmcb01.ptr->save.cstar; in svm_get_msr()
2685 break; in svm_get_msr()
2687 msr_info->data = svm->vmcb01.ptr->save.gs.base; in svm_get_msr()
2688 break; in svm_get_msr()
2690 msr_info->data = svm->vmcb01.ptr->save.fs.base; in svm_get_msr()
2691 break; in svm_get_msr()
2693 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; in svm_get_msr()
2694 break; in svm_get_msr()
2696 msr_info->data = svm->vmcb01.ptr->save.sfmask; in svm_get_msr()
2697 break; in svm_get_msr()
2700 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; in svm_get_msr()
2701 break; in svm_get_msr()
2703 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; in svm_get_msr()
2705 msr_info->data |= (u64)svm->sysenter_eip_hi << 32; in svm_get_msr()
2706 break; in svm_get_msr()
2708 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; in svm_get_msr()
2710 msr_info->data |= (u64)svm->sysenter_esp_hi << 32; in svm_get_msr()
2711 break; in svm_get_msr()
2713 msr_info->data = svm->vmcb->save.s_cet; in svm_get_msr()
2714 break; in svm_get_msr()
2716 msr_info->data = svm->vmcb->save.isst_addr; in svm_get_msr()
2717 break; in svm_get_msr()
2719 msr_info->data = svm->vmcb->save.ssp; in svm_get_msr()
2720 break; in svm_get_msr()
2722 msr_info->data = svm->tsc_aux; in svm_get_msr()
2723 break; in svm_get_msr()
2725 msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl; in svm_get_msr()
2726 break; in svm_get_msr()
2728 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from; in svm_get_msr()
2729 break; in svm_get_msr()
2731 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to; in svm_get_msr()
2732 break; in svm_get_msr()
2734 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from; in svm_get_msr()
2735 break; in svm_get_msr()
2737 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to; in svm_get_msr()
2738 break; in svm_get_msr()
2740 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
2741 break; in svm_get_msr()
2743 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
2744 break; in svm_get_msr()
2746 if (!msr_info->host_initiated && in svm_get_msr()
2751 msr_info->data = svm->vmcb->save.spec_ctrl; in svm_get_msr()
2753 msr_info->data = svm->spec_ctrl; in svm_get_msr()
2754 break; in svm_get_msr()
2756 if (!msr_info->host_initiated && in svm_get_msr()
2760 msr_info->data = svm->virt_spec_ctrl; in svm_get_msr()
2761 break; in svm_get_msr()
2772 msr_info->data = 0; in svm_get_msr()
2776 msr_info->data = 0x1E; in svm_get_msr()
2778 break; in svm_get_msr()
2780 msr_info->data = svm->msr_decfg; in svm_get_msr()
2781 break; in svm_get_msr()
2791 if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) in svm_complete_emulated_msr()
2808 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
2811 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
2812 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
2814 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
2817 if (svm_dis && (vcpu->arch.efer & EFER_SVME)) in svm_set_vm_cr()
2828 u32 ecx = msr->index; in svm_set_msr()
2829 u64 data = msr->data; in svm_set_msr()
2832 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in svm_set_msr()
2839 if (!msr->host_initiated) in svm_set_msr()
2849 if (data != 0 && data != svm->tsc_ratio_msr) in svm_set_msr()
2851 break; in svm_set_msr()
2857 svm->tsc_ratio_msr = data; in svm_set_msr()
2863 break; in svm_set_msr()
2867 break; in svm_set_msr()
2869 svm->vmcb01.ptr->save.g_pat = data; in svm_set_msr()
2872 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_set_msr()
2873 break; in svm_set_msr()
2875 if (!msr->host_initiated && in svm_set_msr()
2883 svm->vmcb->save.spec_ctrl = data; in svm_set_msr()
2885 svm->spec_ctrl = data; in svm_set_msr()
2887 break; in svm_set_msr()
2890 * For non-nested: in svm_set_msr()
2891 * When it's written (to non-zero) for the first time, pass in svm_set_msr()
2901 break; in svm_set_msr()
2903 if (!msr->host_initiated && in svm_set_msr()
2910 svm->virt_spec_ctrl = data; in svm_set_msr()
2911 break; in svm_set_msr()
2913 svm->vmcb01.ptr->save.star = data; in svm_set_msr()
2914 break; in svm_set_msr()
2917 svm->vmcb01.ptr->save.lstar = data; in svm_set_msr()
2918 break; in svm_set_msr()
2920 svm->vmcb01.ptr->save.cstar = data; in svm_set_msr()
2921 break; in svm_set_msr()
2923 svm->vmcb01.ptr->save.gs.base = data; in svm_set_msr()
2924 break; in svm_set_msr()
2926 svm->vmcb01.ptr->save.fs.base = data; in svm_set_msr()
2927 break; in svm_set_msr()
2929 svm->vmcb01.ptr->save.kernel_gs_base = data; in svm_set_msr()
2930 break; in svm_set_msr()
2932 svm->vmcb01.ptr->save.sfmask = data; in svm_set_msr()
2933 break; in svm_set_msr()
2936 svm->vmcb01.ptr->save.sysenter_cs = data; in svm_set_msr()
2937 break; in svm_set_msr()
2939 svm->vmcb01.ptr->save.sysenter_eip = (u32)data; in svm_set_msr()
2947 svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; in svm_set_msr()
2948 break; in svm_set_msr()
2950 svm->vmcb01.ptr->save.sysenter_esp = (u32)data; in svm_set_msr()
2951 svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; in svm_set_msr()
2952 break; in svm_set_msr()
2954 svm->vmcb->save.s_cet = data; in svm_set_msr()
2955 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); in svm_set_msr()
2956 break; in svm_set_msr()
2958 svm->vmcb->save.isst_addr = data; in svm_set_msr()
2959 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); in svm_set_msr()
2960 break; in svm_set_msr()
2962 svm->vmcb->save.ssp = data; in svm_set_msr()
2963 vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); in svm_set_msr()
2964 break; in svm_set_msr()
2967 * TSC_AUX is always virtualized for SEV-ES guests when the in svm_set_msr()
2972 if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm)) in svm_set_msr()
2973 break; in svm_set_msr()
2980 ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); in svm_set_msr()
2983 break; in svm_set_msr()
2985 svm->tsc_aux = data; in svm_set_msr()
2986 break; in svm_set_msr()
2990 break; in svm_set_msr()
3005 svm_get_lbr_vmcb(svm)->save.dbgctl = data; in svm_set_msr()
3007 break; in svm_set_msr()
3015 if (!msr->host_initiated && !page_address_valid(vcpu, data)) in svm_set_msr()
3018 svm->nested.hsave_msr = data & PAGE_MASK; in svm_set_msr()
3019 break; in svm_set_msr()
3024 break; in svm_set_msr()
3034 svm->msr_decfg = data; in svm_set_msr()
3035 break; in svm_set_msr()
3045 if (to_svm(vcpu)->vmcb->control.exit_info_1) in msr_interception()
3059 * requesting the IRQ window and we have to re-enable it. in interrupt_window_interception()
3066 * AVIC still inhibited due to per-cpu AVIC inhibition. in interrupt_window_interception()
3068 kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); in interrupt_window_interception()
3070 ++vcpu->stat.irq_window_exits; in interrupt_window_interception()
3078 * CPL is not made available for an SEV-ES guest, therefore in pause_interception()
3079 * vcpu->arch.preempted_in_kernel can never be true. Just in pause_interception()
3082 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; in pause_interception()
3106 type = svm->vmcb->control.exit_info_2; in invpcid_interception()
3107 gva = svm->vmcb->control.exit_info_1; in invpcid_interception()
3110 * FIXME: Perform segment checks for 32-bit mode, and inject #SS if the in invpcid_interception()
3129 * execute the bus-locking instruction. Set the bus lock counter to '1' in complete_userspace_buslock()
3132 if (kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip)) in complete_userspace_buslock()
3133 svm->vmcb->control.bus_lock_counter = 1; in complete_userspace_buslock()
3142 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; in bus_lock_exit()
3143 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; in bus_lock_exit()
3145 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu); in bus_lock_exit()
3146 vcpu->arch.complete_userspace_io = complete_userspace_buslock; in bus_lock_exit()
3149 svm->nested.ctl.bus_lock_rip = vcpu->arch.cui_linear_rip; in bus_lock_exit()
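
bus_lock_exit() above reports KVM_EXIT_X86_BUS_LOCK to userspace and records the guest's linear RIP; when userspace resumes the vCPU, complete_userspace_buslock() programs the VMCB bus lock counter to 1, but only if RIP has not moved, so the guest can execute that single bus-locking instruction without immediately exiting again. A self-contained sketch of the one-shot-allow pattern (invented names, linear-RIP handling reduced to a plain RIP compare):

#include <stdint.h>
#include <stdio.h>

struct example_vcpu {
	uint64_t rip;
	uint64_t buslock_rip;        /* linear RIP recorded at exit time */
	unsigned bus_lock_counter;   /* VMCB field: next bus lock exits once it hits 0 */
};

/* Exit path: remember where the bus-locking instruction lives. */
static void example_bus_lock_exit(struct example_vcpu *v)
{
	v->buslock_rip = v->rip;
	/* ...report KVM_EXIT_X86_BUS_LOCK to userspace here... */
}

/* Completion path: allow exactly one bus lock if the guest has not moved on. */
static void example_complete_userspace_buslock(struct example_vcpu *v)
{
	if (v->rip == v->buslock_rip)
		v->bus_lock_counter = 1;
}

int main(void)
{
	struct example_vcpu v = { .rip = 0x4000 };

	example_bus_lock_exit(&v);
	example_complete_userspace_buslock(&v);
	printf("bus_lock_counter = %u\n", v.bus_lock_counter);
	return 0;
}
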
3235 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb() local
3236 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3237 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; in dump_vmcb()
3247 vm_type = sev_snp_guest(vcpu->kvm) ? "SEV-SNP" : in dump_vmcb()
3248 sev_es_guest(vcpu->kvm) ? "SEV-ES" : in dump_vmcb()
3249 sev_guest(vcpu->kvm) ? "SEV" : "SVM"; in dump_vmcb()
3252 vm_type, vcpu->vcpu_id, svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); in dump_vmcb()
3253 pr_err("VMCB Control Area:\n"); in dump_vmcb()
3254 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); in dump_vmcb()
3255 pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16); in dump_vmcb()
3256 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); in dump_vmcb()
3257 pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16); in dump_vmcb()
3258 pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]); in dump_vmcb()
3259 pr_err("%-20s%08x %08x\n", "intercepts:", in dump_vmcb()
3260 control->intercepts[INTERCEPT_WORD3], in dump_vmcb()
3261 control->intercepts[INTERCEPT_WORD4]); in dump_vmcb()
3262 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); in dump_vmcb()
3263 pr_err("%-20s%d\n", "pause filter threshold:", in dump_vmcb()
3264 control->pause_filter_thresh); in dump_vmcb()
3265 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); in dump_vmcb()
3266 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); in dump_vmcb()
3267 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); in dump_vmcb()
3268 pr_err("%-20s%d\n", "asid:", control->asid); in dump_vmcb()
3269 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); in dump_vmcb()
3270 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); in dump_vmcb()
3271 pr_err("%-20s%08x\n", "int_vector:", control->int_vector); in dump_vmcb()
3272 pr_err("%-20s%08x\n", "int_state:", control->int_state); in dump_vmcb()
3273 pr_err("%-20s%08x\n", "exit_code:", control->exit_code); in dump_vmcb()
3274 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); in dump_vmcb()
3275 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); in dump_vmcb()
3276 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); in dump_vmcb()
3277 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); in dump_vmcb()
3278 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); in dump_vmcb()
3279 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); in dump_vmcb()
3280 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar); in dump_vmcb()
3281 pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa); in dump_vmcb()
3282 pr_err("%-20s%08x\n", "event_inj:", control->event_inj); in dump_vmcb()
3283 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); in dump_vmcb()
3284 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext); in dump_vmcb()
3285 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); in dump_vmcb()
3286 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page); in dump_vmcb()
3287 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id); in dump_vmcb()
3288 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id); in dump_vmcb()
3289 pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa); in dump_vmcb()
3290 pr_err("%-20s%016llx\n", "allowed_sev_features:", control->allowed_sev_features); in dump_vmcb()
3291 pr_err("%-20s%016llx\n", "guest_sev_features:", control->guest_sev_features); in dump_vmcb()
3293 if (sev_es_guest(vcpu->kvm)) { in dump_vmcb()
3302 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3304 save->es.selector, save->es.attrib, in dump_vmcb()
3305 save->es.limit, save->es.base); in dump_vmcb()
3306 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3308 save->cs.selector, save->cs.attrib, in dump_vmcb()
3309 save->cs.limit, save->cs.base); in dump_vmcb()
3310 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3312 save->ss.selector, save->ss.attrib, in dump_vmcb()
3313 save->ss.limit, save->ss.base); in dump_vmcb()
3314 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3316 save->ds.selector, save->ds.attrib, in dump_vmcb()
3317 save->ds.limit, save->ds.base); in dump_vmcb()
3318 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3320 save01->fs.selector, save01->fs.attrib, in dump_vmcb()
3321 save01->fs.limit, save01->fs.base); in dump_vmcb()
3322 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3324 save01->gs.selector, save01->gs.attrib, in dump_vmcb()
3325 save01->gs.limit, save01->gs.base); in dump_vmcb()
3326 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3328 save->gdtr.selector, save->gdtr.attrib, in dump_vmcb()
3329 save->gdtr.limit, save->gdtr.base); in dump_vmcb()
3330 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3332 save01->ldtr.selector, save01->ldtr.attrib, in dump_vmcb()
3333 save01->ldtr.limit, save01->ldtr.base); in dump_vmcb()
3334 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3336 save->idtr.selector, save->idtr.attrib, in dump_vmcb()
3337 save->idtr.limit, save->idtr.base); in dump_vmcb()
3338 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3340 save01->tr.selector, save01->tr.attrib, in dump_vmcb()
3341 save01->tr.limit, save01->tr.base); in dump_vmcb()
3343 save->vmpl, save->cpl, save->efer); in dump_vmcb()
3344 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3345 "cr0:", save->cr0, "cr2:", save->cr2); in dump_vmcb()
3346 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3347 "cr3:", save->cr3, "cr4:", save->cr4); in dump_vmcb()
3348 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3349 "dr6:", save->dr6, "dr7:", save->dr7); in dump_vmcb()
3350 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3351 "rip:", save->rip, "rflags:", save->rflags); in dump_vmcb()
3352 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3353 "rsp:", save->rsp, "rax:", save->rax); in dump_vmcb()
3354 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3355 "s_cet:", save->s_cet, "ssp:", save->ssp); in dump_vmcb()
3356 pr_err("%-15s %016llx\n", in dump_vmcb()
3357 "isst_addr:", save->isst_addr); in dump_vmcb()
3358 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3359 "star:", save01->star, "lstar:", save01->lstar); in dump_vmcb()
3360 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3361 "cstar:", save01->cstar, "sfmask:", save01->sfmask); in dump_vmcb()
3362 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3363 "kernel_gs_base:", save01->kernel_gs_base, in dump_vmcb()
3364 "sysenter_cs:", save01->sysenter_cs); in dump_vmcb()
3365 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3366 "sysenter_esp:", save01->sysenter_esp, in dump_vmcb()
3367 "sysenter_eip:", save01->sysenter_eip); in dump_vmcb()
3368 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3369 "gpat:", save->g_pat, "dbgctl:", save->dbgctl); in dump_vmcb()
3370 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3371 "br_from:", save->br_from, "br_to:", save->br_to); in dump_vmcb()
3372 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3373 "excp_from:", save->last_excp_from, in dump_vmcb()
3374 "excp_to:", save->last_excp_to); in dump_vmcb()
3376 if (sev_es_guest(vcpu->kvm)) { in dump_vmcb()
3379 pr_err("%-15s %016llx\n", in dump_vmcb()
3380 "sev_features", vmsa->sev_features); in dump_vmcb()
3382 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3383 "pl0_ssp:", vmsa->pl0_ssp, "pl1_ssp:", vmsa->pl1_ssp); in dump_vmcb()
3384 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3385 "pl2_ssp:", vmsa->pl2_ssp, "pl3_ssp:", vmsa->pl3_ssp); in dump_vmcb()
3386 pr_err("%-15s %016llx\n", in dump_vmcb()
3387 "u_cet:", vmsa->u_cet); in dump_vmcb()
3389 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3390 "rax:", vmsa->rax, "rbx:", vmsa->rbx); in dump_vmcb()
3391 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3392 "rcx:", vmsa->rcx, "rdx:", vmsa->rdx); in dump_vmcb()
3393 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3394 "rsi:", vmsa->rsi, "rdi:", vmsa->rdi); in dump_vmcb()
3395 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3396 "rbp:", vmsa->rbp, "rsp:", vmsa->rsp); in dump_vmcb()
3397 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3398 "r8:", vmsa->r8, "r9:", vmsa->r9); in dump_vmcb()
3399 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3400 "r10:", vmsa->r10, "r11:", vmsa->r11); in dump_vmcb()
3401 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3402 "r12:", vmsa->r12, "r13:", vmsa->r13); in dump_vmcb()
3403 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3404 "r14:", vmsa->r14, "r15:", vmsa->r15); in dump_vmcb()
3405 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3406 "xcr0:", vmsa->xcr0, "xss:", vmsa->xss); in dump_vmcb()
3408 pr_err("%-15s %016llx %-13s %016lx\n", in dump_vmcb()
3409 "rax:", save->rax, "rbx:", in dump_vmcb()
3410 vcpu->arch.regs[VCPU_REGS_RBX]); in dump_vmcb()
3411 pr_err("%-15s %016lx %-13s %016lx\n", in dump_vmcb()
3412 "rcx:", vcpu->arch.regs[VCPU_REGS_RCX], in dump_vmcb()
3413 "rdx:", vcpu->arch.regs[VCPU_REGS_RDX]); in dump_vmcb()
3414 pr_err("%-15s %016lx %-13s %016lx\n", in dump_vmcb()
3415 "rsi:", vcpu->arch.regs[VCPU_REGS_RSI], in dump_vmcb()
3416 "rdi:", vcpu->arch.regs[VCPU_REGS_RDI]); in dump_vmcb()
3417 pr_err("%-15s %016lx %-13s %016llx\n", in dump_vmcb()
3418 "rbp:", vcpu->arch.regs[VCPU_REGS_RBP], in dump_vmcb()
3419 "rsp:", save->rsp); in dump_vmcb()
3421 pr_err("%-15s %016lx %-13s %016lx\n", in dump_vmcb()
3422 "r8:", vcpu->arch.regs[VCPU_REGS_R8], in dump_vmcb()
3423 "r9:", vcpu->arch.regs[VCPU_REGS_R9]); in dump_vmcb()
3424 pr_err("%-15s %016lx %-13s %016lx\n", in dump_vmcb()
3425 "r10:", vcpu->arch.regs[VCPU_REGS_R10], in dump_vmcb()
3426 "r11:", vcpu->arch.regs[VCPU_REGS_R11]); in dump_vmcb()
3427 pr_err("%-15s %016lx %-13s %016lx\n", in dump_vmcb()
3428 "r12:", vcpu->arch.regs[VCPU_REGS_R12], in dump_vmcb()
3429 "r13:", vcpu->arch.regs[VCPU_REGS_R13]); in dump_vmcb()
3430 pr_err("%-15s %016lx %-13s %016lx\n", in dump_vmcb()
3431 "r14:", vcpu->arch.regs[VCPU_REGS_R14], in dump_vmcb()
3432 "r15:", vcpu->arch.regs[VCPU_REGS_R15]); in dump_vmcb()
3437 if (sev_es_guest(vcpu->kvm)) in dump_vmcb()
3451 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in svm_handle_invalid_exit()
3452 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; in svm_handle_invalid_exit()
3453 vcpu->run->internal.ndata = 2; in svm_handle_invalid_exit()
3454 vcpu->run->internal.data[0] = exit_code; in svm_handle_invalid_exit()
3455 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; in svm_handle_invalid_exit()
3487 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; in svm_get_exit_info() local
3489 *reason = control->exit_code; in svm_get_exit_info()
3490 *info1 = control->exit_info_1; in svm_get_exit_info()
3491 *info2 = control->exit_info_2; in svm_get_exit_info()
3492 *intr_info = control->exit_int_info; in svm_get_exit_info()
3495 *error_code = control->exit_int_info_err; in svm_get_exit_info()
3503 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; in svm_get_entry_info() local
3505 *intr_info = control->event_inj; in svm_get_entry_info()
3509 *error_code = control->event_inj_err; in svm_get_entry_info()
3518 struct kvm_run *kvm_run = vcpu->run; in svm_handle_exit()
3519 u32 exit_code = svm->vmcb->control.exit_code; in svm_handle_exit()
3521 /* SEV-ES guests must use the CR write traps to track CR registers. */ in svm_handle_exit()
3522 if (!sev_es_guest(vcpu->kvm)) { in svm_handle_exit()
3524 vcpu->arch.cr0 = svm->vmcb->save.cr0; in svm_handle_exit()
3526 vcpu->arch.cr3 = svm->vmcb->save.cr3; in svm_handle_exit()
3543 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in svm_handle_exit()
3544 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; in svm_handle_exit()
3545 kvm_run->fail_entry.hardware_entry_failure_reason in svm_handle_exit()
3546 = svm->vmcb->control.exit_code; in svm_handle_exit()
3547 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; in svm_handle_exit()
3560 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); in pre_svm_run()
3568 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { in pre_svm_run()
3569 svm->current_vmcb->asid_generation = 0; in pre_svm_run()
3570 vmcb_mark_all_dirty(svm->vmcb); in pre_svm_run()
3571 svm->current_vmcb->cpu = vcpu->cpu; in pre_svm_run()
3574 if (sev_guest(vcpu->kvm)) in pre_svm_run()
3575 return pre_sev_run(svm, vcpu->cpu); in pre_svm_run()
3578 if (svm->current_vmcb->asid_generation != sd->asid_generation) in pre_svm_run()
3588 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3590 if (svm->nmi_l1_to_l2) in svm_inject_nmi()
3599 svm->nmi_masked = true; in svm_inject_nmi()
3602 ++vcpu->stat.nmi_injections; in svm_inject_nmi()
3612 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); in svm_is_vnmi_pending()
3622 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK) in svm_set_vnmi_pending()
3625 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK; in svm_set_vnmi_pending()
3626 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vnmi_pending()
3633 ++vcpu->stat.nmi_injections; in svm_set_vnmi_pending()
3643 if (vcpu->arch.interrupt.soft) { in svm_inject_irq()
3652 trace_kvm_inj_virq(vcpu->arch.interrupt.nr, in svm_inject_irq()
3653 vcpu->arch.interrupt.soft, reinjected); in svm_inject_irq()
3654 ++vcpu->stat.irq_injections; in svm_inject_irq()
3656 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_inject_irq()
3664 * apic->apicv_active must be read after vcpu->mode. in svm_complete_interrupt_delivery()
3667 bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE); in svm_complete_interrupt_delivery()
3669 /* Note, this is called iff the local APIC is in-kernel. */ in svm_complete_interrupt_delivery()
3670 if (!READ_ONCE(vcpu->arch.apic->apicv_active)) { in svm_complete_interrupt_delivery()
3677 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector); in svm_complete_interrupt_delivery()
3700 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in in svm_deliver_interrupt()
3707 svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector); in svm_deliver_interrupt()
3715 * SEV-ES guests must always keep the CR intercepts cleared. CR in svm_update_cr8_intercept()
3718 if (sev_es_guest(vcpu->kvm)) in svm_update_cr8_intercept()
3726 if (irr == -1) in svm_update_cr8_intercept()
3738 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK; in svm_get_nmi_mask()
3740 return svm->nmi_masked; in svm_get_nmi_mask()
3749 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK; in svm_set_nmi_mask()
3751 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK; in svm_set_nmi_mask()
3754 svm->nmi_masked = masked; in svm_set_nmi_mask()
3765 struct vmcb *vmcb = svm->vmcb; in svm_nmi_blocked()
3776 return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK; in svm_nmi_blocked()
3782 if (svm->nested.nested_run_pending) in svm_nmi_allowed()
3783 return -EBUSY; in svm_nmi_allowed()
3788 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ in svm_nmi_allowed()
3790 return -EBUSY; in svm_nmi_allowed()
3797 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_blocked()
3804 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) in svm_interrupt_blocked()
3805 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) in svm_interrupt_blocked()
3817 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); in svm_interrupt_blocked()
3824 if (svm->nested.nested_run_pending) in svm_interrupt_allowed()
3825 return -EBUSY; in svm_interrupt_allowed()
3831 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, in svm_interrupt_allowed()
3835 return -EBUSY; in svm_interrupt_allowed()
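Pulling the matched fragments together, the allowed/blocked split for IRQs looks roughly as follows (a sketch; svm_interrupt_blocked() performs the GIF, EFLAGS.IF / V_INTR_MASKING and interrupt-shadow checks shown above):

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_interrupt_blocked(vcpu))
		return 0;

	/*
	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
	 * e.g. if it arrived asynchronously after checking nested events.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
		return -EBUSY;

	return 1;
}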
3864 kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); in svm_enable_irq_window()
3878 * If KVM has already intercepted IRET, then single-step over the IRET, in svm_enable_nmi_window()
3882 * are masked, as KVM allows at most one to-be-injected NMI and one in svm_enable_nmi_window()
3887 * inject the NMI. In those situations, KVM needs to single-step over in svm_enable_nmi_window()
3893 if (!svm->awaiting_iret_completion) in svm_enable_nmi_window()
3898 * SEV-ES guests are responsible for signaling when a vCPU is ready to in svm_enable_nmi_window()
3899 * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e. in svm_enable_nmi_window()
3900 * KVM can't intercept and single-step IRET to detect when NMIs are in svm_enable_nmi_window()
3903 * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware in svm_enable_nmi_window()
3904 * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not in svm_enable_nmi_window()
3907 if (sev_es_guest(vcpu->kvm)) in svm_enable_nmi_window()
3920 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); in svm_enable_nmi_window()
3921 svm->nmi_singlestep = true; in svm_enable_nmi_window()
3922 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in svm_enable_nmi_window()
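Once the guest executes IRET and the single-step #DB (or the IRET intercept) fires, the state set up here is undone. A sketch of the counterpart helper, which only clears TF/RF if KVM, rather than the guest or a userspace debugger, set them:

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Only clear the flag bits KVM itself injected. */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}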
3932 * entries, and thus is a superset of Hyper-V's fine grained flushing. in svm_flush_tlb_asid()
3940 * unconditionally does a TLB flush on both nested VM-Enter and nested in svm_flush_tlb_asid()
3941 * VM-Exit (via kvm_mmu_reset_context()). in svm_flush_tlb_asid()
3944 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb_asid()
3946 svm->current_vmcb->asid_generation--; in svm_flush_tlb_asid()
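The two statements above are the arms of a CPU-feature check: with FLUSHBYASID the current ASID is flushed directly via tlb_ctl, otherwise the generation is decremented so pre_svm_run() hands out a new ASID on the next entry. A hedged sketch of that tail of svm_flush_tlb_asid():

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->current_vmcb->asid_generation--;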
3951 hpa_t root_tdp = vcpu->arch.mmu->root.hpa; in svm_flush_tlb_current()
3954 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly in svm_flush_tlb_current()
3968 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB in svm_flush_tlb_all()
3975 hv_flush_remote_tlbs(vcpu->kvm); in svm_flush_tlb_all()
3984 invlpga(gva, svm->vmcb->control.asid); in svm_flush_tlb_gva()
3995 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
4009 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
4010 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
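For completeness, a hedged reconstruction of the writer side: unless the TPR is virtualized for the nested guest or handled by APICv, the in-kernel APIC's TPR is mirrored into V_TPR before entry (sync_cr8_to_lapic() above reads it back after exit):

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (nested_svm_virtualize_tpr(vcpu) || kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}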
4021 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's in svm_complete_soft_interrupt()
4024 * needs to manually set next_rip for re-injection. Unlike the !nrips in svm_complete_soft_interrupt()
4025 * case below, this needs to be done if and only if KVM is re-injecting in svm_complete_soft_interrupt()
4030 kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
4031 svm->vmcb->control.next_rip = svm->soft_int_next_rip; in svm_complete_soft_interrupt()
4041 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
4042 kvm_rip_write(vcpu, svm->soft_int_old_rip); in svm_complete_soft_interrupt()
4050 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
4051 bool nmi_l1_to_l2 = svm->nmi_l1_to_l2; in svm_complete_interrupts()
4052 bool soft_int_injected = svm->soft_int_injected; in svm_complete_interrupts()
4054 svm->nmi_l1_to_l2 = false; in svm_complete_interrupts()
4055 svm->soft_int_injected = false; in svm_complete_interrupts()
4061 if (svm->awaiting_iret_completion && in svm_complete_interrupts()
4062 kvm_rip_read(vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
4063 svm->awaiting_iret_completion = false; in svm_complete_interrupts()
4064 svm->nmi_masked = false; in svm_complete_interrupts()
4068 vcpu->arch.nmi_injected = false; in svm_complete_interrupts()
4085 vcpu->arch.nmi_injected = true; in svm_complete_interrupts()
4086 svm->nmi_l1_to_l2 = nmi_l1_to_l2; in svm_complete_interrupts()
4087 break; in svm_complete_interrupts()
4092 * Never re-inject a #VC exception. in svm_complete_interrupts()
4095 break; in svm_complete_interrupts()
4098 error_code = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
4103 break; in svm_complete_interrupts()
4107 break; in svm_complete_interrupts()
4110 break; in svm_complete_interrupts()
4112 break; in svm_complete_interrupts()
4120 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection() local
4122 control->exit_int_info = control->event_inj; in svm_cancel_injection()
4123 control->exit_int_info_err = control->event_inj_err; in svm_cancel_injection()
4124 control->event_inj = 0; in svm_cancel_injection()
4130 if (to_kvm_sev_info(vcpu->kvm)->need_init) in svm_vcpu_pre_run()
4131 return -EINVAL; in svm_vcpu_pre_run()
4139 struct vmcb_control_area *control = &svm->vmcb->control; in svm_exit_handlers_fastpath() local
4145 if (!nrips || !control->next_rip) in svm_exit_handlers_fastpath()
4151 switch (control->exit_code) { in svm_exit_handlers_fastpath()
4153 if (!control->exit_info_1) in svm_exit_handlers_fastpath()
4154 break; in svm_exit_handlers_fastpath()
4161 break; in svm_exit_handlers_fastpath()
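A hedged reconstruction of the fastpath dispatch: without NRIPS (or a valid next_rip) nothing can be handled here, because SRCU isn't held and guest memory can't be read to decode and skip the instruction; exit_info_1 == 0 means RDMSR, which is never fast-pathed. Newer kernels add further cases (e.g. HLT); the sketch keeps only the MSR one:

static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	if (!nrips || !control->next_rip)
		return EXIT_FASTPATH_NONE;

	switch (control->exit_code) {
	case SVM_EXIT_MSR:
		if (!control->exit_info_1)
			break;	/* RDMSR: not handled in the fastpath. */
		return handle_fastpath_set_msr_irqoff(vcpu);
	default:
		break;
	}

	return EXIT_FASTPATH_NONE;
}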
4169 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); in svm_vcpu_enter_exit()
4188 if (sev_es_guest(vcpu->kvm)) in svm_vcpu_enter_exit()
4207 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
4208 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
4209 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
4217 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { in svm_vcpu_run()
4228 smp_send_reschedule(vcpu->cpu); in svm_vcpu_run()
4231 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in svm_vcpu_run()
4232 vcpu->run->fail_entry.hardware_entry_failure_reason = SVM_EXIT_ERR; in svm_vcpu_run()
4233 vcpu->run->fail_entry.cpu = vcpu->cpu; in svm_vcpu_run()
4239 if (unlikely(svm->asid != svm->vmcb->control.asid)) { in svm_vcpu_run()
4240 svm->vmcb->control.asid = svm->asid; in svm_vcpu_run()
4241 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in svm_vcpu_run()
4243 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
4245 svm_hv_update_vp_id(svm->vmcb, vcpu); in svm_vcpu_run()
4248 * Run with all-zero DR6 unless the guest can write DR6 freely, so that in svm_vcpu_run()
4253 svm_set_dr6(vcpu, vcpu->arch.dr6); in svm_vcpu_run()
4254 else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) in svm_vcpu_run()
4263 * VM-Exit), as running with the host's DEBUGCTL can negatively affect in svm_vcpu_run()
4266 if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && in svm_vcpu_run()
4267 vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) in svm_vcpu_run()
4268 update_debugctlmsr(svm->vmcb->save.dbgctl); in svm_vcpu_run()
4274 * it's non-zero. Since vmentry is serialising on affected CPUs, there in svm_vcpu_run()
4279 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl); in svm_vcpu_run()
4284 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl); in svm_vcpu_run()
4286 if (!sev_es_guest(vcpu->kvm)) { in svm_vcpu_run()
4287 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
4288 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
4289 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
4290 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
4292 vcpu->arch.regs_dirty = 0; in svm_vcpu_run()
4294 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
4297 if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && in svm_vcpu_run()
4298 vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) in svm_vcpu_run()
4299 update_debugctlmsr(vcpu->arch.host_debugctl); in svm_vcpu_run()
4306 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
4311 svm->next_rip = 0; in svm_vcpu_run()
4316 if (svm->nested.nested_run_pending && in svm_vcpu_run()
4317 svm->vmcb->control.exit_code != SVM_EXIT_ERR) in svm_vcpu_run()
4318 ++vcpu->stat.nested_run; in svm_vcpu_run()
4320 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
4323 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
4324 vmcb_mark_all_clean(svm->vmcb); in svm_vcpu_run()
4327 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
4328 vcpu->arch.apf.host_apf_flags = in svm_vcpu_run()
4331 vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; in svm_vcpu_run()
4337 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
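The DR6 handling in svm_vcpu_run() above funnels through svm_set_dr6(). A sketch consistent with those call sites (the guest_state_protected guard for SEV-ES is an assumption here), which skips the VMCB write when the value is unchanged so VMCB_DR isn't dirtied needlessly:

static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vmcb *vmcb = to_svm(vcpu)->vmcb;

	/* The VMSA of an SEV-ES guest is encrypted; nothing to write. */
	if (vcpu->arch.guest_state_protected)
		return;

	if (unlikely(value != vmcb->save.dr6)) {
		vmcb->save.dr6 = value;
		vmcb_mark_dirty(vmcb, VMCB_DR);
	}
}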
4355 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); in svm_load_mmu_pgd()
4356 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_load_mmu_pgd()
4360 cr3 = vcpu->arch.cr3; in svm_load_mmu_pgd()
4364 /* PCID in the guest should be impossible with a 32-bit MMU. */ in svm_load_mmu_pgd()
4369 svm->vmcb->save.cr3 = cr3; in svm_load_mmu_pgd()
4370 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_load_mmu_pgd()
4397 /* SEV-ES guests do not support SMM, so report false */ in svm_has_emulated_msr()
4400 break; in svm_has_emulated_msr()
4402 break; in svm_has_emulated_msr()
4419 * XSS on VM-Enter/VM-Exit. Failure to do so would effectively give in svm_vcpu_after_set_cpuid()
4434 if (sev_guest(vcpu->kvm)) in svm_vcpu_after_set_cpuid()
4515 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
4517 if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) in svm_check_intercept()
4520 icpt_info = x86_intercept_map[info->intercept]; in svm_check_intercept()
4527 if (info->intercept == x86_intercept_cr_read) in svm_check_intercept()
4528 icpt_info.exit_code += info->modrm_reg; in svm_check_intercept()
4529 break; in svm_check_intercept()
4533 if (info->intercept == x86_intercept_cr_write) in svm_check_intercept()
4534 icpt_info.exit_code += info->modrm_reg; in svm_check_intercept()
4537 info->intercept == x86_intercept_clts) in svm_check_intercept()
4538 break; in svm_check_intercept()
4540 if (!(vmcb12_is_intercept(&svm->nested.ctl, in svm_check_intercept()
4542 break; in svm_check_intercept()
4544 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; in svm_check_intercept()
4545 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; in svm_check_intercept()
4547 if (info->intercept == x86_intercept_lmsw) { in svm_check_intercept()
4550 /* lmsw can't clear PE - catch this here */ in svm_check_intercept()
4558 break; in svm_check_intercept()
4562 icpt_info.exit_code += info->modrm_reg; in svm_check_intercept()
4563 break; in svm_check_intercept()
4565 if (info->intercept == x86_intercept_wrmsr) in svm_check_intercept()
4566 vmcb->control.exit_info_1 = 1; in svm_check_intercept()
4568 vmcb->control.exit_info_1 = 0; in svm_check_intercept()
4569 break; in svm_check_intercept()
4575 if (info->rep_prefix != REPE_PREFIX) in svm_check_intercept()
4577 break; in svm_check_intercept()
4582 if (info->intercept == x86_intercept_in || in svm_check_intercept()
4583 info->intercept == x86_intercept_ins) { in svm_check_intercept()
4584 exit_info = ((info->src_val & 0xffff) << 16) | in svm_check_intercept()
4586 bytes = info->dst_bytes; in svm_check_intercept()
4588 exit_info = (info->dst_val & 0xffff) << 16; in svm_check_intercept()
4589 bytes = info->src_bytes; in svm_check_intercept()
4592 if (info->intercept == x86_intercept_outs || in svm_check_intercept()
4593 info->intercept == x86_intercept_ins) in svm_check_intercept()
4596 if (info->rep_prefix) in svm_check_intercept()
4603 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); in svm_check_intercept()
4605 vmcb->control.exit_info_1 = exit_info; in svm_check_intercept()
4606 vmcb->control.exit_info_2 = info->next_rip; in svm_check_intercept()
4608 break; in svm_check_intercept()
4611 break; in svm_check_intercept()
4616 vmcb->control.next_rip = info->next_rip; in svm_check_intercept()
4617 vmcb->control.exit_code = icpt_info.exit_code; in svm_check_intercept()
4629 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR) in svm_handle_exit_irqoff()
4630 vcpu->arch.at_instruction_boundary = true; in svm_handle_exit_irqoff()
4636 vcpu->arch.mcg_cap &= 0x1ff; in svm_setup_mce()
4654 if (svm->nested.nested_run_pending) in svm_smi_allowed()
4655 return -EBUSY; in svm_smi_allowed()
4660 /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */ in svm_smi_allowed()
4662 return -EBUSY; in svm_smi_allowed()
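As with NMIs and IRQs above, SMIs follow the same three-way result. A hedged sketch of the full function (compiled under CONFIG_KVM_SMM):

static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_smi_blocked(vcpu))
		return 0;

	/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
		return -EBUSY;

	return 1;
}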
4677 * 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is in svm_enter_smm()
4684 smram->smram64.svm_guest_flag = 1; in svm_enter_smm()
4685 smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; in svm_enter_smm()
4687 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_enter_smm()
4688 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_enter_smm()
4689 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_enter_smm()
4698 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save in svm_enter_smm()
4707 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) in svm_enter_smm()
4713 &svm->vmcb01.ptr->save); in svm_enter_smm()
4726 const struct kvm_smram_state_64 *smram64 = &smram->smram64; in svm_leave_smm()
4731 /* Non-zero if SMI arrived while vCPU was in guest mode. */ in svm_leave_smm()
4732 if (!smram64->svm_guest_flag) in svm_leave_smm()
4738 if (!(smram64->efer & EFER_SVME)) in svm_leave_smm()
4741 if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map)) in svm_leave_smm()
4745 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) in svm_leave_smm()
4756 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); in svm_leave_smm()
4762 vmcb_mark_all_dirty(svm->vmcb01.ptr); in svm_leave_smm()
4765 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); in svm_leave_smm()
4766 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); in svm_leave_smm()
4767 ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false); in svm_leave_smm()
4772 svm->nested.nested_run_pending = 1; in svm_leave_smm()
4803 if ((svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK) && in svm_check_emulate_instruction()
4808 if (!sev_guest(vcpu->kvm)) in svm_check_emulate_instruction()
4817 * Emulation is impossible for SEV-ES guests as KVM doesn't have access in svm_check_emulate_instruction()
4820 if (sev_es_guest(vcpu->kvm)) in svm_check_emulate_instruction()
4839 * this path should never be hit by a well-behaved guest, e.g. KVM in svm_check_emulate_instruction()
4841 * theoretically reachable, e.g. via unaccelerated fault-like AVIC in svm_check_emulate_instruction()
4848 * will attempt to re-inject the INT3/INTO and skip the instruction. in svm_check_emulate_instruction()
4890 * encountered a reserved/not-present #PF. in svm_check_emulate_instruction()
4903 error_code = svm->vmcb->control.exit_info_1; in svm_check_emulate_instruction()
4940 * accesses for SEV guest are encrypted, regardless of the C-Bit. The in svm_check_emulate_instruction()
4957 if (!sev_es_guest(vcpu->kvm)) in svm_vcpu_deliver_sipi_vector()
4973 int type = kvm->arch.vm_type; in svm_vm_init()
4977 kvm->arch.has_protected_state = in svm_vm_init()
4979 to_kvm_sev_info(kvm)->need_init = true; in svm_vm_init()
4981 kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM); in svm_vm_init()
4982 kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem; in svm_vm_init()
5287 return -EOPNOTSUPP; in svm_hardware_setup()
5331 * KVM's MMU doesn't support using 2-level paging for itself, and thus in svm_hardware_setup()
5332 * NPT isn't supported if the host is using 2-level paging since host in svm_hardware_setup()
5377 return -ENOMEM; in svm_hardware_setup()
5467 return -EOPNOTSUPP; in svm_init()