svm.c: diff between commits e287bd005ad9d85dd6271dd795d3ecfb6bca46ad (old) and 9f2febf3f04daebdaaa5a43cfa20e3844905c0f9 (new)
 #define pr_fmt(fmt) "SVM: " fmt

 #include <linux/kvm_host.h>

 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"

--- 706 unchanged lines hidden (view full) ---

 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
 {
         u8 bit_write;
         unsigned long tmp;
         u32 offset;
         u32 *msrpm;

+        /*
+         * For non-nested case:
+         * If the L01 MSR bitmap does not intercept the MSR, then we need to
+         * save it.
+         *
+         * For nested case:
+         * If the L02 MSR bitmap does not intercept the MSR, then we need to
+         * save it.
+         */
         msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
                                       to_svm(vcpu)->msrpm;

         offset = svm_msrpm_offset(msr);
         bit_write = 2 * (msr & 0x0f) + 1;
         tmp = msrpm[offset];

         BUG_ON(offset == MSR_INVALID);
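
For reference, the bit arithmetic just above can be read in isolation: each MSR owns two consecutive bits in the permission bitmap (read intercept, then write intercept), so sixteen MSRs share one 32-bit word and 2 * (msr & 0x0f) + 1 selects the write bit. A minimal standalone sketch, assuming the word index is supplied by the caller (in the kernel it comes from svm_msrpm_offset()):

#include <stdbool.h>
#include <stdint.h>

/*
 * Hedged sketch, not the kernel function itself: locate the write-intercept
 * bit of an MSR inside an SVM MSR permission bitmap word.
 */
static bool msrpm_write_intercepted(const uint32_t *msrpm, uint32_t offset,
                                    uint32_t msr)
{
        uint8_t bit_write = 2 * (msr & 0x0f) + 1;  /* write bit for this MSR */

        return msrpm[offset] & (UINT32_C(1) << bit_write);
}

svm_vcpu_run() below relies on exactly this test, via msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL), to decide whether the guest could have written SPEC_CTRL directly.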

--- 3165 unchanged lines hidden (view full) ---

 {
         if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
             to_svm(vcpu)->vmcb->control.exit_info_1)
                 return handle_fastpath_set_msr_irqoff(vcpu);

         return EXIT_FASTPATH_NONE;
 }

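As an aside on the context lines above: for an SVM_EXIT_MSR exit, exit_info_1 distinguishes RDMSR (0) from WRMSR (non-zero), which is why the fastpath is only attempted for MSR writes. A self-contained sketch of that test, with the exit-code value taken from the AMD SVM exit-code list rather than from this file:

#include <stdbool.h>
#include <stdint.h>

#define SVM_EXIT_MSR 0x07c  /* assumption: value per the AMD SVM spec */

/* Hedged sketch: does this exit describe a WRMSR the fastpath may handle? */
static bool svm_exit_is_wrmsr(uint64_t exit_code, uint64_t exit_info_1)
{
        return exit_code == SVM_EXIT_MSR && exit_info_1 != 0;
}
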
-static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
 {
         struct vcpu_svm *svm = to_svm(vcpu);

         guest_state_enter_irqoff();

         if (sev_es_guest(vcpu->kvm))
-                __svm_sev_es_vcpu_run(svm);
+                __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
         else
-                __svm_vcpu_run(svm);
+                __svm_vcpu_run(svm, spec_ctrl_intercepted);

         guest_state_exit_irqoff();
 }

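Since the new flag is threaded straight through to the assembly entry points, their prototypes (implemented in vmenter.S and declared elsewhere, e.g. in svm.h) would presumably gain the same parameter. A declaration-only sketch based purely on the call sites above; the header location and exact spelling are assumptions:

#include <stdbool.h>

struct vcpu_svm;  /* opaque here; fully defined in the real headers */

/* Hedged sketch of the updated prototypes for the vmenter.S entry points. */
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
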
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
+        bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);

         trace_kvm_entry(vcpu);

         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

         /*

--- 42 unchanged lines hidden (view full) ---

          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
          * it's non-zero. Since vmentry is serialising on affected CPUs, there
          * is no need to worry about the conditional branch over the wrmsr
          * being speculatively taken.
          */
         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
                 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

-        svm_vcpu_enter_exit(vcpu);
+        svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);

-        /*
-         * We do not use IBRS in the kernel. If this vCPU has used the
-         * SPEC_CTRL MSR it may have left it on; save the value and
-         * turn it off. This is much more efficient than blindly adding
-         * it to the atomic save/restore list. Especially as the former
-         * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
-         *
-         * For non-nested case:
-         * If the L01 MSR bitmap does not intercept the MSR, then we need to
-         * save it.
-         *
-         * For nested case:
-         * If the L02 MSR bitmap does not intercept the MSR, then we need to
-         * save it.
-         */
-        if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) &&
-            unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
-                svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
         if (!sev_es_guest(vcpu->kvm))
                 reload_tss(vcpu);

         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
                 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

         if (!sev_es_guest(vcpu->kvm)) {
                 vcpu->arch.cr2 = svm->vmcb->save.cr2;

--- 1105 unchanged lines hidden ---
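
To make the removed hunk easier to follow, here is a C-level restatement of the work it used to do after svm_vcpu_enter_exit(): when SPEC_CTRL writes are not intercepted and the CPU does not virtualize the MSR (no X86_FEATURE_V_SPEC_CTRL), the guest may have changed SPEC_CTRL directly, so its value must be read back right after VMEXIT. With the spec_ctrl_intercepted flag now handed to __svm_vcpu_run()/__svm_sev_es_vcpu_run(), that read presumably happens in the vmenter.S assembly instead; the sketch below only restates the deleted C logic and is not the new implementation. The helper names and the stubbed MSR read are hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for native_read_msr(MSR_IA32_SPEC_CTRL); stubbed for this sketch. */
static uint64_t read_spec_ctrl_msr(void)
{
        return 0;
}

/*
 * Hedged sketch: save the guest's SPEC_CTRL right after VMEXIT, mirroring the
 * condition in the code removed above (*guest_spec_ctrl stands in for
 * svm->spec_ctrl here).
 */
static void save_guest_spec_ctrl(uint64_t *guest_spec_ctrl,
                                 bool has_v_spec_ctrl,
                                 bool spec_ctrl_intercepted)
{
        /*
         * Nothing to do if the CPU virtualizes SPEC_CTRL or if every guest
         * write was intercepted; in both cases the tracked value is already
         * current.
         */
        if (has_v_spec_ctrl || spec_ctrl_intercepted)
                return;

        /* The guest may have written SPEC_CTRL directly; capture its value. */
        *guest_spec_ctrl = read_spec_ctrl_msr();
}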