--- svm.c (a4721ced760684d1776bf31f7925aa41bb3f4846)
+++ svm.c (f14eec0a32038f2d6c05b8ea91c7701f65ce7418)
@@ -1,8 +1,8 @@
 #define pr_fmt(fmt) "SVM: " fmt

 #include <linux/kvm_host.h>

 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
--- 3316 unchanged lines hidden ---

@@ -3325,23 +3325,18 @@
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
 	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

-	local_irq_enable();
-
 	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);

-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
-
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
 	loadsegment(fs, svm->host.fs);
 #ifndef CONFIG_X86_32_LAZY_GS
 	loadsegment(gs, svm->host.gs);
 #endif
 #endif

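As a rough illustration of the pattern the SPEC_CTRL comment above describes, here is a minimal, compilable C sketch of a conditional MSR write. It is not the kernel's x86_spec_ctrl_set_guest() implementation; SPEC_CTRL_MSR_SKETCH, wrmsr_stub(), msr_shadow and the compare-against-host check are all invented stand-ins for illustration.

#include <stdint.h>

#define SPEC_CTRL_MSR_SKETCH 0x48      /* MSR_IA32_SPEC_CTRL's architectural index */

static uint64_t msr_shadow;            /* pretend MSR state for the sketch */

/* Stand-in for a real wrmsrl(); just records the value. */
static void wrmsr_stub(uint32_t msr, uint64_t val)
{
        (void)msr;
        msr_shadow = val;
}

/*
 * Write the guest's SPEC_CTRL only when a write is actually needed.
 * Per the comment in the diff, vmentry is serialising on affected
 * CPUs, so the conditional branch over the WRMSR cannot be
 * speculatively taken in a harmful way.
 */
static void spec_ctrl_set_guest_sketch(uint64_t guest_val, uint64_t host_val)
{
        if (guest_val != host_val)
                wrmsr_stub(SPEC_CTRL_MSR_SKETCH, guest_val);
}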
--- 13 unchanged lines hidden ---

@@ -3361,18 +3356,16 @@
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

 	reload_tss(vcpu);

-	local_irq_disable();
-
 	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))

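The interception check in the hunk above avoids an RDMSR on every vmexit: when guest writes to SPEC_CTRL are trapped, the cached svm->spec_ctrl was already updated in the WRMSR intercept handler, so only the untrapped case needs a read-back from the hardware MSR. A compilable sketch of that idea follows; struct vcpu_sketch, rdmsr_stub() and save_guest_spec_ctrl() are hypothetical names, not the kernel's code.

#include <stdbool.h>
#include <stdint.h>

struct vcpu_sketch {
        uint64_t spec_ctrl;         /* cached guest SPEC_CTRL value */
        bool wrmsr_intercepted;     /* does the MSR bitmap trap guest WRMSR? */
};

/* Stand-in for native_read_msr(MSR_IA32_SPEC_CTRL). */
static uint64_t rdmsr_stub(void)
{
        return 0;
}

/*
 * Mirrors the shape of:
 *   if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 *           svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 * If writes trap, the cache is already current, so the expensive
 * MSR read is skipped on the hot vmexit path.
 */
static void save_guest_spec_ctrl(struct vcpu_sketch *v)
{
        if (!v->wrmsr_intercepted)
                v->spec_ctrl = rdmsr_stub();
}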
--- 670 unchanged lines hidden ---
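Taken together, the new side of this diff removes the local_irq_enable()/local_irq_disable() pair around __svm_vcpu_run() and the vmexit_fill_RSB() call from the C vcpu-run path. Presumably that work now happens elsewhere, for example in the assembly vmentry/vmexit sequence, but the relevant hunks fall inside the hidden regions of this view, so the diff shown here only establishes the removal.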