svm.c: 100c85421b52e41269ada88f7d71a6b8a06c7a11 -> c92be2fd8edf7b300a758c185fe032fd0257b886
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kvm_host.h>
 
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"

--- 1489 unchanged lines hidden ---

 	svm_free_nested(svm);
 
 	sev_free_vcpu(vcpu);
 
 	__free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
 
+static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+	return page_address(sd->save_area) + 0x400;
+}
+
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
 
 	if (sev_es_guest(vcpu->kvm))
 		sev_es_unmap_ghcb(svm);
 
 	if (svm->guest_state_loaded)
 		return;
 
 	/*
 	 * Save additional host state that will be restored on VMEXIT (sev-es)
 	 * or subsequent vmload of host save area.
 	 */
 	vmsave(sd->save_area_pa);
-	if (sev_es_guest(vcpu->kvm)) {
-		struct sev_es_save_area *hostsa;
-		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
-		sev_es_prepare_switch_to_guest(hostsa);
-	}
+	if (sev_es_guest(vcpu->kvm))
+		sev_es_prepare_switch_to_guest(sev_es_host_save_area(sd));
 
 	if (tsc_scaling)
 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 
 	/*
 	 * TSC_AUX is always virtualized for SEV-ES guests when the feature is
 	 * available. The user return MSR support is not required in this case
 	 * because TSC_AUX is restored on #VMEXIT from the host save area
 	 * (which has been initialized in svm_hardware_enable()).

--- 2559 unchanged lines hidden ---
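
Aside, not taken from the diff itself: the new sev_es_host_save_area() helper gives a typed name to the pointer arithmetic that svm_prepare_switch_to_guest() open-coded before this change; 0x400 matches the offset at which a VMCB's state save area begins, which is presumably why the SEV-ES host state sits at that offset within the per-CPU save area page. A minimal sketch of the equivalence, using a hypothetical function name purely for illustration:

/* Illustration only: computes the same address as sev_es_host_save_area(). */
static struct sev_es_save_area *hostsa_open_coded(struct svm_cpu_data *sd)
{
	return (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
}

For any sd, hostsa_open_coded(sd) returns the same pointer as sev_es_host_save_area(sd); the helper exists so the second region below can reuse it from svm_vcpu_enter_exit().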

 	    to_svm(vcpu)->vmcb->control.exit_info_1)
 		return handle_fastpath_set_msr_irqoff(vcpu);
 
 	return EXIT_FASTPATH_NONE;
 }
 
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
 {
+	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	guest_state_enter_irqoff();
 
 	amd_clear_divider();
 
 	if (sev_es_guest(vcpu->kvm))
-		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
+				      sev_es_host_save_area(sd));
 	else
 		__svm_vcpu_run(svm, spec_ctrl_intercepted);
 
 	guest_state_exit_irqoff();
 }
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 					  bool force_immediate_exit)

--- 1271 unchanged lines hidden ---
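
The changed call site above passes a third argument to __svm_sev_es_vcpu_run(), so its declaration (which lives in svm.h, outside the lines shown here) must gain a matching parameter. A sketch of the prototypes as they can be inferred from the two call sites in svm_vcpu_enter_exit(); the parameter names are assumptions:

/*
 * Sketch inferred from the call sites above; the real declarations are in
 * arch/x86/kvm/svm/svm.h, which this diff does not show. Parameter names
 * are illustrative.
 */
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);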