Lines matching refs: svm

Cross-reference listing for the "svm" identifier in KVM's AMD SEV support (arch/x86/kvm/svm/sev.c). Each entry gives the source line number, the matching code, and the enclosing function; a trailing "argument" or "local" notes how "svm" is bound in that function.

140 static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)  in sev_vcpu_has_debug_swap()  argument
142 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_vcpu_has_debug_swap()
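The helper's body is truncated in the listing; a minimal sketch of what lines 140-142 suggest, assuming the per-VM vmsa_features word and the SVM_SEV_FEAT_DEBUG_SWAP flag from the SEV-ES DebugSwap support:

    static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
    {
            struct kvm_vcpu *vcpu = &svm->vcpu;
            struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;

            /* DebugSwap: hardware saves/restores the debug registers for
             * SEV-ES guests, so KVM need not intercept DR accesses. */
            return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
    }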
807 static int sev_es_sync_vmsa(struct vcpu_svm *svm) in sev_es_sync_vmsa() argument
809 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_vmsa()
811 struct sev_es_save_area *save = svm->sev_es.vmsa; in sev_es_sync_vmsa()
818 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) in sev_es_sync_vmsa()
827 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); in sev_es_sync_vmsa()
830 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; in sev_es_sync_vmsa()
831 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; in sev_es_sync_vmsa()
832 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in sev_es_sync_vmsa()
833 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; in sev_es_sync_vmsa()
834 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; in sev_es_sync_vmsa()
835 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; in sev_es_sync_vmsa()
836 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; in sev_es_sync_vmsa()
837 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; in sev_es_sync_vmsa()
839 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; in sev_es_sync_vmsa()
840 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; in sev_es_sync_vmsa()
841 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; in sev_es_sync_vmsa()
842 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; in sev_es_sync_vmsa()
843 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; in sev_es_sync_vmsa()
844 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; in sev_es_sync_vmsa()
845 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; in sev_es_sync_vmsa()
846 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; in sev_es_sync_vmsa()
848 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; in sev_es_sync_vmsa()
851 save->xcr0 = svm->vcpu.arch.xcr0; in sev_es_sync_vmsa()
852 save->pkru = svm->vcpu.arch.pkru; in sev_es_sync_vmsa()
853 save->xss = svm->vcpu.arch.ia32_xss; in sev_es_sync_vmsa()
854 save->dr6 = svm->vcpu.arch.dr6; in sev_es_sync_vmsa()
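Pieced together from the references above, sev_es_sync_vmsa() copies the still-unencrypted register state into the VMSA page just before firmware encrypts it. A condensed sketch (feature checks and trailing fields elided):

    static int sev_es_sync_vmsa(struct vcpu_svm *svm)
    {
            struct sev_es_save_area *save = svm->sev_es.vmsa;

            /* Debugging relies on intercepting DR accesses, which is
             * impossible once the state is encrypted, so refuse here. */
            if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
                    return -EINVAL;

            /* Seed the VMSA from the unencrypted VMCB save area, then add
             * the GPRs, which SEV-ES keeps in the VMSA, not the VMCB. */
            memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
            save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
            /* ... rbx through r15 and rip, as in lines 831-848 ... */
            save->xcr0 = svm->vcpu.arch.xcr0;
            save->pkru = svm->vcpu.arch.pkru;
            save->xss  = svm->vcpu.arch.ia32_xss;
            save->dr6  = svm->vcpu.arch.dr6;

            return 0;
    }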
906 struct vcpu_svm *svm = to_svm(vcpu); in __sev_launch_update_vmsa() local
915 ret = sev_es_sync_vmsa(svm); in __sev_launch_update_vmsa()
924 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); in __sev_launch_update_vmsa()
928 vmsa.address = __sme_pa(svm->sev_es.vmsa); in __sev_launch_update_vmsa()
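A sketch of the launch-update flow suggested by lines 906-928, assuming the usual sev_data_launch_update_vmsa layout and sev_issue_cmd() plumbing (error handling elided). The SNP variant just below (lines 2456-2478) is analogous, but issues SEV_CMD_SNP_LAUNCH_UPDATE and then marks vcpu->arch.guest_state_protected.

    ret = sev_es_sync_vmsa(svm);            /* fill the VMSA, see above */
    if (ret)
            return ret;

    /* LAUNCH_UPDATE_VMSA encrypts the page in place with the guest's
     * key, so write back and invalidate cached lines first. */
    clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

    vmsa.reserved = 0;
    vmsa.handle   = to_kvm_svm(kvm)->sev_info.handle;       /* assumed */
    vmsa.address  = __sme_pa(svm->sev_es.vmsa);
    vmsa.len      = PAGE_SIZE;
    ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);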
2456 struct vcpu_svm *svm = to_svm(vcpu); in snp_launch_update_vmsa() local
2457 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT; in snp_launch_update_vmsa()
2459 ret = sev_es_sync_vmsa(svm); in snp_launch_update_vmsa()
2469 data.address = __sme_pa(svm->sev_es.vmsa); in snp_launch_update_vmsa()
2478 svm->vcpu.arch.guest_state_protected = true; in snp_launch_update_vmsa()
3170 struct vcpu_svm *svm; in sev_free_vcpu() local
3175 svm = to_svm(vcpu); in sev_free_vcpu()
3183 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT; in sev_free_vcpu()
3190 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa); in sev_free_vcpu()
3192 __free_page(virt_to_page(svm->sev_es.vmsa)); in sev_free_vcpu()
3195 if (svm->sev_es.ghcb_sa_free) in sev_free_vcpu()
3196 kvfree(svm->sev_es.ghcb_sa); in sev_free_vcpu()
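Teardown order matters here: the VMSA page was encrypted with the guest's key, so it is flushed before being freed, and for SNP guests the page must first be reclaimed from firmware/RMP ownership. A sketch of the shape implied by lines 3170-3196; the reclaim helper's name and signature are assumptions:

    if (sev_snp_guest(vcpu->kvm)) {
            u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;

            /* Assumed helper: transition the VMSA pfn back to a
             * hypervisor-owned state. On failure, leak the page rather
             * than hand the allocator a firmware-owned one. */
            if (snp_page_reclaim(vcpu->kvm, pfn))
                    return;
    }

    /* Flush cache lines holding guest-key-encrypted VMSA contents. */
    sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
    __free_page(virt_to_page(svm->sev_es.vmsa));

    if (svm->sev_es.ghcb_sa_free)
            kvfree(svm->sev_es.ghcb_sa);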
3199 static void dump_ghcb(struct vcpu_svm *svm) in dump_ghcb() argument
3201 struct ghcb *ghcb = svm->sev_es.ghcb; in dump_ghcb()
3212 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); in dump_ghcb()
3224 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm) in sev_es_sync_to_ghcb() argument
3226 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_to_ghcb()
3227 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_to_ghcb()
3243 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) in sev_es_sync_from_ghcb() argument
3245 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_sync_from_ghcb()
3246 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_from_ghcb()
3247 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_from_ghcb()
3264 BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3265 memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3267 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3268 vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3269 vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3270 vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3271 vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3273 svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3275 if (kvm_ghcb_xcr0_is_valid(svm)) { in sev_es_sync_from_ghcb()
3286 svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
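The kvm_ghcb_*_is_valid() and kvm_ghcb_get_*_if_valid() helpers consult the valid bitmap snapshotted at line 3265, so KVM only consumes fields the guest explicitly marked valid. A sketch of the accessor pair for one field; the real ones are macro-generated in svm.h, so the exact spelling here is an assumption:

    static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
    {
            return test_bit(GHCB_BITMAP_IDX(rax),
                            (unsigned long *)&svm->sev_es.valid_bitmap);
    }

    static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm,
                                                         const struct ghcb *ghcb)
    {
            /* Unvalidated fields read as 0, never as stale guest data. */
            return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
    }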
3297 static int sev_es_validate_vmgexit(struct vcpu_svm *svm) in sev_es_validate_vmgexit() argument
3299 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_validate_vmgexit()
3300 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_validate_vmgexit()
3311 if (svm->sev_es.ghcb->ghcb_usage) { in sev_es_validate_vmgexit()
3318 if (!kvm_ghcb_sw_exit_code_is_valid(svm) || in sev_es_validate_vmgexit()
3319 !kvm_ghcb_sw_exit_info_1_is_valid(svm) || in sev_es_validate_vmgexit()
3320 !kvm_ghcb_sw_exit_info_2_is_valid(svm)) in sev_es_validate_vmgexit()
3327 if (!kvm_ghcb_rax_is_valid(svm)) in sev_es_validate_vmgexit()
3333 if (!kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
3337 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
3338 !kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
3341 if (!kvm_ghcb_xcr0_is_valid(svm)) in sev_es_validate_vmgexit()
3348 if (!kvm_ghcb_sw_scratch_is_valid(svm)) in sev_es_validate_vmgexit()
3352 if (!kvm_ghcb_rax_is_valid(svm)) in sev_es_validate_vmgexit()
3357 if (!kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
3360 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
3361 !kvm_ghcb_rdx_is_valid(svm)) in sev_es_validate_vmgexit()
3366 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
3367 !kvm_ghcb_cpl_is_valid(svm)) in sev_es_validate_vmgexit()
3375 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
3376 !kvm_ghcb_rcx_is_valid(svm) || in sev_es_validate_vmgexit()
3377 !kvm_ghcb_rdx_is_valid(svm)) in sev_es_validate_vmgexit()
3381 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
3382 !kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
3387 if (!kvm_ghcb_sw_scratch_is_valid(svm)) in sev_es_validate_vmgexit()
3394 if (!kvm_ghcb_rax_is_valid(svm)) in sev_es_validate_vmgexit()
3405 if (!sev_snp_guest(vcpu->kvm) || !kvm_ghcb_sw_scratch_is_valid(svm)) in sev_es_validate_vmgexit()
3426 svm->sev_es.ghcb->ghcb_usage); in sev_es_validate_vmgexit()
3433 dump_ghcb(svm); in sev_es_validate_vmgexit()
3436 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in sev_es_validate_vmgexit()
3437 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason); in sev_es_validate_vmgexit()
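Validation is a per-exit-code whitelist: each NAE event requires a specific set of GHCB fields, and anything else is bounced back to the guest. A skeleton suggested by lines 3297-3437 (exit_code is the sw_exit_code already synced from the GHCB):

    u64 reason = GHCB_ERR_NOT_SUPPORTED;

    if (svm->sev_es.ghcb->ghcb_usage) {     /* only usage 0 is supported */
            reason = GHCB_ERR_INVALID_USAGE;
            goto vmgexit_err;
    }

    reason = GHCB_ERR_MISSING_INPUT;
    if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
        !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
        !kvm_ghcb_sw_exit_info_2_is_valid(svm))
            goto vmgexit_err;

    switch (exit_code) {
    case SVM_EXIT_CPUID:                    /* e.g. lines 3337-3341 */
            if (!kvm_ghcb_rax_is_valid(svm) ||
                !kvm_ghcb_rcx_is_valid(svm))
                    goto vmgexit_err;
            break;
    /* ... one case per supported NAE event ... */
    }
    return 0;

    vmgexit_err:
    dump_ghcb(svm);                                 /* see line 3433 */
    ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);   /* "error" marker */
    ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
    return 1;                               /* resume guest with the error */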
3443 void sev_es_unmap_ghcb(struct vcpu_svm *svm) in sev_es_unmap_ghcb() argument
3446 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE; in sev_es_unmap_ghcb()
3448 if (!svm->sev_es.ghcb) in sev_es_unmap_ghcb()
3451 if (svm->sev_es.ghcb_sa_free) { in sev_es_unmap_ghcb()
3457 if (svm->sev_es.ghcb_sa_sync) { in sev_es_unmap_ghcb()
3458 kvm_write_guest(svm->vcpu.kvm, in sev_es_unmap_ghcb()
3459 svm->sev_es.sw_scratch, in sev_es_unmap_ghcb()
3460 svm->sev_es.ghcb_sa, in sev_es_unmap_ghcb()
3461 svm->sev_es.ghcb_sa_len); in sev_es_unmap_ghcb()
3462 svm->sev_es.ghcb_sa_sync = false; in sev_es_unmap_ghcb()
3465 kvfree(svm->sev_es.ghcb_sa); in sev_es_unmap_ghcb()
3466 svm->sev_es.ghcb_sa = NULL; in sev_es_unmap_ghcb()
3467 svm->sev_es.ghcb_sa_free = false; in sev_es_unmap_ghcb()
3470 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb); in sev_es_unmap_ghcb()
3472 sev_es_sync_to_ghcb(svm); in sev_es_unmap_ghcb()
3474 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true); in sev_es_unmap_ghcb()
3475 svm->sev_es.ghcb = NULL; in sev_es_unmap_ghcb()
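Reconstructed almost verbatim from lines 3443-3475: after the early-out at line 3448, the unmap path flushes any sync-pending scratch data back to guest memory, frees a host-side scratch buffer, pushes response state into the GHCB, and finally unmaps it (marking the page dirty):

    if (svm->sev_es.ghcb_sa_free) {
            if (svm->sev_es.ghcb_sa_sync) {
                    /* Write the host copy of the scratch area back to
                     * the guest address named by sw_scratch. */
                    kvm_write_guest(svm->vcpu.kvm, svm->sev_es.sw_scratch,
                                    svm->sev_es.ghcb_sa, svm->sev_es.ghcb_sa_len);
                    svm->sev_es.ghcb_sa_sync = false;
            }
            kvfree(svm->sev_es.ghcb_sa);
            svm->sev_es.ghcb_sa = NULL;
            svm->sev_es.ghcb_sa_free = false;
    }

    sev_es_sync_to_ghcb(svm);               /* publish RAX/RBX/RCX/RDX etc. */
    kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
    svm->sev_es.ghcb = NULL;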
3478 void pre_sev_run(struct vcpu_svm *svm, int cpu) in pre_sev_run() argument
3481 unsigned int asid = sev_get_asid(svm->vcpu.kvm); in pre_sev_run()
3484 svm->asid = asid; in pre_sev_run()
3492 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
3493 svm->vcpu.arch.last_vmentry_cpu == cpu) in pre_sev_run()
3496 sd->sev_vmcbs[asid] = svm->vmcb; in pre_sev_run()
3497 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in pre_sev_run()
3498 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in pre_sev_run()
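The TLB rule here is the interesting part: an SEV guest owns its ASID for its whole lifetime, so KVM cannot rely on ASID recycling to flush stale translations. Lines 3492-3498 nearly spell out the whole function:

    /* Flush the ASID unless this exact VMCB was the last one run with
     * it on this CPU, and this vCPU last entered on this CPU. */
    if (sd->sev_vmcbs[asid] == svm->vmcb &&
        svm->vcpu.arch.last_vmentry_cpu == cpu)
            return;         /* same VMCB, same CPU: TLB entries still valid */

    sd->sev_vmcbs[asid] = svm->vmcb;
    svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
    vmcb_mark_dirty(svm->vmcb, VMCB_ASID);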
3502 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) in setup_vmgexit_scratch() argument
3504 struct vmcb_control_area *control = &svm->vmcb->control; in setup_vmgexit_scratch()
3509 scratch_gpa_beg = svm->sev_es.sw_scratch; in setup_vmgexit_scratch()
3540 scratch_va = (void *)svm->sev_es.ghcb; in setup_vmgexit_scratch()
3556 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { in setup_vmgexit_scratch()
3570 svm->sev_es.ghcb_sa_sync = sync; in setup_vmgexit_scratch()
3571 svm->sev_es.ghcb_sa_free = true; in setup_vmgexit_scratch()
3574 svm->sev_es.ghcb_sa = scratch_va; in setup_vmgexit_scratch()
3575 svm->sev_es.ghcb_sa_len = len; in setup_vmgexit_scratch()
3580 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in setup_vmgexit_scratch()
3581 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); in setup_vmgexit_scratch()
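The scratch area named by sw_scratch may live either inside the mapped GHCB page, in which case it is used directly, or elsewhere in guest memory, in which case it is copied into a host buffer that is freed (and optionally synced back) at unmap time. A skeleton under those assumptions, with the bounds checks elided and the condition name invented for illustration:

    scratch_gpa_beg = svm->sev_es.sw_scratch;

    if (scratch_in_ghcb_page) {             /* assumed condition name */
            scratch_va = (void *)svm->sev_es.ghcb + offset_in_ghcb;
    } else {
            scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
            if (!scratch_va)
                    return -ENOMEM;
            if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
                    kvfree(scratch_va);     /* can't copy from the guest */
                    return -EFAULT;
            }
            svm->sev_es.ghcb_sa_sync = sync;  /* write back on unmap? */
            svm->sev_es.ghcb_sa_free = true;  /* kvfree on unmap */
    }

    svm->sev_es.ghcb_sa = scratch_va;
    svm->sev_es.ghcb_sa_len = len;
    return 0;

    e_scratch:                              /* malformed scratch layout */
    ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
    ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
    return 1;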
3586 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, in set_ghcb_msr_bits() argument
3589 svm->vmcb->control.ghcb_gpa &= ~(mask << pos); in set_ghcb_msr_bits()
3590 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; in set_ghcb_msr_bits()
3593 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos) in get_ghcb_msr_bits() argument
3595 return (svm->vmcb->control.ghcb_gpa >> pos) & mask; in get_ghcb_msr_bits()
3598 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value) in set_ghcb_msr() argument
3600 svm->vmcb->control.ghcb_gpa = value; in set_ghcb_msr()
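These three helpers are the whole GHCB MSR-protocol toolkit: responses are built by masking fields into vmcb->control.ghcb_gpa. Usage as in the CPUID leg of the handler further down (lines 4179-4183):

    /* Pack the result value, then stamp the response code. */
    set_ghcb_msr_bits(svm, cpuid_value,
                      GHCB_MSR_CPUID_VALUE_MASK, GHCB_MSR_CPUID_VALUE_POS);
    set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
                      GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);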
3622 struct vcpu_svm *svm = to_svm(vcpu); in snp_complete_psc_msr() local
3625 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR); in snp_complete_psc_msr()
3627 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP); in snp_complete_psc_msr()
3632 static int snp_begin_psc_msr(struct vcpu_svm *svm, u64 ghcb_msr) in snp_begin_psc_msr() argument
3636 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_begin_psc_msr()
3639 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR); in snp_begin_psc_msr()
3644 set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR); in snp_begin_psc_msr()
3667 static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc);
3669 static void snp_complete_psc(struct vcpu_svm *svm, u64 psc_ret) in snp_complete_psc() argument
3671 svm->sev_es.psc_inflight = 0; in snp_complete_psc()
3672 svm->sev_es.psc_idx = 0; in snp_complete_psc()
3673 svm->sev_es.psc_2m = false; in snp_complete_psc()
3674 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, psc_ret); in snp_complete_psc()
3677 static void __snp_complete_one_psc(struct vcpu_svm *svm) in __snp_complete_one_psc() argument
3679 struct psc_buffer *psc = svm->sev_es.ghcb_sa; in __snp_complete_one_psc()
3689 for (idx = svm->sev_es.psc_idx; svm->sev_es.psc_inflight; in __snp_complete_one_psc()
3690 svm->sev_es.psc_inflight--, idx++) { in __snp_complete_one_psc()
3701 struct vcpu_svm *svm = to_svm(vcpu); in snp_complete_one_psc() local
3702 struct psc_buffer *psc = svm->sev_es.ghcb_sa; in snp_complete_one_psc()
3705 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC); in snp_complete_one_psc()
3709 __snp_complete_one_psc(svm); in snp_complete_one_psc()
3712 return snp_begin_psc(svm, psc); in snp_complete_one_psc()
3715 static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc) in snp_begin_psc() argument
3718 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_begin_psc()
3727 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC); in snp_begin_psc()
3733 if (WARN_ON_ONCE(svm->sev_es.psc_inflight)) { in snp_begin_psc()
3734 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC); in snp_begin_psc()
3747 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_INVALID_HDR); in snp_begin_psc()
3760 snp_complete_psc(svm, VMGEXIT_PSC_ERROR_INVALID_ENTRY); in snp_begin_psc()
3783 snp_complete_psc(svm, 0); in snp_begin_psc()
3787 svm->sev_es.psc_2m = huge; in snp_begin_psc()
3788 svm->sev_es.psc_idx = idx; in snp_begin_psc()
3789 svm->sev_es.psc_inflight = 1; in snp_begin_psc()
3804 svm->sev_es.psc_inflight++; in snp_begin_psc()
3832 __snp_complete_one_psc(svm); in snp_begin_psc()
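Three sev_es fields carry Page State Change (PSC) progress across the exit to userspace: psc_idx marks the first in-flight entry, psc_inflight counts the contiguous entries batched into one request, and psc_2m records the batch's page size. Completion then walks the guest's PSC buffer, as in __snp_complete_one_psc(); the entry field names below follow the psc_entry layout in asm/sev-common.h:

    for (idx = svm->sev_es.psc_idx; svm->sev_es.psc_inflight;
         svm->sev_es.psc_inflight--, idx++) {
            struct psc_entry *entry = &psc->entries[idx];

            /* Mark the entry fully processed: 512 4K pages for a 2M
             * entry, a single page otherwise. */
            entry->cur_page = entry->pagesize ? 512 : 1;
    }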
3841 struct vcpu_svm *svm = to_svm(vcpu); in __sev_snp_update_protected_guest_state() local
3843 WARN_ON(!mutex_is_locked(&svm->sev_es.snp_vmsa_mutex)); in __sev_snp_update_protected_guest_state()
3850 svm->vmcb->control.vmsa_pa = INVALID_PAGE; in __sev_snp_update_protected_guest_state()
3852 if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) { in __sev_snp_update_protected_guest_state()
3853 gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa); in __sev_snp_update_protected_guest_state()
3878 svm->sev_es.snp_has_guest_vmsa = true; in __sev_snp_update_protected_guest_state()
3881 svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn); in __sev_snp_update_protected_guest_state()
3887 svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; in __sev_snp_update_protected_guest_state()
3901 vmcb_mark_all_dirty(svm->vmcb); in __sev_snp_update_protected_guest_state()
3911 struct vcpu_svm *svm = to_svm(vcpu); in sev_snp_init_protected_guest_state() local
3917 mutex_lock(&svm->sev_es.snp_vmsa_mutex); in sev_snp_init_protected_guest_state()
3919 if (!svm->sev_es.snp_ap_waiting_for_reset) in sev_snp_init_protected_guest_state()
3922 svm->sev_es.snp_ap_waiting_for_reset = false; in sev_snp_init_protected_guest_state()
3929 mutex_unlock(&svm->sev_es.snp_vmsa_mutex); in sev_snp_init_protected_guest_state()
3932 static int sev_snp_ap_creation(struct vcpu_svm *svm) in sev_snp_ap_creation() argument
3934 struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info; in sev_snp_ap_creation()
3935 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_snp_ap_creation()
3943 request = lower_32_bits(svm->vmcb->control.exit_info_1); in sev_snp_ap_creation()
3944 apic_id = upper_32_bits(svm->vmcb->control.exit_info_1); in sev_snp_ap_creation()
3990 if (!page_address_valid(vcpu, svm->vmcb->control.exit_info_2)) { in sev_snp_ap_creation()
3992 svm->vmcb->control.exit_info_2); in sev_snp_ap_creation()
4004 if (IS_ALIGNED(svm->vmcb->control.exit_info_2, PMD_SIZE)) { in sev_snp_ap_creation()
4007 svm->vmcb->control.exit_info_2); in sev_snp_ap_creation()
4012 target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2; in sev_snp_ap_creation()
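For the AP creation event the operands arrive in the VMCB exit-info fields rather than in GHCB registers. The decode implied by lines 3943-4012 (the interpretation of the 2M-alignment check is inferred from the surrounding validation, not stated in the listing):

    request = lower_32_bits(svm->vmcb->control.exit_info_1);  /* create/destroy */
    apic_id = upper_32_bits(svm->vmcb->control.exit_info_1);  /* target AP */

    /* exit_info_2 is the GPA of the new VMSA: it must be a valid,
     * page-aligned GPA (line 3990), and a 2M-aligned VMSA is flagged
     * as unsafe (line 4004). The target vCPU latches the GPA under
     * snp_vmsa_mutex and switches to it on its next reset. */
    target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2;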
4034 static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa) in snp_handle_guest_req() argument
4037 struct kvm *kvm = svm->vcpu.kvm; in snp_handle_guest_req()
4070 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, SNP_GUEST_ERR(0, fw_err)); in snp_handle_guest_req()
4079 static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa) in snp_handle_ext_guest_req() argument
4081 struct kvm *kvm = svm->vcpu.kvm; in snp_handle_ext_guest_req()
4102 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_handle_ext_guest_req()
4106 if (!kvm_ghcb_rax_is_valid(svm) || !kvm_ghcb_rbx_is_valid(svm)) in snp_handle_ext_guest_req()
4123 return snp_handle_guest_req(svm, req_gpa, resp_gpa); in snp_handle_ext_guest_req()
4126 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in snp_handle_ext_guest_req()
4127 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT); in snp_handle_ext_guest_req()
4131 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) in sev_handle_vmgexit_msr_protocol() argument
4133 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit_msr_protocol()
4134 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_handle_vmgexit_msr_protocol()
4141 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
4146 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version, in sev_handle_vmgexit_msr_protocol()
4153 cpuid_fn = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
4167 cpuid_reg = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
4179 set_ghcb_msr_bits(svm, cpuid_value, in sev_handle_vmgexit_msr_protocol()
4183 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP, in sev_handle_vmgexit_msr_protocol()
4189 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO; in sev_handle_vmgexit_msr_protocol()
4190 ret = kvm_emulate_ap_reset_hold(&svm->vcpu); in sev_handle_vmgexit_msr_protocol()
4196 set_ghcb_msr_bits(svm, 0, in sev_handle_vmgexit_msr_protocol()
4200 set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP, in sev_handle_vmgexit_msr_protocol()
4205 set_ghcb_msr_bits(svm, GHCB_HV_FT_SUPPORTED, in sev_handle_vmgexit_msr_protocol()
4207 set_ghcb_msr_bits(svm, GHCB_MSR_HV_FT_RESP, in sev_handle_vmgexit_msr_protocol()
4214 set_ghcb_msr_bits(svm, GHCB_MSR_PREF_GPA_NONE, GHCB_MSR_GPA_VALUE_MASK, in sev_handle_vmgexit_msr_protocol()
4216 set_ghcb_msr_bits(svm, GHCB_MSR_PREF_GPA_RESP, GHCB_MSR_INFO_MASK, in sev_handle_vmgexit_msr_protocol()
4225 gfn = get_ghcb_msr_bits(svm, GHCB_MSR_GPA_VALUE_MASK, in sev_handle_vmgexit_msr_protocol()
4228 svm->sev_es.ghcb_registered_gpa = gfn_to_gpa(gfn); in sev_handle_vmgexit_msr_protocol()
4230 set_ghcb_msr_bits(svm, gfn, GHCB_MSR_GPA_VALUE_MASK, in sev_handle_vmgexit_msr_protocol()
4232 set_ghcb_msr_bits(svm, GHCB_MSR_REG_GPA_RESP, GHCB_MSR_INFO_MASK, in sev_handle_vmgexit_msr_protocol()
4240 ret = snp_begin_psc_msr(svm, control->ghcb_gpa); in sev_handle_vmgexit_msr_protocol()
4245 reason_set = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
4248 reason_code = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
4261 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
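The MSR protocol multiplexes everything through the single GHCB MSR: the low 12 bits select the request, the remaining bits carry operands, and each leg answers by rewriting the same MSR. A dispatch skeleton suggested by lines 4131-4261:

    switch (control->ghcb_gpa & GHCB_MSR_INFO_MASK) {
    case GHCB_MSR_SEV_INFO_REQ:
            /* Report supported GHCB protocol versions (line 4146). */
            break;
    case GHCB_MSR_CPUID_REQ:
            /* Extract fn/reg, run CPUID, pack the reply (4153-4183). */
            break;
    case GHCB_MSR_AP_RESET_HOLD_REQ:
            /* Park the vCPU until SIPI (4189-4190). */
            break;
    case GHCB_MSR_HV_FT_REQ:
            /* Advertise GHCB_HV_FT_SUPPORTED (4205-4207). */
            break;
    case GHCB_MSR_PREF_GPA_REQ:
            /* No preferred GPA: reply GHCB_MSR_PREF_GPA_NONE (4214-4216). */
            break;
    case GHCB_MSR_REG_GPA_REQ:
            /* Record the registered GHCB GPA (4225-4232). */
            break;
    case GHCB_MSR_PSC_REQ:
            /* SNP page-state change via the MSR protocol (4240). */
            break;
    case GHCB_MSR_TERM_REQ:
            /* Guest termination, with reason set and code (4245-4248). */
            break;
    }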
4277 struct vcpu_svm *svm = to_svm(vcpu); in sev_handle_vmgexit() local
4278 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit()
4285 return sev_handle_vmgexit_msr_protocol(svm); in sev_handle_vmgexit()
4294 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { in sev_handle_vmgexit()
4303 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
4305 trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb); in sev_handle_vmgexit()
4307 sev_es_sync_from_ghcb(svm); in sev_handle_vmgexit()
4310 if (sev_snp_guest(svm->vcpu.kvm) && !ghcb_gpa_is_registered(svm, ghcb_gpa)) { in sev_handle_vmgexit()
4311 vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB GPA [%#llx] is not registered.\n", ghcb_gpa); in sev_handle_vmgexit()
4315 ret = sev_es_validate_vmgexit(svm); in sev_handle_vmgexit()
4319 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0); in sev_handle_vmgexit()
4320 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0); in sev_handle_vmgexit()
4325 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
4332 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4335 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2); in sev_handle_vmgexit()
4342 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4346 svm->nmi_masked = false; in sev_handle_vmgexit()
4351 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT; in sev_handle_vmgexit()
4364 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table); in sev_handle_vmgexit()
4369 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in sev_handle_vmgexit()
4370 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT); in sev_handle_vmgexit()
4377 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_HV_FT_SUPPORTED); in sev_handle_vmgexit()
4390 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
4394 ret = snp_begin_psc(svm, svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4397 ret = sev_snp_ap_creation(svm); in sev_handle_vmgexit()
4399 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in sev_handle_vmgexit()
4400 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT); in sev_handle_vmgexit()
4406 ret = snp_handle_guest_req(svm, control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4409 ret = snp_handle_ext_guest_req(svm, control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
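Condensed control flow of sev_handle_vmgexit(), assembled from lines 4277-4409:

    if (ghcb_gpa & GHCB_MSR_INFO_MASK)      /* low bits set: MSR protocol */
            return sev_handle_vmgexit_msr_protocol(svm);

    if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map))
            return -EINVAL;                 /* GHCB page is unmappable */

    svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
    sev_es_sync_from_ghcb(svm);             /* pull guest-provided regs */

    ret = sev_es_validate_vmgexit(svm);     /* per-event required fields */
    if (ret)
            return ret;

    /* Assume success; individual handlers overwrite these on error. */
    ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
    ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);

    /* switch (exit_code): MMIO read/write, NMI complete, AP reset hold,
     * AP jump table, HV features, PSC, AP creation, guest requests ... */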
4424 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) in sev_es_string_io() argument
4430 if (svm->vmcb->control.exit_info_2 > INT_MAX) in sev_es_string_io()
4433 count = svm->vmcb->control.exit_info_2; in sev_es_string_io()
4437 r = setup_vmgexit_scratch(svm, in, bytes); in sev_es_string_io()
4441 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, in sev_es_string_io()
4445 static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm) in sev_es_vcpu_after_set_cpuid() argument
4447 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_vcpu_after_set_cpuid()
4453 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux); in sev_es_vcpu_after_set_cpuid()
4471 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1); in sev_es_vcpu_after_set_cpuid()
4473 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0); in sev_es_vcpu_after_set_cpuid()
4476 void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm) in sev_vcpu_after_set_cpuid() argument
4478 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_vcpu_after_set_cpuid()
4486 if (sev_es_guest(svm->vcpu.kvm)) in sev_vcpu_after_set_cpuid()
4487 sev_es_vcpu_after_set_cpuid(svm); in sev_vcpu_after_set_cpuid()
4490 static void sev_es_init_vmcb(struct vcpu_svm *svm) in sev_es_init_vmcb() argument
4492 struct vmcb *vmcb = svm->vmcb01.ptr; in sev_es_init_vmcb()
4493 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_init_vmcb()
4495 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; in sev_es_init_vmcb()
4504 if (svm->sev_es.vmsa && !svm->sev_es.snp_has_guest_vmsa) in sev_es_init_vmcb()
4505 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); in sev_es_init_vmcb()
4508 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in sev_es_init_vmcb()
4509 svm_clr_intercept(svm, INTERCEPT_CR4_READ); in sev_es_init_vmcb()
4510 svm_clr_intercept(svm, INTERCEPT_CR8_READ); in sev_es_init_vmcb()
4511 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in sev_es_init_vmcb()
4512 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE); in sev_es_init_vmcb()
4513 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in sev_es_init_vmcb()
4515 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0); in sev_es_init_vmcb()
4518 svm_set_intercept(svm, TRAP_EFER_WRITE); in sev_es_init_vmcb()
4519 svm_set_intercept(svm, TRAP_CR0_WRITE); in sev_es_init_vmcb()
4520 svm_set_intercept(svm, TRAP_CR4_WRITE); in sev_es_init_vmcb()
4521 svm_set_intercept(svm, TRAP_CR8_WRITE); in sev_es_init_vmcb()
4524 if (!sev_vcpu_has_debug_swap(svm)) { in sev_es_init_vmcb()
4527 recalc_intercepts(svm); in sev_es_init_vmcb()
4538 clr_exception_intercept(svm, DB_VECTOR); in sev_es_init_vmcb()
4542 svm_clr_intercept(svm, INTERCEPT_XSETBV); in sev_es_init_vmcb()
4545 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); in sev_es_init_vmcb()
4546 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); in sev_es_init_vmcb()
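The branch around line 4524, reconstructed under the same DebugSwap assumption as above: without DebugSwap, KVM cannot meaningfully intercept DR accesses for an encrypted guest, so it drops all DR intercepts; with DebugSwap, hardware swaps the debug registers and the #DB intercept is cleared instead:

    if (!sev_vcpu_has_debug_swap(svm)) {
            vmcb->control.intercepts[INTERCEPT_DR] = 0;
            recalc_intercepts(svm);
    } else {
            /* Hardware handles DR save/restore; no #DB intercept. */
            clr_exception_intercept(svm, DB_VECTOR);
    }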
4549 void sev_init_vmcb(struct vcpu_svm *svm) in sev_init_vmcb() argument
4551 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in sev_init_vmcb()
4552 clr_exception_intercept(svm, UD_VECTOR); in sev_init_vmcb()
4558 clr_exception_intercept(svm, GP_VECTOR); in sev_init_vmcb()
4560 if (sev_es_guest(svm->vcpu.kvm)) in sev_init_vmcb()
4561 sev_es_init_vmcb(svm); in sev_init_vmcb()
4564 void sev_es_vcpu_reset(struct vcpu_svm *svm) in sev_es_vcpu_reset() argument
4566 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_vcpu_reset()
4573 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version, in sev_es_vcpu_reset()
4577 mutex_init(&svm->sev_es.snp_vmsa_mutex); in sev_es_vcpu_reset()
4580 void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa) in sev_es_prepare_switch_to_guest() argument
4608 if (sev_vcpu_has_debug_swap(svm)) { in sev_es_prepare_switch_to_guest()
4622 struct vcpu_svm *svm = to_svm(vcpu); in sev_vcpu_deliver_sipi_vector() local
4625 if (!svm->sev_es.received_first_sipi) { in sev_vcpu_deliver_sipi_vector()
4626 svm->sev_es.received_first_sipi = true; in sev_vcpu_deliver_sipi_vector()
4631 switch (svm->sev_es.ap_reset_hold_type) { in sev_vcpu_deliver_sipi_vector()
4637 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); in sev_vcpu_deliver_sipi_vector()
4644 set_ghcb_msr_bits(svm, 1, in sev_vcpu_deliver_sipi_vector()
4648 set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP, in sev_vcpu_deliver_sipi_vector()
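Completion of an AP reset hold (lines 4631-4649): the SIPI wakeup is signalled back through whichever channel parked the vCPU, either the mapped GHCB or the GHCB MSR protocol. A sketch:

    switch (svm->sev_es.ap_reset_hold_type) {
    case AP_RESET_HOLD_NAE_EVENT:
            /* Full GHCB: nonzero sw_exit_info_2 means "SIPI received". */
            ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
            break;
    case AP_RESET_HOLD_MSR_PROTO:
            /* MSR protocol: result bit plus response code in the MSR. */
            set_ghcb_msr_bits(svm, 1,
                              GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
                              GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
            set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
                              GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);
            break;
    default:
            break;
    }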